/* Maximum time (in ms) a job may stay running before it times out. */
#define JOB_TIMEOUT 300

/* Job repeat intervals (in ms): never repeat, or derive from samplerate. */
#define INFINITE_INTERVAL INT_MAX
#define SAMPLERATE_INTERVAL -1
+
+static const struct agdmm_job *job_current(const struct dev_context *devc)
+{
+ return &devc->profile->jobs[devc->current_job];
+}
+
/* Mark the currently running job as finished. */
static void job_done(struct dev_context *devc)
{
	devc->job_running = FALSE;
}
+
/* Request that the current job be re-run once it completes. */
static void job_again(struct dev_context *devc)
{
	devc->job_again = TRUE;
}
+
/* Tell whether a job is currently in flight. */
static gboolean job_is_running(const struct dev_context *devc)
{
	return devc->job_running;
}
+
+static gboolean job_in_interval(const struct dev_context *devc)
+{
+ int64_t job_start = devc->jobs_start[devc->current_job];
+ int64_t now = g_get_monotonic_time() / 1000;
+ int interval = job_current(devc)->interval;
+ if (interval == SAMPLERATE_INTERVAL)
+ interval = 1000 / devc->cur_samplerate;
+ return (now - job_start) < interval || interval == INFINITE_INTERVAL;
+}
+
+static gboolean job_has_timeout(const struct dev_context *devc)
+{
+ int64_t job_start = devc->jobs_start[devc->current_job];
+ int64_t now = g_get_monotonic_time() / 1000;
+ return job_is_running(devc) && (now - job_start) > JOB_TIMEOUT;
+}
+
/*
 * Advance to the next job that is due to run and return it.
 *
 * Cycles through the profile's job table, wrapping back to entry 0 at
 * the terminating entry (marked by a NULL send handler), until a job
 * outside its repeat interval is found or the scan arrives back at the
 * job it started from (i.e. every job is still in-interval).
 */
static const struct agdmm_job *job_next(struct dev_context *devc)
{
	int current_job = devc->current_job;
	do {
		devc->current_job++;
		/* A NULL send handler marks the end of the job table. */
		if (!job_current(devc)->send)
			devc->current_job = 0;
	} while(job_in_interval(devc) && devc->current_job != current_job);
	return job_current(devc);
}
+
+static void job_run_again(const struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc = sdi->priv;
+ devc->job_again = FALSE;
+ devc->job_running = TRUE;
+ if (job_current(devc)->send(sdi) == SR_ERR_NA)
+ job_done(devc);
+}
+
+static void job_run(const struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc = sdi->priv;
+ int64_t now = g_get_monotonic_time() / 1000;
+ devc->jobs_start[devc->current_job] = now;
+ job_run_again(sdi);
+}
+