}
/*
- * Sigma doesn't support limiting the number of samples, so we have to
- * translate the number and the samplerate to an elapsed time.
+ * The driver supports user specified time or sample count limits. The
+ * device's hardware supports neither, and hardware compression prevents
+ * reliable detection of "fill levels" (currently reached sample counts)
+ * from register values during acquisition. That's why the driver needs
+ * to apply some heuristics:
*
- * In addition we need to ensure that the last data cluster has passed
- * the hardware pipeline, and became available to the PC side. With RLE
- * compression up to 327ms could pass before another cluster accumulates
- * at 200kHz samplerate when input pins don't change.
+ * - The (optional) sample count limit and the (normalized) samplerate
+ * get mapped to an estimated duration for these samples' acquisition.
+ * - The (optional) time limit gets checked as well. The lesser of the
+ * two limits will terminate the data acquisition phase. The exact
+ * sample count limit gets enforced in session feed submission paths.
+ * - Some slack needs to be given to account for hardware pipelines as
+ * well as late storage of last chunks after compression thresholds
+ * are tripped. The resulting data set will span at least the caller
+ * specified period of time, which shall be perfectly acceptable.
+ *
+ * With RLE compression active, up to 64K sample periods can pass before
+ * a cluster accumulates, which translates to 327ms at 200kHz. Add two
+ * times that period for good measure; one period alone is not enough
+ * to flush the hardware pipeline (observed in an earlier experiment).
*/
-SR_PRIV uint64_t sigma_limit_samples_to_msec(const struct dev_context *devc,
- uint64_t limit_samples)
+SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
{
- uint64_t limit_msec;
+ int ret;
+ GVariant *data;
+ uint64_t user_count, user_msecs;
uint64_t worst_cluster_time_ms;
+ uint64_t count_msecs, acquire_msecs;
- limit_msec = limit_samples * 1000 / devc->cur_samplerate;
- worst_cluster_time_ms = 65536 * 1000 / devc->cur_samplerate;
- /*
- * One cluster time is not enough to flush pipeline when sampling
- * grounded pins with 1 sample limit at 200kHz. Hence the 2* fix.
- */
- return limit_msec + 2 * worst_cluster_time_ms;
+ sr_sw_limits_init(&devc->acq_limits);
+
+ /* Get sample count limit, convert to msecs. */
+ ret = sr_sw_limits_config_get(&devc->cfg_limits,
+ SR_CONF_LIMIT_SAMPLES, &data);
+ if (ret != SR_OK)
+ return ret;
+ user_count = g_variant_get_uint64(data);
+ g_variant_unref(data);
+ count_msecs = 0;
+ if (user_count)
+ count_msecs = 1000 * user_count / devc->samplerate + 1;
+
+ /* Get time limit, which is in msecs. */
+ ret = sr_sw_limits_config_get(&devc->cfg_limits,
+ SR_CONF_LIMIT_MSEC, &data);
+ if (ret != SR_OK)
+ return ret;
+ user_msecs = g_variant_get_uint64(data);
+ g_variant_unref(data);
+
+ /* Get the lesser of them, with both being optional. */
+ acquire_msecs = ~0ull;
+ if (user_count && count_msecs < acquire_msecs)
+ acquire_msecs = count_msecs;
+ if (user_msecs && user_msecs < acquire_msecs)
+ acquire_msecs = user_msecs;
+ if (acquire_msecs == ~0ull)
+ return SR_OK;
+
+ /* Add some slack, and use that timeout for acquisition. */
+ worst_cluster_time_ms = 1000 * 65536 / devc->samplerate;
+ acquire_msecs += 2 * worst_cluster_time_ms;
+ data = g_variant_new_uint64(acquire_msecs);
+ ret = sr_sw_limits_config_set(&devc->acq_limits,
+ SR_CONF_LIMIT_MSEC, data);
+ g_variant_unref(data);
+ if (ret != SR_OK)
+ return ret;
+
+ sr_sw_limits_acquisition_start(&devc->acq_limits);
+ return SR_OK;
+}
+
+/*
+ * Check whether a caller specified samplerate matches the device's
+ * hardware constraints (can be used for acquisition). Optionally yield
+ * a value that approximates the original spec.
+ *
+ * Input specs must fall within the 200kHz to 200MHz range of supported
+ * rates, other values get rejected. Callers typically want to normalize
+ * a given value to the hardware capabilities. Values in the up-to-50MHz
+ * range get rounded up by default (the clock divider rounds down), which
+ * avoids a more expensive search for the closest match, and a higher
+ * samplerate is always desirable during measurement.
+ * remain unaffected. Because 100/200MHz rates also limit the number of
+ * available channels, they are not suggested by this routine, instead
+ * callers need to pick them consciously.
+ */
+SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
+{
+ uint64_t div, rate;
+
+ /* Accept exact matches for 100/200MHz. */
+ if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
+ if (have_rate)
+ *have_rate = want_rate;
+ return SR_OK;
+ }
+
+ /* Accept 200kHz to 50MHz range, and map to near value. */
+ if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
+ div = SR_MHZ(50) / want_rate;
+ rate = SR_MHZ(50) / div;
+ if (have_rate)
+ *have_rate = rate;
+ return SR_OK;
+ }
+
+ return SR_ERR_ARG;
}
-SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi, uint64_t samplerate)
+SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
{
struct dev_context *devc;
struct drv_context *drvc;
- size_t i;
+ uint64_t samplerate;
int ret;
int num_channels;
devc = sdi->priv;
drvc = sdi->driver->context;
- ret = SR_OK;
- /* Reject rates that are not in the list of supported rates. */
- for (i = 0; i < samplerates_count; i++) {
- if (samplerates[i] == samplerate)
- break;
- }
- if (i >= samplerates_count || samplerates[i] == 0)
- return SR_ERR_SAMPLERATE;
+ /* Accept any caller specified rate which the hardware supports. */
+ ret = sigma_normalize_samplerate(devc->samplerate, &samplerate);
+ if (ret != SR_OK)
+ return ret;
/*
* Depending on the samplerates of 200/100/50- MHz, specific
}
/*
- * Derive the sample period from the sample rate as well as the
- * number of samples that the device will communicate within
- * an "event" (memory organization internal to the device).
+ * The samplerate affects the number of available logic channels
+ * as well as a sample memory layout detail (the number of samples
+ * which the device will communicate within an "event").
*/
if (ret == SR_OK) {
devc->num_channels = num_channels;
- devc->cur_samplerate = samplerate;
devc->samples_per_event = 16 / devc->num_channels;
devc->state.state = SIGMA_IDLE;
}
+ return ret;
+}
+
+/*
+ * Arrange for a session feed submit buffer: a queue where a number of
+ * samples gets accumulated to reduce the number of send calls, and
+ * which also enforces an optional sample count limit for acquisition.
+ *
+ * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
+ * driver provides a fixed channel layout regardless of samplerate).
+ */
+
+#define CHUNK_SIZE (4 * 1024 * 1024)
+
+struct submit_buffer {
+ size_t unit_size;
+ size_t max_samples, curr_samples;
+ uint8_t *sample_data;
+ uint8_t *write_pointer;
+ struct sr_dev_inst *sdi;
+ struct sr_datafeed_packet packet;
+ struct sr_datafeed_logic logic;
+};
+
+static int alloc_submit_buffer(struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc;
+ struct submit_buffer *buffer;
+ size_t size;
+
+ devc = sdi->priv;
+
+ buffer = g_malloc0(sizeof(*buffer));
+ devc->buffer = buffer;
+
+ buffer->unit_size = sizeof(uint16_t);
+ size = CHUNK_SIZE;
+ size /= buffer->unit_size;
+ buffer->max_samples = size;
+ size *= buffer->unit_size;
+ buffer->sample_data = g_try_malloc0(size);
+ if (!buffer->sample_data)
+ return SR_ERR_MALLOC;
+ buffer->write_pointer = buffer->sample_data;
+ sr_sw_limits_init(&devc->feed_limits);
+
+ buffer->sdi = sdi;
+ memset(&buffer->logic, 0, sizeof(buffer->logic));
+ buffer->logic.unitsize = buffer->unit_size;
+ buffer->logic.data = buffer->sample_data;
+ memset(&buffer->packet, 0, sizeof(buffer->packet));
+ buffer->packet.type = SR_DF_LOGIC;
+ buffer->packet.payload = &buffer->logic;
+
+ return SR_OK;
+}
+
+static int setup_submit_limit(struct dev_context *devc)
+{
+ struct sr_sw_limits *limits;
+ int ret;
+ GVariant *data;
+ uint64_t total;
+
+ limits = &devc->feed_limits;
+
+ ret = sr_sw_limits_config_get(&devc->cfg_limits,
+ SR_CONF_LIMIT_SAMPLES, &data);
+ if (ret != SR_OK)
+ return ret;
+ total = g_variant_get_uint64(data);
+ g_variant_unref(data);
+
+ sr_sw_limits_init(limits);
+ if (total) {
+ data = g_variant_new_uint64(total);
+ ret = sr_sw_limits_config_set(limits,
+ SR_CONF_LIMIT_SAMPLES, data);
+ g_variant_unref(data);
+ if (ret != SR_OK)
+ return ret;
+ }
+
+ sr_sw_limits_acquisition_start(limits);
+
+ return SR_OK;
+}
+
+static void free_submit_buffer(struct dev_context *devc)
+{
+ struct submit_buffer *buffer;
+
+ if (!devc)
+ return;
+
+ buffer = devc->buffer;
+ if (!buffer)
+ return;
+ devc->buffer = NULL;
+
+ g_free(buffer->sample_data);
+ g_free(buffer);
+}
+
+static int flush_submit_buffer(struct dev_context *devc)
+{
+ struct submit_buffer *buffer;
+ int ret;
+
+ buffer = devc->buffer;
+
+ /* Is queued sample data available? */
+ if (!buffer->curr_samples)
+ return SR_OK;
+
+ /* Submit to the session feed. */
+ buffer->logic.length = buffer->curr_samples * buffer->unit_size;
+ ret = sr_session_send(buffer->sdi, &buffer->packet);
+ if (ret != SR_OK)
+ return ret;
+
+ /* Rewind queue position. */
+ buffer->curr_samples = 0;
+ buffer->write_pointer = buffer->sample_data;
+
+ return SR_OK;
+}
+
+static int addto_submit_buffer(struct dev_context *devc,
+ uint16_t sample, size_t count)
+{
+ struct submit_buffer *buffer;
+ struct sr_sw_limits *limits;
+ int ret;
+
+ buffer = devc->buffer;
+ limits = &devc->feed_limits;
+ if (sr_sw_limits_check(limits))
+ count = 0;
+
/*
- * Support for "limit_samples" is implemented by stopping
- * acquisition after a corresponding period of time.
- * Re-calculate that period of time, in case the limit is
- * set first and the samplerate gets (re-)configured later.
+ * Individually accumulate and check each sample, such that
+ * accumulation between flushes won't exceed local storage, and
+ * enforcement of user specified limits is exact.
*/
- if (ret == SR_OK && devc->limit_samples) {
- uint64_t msecs;
- msecs = sigma_limit_samples_to_msec(devc, devc->limit_samples);
- devc->limit_msec = msecs;
+ while (count--) {
+ WL16(buffer->write_pointer, sample);
+ buffer->write_pointer += buffer->unit_size;
+ buffer->curr_samples++;
+ if (buffer->curr_samples == buffer->max_samples) {
+ ret = flush_submit_buffer(devc);
+ if (ret != SR_OK)
+ return ret;
+ }
+ sr_sw_limits_update_samples_read(limits, 1);
+ if (sr_sw_limits_check(limits))
+ break;
}
- return ret;
+ return SR_OK;
}
/*
/* Ignore disabled channels with a trigger. */
continue;
channelbit = 1 << (match->channel->index);
- if (devc->cur_samplerate >= SR_MHZ(100)) {
+ if (devc->samplerate >= SR_MHZ(100)) {
/* Fast trigger support. */
if (trigger_set) {
sr_err("Only a single pin trigger is "
return i & 0x7;
}
+static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
+{
+ /* TODO
+ * Check whether the combination of this very sample and the
+ * previous state match the configured trigger condition. This
+ * improves the resolution of the trigger marker's position.
+ * The hardware provided position is coarse, and may point to
+ * a position before the actual match.
+ *
+ * See the previous get_trigger_offset() implementation. This
+ * code needs to get re-used here.
+ */
+ (void)devc;
+ (void)sample;
+ (void)get_trigger_offset;
+
+ return FALSE;
+}
+
+static int check_and_submit_sample(struct dev_context *devc,
+ uint16_t sample, size_t count, gboolean check_trigger)
+{
+ gboolean triggered;
+ int ret;
+
+ triggered = check_trigger && sample_matches_trigger(devc, sample);
+ if (triggered) {
+ ret = flush_submit_buffer(devc);
+ if (ret != SR_OK)
+ return ret;
+ ret = std_session_send_df_trigger(devc->buffer->sdi);
+ if (ret != SR_OK)
+ return ret;
+ }
+
+ ret = addto_submit_buffer(devc, sample, count);
+ if (ret != SR_OK)
+ return ret;
+
+ return SR_OK;
+}
+
/*
* Return the timestamp of "DRAM cluster".
*/
return outdata;
}
-static void store_sr_sample(uint8_t *samples, int idx, uint16_t data)
-{
- samples[2 * idx + 0] = (data >> 0) & 0xff;
- samples[2 * idx + 1] = (data >> 8) & 0xff;
-}
-
-/*
- * Local wrapper around sr_session_send() calls. Make sure to not send
- * more samples to the session's datafeed than what was requested by a
- * previously configured (optional) sample count.
- */
-static void sigma_session_send(struct sr_dev_inst *sdi,
- struct sr_datafeed_packet *packet)
-{
- struct dev_context *devc;
- struct sr_datafeed_logic *logic;
- uint64_t send_now;
-
- devc = sdi->priv;
- if (devc->limit_samples) {
- logic = (void *)packet->payload;
- send_now = logic->length / logic->unitsize;
- if (devc->sent_samples + send_now > devc->limit_samples) {
- send_now = devc->limit_samples - devc->sent_samples;
- logic->length = send_now * logic->unitsize;
- }
- if (!send_now)
- return;
- devc->sent_samples += send_now;
- }
-
- sr_session_send(sdi, packet);
-}
-
-/*
- * This size translates to: number of events per row (strictly speaking
- * 448, assuming "up to 512" does not harm here) times the sample data's
- * unit size (16 bits), times the maximum number of samples per event (4).
- */
-#define SAMPLES_BUFFER_SIZE (ROW_LENGTH_U16 * sizeof(uint16_t) * 4)
-
-static void sigma_decode_dram_cluster(struct sigma_dram_cluster *dram_cluster,
- unsigned int events_in_cluster,
- unsigned int triggered,
- struct sr_dev_inst *sdi)
+static void sigma_decode_dram_cluster(struct dev_context *devc,
+ struct sigma_dram_cluster *dram_cluster,
+ size_t events_in_cluster, gboolean triggered)
{
- struct dev_context *devc = sdi->priv;
- struct sigma_state *ss = &devc->state;
- struct sr_datafeed_packet packet;
- struct sr_datafeed_logic logic;
+ struct sigma_state *ss;
uint16_t tsdiff, ts, sample, item16;
- uint8_t samples[SAMPLES_BUFFER_SIZE];
- uint8_t *send_ptr;
- size_t send_count, trig_count;
unsigned int i;
- int j;
- ts = sigma_dram_cluster_ts(dram_cluster);
- tsdiff = ts - ss->lastts;
- ss->lastts = ts + EVENTS_PER_CLUSTER;
-
- packet.type = SR_DF_LOGIC;
- packet.payload = &logic;
- logic.unitsize = 2;
- logic.data = samples;
+ if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
+ triggered = FALSE;
/*
* If this cluster is not adjacent to the previously received
* cluster, then send the appropriate number of samples with the
* previous values to the sigrok session. This "decodes RLE".
*
- * TODO Improve (mostly: generalize) support for queueing data
- * before submission to the session bus. This implementation
- * happens to work for "up to 1024 samples" despite the "up to
- * 512 entities of 16 bits", due to the "up to 4 sample points
- * per event" factor. A better implementation would eliminate
- * these magic numbers.
+ * These samples cannot match the trigger since they just repeat
+ * the previously submitted data pattern. (This assumption holds
+ * for simple level and edge triggers. It would not for timed or
+ * counted conditions, which currently are not supported.)
*/
- for (ts = 0; ts < tsdiff; ts++) {
- i = ts % 1024;
- store_sr_sample(samples, i, ss->lastsample);
-
- /*
- * If we have 1024 samples ready or we're at the
- * end of submitting the padding samples, submit
- * the packet to Sigrok. Since constant data is
- * sent, duplication of data for rates above 50MHz
- * is simple.
- */
- if ((i == 1023) || (ts == tsdiff - 1)) {
- logic.length = (i + 1) * logic.unitsize;
- for (j = 0; j < devc->samples_per_event; j++)
- sigma_session_send(sdi, &packet);
- }
+ ss = &devc->state;
+ ts = sigma_dram_cluster_ts(dram_cluster);
+ tsdiff = ts - ss->lastts;
+ if (tsdiff > 0) {
+ size_t count;
+ count = tsdiff * devc->samples_per_event;
+ (void)check_and_submit_sample(devc, ss->lastsample, count, FALSE);
}
+ ss->lastts = ts + EVENTS_PER_CLUSTER;
/*
- * Parse the samples in current cluster and prepare them
- * to be submitted to Sigrok. Cope with memory layouts that
- * vary with the samplerate.
+ * Grab sample data from the current cluster and prepare their
+ * submission to the session feed. Handle samplerate dependent
+ * memory layout of sample data. Accumulation of data chunks
+ * before submission is transparent to this code path, specific
+ * buffer depth is neither assumed nor required here.
*/
- send_ptr = &samples[0];
- send_count = 0;
sample = 0;
for (i = 0; i < events_in_cluster; i++) {
item16 = sigma_dram_cluster_data(dram_cluster, i);
- if (devc->cur_samplerate == SR_MHZ(200)) {
+ if (devc->samplerate == SR_MHZ(200)) {
sample = sigma_deinterlace_200mhz_data(item16, 0);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 1);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 2);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 3);
- store_sr_sample(samples, send_count++, sample);
- } else if (devc->cur_samplerate == SR_MHZ(100)) {
+ check_and_submit_sample(devc, sample, 1, triggered);
+ } else if (devc->samplerate == SR_MHZ(100)) {
sample = sigma_deinterlace_100mhz_data(item16, 0);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_100mhz_data(item16, 1);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
} else {
sample = item16;
- store_sr_sample(samples, send_count++, sample);
- }
- }
-
- /*
- * If a trigger position applies, then provide the datafeed with
- * the first part of data up to that position, then send the
- * trigger marker.
- */
- int trigger_offset = 0;
- if (triggered) {
- /*
- * Trigger is not always accurate to sample because of
- * pipeline delay. However, it always triggers before
- * the actual event. We therefore look at the next
- * samples to pinpoint the exact position of the trigger.
- */
- trigger_offset = get_trigger_offset(samples,
- ss->lastsample, &devc->trigger);
-
- if (trigger_offset > 0) {
- trig_count = trigger_offset * devc->samples_per_event;
- packet.type = SR_DF_LOGIC;
- logic.length = trig_count * logic.unitsize;
- sigma_session_send(sdi, &packet);
- send_ptr += trig_count * logic.unitsize;
- send_count -= trig_count;
+ check_and_submit_sample(devc, sample, 1, triggered);
}
-
- /* Only send trigger if explicitly enabled. */
- if (devc->use_triggers)
- std_session_send_df_trigger(sdi);
}
-
- /*
- * Send the data after the trigger, or all of the received data
- * if no trigger position applies.
- */
- if (send_count) {
- packet.type = SR_DF_LOGIC;
- logic.length = send_count * logic.unitsize;
- logic.data = send_ptr;
- sigma_session_send(sdi, &packet);
- }
-
ss->lastsample = sample;
}
* For 50 MHz and below, events contain one sample for each channel,
* spread 20 ns apart.
*/
-static int decode_chunk_ts(struct sigma_dram_line *dram_line,
- uint16_t events_in_line,
- uint32_t trigger_event,
- struct sr_dev_inst *sdi)
+static int decode_chunk_ts(struct dev_context *devc,
+ struct sigma_dram_line *dram_line,
+ size_t events_in_line, size_t trigger_event)
{
struct sigma_dram_cluster *dram_cluster;
- struct dev_context *devc;
unsigned int clusters_in_line;
unsigned int events_in_cluster;
unsigned int i;
- uint32_t trigger_cluster, triggered;
+ uint32_t trigger_cluster;
- devc = sdi->priv;
clusters_in_line = events_in_line;
clusters_in_line += EVENTS_PER_CLUSTER - 1;
clusters_in_line /= EVENTS_PER_CLUSTER;
trigger_cluster = ~0;
- triggered = 0;
/* Check if trigger is in this chunk. */
if (trigger_event < EVENTS_PER_ROW) {
- if (devc->cur_samplerate <= SR_MHZ(50)) {
+ if (devc->samplerate <= SR_MHZ(50)) {
trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
trigger_event);
}
events_in_cluster = EVENTS_PER_CLUSTER;
}
- triggered = (i == trigger_cluster);
- sigma_decode_dram_cluster(dram_cluster, events_in_cluster,
- triggered, sdi);
+ sigma_decode_dram_cluster(devc, dram_cluster,
+ events_in_cluster, i == trigger_cluster);
}
return SR_OK;
uint32_t dl_first_line, dl_line;
uint32_t dl_events_in_line;
uint32_t trg_line, trg_event;
+ int ret;
devc = sdi->priv;
dl_events_in_line = EVENTS_PER_ROW;
trg_event = triggerpos & 0x1ff;
}
- devc->sent_samples = 0;
-
/*
* Determine how many "DRAM lines" of 1024 bytes each we need to
* retrieve from the Sigma hardware, so that we have a complete
dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
if (!dram_line)
return FALSE;
+ ret = alloc_submit_buffer(sdi);
+ if (ret != SR_OK)
+ return FALSE;
+ ret = setup_submit_limit(devc);
+ if (ret != SR_OK)
+ return FALSE;
dl_lines_done = 0;
while (dl_lines_total > dl_lines_done) {
/* We can download only up-to 32 DRAM lines in one go! */
if (dl_lines_done + i == trg_line)
trigger_event = trg_event;
- decode_chunk_ts(dram_line + i, dl_events_in_line,
- trigger_event, sdi);
+ decode_chunk_ts(devc, dram_line + i,
+ dl_events_in_line, trigger_event);
}
dl_lines_done += dl_lines_curr;
}
+ flush_submit_buffer(devc);
+ free_submit_buffer(devc);
g_free(dram_line);
std_session_send_df_end(sdi);
static int sigma_capture_mode(struct sr_dev_inst *sdi)
{
struct dev_context *devc;
- uint64_t running_msec;
- uint64_t current_time;
devc = sdi->priv;
-
- /*
- * Check if the selected sampling duration passed. Sample count
- * limits are covered by this enforced timeout as well.
- */
- current_time = g_get_monotonic_time();
- running_msec = (current_time - devc->start_time) / 1000;
- if (running_msec >= devc->limit_msec)
+ if (sr_sw_limits_check(&devc->acq_limits))
return download_capture(sdi);
return TRUE;