+static void sigma_location_break_down(struct sigma_location *loc)
+{
+
+ loc->line = loc->raw / ROW_LENGTH_U16;
+ loc->line += ROW_COUNT;
+ loc->line %= ROW_COUNT;
+ loc->cluster = loc->raw % ROW_LENGTH_U16;
+ loc->event = loc->cluster % EVENTS_PER_CLUSTER;
+ loc->cluster = loc->cluster / EVENTS_PER_CLUSTER;
+}
+
+static gboolean sigma_location_is_eq(struct sigma_location *loc1,
+ struct sigma_location *loc2, gboolean with_event)
+{
+
+ if (!loc1 || !loc2)
+ return FALSE;
+
+ if (loc1->line != loc2->line)
+ return FALSE;
+ if (loc1->cluster != loc2->cluster)
+ return FALSE;
+
+ if (with_event && loc1->event != loc2->event)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Decrement the broken-down location fields (leave 'raw' as is). */
+static void sigma_location_decrement(struct sigma_location *loc,
+ gboolean with_event)
+{
+
+ if (!loc)
+ return;
+
+ if (with_event) {
+ if (loc->event--)
+ return;
+ loc->event = EVENTS_PER_CLUSTER - 1;
+ }
+
+ if (loc->cluster--)
+ return;
+ loc->cluster = CLUSTERS_PER_ROW - 1;
+
+ if (loc->line--)
+ return;
+ loc->line = ROW_COUNT - 1;
+}
+
+static void sigma_location_increment(struct sigma_location *loc)
+{
+
+ if (!loc)
+ return;
+
+ if (++loc->event < EVENTS_PER_CLUSTER)
+ return;
+ loc->event = 0;
+ if (++loc->cluster < CLUSTERS_PER_ROW)
+ return;
+ loc->cluster = 0;
+ if (++loc->line < ROW_COUNT)
+ return;
+ loc->line = 0;
+}
+
+/*
+ * Determine the position where to open the period of trigger match
+ * checks. Setup an "impossible" location when triggers are not used.
+ * Start from the hardware provided 'trig' position otherwise, and
+ * go back a few clusters, but don't go before the 'start' position.
+ */
+static void rewind_trig_arm_pos(struct dev_context *devc, size_t count)
+{
+ struct sigma_sample_interp *interp;
+
+ if (!devc)
+ return;
+ interp = &devc->interp;
+
+ if (!devc->use_triggers) {
+ interp->trig_arm.raw = ~0;
+ sigma_location_break_down(&interp->trig_arm);
+ return;
+ }
+
+ interp->trig_arm = interp->trig;
+ while (count--) {
+ if (sigma_location_is_eq(&interp->trig_arm, &interp->start, TRUE))
+ break;
+ sigma_location_decrement(&interp->trig_arm, TRUE);
+ }
+}
+
/*
 * Determine the sample memory span to retrieve ('start'/'stop'/'trig'
 * locations in devc->interp), and allocate the receive buffer for the
 * chunked DRAM line downloads. 'stop_pos' and 'trig_pos' are raw
 * hardware addresses; 'mode' carries the RMR_ROUND wrap flag.
 * Returns SR_OK, or SR_ERR_MALLOC when buffer allocation fails.
 */
static int alloc_sample_buffer(struct dev_context *devc,
	size_t stop_pos, size_t trig_pos, uint8_t mode)
{
	struct sigma_sample_interp *interp;
	gboolean wrapped;
	size_t alloc_size;

	interp = &devc->interp;

	/*
	 * Either fetch sample memory from absolute start of DRAM to the
	 * current write position. Or from after the current write position
	 * to before the current write position, if the write pointer has
	 * wrapped around at the upper DRAM boundary. Assume that the line
	 * which most recently got written to is of unknown state, ignore
	 * its content in the "wrapped" case.
	 */
	wrapped = mode & RMR_ROUND;
	interp->start.raw = 0;
	interp->stop.raw = stop_pos;
	if (wrapped) {
		/* Round 'start' up to the beginning of the line after
		 * the write position (skips the most recent line). */
		interp->start.raw = stop_pos;
		interp->start.raw >>= ROW_SHIFT;
		interp->start.raw++;
		interp->start.raw <<= ROW_SHIFT;
		/* Round 'stop' down to the beginning of the line before
		 * the write position. */
		interp->stop.raw = stop_pos;
		interp->stop.raw >>= ROW_SHIFT;
		interp->stop.raw--;
		interp->stop.raw <<= ROW_SHIFT;
	}
	interp->trig.raw = trig_pos;
	interp->iter.raw = 0;

	/* Break down raw values to line, cluster, event fields. */
	sigma_location_break_down(&interp->start);
	sigma_location_break_down(&interp->stop);
	sigma_location_break_down(&interp->trig);
	sigma_location_break_down(&interp->iter);

	/*
	 * The hardware provided trigger location "is late" because of
	 * latency in hardware pipelines. It points to after the trigger
	 * condition match. Arrange for a software check of sample data
	 * matches starting just a little before the hardware provided
	 * location. The "4 clusters" distance is an arbitrary choice.
	 */
	rewind_trig_arm_pos(devc, 4 * EVENTS_PER_CLUSTER);
	memset(&interp->trig_chk, 0, sizeof(interp->trig_chk));

	/* Determine which DRAM lines to fetch from the device. */
	/*
	 * Line count is (stop - start + 1), computed modulo ROW_COUNT to
	 * handle start lines after stop lines (the wrapped case).
	 * NOTE(review): a span covering all ROW_COUNT lines would fold to
	 * a total of 0 here — presumably unreachable because the wrapped
	 * path always skips the most recent line; confirm against callers.
	 */
	memset(&interp->fetch, 0, sizeof(interp->fetch));
	interp->fetch.lines_total = interp->stop.line + 1;
	interp->fetch.lines_total -= interp->start.line;
	interp->fetch.lines_total += ROW_COUNT;
	interp->fetch.lines_total %= ROW_COUNT;
	interp->fetch.lines_done = 0;

	/* Arrange for chunked download, N lines per USB request. */
	interp->fetch.lines_per_read = 32;
	alloc_size = sizeof(devc->interp.fetch.rcvd_lines[0]);
	alloc_size *= devc->interp.fetch.lines_per_read;
	devc->interp.fetch.rcvd_lines = g_try_malloc0(alloc_size);
	if (!devc->interp.fetch.rcvd_lines)
		return SR_ERR_MALLOC;

	return SR_OK;
}
+
+static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx);
+static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx);
+
/*
 * Fetch the next chunk of DRAM lines from the device into the
 * receive buffer that alloc_sample_buffer() created. On the first
 * invocation, seed the iteration position from 'start' and latch the
 * initial timestamp/sample pair into interp->last. Returns SR_OK, or
 * the error code of the failed DRAM read.
 */
static int fetch_sample_buffer(struct dev_context *devc)
{
	struct sigma_sample_interp *interp;
	size_t count;
	int ret;
	const uint8_t *rdptr;
	uint16_t ts, data;

	interp = &devc->interp;

	/* First invocation? Seed the iteration position. */
	if (!interp->fetch.lines_done) {
		interp->iter = interp->start;
	}

	/* Get another set of DRAM lines in one read call. */
	/* Read at most lines_per_read lines, never beyond lines_total. */
	count = interp->fetch.lines_total - interp->fetch.lines_done;
	if (count > interp->fetch.lines_per_read)
		count = interp->fetch.lines_per_read;
	ret = sigma_read_dram(devc, interp->iter.line, count,
		(uint8_t *)interp->fetch.rcvd_lines);
	if (ret != SR_OK)
		return ret;
	interp->fetch.lines_rcvd = count;
	interp->fetch.curr_line = &interp->fetch.rcvd_lines[0];

	/* First invocation? Get initial timestamp and sample data. */
	if (!interp->fetch.lines_done) {
		/*
		 * Read the first little-endian timestamp and sample word.
		 * De-interlace the sample word when several samples share
		 * one 16bit event (samples_per_event of 4 or 2); sample
		 * index 0 of the event seeds the 'last' state.
		 */
		rdptr = (void *)interp->fetch.curr_line;
		ts = read_u16le_inc(&rdptr);
		data = read_u16le_inc(&rdptr);
		if (interp->samples_per_event == 4) {
			data = sigma_deinterlace_data_4x4(data, 0);
		} else if (interp->samples_per_event == 2) {
			data = sigma_deinterlace_data_2x8(data, 0);
		}
		interp->last.ts = ts;
		interp->last.sample = data;
	}

	return SR_OK;
}
+
+static void free_sample_buffer(struct dev_context *devc)
+{
+ g_free(devc->interp.fetch.rcvd_lines);
+ devc->interp.fetch.rcvd_lines = NULL;
+ devc->interp.fetch.lines_per_read = 0;
+}
+