1/*
2 * This file is part of the libsigrok project.
3 *
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation, either version 3 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22/*
23 * ASIX SIGMA/SIGMA2 logic analyzer driver
24 */
25
26#include <config.h>
27#include "protocol.h"
28
/*
 * The ASIX Sigma supports an arbitrary integer frequency divider in
 * 50MHz mode. The divider is in the range 1...256, which allows for
 * very precise samplerate selection. This driver supports only a
 * subset of the resulting samplerates.
 */
35SR_PRIV const uint64_t samplerates[] = {
36 SR_KHZ(200), /* div=250 */
37 SR_KHZ(250), /* div=200 */
38 SR_KHZ(500), /* div=100 */
39 SR_MHZ(1), /* div=50 */
40 SR_MHZ(5), /* div=10 */
41 SR_MHZ(10), /* div=5 */
42 SR_MHZ(25), /* div=2 */
43 SR_MHZ(50), /* div=1 */
44 SR_MHZ(100), /* Special FW needed */
45 SR_MHZ(200), /* Special FW needed */
46};
47
48SR_PRIV const size_t samplerates_count = ARRAY_SIZE(samplerates);
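
/*
 * Worked example (illustrative): in 50MHz mode a requested rate of
 * 1MHz uses the divider 50MHz / 1MHz = 50, and the hardware samples
 * at exactly 50MHz / 50 = 1MHz. Requests that do not divide 50MHz
 * evenly get adjusted by sigma_normalize_samplerate() further below.
 */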
49
50static const char *firmware_files[] = {
51 "asix-sigma-50.fw", /* Up to 50MHz sample rate, 8bit divider. */
52 "asix-sigma-100.fw", /* 100MHz sample rate, fixed. */
53 "asix-sigma-200.fw", /* 200MHz sample rate, fixed. */
54 "asix-sigma-50sync.fw", /* Synchronous clock from external pin. */
55 "asix-sigma-phasor.fw", /* Frequency counter. */
56};
57
58#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
59
60static int sigma_read(void *buf, size_t size, struct dev_context *devc)
61{
62 int ret;
63
64 ret = ftdi_read_data(&devc->ftdic, (unsigned char *)buf, size);
65 if (ret < 0) {
66 sr_err("ftdi_read_data failed: %s",
67 ftdi_get_error_string(&devc->ftdic));
68 }
69
70 return ret;
71}
72
73static int sigma_write(void *buf, size_t size, struct dev_context *devc)
74{
75 int ret;
76
77 ret = ftdi_write_data(&devc->ftdic, (unsigned char *)buf, size);
78 if (ret < 0)
79 sr_err("ftdi_write_data failed: %s",
80 ftdi_get_error_string(&devc->ftdic));
81 else if ((size_t) ret != size)
82 sr_err("ftdi_write_data did not complete write.");
83
84 return ret;
85}
86
87/*
88 * NOTE: We chose the buffer size to be large enough to hold any write to the
89 * device. We still print a message just in case.
90 */
91SR_PRIV int sigma_write_register(uint8_t reg, uint8_t *data, size_t len,
92 struct dev_context *devc)
93{
94 size_t i;
95 uint8_t buf[80];
96 int idx = 0;
97
98 if ((2 * len + 2) > sizeof(buf)) {
99 sr_err("Attempted to write %zu bytes, but buffer is too small.",
100 len);
101 return SR_ERR_BUG;
102 }
103
104 buf[idx++] = REG_ADDR_LOW | (reg & 0xf);
105 buf[idx++] = REG_ADDR_HIGH | (reg >> 4);
106
107 for (i = 0; i < len; i++) {
108 buf[idx++] = REG_DATA_LOW | (data[i] & 0xf);
109 buf[idx++] = REG_DATA_HIGH_WRITE | (data[i] >> 4);
110 }
111
112 return sigma_write(buf, idx, devc);
113}
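
/*
 * Worked example (illustrative): writing the single value 0x5a to
 * register 0x03 with sigma_write_register() emits this byte stream:
 *
 *   REG_ADDR_LOW        | 0x3   (register address, low nibble)
 *   REG_ADDR_HIGH       | 0x0   (register address, high nibble)
 *   REG_DATA_LOW        | 0xa   (data byte 0x5a, low nibble)
 *   REG_DATA_HIGH_WRITE | 0x5   (data byte 0x5a, high nibble)
 *
 * Longer payloads repeat the two data bytes for every payload byte.
 */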
114
115SR_PRIV int sigma_set_register(uint8_t reg, uint8_t value, struct dev_context *devc)
116{
117 return sigma_write_register(reg, &value, 1, devc);
118}
119
120static int sigma_read_register(uint8_t reg, uint8_t *data, size_t len,
121 struct dev_context *devc)
122{
123 uint8_t buf[3];
124
125 buf[0] = REG_ADDR_LOW | (reg & 0xf);
126 buf[1] = REG_ADDR_HIGH | (reg >> 4);
127 buf[2] = REG_READ_ADDR;
128
129 sigma_write(buf, sizeof(buf), devc);
130
131 return sigma_read(data, len, devc);
132}
133
134static int sigma_read_pos(uint32_t *stoppos, uint32_t *triggerpos,
135 struct dev_context *devc)
136{
	/*
	 * Read 6 registers starting at the trigger position LSB,
	 * which yields two 24bit counter values.
	 */
141 uint8_t buf[] = {
142 REG_ADDR_LOW | READ_TRIGGER_POS_LOW,
143 REG_READ_ADDR | REG_ADDR_INC,
144 REG_READ_ADDR | REG_ADDR_INC,
145 REG_READ_ADDR | REG_ADDR_INC,
146 REG_READ_ADDR | REG_ADDR_INC,
147 REG_READ_ADDR | REG_ADDR_INC,
148 REG_READ_ADDR | REG_ADDR_INC,
149 };
150 uint8_t result[6];
151
152 sigma_write(buf, sizeof(buf), devc);
153
154 sigma_read(result, sizeof(result), devc);
155
156 *triggerpos = result[0] | (result[1] << 8) | (result[2] << 16);
157 *stoppos = result[3] | (result[4] << 8) | (result[5] << 16);
158
	/*
	 * These "position" values point to after the event (end of
	 * capture data, trigger condition matched). This is why they
	 * get decremented here. Sample memory consists of 512-byte
	 * chunks with metadata in the upper 64 bytes. Thus when the
	 * decrement takes us into this upper part of the chunk, move
	 * further backwards to the end of the chunk's data part.
	 *
	 * TODO Reconsider the above comment's validity. It's true
	 * that a 1024-byte row contains 512 u16 entities, of which 64
	 * are timestamps and 448 are events with sample data. It's not
	 * true that 64 bytes of metadata reside at the top of a 512-byte
	 * block in a row.
	 *
	 * TODO Use ROW_MASK and CLUSTERS_PER_ROW here?
	 */
175 if ((--*stoppos & 0x1ff) == 0x1ff)
176 *stoppos -= 64;
177 if ((--*triggerpos & 0x1ff) == 0x1ff)
178 *triggerpos -= 64;
179
180 return 1;
181}
182
183static int sigma_read_dram(uint16_t startchunk, size_t numchunks,
184 uint8_t *data, struct dev_context *devc)
185{
186 uint8_t buf[4096];
187 int idx;
188 size_t chunk;
189 int sel;
190 gboolean is_last;
191
192 /* Communicate DRAM start address (memory row, aka samples line). */
193 idx = 0;
194 buf[idx++] = startchunk >> 8;
195 buf[idx++] = startchunk & 0xff;
196 sigma_write_register(WRITE_MEMROW, buf, idx, devc);
197
198 /*
199 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
200 * then transfer via USB. Interleave the FPGA's DRAM access and
201 * USB transfer, use alternating buffers (0/1) in the process.
202 */
203 idx = 0;
204 buf[idx++] = REG_DRAM_BLOCK;
205 buf[idx++] = REG_DRAM_WAIT_ACK;
206 for (chunk = 0; chunk < numchunks; chunk++) {
207 sel = chunk % 2;
208 is_last = chunk == numchunks - 1;
209 if (!is_last)
210 buf[idx++] = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
211 buf[idx++] = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
212 if (!is_last)
213 buf[idx++] = REG_DRAM_WAIT_ACK;
214 }
215 sigma_write(buf, idx, devc);
216
217 return sigma_read(data, numchunks * ROW_LENGTH_BYTES, devc);
218}
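
/*
 * Worked example (illustrative, annotations reflect our reading of the
 * sequence): for numchunks == 2 the command stream sent after the
 * WRITE_MEMROW setup is:
 *
 *   REG_DRAM_BLOCK                             (fetch first line, buffer 0)
 *   REG_DRAM_WAIT_ACK
 *   REG_DRAM_BLOCK      | REG_DRAM_SEL_BOOL(1) (prefetch next line, buffer 1)
 *   REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(0) (read out buffer 0)
 *   REG_DRAM_WAIT_ACK
 *   REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(1) (read out buffer 1)
 *
 * which interleaves the DRAM fetch and the USB readout as described.
 */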
219
220/* Upload trigger look-up tables to Sigma. */
221SR_PRIV int sigma_write_trigger_lut(struct triggerlut *lut, struct dev_context *devc)
222{
223 int i;
224 uint8_t tmp[2];
225 uint16_t bit;
226
227 /* Transpose the table and send to Sigma. */
228 for (i = 0; i < 16; i++) {
229 bit = 1 << i;
230
231 tmp[0] = tmp[1] = 0;
232
233 if (lut->m2d[0] & bit)
234 tmp[0] |= 0x01;
235 if (lut->m2d[1] & bit)
236 tmp[0] |= 0x02;
237 if (lut->m2d[2] & bit)
238 tmp[0] |= 0x04;
239 if (lut->m2d[3] & bit)
240 tmp[0] |= 0x08;
241
242 if (lut->m3 & bit)
243 tmp[0] |= 0x10;
244 if (lut->m3s & bit)
245 tmp[0] |= 0x20;
246 if (lut->m4 & bit)
247 tmp[0] |= 0x40;
248
249 if (lut->m0d[0] & bit)
250 tmp[1] |= 0x01;
251 if (lut->m0d[1] & bit)
252 tmp[1] |= 0x02;
253 if (lut->m0d[2] & bit)
254 tmp[1] |= 0x04;
255 if (lut->m0d[3] & bit)
256 tmp[1] |= 0x08;
257
258 if (lut->m1d[0] & bit)
259 tmp[1] |= 0x10;
260 if (lut->m1d[1] & bit)
261 tmp[1] |= 0x20;
262 if (lut->m1d[2] & bit)
263 tmp[1] |= 0x40;
264 if (lut->m1d[3] & bit)
265 tmp[1] |= 0x80;
266
267 sigma_write_register(WRITE_TRIGGER_SELECT, tmp, sizeof(tmp),
268 devc);
269 sigma_set_register(WRITE_TRIGGER_SELECT2, 0x30 | i, devc);
270 }
271
272 /* Send the parameters */
273 sigma_write_register(WRITE_TRIGGER_SELECT, (uint8_t *) &lut->params,
274 sizeof(lut->params), devc);
275
276 return SR_OK;
277}
278
279/*
280 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
281 * uses FTDI bitbang mode for netlist download in slave serial mode.
282 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
283 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
284 * compatible bitbang mode? For maximum code re-use and reduced libftdi
285 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
286 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
287 *
288 * 750kbps rate (four times the speed of sigmalogan) works well for
289 * netlist download. All pins except INIT_B are output pins during
290 * configuration download.
291 *
292 * Some pins are inverted as a byproduct of level shifting circuitry.
293 * That's why high CCLK level (from the cable's point of view) is idle
294 * from the FPGA's perspective.
295 *
296 * The vendor's literature discusses a "suicide sequence" which ends
297 * regular FPGA execution and should be sent before entering bitbang
298 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
299 * a few times.
300 */
301#define BB_PIN_CCLK (1 << 0) /* D0, CCLK */
302#define BB_PIN_PROG (1 << 1) /* D1, PROG */
303#define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */
304#define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */
305#define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */
306#define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */
307#define BB_PIN_DIN (1 << 6) /* D6, DIN */
308#define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */
309
310#define BB_BITRATE (750 * 1000)
311#define BB_PINMASK (0xff & ~BB_PIN_INIT)
312
/*
 * Initiate slave serial mode for configuration download, which is done
 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
 * initiating the configuration download. Run a "suicide sequence" first
 * to terminate regular FPGA operation before reconfiguration.
 */
319static int sigma_fpga_init_bitbang(struct dev_context *devc)
320{
321 uint8_t suicide[] = {
322 BB_PIN_D7 | BB_PIN_D2,
323 BB_PIN_D7 | BB_PIN_D2,
324 BB_PIN_D7 | BB_PIN_D3,
325 BB_PIN_D7 | BB_PIN_D2,
326 BB_PIN_D7 | BB_PIN_D3,
327 BB_PIN_D7 | BB_PIN_D2,
328 BB_PIN_D7 | BB_PIN_D3,
329 BB_PIN_D7 | BB_PIN_D2,
330 };
331 uint8_t init_array[] = {
332 BB_PIN_CCLK,
333 BB_PIN_CCLK | BB_PIN_PROG,
334 BB_PIN_CCLK | BB_PIN_PROG,
335 BB_PIN_CCLK,
336 BB_PIN_CCLK,
337 BB_PIN_CCLK,
338 BB_PIN_CCLK,
339 BB_PIN_CCLK,
340 BB_PIN_CCLK,
341 BB_PIN_CCLK,
342 };
343 int retries, ret;
344 uint8_t data;
345
346 /* Section 2. part 1), do the FPGA suicide. */
347 sigma_write(suicide, sizeof(suicide), devc);
348 sigma_write(suicide, sizeof(suicide), devc);
349 sigma_write(suicide, sizeof(suicide), devc);
350 sigma_write(suicide, sizeof(suicide), devc);
351
352 /* Section 2. part 2), pulse PROG. */
353 sigma_write(init_array, sizeof(init_array), devc);
354 ftdi_usb_purge_buffers(&devc->ftdic);
355
356 /* Wait until the FPGA asserts INIT_B. */
357 retries = 10;
358 while (retries--) {
359 ret = sigma_read(&data, 1, devc);
360 if (ret < 0)
361 return ret;
362 if (data & BB_PIN_INIT)
363 return SR_OK;
364 g_usleep(10 * 1000);
365 }
366
367 return SR_ERR_TIMEOUT;
368}
369
370/*
371 * Configure the FPGA for logic-analyzer mode.
372 */
373static int sigma_fpga_init_la(struct dev_context *devc)
374{
	/*
	 * TODO Construct the sequence at runtime, so that request data
	 * and response check values match more obviously?
	 */
379 uint8_t mode_regval = WMR_SDRAMINIT;
380 uint8_t logic_mode_start[] = {
381 /* Read ID register. */
382 REG_ADDR_LOW | (READ_ID & 0xf),
383 REG_ADDR_HIGH | (READ_ID >> 4),
384 REG_READ_ADDR,
385
386 /* Write 0x55 to scratch register, read back. */
387 REG_ADDR_LOW | (WRITE_TEST & 0xf),
388 REG_DATA_LOW | 0x5,
389 REG_DATA_HIGH_WRITE | 0x5,
390 REG_READ_ADDR,
391
392 /* Write 0xaa to scratch register, read back. */
393 REG_DATA_LOW | 0xa,
394 REG_DATA_HIGH_WRITE | 0xa,
395 REG_READ_ADDR,
396
397 /* Initiate SDRAM initialization in mode register. */
398 REG_ADDR_LOW | (WRITE_MODE & 0xf),
399 REG_DATA_LOW | (mode_regval & 0xf),
400 REG_DATA_HIGH_WRITE | (mode_regval >> 4),
401 };
402 uint8_t result[3];
403 int ret;
404
405 /*
406 * Send the command sequence which contains 3 READ requests.
407 * Expect to see the corresponding 3 response bytes.
408 */
409 sigma_write(logic_mode_start, sizeof(logic_mode_start), devc);
410 ret = sigma_read(result, ARRAY_SIZE(result), devc);
411 if (ret != ARRAY_SIZE(result))
412 goto err;
413 if (result[0] != 0xa6 || result[1] != 0x55 || result[2] != 0xaa)
414 goto err;
415
416 return SR_OK;
417
418err:
419 sr_err("Configuration failed. Invalid reply received.");
420 return SR_ERR;
421}
422
/*
 * Read the firmware from a file and transform it into a series of bitbang
 * pulses used to program the FPGA. Note that *bb_cmd is allocated here and
 * must be g_free()'d by the caller of this function.
 */
428static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
429 uint8_t **bb_cmd, gsize *bb_cmd_size)
430{
431 uint8_t *firmware;
432 size_t file_size;
433 uint8_t *p;
434 size_t l;
435 uint32_t imm;
436 size_t bb_size;
437 uint8_t *bb_stream, *bbs, byte, mask, v;
438
439 /* Retrieve the on-disk firmware file content. */
440 firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
441 &file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
442 if (!firmware)
443 return SR_ERR_IO;
444
445 /* Unscramble the file content (XOR with "random" sequence). */
446 p = firmware;
447 l = file_size;
448 imm = 0x3f6df2ab;
449 while (l--) {
450 imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
451 *p++ ^= imm & 0xff;
452 }
453
	/*
	 * Generate a sequence of bitbang samples, with two samples per
	 * FPGA configuration bit: they provide the level for the DIN
	 * signal as well as the two edges for CCLK. See Xilinx UG332
	 * for details ("slave serial" mode).
	 *
	 * Note that CCLK is inverted in hardware. That's why the
	 * respective bit is first set and then cleared in the bitbang
	 * sample sets, so that the DIN level is stable when the data
	 * gets sampled at the rising CCLK edge, and the signals' setup
	 * time constraint is met.
	 *
	 * The caller will put the FPGA into download mode, send the
	 * bitbang samples, and release the allocated memory.
	 */
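	/*
	 * Worked example (illustrative): firmware byte 0xa5, sent MSB
	 * first, expands to these bitbang sample pairs:
	 *
	 *   bit 1 -> BB_PIN_DIN | BB_PIN_CCLK, then BB_PIN_DIN
	 *   bit 0 -> BB_PIN_CCLK,              then 0
	 *   (and so on for the remaining six bits)
	 *
	 * The CCLK bit is set first and cleared second; with CCLK being
	 * inverted in hardware this yields a rising edge at the FPGA
	 * while the DIN level is stable.
	 */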
469 bb_size = file_size * 8 * 2;
470 bb_stream = g_try_malloc(bb_size);
471 if (!bb_stream) {
472 sr_err("%s: Failed to allocate bitbang stream", __func__);
473 g_free(firmware);
474 return SR_ERR_MALLOC;
475 }
476 bbs = bb_stream;
477 p = firmware;
478 l = file_size;
479 while (l--) {
480 byte = *p++;
481 mask = 0x80;
482 while (mask) {
483 v = (byte & mask) ? BB_PIN_DIN : 0;
484 mask >>= 1;
485 *bbs++ = v | BB_PIN_CCLK;
486 *bbs++ = v;
487 }
488 }
489 g_free(firmware);
490
491 /* The transformation completed successfully, return the result. */
492 *bb_cmd = bb_stream;
493 *bb_cmd_size = bb_size;
494
495 return SR_OK;
496}
497
498static int upload_firmware(struct sr_context *ctx,
499 int firmware_idx, struct dev_context *devc)
500{
501 int ret;
502 unsigned char *buf;
503 unsigned char pins;
504 size_t buf_size;
505 const char *firmware;
506
507 /* Avoid downloading the same firmware multiple times. */
508 firmware = firmware_files[firmware_idx];
509 if (devc->cur_firmware == firmware_idx) {
510 sr_info("Not uploading firmware file '%s' again.", firmware);
511 return SR_OK;
512 }
513
514 /* Set the cable to bitbang mode. */
515 ret = ftdi_set_bitmode(&devc->ftdic, BB_PINMASK, BITMODE_BITBANG);
516 if (ret < 0) {
517 sr_err("ftdi_set_bitmode failed: %s",
518 ftdi_get_error_string(&devc->ftdic));
519 return SR_ERR;
520 }
521 ret = ftdi_set_baudrate(&devc->ftdic, BB_BITRATE);
522 if (ret < 0) {
523 sr_err("ftdi_set_baudrate failed: %s",
524 ftdi_get_error_string(&devc->ftdic));
525 return SR_ERR;
526 }
527
528 /* Initiate FPGA configuration mode. */
529 ret = sigma_fpga_init_bitbang(devc);
530 if (ret)
531 return ret;
532
533 /* Prepare wire format of the firmware image. */
534 ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
535 if (ret != SR_OK) {
536 sr_err("An error occurred while reading the firmware: %s",
537 firmware);
538 return ret;
539 }
540
541 /* Write the FPGA netlist to the cable. */
542 sr_info("Uploading firmware file '%s'.", firmware);
543 sigma_write(buf, buf_size, devc);
544
545 g_free(buf);
546
547 /* Leave bitbang mode and discard pending input data. */
548 ret = ftdi_set_bitmode(&devc->ftdic, 0, BITMODE_RESET);
549 if (ret < 0) {
550 sr_err("ftdi_set_bitmode failed: %s",
551 ftdi_get_error_string(&devc->ftdic));
552 return SR_ERR;
553 }
554 ftdi_usb_purge_buffers(&devc->ftdic);
555 while (sigma_read(&pins, 1, devc) == 1)
556 ;
557
558 /* Initialize the FPGA for logic-analyzer mode. */
559 ret = sigma_fpga_init_la(devc);
560 if (ret != SR_OK)
561 return ret;
562
563 /* Keep track of successful firmware download completion. */
564 devc->cur_firmware = firmware_idx;
565 sr_info("Firmware uploaded.");
566
567 return SR_OK;
568}
569
/*
 * The driver supports user-specified time or sample count limits. The
 * device's hardware supports neither, and hardware compression prevents
 * reliable detection of "fill levels" (currently reached sample counts)
 * from register values during acquisition. That's why the driver needs
 * to apply some heuristics:
 *
 * - The (optional) sample count limit and the (normalized) samplerate
 *   get mapped to an estimated duration for these samples' acquisition.
 * - The (optional) time limit gets checked as well. The lesser of the
 *   two limits will terminate the data acquisition phase. The exact
 *   sample count limit gets enforced in session feed submission paths.
 * - Some slack needs to be given to account for hardware pipelines as
 *   well as late storage of last chunks after compression thresholds
 *   are tripped. The resulting data set will span at least the caller-
 *   specified period of time, which shall be perfectly acceptable.
 *
 * With RLE compression active, up to 64K sample periods can pass before
 * a cluster accumulates, which translates to 327ms at 200kHz. Add twice
 * that period for good measure; one period is not enough to flush the
 * hardware pipeline (observation from an earlier experiment).
 */
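/*
 * Worked example (illustrative): a limit of 1000 samples at 200kHz maps
 * to 1000 * 1000 / 200000 + 1 = 6ms of estimated acquisition time. The
 * worst case cluster time is 1000 * 65536 / 200000 = 327ms, so the
 * acquisition timeout becomes 6 + 2 * 327 = 660ms. The exact 1000
 * sample limit then gets enforced in the session feed submission path.
 */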
592SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
593{
594 int ret;
595 GVariant *data;
596 uint64_t user_count, user_msecs;
597 uint64_t worst_cluster_time_ms;
598 uint64_t count_msecs, acquire_msecs;
599
600 sr_sw_limits_init(&devc->acq_limits);
601
602 /* Get sample count limit, convert to msecs. */
603 ret = sr_sw_limits_config_get(&devc->cfg_limits,
604 SR_CONF_LIMIT_SAMPLES, &data);
605 if (ret != SR_OK)
606 return ret;
607 user_count = g_variant_get_uint64(data);
608 g_variant_unref(data);
609 count_msecs = 0;
610 if (user_count)
611 count_msecs = 1000 * user_count / devc->samplerate + 1;
612
613 /* Get time limit, which is in msecs. */
614 ret = sr_sw_limits_config_get(&devc->cfg_limits,
615 SR_CONF_LIMIT_MSEC, &data);
616 if (ret != SR_OK)
617 return ret;
618 user_msecs = g_variant_get_uint64(data);
619 g_variant_unref(data);
620
621 /* Get the lesser of them, with both being optional. */
622 acquire_msecs = ~0ull;
623 if (user_count && count_msecs < acquire_msecs)
624 acquire_msecs = count_msecs;
625 if (user_msecs && user_msecs < acquire_msecs)
626 acquire_msecs = user_msecs;
627 if (acquire_msecs == ~0ull)
628 return SR_OK;
629
630 /* Add some slack, and use that timeout for acquisition. */
631 worst_cluster_time_ms = 1000 * 65536 / devc->samplerate;
632 acquire_msecs += 2 * worst_cluster_time_ms;
633 data = g_variant_new_uint64(acquire_msecs);
634 ret = sr_sw_limits_config_set(&devc->acq_limits,
635 SR_CONF_LIMIT_MSEC, data);
636 g_variant_unref(data);
637 if (ret != SR_OK)
638 return ret;
639
640 sr_sw_limits_acquisition_start(&devc->acq_limits);
641 return SR_OK;
642}
643
/*
 * Check whether a caller-specified samplerate matches the device's
 * hardware constraints (can be used for acquisition). Optionally yield
 * a value that approximates the original spec.
 *
 * This routine assumes that input specs are in the 200kHz to 200MHz
 * range of supported rates, and callers typically want to normalize a
 * given value to the hardware capabilities. Values in the 50MHz range
 * get rounded up by default, to avoid a more expensive check for the
 * closest match, since a higher samplerate is always desirable during
 * measurement. Input specs which exactly match hardware capabilities
 * remain unaffected. Because 100/200MHz rates also limit the number of
 * available channels, they are not suggested by this routine; instead,
 * callers need to pick them consciously.
 */
659SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
660{
661 uint64_t div, rate;
662
663 /* Accept exact matches for 100/200MHz. */
664 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
665 if (have_rate)
666 *have_rate = want_rate;
667 return SR_OK;
668 }
669
670 /* Accept 200kHz to 50MHz range, and map to near value. */
671 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
672 div = SR_MHZ(50) / want_rate;
673 rate = SR_MHZ(50) / div;
674 if (have_rate)
675 *have_rate = rate;
676 return SR_OK;
677 }
678
679 return SR_ERR_ARG;
680}
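
/*
 * Worked example (illustrative): a requested rate of 768kHz yields
 * div = 50MHz / 768kHz = 65 and thus 50MHz / 65 = 769230Hz, slightly
 * above the request. A hypothetical caller (want_rate and devc are
 * placeholders here) might normalize before applying the value:
 *
 *   uint64_t have_rate;
 *   if (sigma_normalize_samplerate(want_rate, &have_rate) != SR_OK)
 *           return SR_ERR_ARG;
 *   devc->samplerate = have_rate;
 */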
681
682SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
683{
684 struct dev_context *devc;
685 struct drv_context *drvc;
686 uint64_t samplerate;
687 int ret;
688 int num_channels;
689
690 devc = sdi->priv;
691 drvc = sdi->driver->context;
692
693 /* Accept any caller specified rate which the hardware supports. */
694 ret = sigma_normalize_samplerate(devc->samplerate, &samplerate);
695 if (ret != SR_OK)
696 return ret;
697
	/*
	 * Depending on the samplerate (200MHz, 100MHz, or 50MHz and
	 * below), specific firmware is required, and higher rates
	 * limit the set of available channels.
	 */
703 num_channels = devc->num_channels;
704 if (samplerate <= SR_MHZ(50)) {
705 ret = upload_firmware(drvc->sr_ctx, 0, devc);
706 num_channels = 16;
707 } else if (samplerate == SR_MHZ(100)) {
708 ret = upload_firmware(drvc->sr_ctx, 1, devc);
709 num_channels = 8;
710 } else if (samplerate == SR_MHZ(200)) {
711 ret = upload_firmware(drvc->sr_ctx, 2, devc);
712 num_channels = 4;
713 }
714
715 /*
716 * The samplerate affects the number of available logic channels
717 * as well as a sample memory layout detail (the number of samples
718 * which the device will communicate within an "event").
719 */
720 if (ret == SR_OK) {
721 devc->num_channels = num_channels;
722 devc->samples_per_event = 16 / devc->num_channels;
723 devc->state.state = SIGMA_IDLE;
724 }
725
726 return ret;
727}
728
/*
 * Arrange for a session feed submit buffer: a queue where a number of
 * samples gets accumulated to reduce the number of send calls, and
 * which also enforces an optional sample count limit for acquisition.
 *
 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
 * driver provides a fixed channel layout regardless of samplerate).
 */
737
738#define CHUNK_SIZE (4 * 1024 * 1024)
739
740struct submit_buffer {
741 size_t unit_size;
742 size_t max_samples, curr_samples;
743 uint8_t *sample_data;
744 uint8_t *write_pointer;
745 struct sr_dev_inst *sdi;
746 struct sr_datafeed_packet packet;
747 struct sr_datafeed_logic logic;
748};
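
/*
 * Typical usage (sketch, mirroring the download path further below;
 * "have_samples" is a placeholder for the caller's loop condition):
 *
 *   alloc_submit_buffer(sdi);
 *   setup_submit_limit(devc);
 *   while (have_samples)
 *           addto_submit_buffer(devc, sample, count);
 *   flush_submit_buffer(devc);
 *   free_submit_buffer(devc);
 *
 * addto_submit_buffer() flushes on its own whenever the accumulated
 * data reaches CHUNK_SIZE, so callers only need the final flush.
 */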
749
750static int alloc_submit_buffer(struct sr_dev_inst *sdi)
751{
752 struct dev_context *devc;
753 struct submit_buffer *buffer;
754 size_t size;
755
756 devc = sdi->priv;
757
758 buffer = g_malloc0(sizeof(*buffer));
759 devc->buffer = buffer;
760
761 buffer->unit_size = sizeof(uint16_t);
762 size = CHUNK_SIZE;
763 size /= buffer->unit_size;
764 buffer->max_samples = size;
765 size *= buffer->unit_size;
766 buffer->sample_data = g_try_malloc0(size);
767 if (!buffer->sample_data)
768 return SR_ERR_MALLOC;
769 buffer->write_pointer = buffer->sample_data;
770 sr_sw_limits_init(&devc->feed_limits);
771
772 buffer->sdi = sdi;
773 memset(&buffer->logic, 0, sizeof(buffer->logic));
774 buffer->logic.unitsize = buffer->unit_size;
775 buffer->logic.data = buffer->sample_data;
776 memset(&buffer->packet, 0, sizeof(buffer->packet));
777 buffer->packet.type = SR_DF_LOGIC;
778 buffer->packet.payload = &buffer->logic;
779
780 return SR_OK;
781}
782
783static int setup_submit_limit(struct dev_context *devc)
784{
785 struct sr_sw_limits *limits;
786 int ret;
787 GVariant *data;
788 uint64_t total;
789
790 limits = &devc->feed_limits;
791
792 ret = sr_sw_limits_config_get(&devc->cfg_limits,
793 SR_CONF_LIMIT_SAMPLES, &data);
794 if (ret != SR_OK)
795 return ret;
796 total = g_variant_get_uint64(data);
797 g_variant_unref(data);
798
799 sr_sw_limits_init(limits);
800 if (total) {
801 data = g_variant_new_uint64(total);
802 ret = sr_sw_limits_config_set(limits,
803 SR_CONF_LIMIT_SAMPLES, data);
804 g_variant_unref(data);
805 if (ret != SR_OK)
806 return ret;
807 }
808
809 sr_sw_limits_acquisition_start(limits);
810
811 return SR_OK;
812}
813
814static void free_submit_buffer(struct dev_context *devc)
815{
816 struct submit_buffer *buffer;
817
818 if (!devc)
819 return;
820
821 buffer = devc->buffer;
822 if (!buffer)
823 return;
824 devc->buffer = NULL;
825
826 g_free(buffer->sample_data);
827 g_free(buffer);
828}
829
830static int flush_submit_buffer(struct dev_context *devc)
831{
832 struct submit_buffer *buffer;
833 int ret;
834
835 buffer = devc->buffer;
836
837 /* Is queued sample data available? */
838 if (!buffer->curr_samples)
839 return SR_OK;
840
841 /* Submit to the session feed. */
842 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
843 ret = sr_session_send(buffer->sdi, &buffer->packet);
844 if (ret != SR_OK)
845 return ret;
846
847 /* Rewind queue position. */
848 buffer->curr_samples = 0;
849 buffer->write_pointer = buffer->sample_data;
850
851 return SR_OK;
852}
853
854static int addto_submit_buffer(struct dev_context *devc,
855 uint16_t sample, size_t count)
856{
857 struct submit_buffer *buffer;
858 struct sr_sw_limits *limits;
859 int ret;
860
861 buffer = devc->buffer;
862 limits = &devc->feed_limits;
863 if (sr_sw_limits_check(limits))
864 count = 0;
865
866 /*
867 * Individually accumulate and check each sample, such that
868 * accumulation between flushes won't exceed local storage, and
869 * enforcement of user specified limits is exact.
870 */
871 while (count--) {
872 WL16(buffer->write_pointer, sample);
873 buffer->write_pointer += buffer->unit_size;
874 buffer->curr_samples++;
875 if (buffer->curr_samples == buffer->max_samples) {
876 ret = flush_submit_buffer(devc);
877 if (ret != SR_OK)
878 return ret;
879 }
880 sr_sw_limits_update_samples_read(limits, 1);
881 if (sr_sw_limits_check(limits))
882 break;
883 }
884
885 return SR_OK;
886}
887
/*
 * In 100 and 200MHz mode, only a single pin's rising or falling edge
 * can be set as the trigger. In other modes, two rising/falling edge
 * triggers can be set, in addition to a value/mask trigger for any
 * number of channels.
 *
 * The Sigma supports complex triggers using boolean expressions, but
 * this has not been implemented yet.
 */
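/*
 * Example (illustrative): a trigger specification of "low on channel 2,
 * rising edge on channel 5" at 50MHz or below results in simplemask
 * bit 2 set with simplevalue bit 2 cleared, plus risingmask bit 5 set.
 * At 100/200MHz only the single edge condition would be accepted.
 */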
896SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
897{
898 struct dev_context *devc;
899 struct sr_trigger *trigger;
900 struct sr_trigger_stage *stage;
901 struct sr_trigger_match *match;
902 const GSList *l, *m;
903 int channelbit, trigger_set;
904
905 devc = sdi->priv;
906 memset(&devc->trigger, 0, sizeof(struct sigma_trigger));
907 if (!(trigger = sr_session_trigger_get(sdi->session)))
908 return SR_OK;
909
910 trigger_set = 0;
911 for (l = trigger->stages; l; l = l->next) {
912 stage = l->data;
913 for (m = stage->matches; m; m = m->next) {
914 match = m->data;
915 if (!match->channel->enabled)
916 /* Ignore disabled channels with a trigger. */
917 continue;
918 channelbit = 1 << (match->channel->index);
919 if (devc->samplerate >= SR_MHZ(100)) {
920 /* Fast trigger support. */
921 if (trigger_set) {
922 sr_err("Only a single pin trigger is "
923 "supported in 100 and 200MHz mode.");
924 return SR_ERR;
925 }
926 if (match->match == SR_TRIGGER_FALLING)
927 devc->trigger.fallingmask |= channelbit;
928 else if (match->match == SR_TRIGGER_RISING)
929 devc->trigger.risingmask |= channelbit;
930 else {
931 sr_err("Only rising/falling trigger is "
932 "supported in 100 and 200MHz mode.");
933 return SR_ERR;
934 }
935
936 trigger_set++;
937 } else {
938 /* Simple trigger support (event). */
939 if (match->match == SR_TRIGGER_ONE) {
940 devc->trigger.simplevalue |= channelbit;
941 devc->trigger.simplemask |= channelbit;
942 } else if (match->match == SR_TRIGGER_ZERO) {
943 devc->trigger.simplevalue &= ~channelbit;
944 devc->trigger.simplemask |= channelbit;
945 } else if (match->match == SR_TRIGGER_FALLING) {
946 devc->trigger.fallingmask |= channelbit;
947 trigger_set++;
948 } else if (match->match == SR_TRIGGER_RISING) {
949 devc->trigger.risingmask |= channelbit;
950 trigger_set++;
951 }
952
953 /*
954 * Actually, Sigma supports 2 rising/falling triggers,
955 * but they are ORed and the current trigger syntax
956 * does not permit ORed triggers.
957 */
958 if (trigger_set > 1) {
959 sr_err("Only 1 rising/falling trigger "
960 "is supported.");
961 return SR_ERR;
962 }
963 }
964 }
965 }
966
967 return SR_OK;
968}
969
970/* Software trigger to determine exact trigger position. */
971static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
972 struct sigma_trigger *t)
973{
974 int i;
975 uint16_t sample = 0;
976
977 for (i = 0; i < 8; i++) {
978 if (i > 0)
979 last_sample = sample;
980 sample = samples[2 * i] | (samples[2 * i + 1] << 8);
981
982 /* Simple triggers. */
983 if ((sample & t->simplemask) != t->simplevalue)
984 continue;
985
986 /* Rising edge. */
987 if (((last_sample & t->risingmask) != 0) ||
988 ((sample & t->risingmask) != t->risingmask))
989 continue;
990
991 /* Falling edge. */
992 if ((last_sample & t->fallingmask) != t->fallingmask ||
993 (sample & t->fallingmask) != 0)
994 continue;
995
996 break;
997 }
998
999 /* If we did not match, return original trigger pos. */
1000 return i & 0x7;
1001}
1002
1003static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
1004{
1005 /* TODO
1006 * Check whether the combination of this very sample and the
1007 * previous state match the configured trigger condition. This
1008 * improves the resolution of the trigger marker's position.
1009 * The hardware provided position is coarse, and may point to
1010 * a position before the actual match.
1011 *
1012 * See the previous get_trigger_offset() implementation. This
1013 * code needs to get re-used here.
1014 */
1015 (void)devc;
1016 (void)sample;
1017 (void)get_trigger_offset;
1018
1019 return FALSE;
1020}
1021
1022static int check_and_submit_sample(struct dev_context *devc,
1023 uint16_t sample, size_t count, gboolean check_trigger)
1024{
1025 gboolean triggered;
1026 int ret;
1027
1028 triggered = check_trigger && sample_matches_trigger(devc, sample);
1029 if (triggered) {
1030 ret = flush_submit_buffer(devc);
1031 if (ret != SR_OK)
1032 return ret;
1033 ret = std_session_send_df_trigger(devc->buffer->sdi);
1034 if (ret != SR_OK)
1035 return ret;
1036 }
1037
1038 ret = addto_submit_buffer(devc, sample, count);
1039 if (ret != SR_OK)
1040 return ret;
1041
1042 return SR_OK;
1043}
1044
/*
 * Return the timestamp of a DRAM cluster.
 */
1048static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1049{
1050 return (cluster->timestamp_hi << 8) | cluster->timestamp_lo;
1051}
1052
1053/*
1054 * Return one 16bit data entity of a DRAM cluster at the specified index.
1055 */
1056static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1057{
1058 uint16_t sample;
1059
1060 sample = 0;
1061 sample |= cl->samples[idx].sample_lo << 0;
1062 sample |= cl->samples[idx].sample_hi << 8;
1063 sample = (sample >> 8) | (sample << 8);
1064 return sample;
1065}
1066
1067/*
1068 * Deinterlace sample data that was retrieved at 100MHz samplerate.
1069 * One 16bit item contains two samples of 8bits each. The bits of
1070 * multiple samples are interleaved.
1071 */
1072static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx)
1073{
1074 uint16_t outdata;
1075
1076 indata >>= idx;
1077 outdata = 0;
1078 outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
1079 outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
1080 outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
1081 outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
1082 outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
1083 outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
1084 outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
1085 outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
1086 return outdata;
1087}
1088
1089/*
1090 * Deinterlace sample data that was retrieved at 200MHz samplerate.
1091 * One 16bit item contains four samples of 4bits each. The bits of
1092 * multiple samples are interleaved.
1093 */
1094static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx)
1095{
1096 uint16_t outdata;
1097
1098 indata >>= idx;
1099 outdata = 0;
1100 outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
1101 outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
1102 outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
1103 outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
1104 return outdata;
1105}
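
/*
 * Worked example (illustrative): with the shifts above, bit n of the
 * returned sample corresponds to these bits of the 16bit memory item:
 *
 *   100MHz: sample(idx) bit n = item bit (2 * n + idx), idx = 0..1
 *   200MHz: sample(idx) bit n = item bit (4 * n + idx), idx = 0..3
 *
 * That is, consecutive samples occupy interleaved bit positions within
 * one memory item, and idx selects which sample gets extracted.
 */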
1106
1107static void sigma_decode_dram_cluster(struct dev_context *devc,
1108 struct sigma_dram_cluster *dram_cluster,
1109 size_t events_in_cluster, gboolean triggered)
1110{
1111 struct sigma_state *ss;
1112 uint16_t tsdiff, ts, sample, item16;
1113 unsigned int i;
1114
1115 if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
1116 triggered = FALSE;
1117
1118 /*
1119 * If this cluster is not adjacent to the previously received
1120 * cluster, then send the appropriate number of samples with the
1121 * previous values to the sigrok session. This "decodes RLE".
1122 *
1123 * These samples cannot match the trigger since they just repeat
1124 * the previously submitted data pattern. (This assumption holds
1125 * for simple level and edge triggers. It would not for timed or
1126 * counted conditions, which currently are not supported.)
1127 */
1128 ss = &devc->state;
1129 ts = sigma_dram_cluster_ts(dram_cluster);
1130 tsdiff = ts - ss->lastts;
1131 if (tsdiff > 0) {
1132 size_t count;
1133 count = tsdiff * devc->samples_per_event;
1134 (void)check_and_submit_sample(devc, ss->lastsample, count, FALSE);
1135 }
1136 ss->lastts = ts + EVENTS_PER_CLUSTER;
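
	/*
	 * Worked example (illustrative): if the previous cluster accounted
	 * for timestamps up to 100 (ss->lastts == 100) and this cluster's
	 * timestamp is 105, then tsdiff is 5 and 5 * samples_per_event
	 * repetitions of the last submitted sample value were sent above.
	 */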
1137
1138 /*
1139 * Grab sample data from the current cluster and prepare their
1140 * submission to the session feed. Handle samplerate dependent
1141 * memory layout of sample data. Accumulation of data chunks
1142 * before submission is transparent to this code path, specific
1143 * buffer depth is neither assumed nor required here.
1144 */
1145 sample = 0;
1146 for (i = 0; i < events_in_cluster; i++) {
1147 item16 = sigma_dram_cluster_data(dram_cluster, i);
1148 if (devc->samplerate == SR_MHZ(200)) {
1149 sample = sigma_deinterlace_200mhz_data(item16, 0);
1150 check_and_submit_sample(devc, sample, 1, triggered);
1151 sample = sigma_deinterlace_200mhz_data(item16, 1);
1152 check_and_submit_sample(devc, sample, 1, triggered);
1153 sample = sigma_deinterlace_200mhz_data(item16, 2);
1154 check_and_submit_sample(devc, sample, 1, triggered);
1155 sample = sigma_deinterlace_200mhz_data(item16, 3);
1156 check_and_submit_sample(devc, sample, 1, triggered);
1157 } else if (devc->samplerate == SR_MHZ(100)) {
1158 sample = sigma_deinterlace_100mhz_data(item16, 0);
1159 check_and_submit_sample(devc, sample, 1, triggered);
1160 sample = sigma_deinterlace_100mhz_data(item16, 1);
1161 check_and_submit_sample(devc, sample, 1, triggered);
1162 } else {
1163 sample = item16;
1164 check_and_submit_sample(devc, sample, 1, triggered);
1165 }
1166 }
1167 ss->lastsample = sample;
1168}
1169
/*
 * Decode a chunk of 1024 bytes, 64 clusters, 7 events per cluster.
 * Events are 20 ns apart and can contain multiple samples.
 *
 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
 * For 50 MHz and below, events contain one sample for each channel,
 * spread 20 ns apart.
 */
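/*
 * Layout arithmetic (illustrative): one DRAM line holds 64 clusters of
 * one 16bit timestamp plus 7 16bit events each, which is 64 * 8 * 2 =
 * 1024 bytes per line, or 448 events with sample data per line.
 */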
1179static int decode_chunk_ts(struct dev_context *devc,
1180 struct sigma_dram_line *dram_line,
1181 size_t events_in_line, size_t trigger_event)
1182{
1183 struct sigma_dram_cluster *dram_cluster;
1184 unsigned int clusters_in_line;
1185 unsigned int events_in_cluster;
1186 unsigned int i;
1187 uint32_t trigger_cluster;
1188
1189 clusters_in_line = events_in_line;
1190 clusters_in_line += EVENTS_PER_CLUSTER - 1;
1191 clusters_in_line /= EVENTS_PER_CLUSTER;
1192 trigger_cluster = ~0;
1193
1194 /* Check if trigger is in this chunk. */
1195 if (trigger_event < EVENTS_PER_ROW) {
1196 if (devc->samplerate <= SR_MHZ(50)) {
1197 trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
1198 trigger_event);
1199 }
1200
1201 /* Find in which cluster the trigger occurred. */
1202 trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;
1203 }
1204
1205 /* For each full DRAM cluster. */
1206 for (i = 0; i < clusters_in_line; i++) {
1207 dram_cluster = &dram_line->cluster[i];
1208
1209 /* The last cluster might not be full. */
1210 if ((i == clusters_in_line - 1) &&
1211 (events_in_line % EVENTS_PER_CLUSTER)) {
1212 events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
1213 } else {
1214 events_in_cluster = EVENTS_PER_CLUSTER;
1215 }
1216
1217 sigma_decode_dram_cluster(devc, dram_cluster,
1218 events_in_cluster, i == trigger_cluster);
1219 }
1220
1221 return SR_OK;
1222}
1223
1224static int download_capture(struct sr_dev_inst *sdi)
1225{
1226 const uint32_t chunks_per_read = 32;
1227
1228 struct dev_context *devc;
1229 struct sigma_dram_line *dram_line;
1230 int bufsz;
1231 uint32_t stoppos, triggerpos;
1232 uint8_t modestatus;
1233 uint32_t i;
1234 uint32_t dl_lines_total, dl_lines_curr, dl_lines_done;
1235 uint32_t dl_first_line, dl_line;
1236 uint32_t dl_events_in_line;
1237 uint32_t trg_line, trg_event;
1238 int ret;
1239
1240 devc = sdi->priv;
1241 dl_events_in_line = EVENTS_PER_ROW;
1242
1243 sr_info("Downloading sample data.");
1244 devc->state.state = SIGMA_DOWNLOAD;
1245
1246 /*
1247 * Ask the hardware to stop data acquisition. Reception of the
1248 * FORCESTOP request makes the hardware "disable RLE" (store
1249 * clusters to DRAM regardless of whether pin state changes) and
1250 * raise the POSTTRIGGERED flag.
1251 */
1252 sigma_set_register(WRITE_MODE, WMR_FORCESTOP | WMR_SDRAMWRITEEN, devc);
1253 do {
1254 if (sigma_read_register(READ_MODE, &modestatus, 1, devc) != 1) {
1255 sr_err("failed while waiting for RMR_POSTTRIGGERED bit");
1256 return FALSE;
1257 }
1258 } while (!(modestatus & RMR_POSTTRIGGERED));
1259
1260 /* Set SDRAM Read Enable. */
1261 sigma_set_register(WRITE_MODE, WMR_SDRAMREADEN, devc);
1262
1263 /* Get the current position. */
1264 sigma_read_pos(&stoppos, &triggerpos, devc);
1265
1266 /* Check if trigger has fired. */
1267 if (sigma_read_register(READ_MODE, &modestatus, 1, devc) != 1) {
1268 sr_err("failed to read READ_MODE register");
1269 return FALSE;
1270 }
1271 trg_line = ~0;
1272 trg_event = ~0;
1273 if (modestatus & RMR_TRIGGERED) {
1274 trg_line = triggerpos >> 9;
1275 trg_event = triggerpos & 0x1ff;
1276 }
1277
	/*
	 * Determine how many "DRAM lines" of 1024 bytes each we need to
	 * retrieve from the Sigma hardware, so that we have a complete
	 * set of samples. Note that the last line need not contain 64
	 * clusters; it might be only partially filled.
	 *
	 * When RMR_ROUND is set, the circular buffer in DRAM has wrapped
	 * around. Since the status of the line right after the write
	 * position is uncertain in that case, we skip it and start
	 * reading from the following line.
	 */
1288 dl_first_line = 0;
1289 dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
1290 if (modestatus & RMR_ROUND) {
1291 dl_first_line = dl_lines_total + 1;
1292 dl_lines_total = ROW_COUNT - 2;
1293 }
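
	/*
	 * Worked example (illustrative, assuming ROW_SHIFT is 9 to match
	 * the 512 16bit entities per 1024 byte line): stoppos 0x0a37
	 * means line 0x0a37 >> 9 = 5 and event 0x0a37 & 0x1ff = 55, so
	 * dl_lines_total becomes 6 when no wrap-around has occurred.
	 */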
1294 dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
1295 if (!dram_line)
1296 return FALSE;
1297 ret = alloc_submit_buffer(sdi);
1298 if (ret != SR_OK)
1299 return FALSE;
1300 ret = setup_submit_limit(devc);
1301 if (ret != SR_OK)
1302 return FALSE;
1303 dl_lines_done = 0;
1304 while (dl_lines_total > dl_lines_done) {
		/* We can download only up to 32 DRAM lines in one go! */
1306 dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
1307
1308 dl_line = dl_first_line + dl_lines_done;
1309 dl_line %= ROW_COUNT;
1310 bufsz = sigma_read_dram(dl_line, dl_lines_curr,
1311 (uint8_t *)dram_line, devc);
1312 /* TODO: Check bufsz. For now, just avoid compiler warnings. */
1313 (void)bufsz;
1314
1315 /* This is the first DRAM line, so find the initial timestamp. */
1316 if (dl_lines_done == 0) {
1317 devc->state.lastts =
1318 sigma_dram_cluster_ts(&dram_line[0].cluster[0]);
1319 devc->state.lastsample = 0;
1320 }
1321
1322 for (i = 0; i < dl_lines_curr; i++) {
1323 uint32_t trigger_event = ~0;
1324 /* The last "DRAM line" can be only partially full. */
1325 if (dl_lines_done + i == dl_lines_total - 1)
1326 dl_events_in_line = stoppos & 0x1ff;
1327
1328 /* Test if the trigger happened on this line. */
1329 if (dl_lines_done + i == trg_line)
1330 trigger_event = trg_event;
1331
1332 decode_chunk_ts(devc, dram_line + i,
1333 dl_events_in_line, trigger_event);
1334 }
1335
1336 dl_lines_done += dl_lines_curr;
1337 }
1338 flush_submit_buffer(devc);
1339 free_submit_buffer(devc);
1340 g_free(dram_line);
1341
1342 std_session_send_df_end(sdi);
1343
1344 devc->state.state = SIGMA_IDLE;
1345 sr_dev_acquisition_stop(sdi);
1346
1347 return TRUE;
1348}
1349
/*
 * Periodically check the Sigma status when in CAPTURE mode. This routine
 * checks whether the configured sample count or sample time limit has been
 * reached, and will stop acquisition and download the acquired samples.
 */
1355static int sigma_capture_mode(struct sr_dev_inst *sdi)
1356{
1357 struct dev_context *devc;
1358
1359 devc = sdi->priv;
1360 if (sr_sw_limits_check(&devc->acq_limits))
1361 return download_capture(sdi);
1362
1363 return TRUE;
1364}
1365
1366SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
1367{
1368 struct sr_dev_inst *sdi;
1369 struct dev_context *devc;
1370
1371 (void)fd;
1372 (void)revents;
1373
1374 sdi = cb_data;
1375 devc = sdi->priv;
1376
1377 if (devc->state.state == SIGMA_IDLE)
1378 return TRUE;
1379
1380 /*
1381 * When the application has requested to stop the acquisition,
1382 * then immediately start downloading sample data. Otherwise
1383 * keep checking configured limits which will terminate the
1384 * acquisition and initiate download.
1385 */
1386 if (devc->state.state == SIGMA_STOPPING)
1387 return download_capture(sdi);
1388 if (devc->state.state == SIGMA_CAPTURE)
1389 return sigma_capture_mode(sdi);
1390
1391 return TRUE;
1392}
1393
1394/* Build a LUT entry used by the trigger functions. */
1395static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
1396{
1397 int i, j, k, bit;
1398
1399 /* For each quad channel. */
1400 for (i = 0; i < 4; i++) {
1401 entry[i] = 0xffff;
1402
1403 /* For each bit in LUT. */
1404 for (j = 0; j < 16; j++)
1405
1406 /* For each channel in quad. */
1407 for (k = 0; k < 4; k++) {
1408 bit = 1 << (i * 4 + k);
1409
1410 /* Set bit in entry */
1411 if ((mask & bit) && ((!(value & bit)) !=
1412 (!(j & (1 << k)))))
1413 entry[i] &= ~(1 << j);
1414 }
1415 }
1416}
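
/*
 * Worked example (illustrative): value 0x0002 with mask 0x0003 (channel
 * 1 must be high, channel 0 must be low, both in the first quad) keeps
 * only those LUT positions j where (j & 0x3) == 0x2, i.e. j = 2, 6, 10,
 * 14, so entry[0] becomes 0x4444 while entry[1..3] stay 0xffff.
 */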
1417
1418/* Add a logical function to LUT mask. */
1419static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
1420 int index, int neg, uint16_t *mask)
1421{
1422 int i, j;
1423 int x[2][2], tmp, a, b, aset, bset, rset;
1424
1425 memset(x, 0, 4 * sizeof(int));
1426
1427 /* Trigger detect condition. */
1428 switch (oper) {
1429 case OP_LEVEL:
1430 x[0][1] = 1;
1431 x[1][1] = 1;
1432 break;
1433 case OP_NOT:
1434 x[0][0] = 1;
1435 x[1][0] = 1;
1436 break;
1437 case OP_RISE:
1438 x[0][1] = 1;
1439 break;
1440 case OP_FALL:
1441 x[1][0] = 1;
1442 break;
1443 case OP_RISEFALL:
1444 x[0][1] = 1;
1445 x[1][0] = 1;
1446 break;
1447 case OP_NOTRISE:
1448 x[1][1] = 1;
1449 x[0][0] = 1;
1450 x[1][0] = 1;
1451 break;
1452 case OP_NOTFALL:
1453 x[1][1] = 1;
1454 x[0][0] = 1;
1455 x[0][1] = 1;
1456 break;
1457 case OP_NOTRISEFALL:
1458 x[1][1] = 1;
1459 x[0][0] = 1;
1460 break;
1461 }
1462
1463 /* Transpose if neg is set. */
1464 if (neg) {
1465 for (i = 0; i < 2; i++) {
1466 for (j = 0; j < 2; j++) {
1467 tmp = x[i][j];
1468 x[i][j] = x[1 - i][1 - j];
1469 x[1 - i][1 - j] = tmp;
1470 }
1471 }
1472 }
1473
1474 /* Update mask with function. */
1475 for (i = 0; i < 16; i++) {
1476 a = (i >> (2 * index + 0)) & 1;
1477 b = (i >> (2 * index + 1)) & 1;
1478
1479 aset = (*mask >> i) & 1;
1480 bset = x[b][a];
1481
1482 rset = 0;
1483 if (func == FUNC_AND || func == FUNC_NAND)
1484 rset = aset & bset;
1485 else if (func == FUNC_OR || func == FUNC_NOR)
1486 rset = aset | bset;
1487 else if (func == FUNC_XOR || func == FUNC_NXOR)
1488 rset = aset ^ bset;
1489
1490 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
1491 rset = !rset;
1492
1493 *mask &= ~(1 << i);
1494
1495 if (rset)
1496 *mask |= 1 << i;
1497 }
1498}
1499
/*
 * Build the trigger LUTs used at 50 MHz and lower samplerates to support
 * simple pin change and state triggers. Only two transitions (rise/fall)
 * can be set at any time, but a full mask and value can be set (0/1).
 */
1505SR_PRIV int sigma_build_basic_trigger(struct triggerlut *lut, struct dev_context *devc)
1506{
	int i, j;
1508 uint16_t masks[2] = { 0, 0 };
1509
1510 memset(lut, 0, sizeof(struct triggerlut));
1511
1512 /* Constant for simple triggers. */
1513 lut->m4 = 0xa000;
1514
1515 /* Value/mask trigger support. */
1516 build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
1517 lut->m2d);
1518
1519 /* Rise/fall trigger support. */
1520 for (i = 0, j = 0; i < 16; i++) {
1521 if (devc->trigger.risingmask & (1 << i) ||
1522 devc->trigger.fallingmask & (1 << i))
1523 masks[j++] = 1 << i;
1524 }
1525
1526 build_lut_entry(masks[0], masks[0], lut->m0d);
1527 build_lut_entry(masks[1], masks[1], lut->m1d);
1528
1529 /* Add glue logic */
1530 if (masks[0] || masks[1]) {
1531 /* Transition trigger. */
1532 if (masks[0] & devc->trigger.risingmask)
1533 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
1534 if (masks[0] & devc->trigger.fallingmask)
1535 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
1536 if (masks[1] & devc->trigger.risingmask)
1537 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
1538 if (masks[1] & devc->trigger.fallingmask)
1539 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
1540 } else {
1541 /* Only value/mask trigger. */
1542 lut->m3 = 0xffff;
1543 }
1544
1545 /* Triggertype: event. */
1546 lut->params.selres = 3;
1547
1548 return SR_OK;
1549}