2 * This file is part of the libsigrok project.
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 * ASIX SIGMA/SIGMA2 logic analyzer driver
31 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
32 * (by means of separate firmware images). As well as 50MHz divided by
33 * an integer divider in the 1..256 range (by the "typical" firmware).
34 * Which translates to a strict lower boundary of around 195kHz.
36 * This driver "suggests" a subset of the available rates by listing a
37 * few discrete values, while setter routines accept any user specified
38 * rate that is supported by the hardware.
40 SR_PRIV const uint64_t samplerates[] = {
41 /* 50MHz and integer divider. 1/2/5 steps (where possible). */
42 SR_KHZ(200), SR_KHZ(500),
43 SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
44 SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
45 /* 100MHz/200MHz, fixed rates in special firmware. */
46 SR_MHZ(100), SR_MHZ(200),
49 SR_PRIV const size_t samplerates_count = ARRAY_SIZE(samplerates);
51 static const char *firmware_files[] = {
52 [SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
53 [SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
54 [SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
55 [SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
56 [SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
59 #define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
61 static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
63 struct dev_context *devc;
72 if (devc->ftdi.is_open)
77 serno = sdi->serial_num;
78 if (!vid || !pid || !serno || !*serno)
81 ret = ftdi_init(&devc->ftdi.ctx);
83 sr_err("Cannot initialize FTDI context (%d): %s.",
84 ret, ftdi_get_error_string(&devc->ftdi.ctx));
87 ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
88 vid, pid, NULL, serno, 0);
90 sr_err("Cannot open device (%d): %s.",
91 ret, ftdi_get_error_string(&devc->ftdi.ctx));
94 devc->ftdi.is_open = TRUE;
99 static int sigma_ftdi_close(struct dev_context *devc)
103 ret = ftdi_usb_close(&devc->ftdi.ctx);
104 devc->ftdi.is_open = FALSE;
105 devc->ftdi.must_close = FALSE;
106 ftdi_deinit(&devc->ftdi.ctx);
108 return ret == 0 ? SR_OK : SR_ERR_IO;
111 SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
113 struct dev_context *devc;
122 if (devc->ftdi.is_open)
125 ret = sigma_ftdi_open(sdi);
128 devc->ftdi.must_close = TRUE;
133 SR_PRIV int sigma_check_close(struct dev_context *devc)
140 if (devc->ftdi.must_close) {
141 ret = sigma_ftdi_close(devc);
144 devc->ftdi.must_close = FALSE;
150 SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
152 struct dev_context *devc;
161 ret = sigma_ftdi_open(sdi);
164 devc->ftdi.must_close = FALSE;
169 SR_PRIV int sigma_force_close(struct dev_context *devc)
171 return sigma_ftdi_close(devc);
/*
 * BEWARE! Error propagation is important, as are kinds of return values.
 *
 * - Raw USB transport communicates the number of sent or received bytes,
 *   or negative error codes in the external library's(!) range of codes.
 * - Internal routines at the "sigrok driver level" communicate success
 *   or failure in terms of SR_OK et al error codes.
 * - Main loop style receive callbacks communicate booleans which arrange
 *   for repeated calls to drive progress during acquisition.
 *
 * Careful consideration by maintainers is essential, because all of the
 * above kinds of values are assignment compatible from the compiler's
 * point of view. Implementation errors will go unnoticed at build time.
 */
189 static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
193 ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
195 sr_err("USB data read failed: %s",
196 ftdi_get_error_string(&devc->ftdi.ctx));
202 static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
206 ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
208 sr_err("USB data write failed: %s",
209 ftdi_get_error_string(&devc->ftdi.ctx));
210 } else if ((size_t)ret != size) {
211 sr_err("USB data write length mismatch.");
217 static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
221 ret = sigma_read_raw(devc, buf, size);
222 if (ret < 0 || (size_t)ret != size)
228 static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
232 ret = sigma_write_raw(devc, buf, size);
233 if (ret < 0 || (size_t)ret != size)
240 * Implementor's note: The local write buffer's size shall suffice for
241 * any know FPGA register transaction that is involved in the supported
242 * feature set of this sigrok device driver. If the length check trips,
243 * that's a programmer's error and needs adjustment in the complete call
244 * stack of the respective code path.
246 SR_PRIV int sigma_write_register(struct dev_context *devc,
247 uint8_t reg, uint8_t *data, size_t len)
249 uint8_t buf[80], *wrptr;
252 if (2 + 2 * len > sizeof(buf)) {
253 sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
258 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
259 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
260 for (idx = 0; idx < len; idx++) {
261 write_u8_inc(&wrptr, REG_DATA_LOW | (data[idx] & 0xf));
262 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data[idx] >> 4));
265 return sigma_write_sr(devc, buf, wrptr - buf);
268 SR_PRIV int sigma_set_register(struct dev_context *devc,
269 uint8_t reg, uint8_t value)
271 return sigma_write_register(devc, reg, &value, sizeof(value));
274 static int sigma_read_register(struct dev_context *devc,
275 uint8_t reg, uint8_t *data, size_t len)
277 uint8_t buf[3], *wrptr;
281 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
282 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
283 write_u8_inc(&wrptr, REG_READ_ADDR);
284 ret = sigma_write_sr(devc, buf, wrptr - buf);
288 return sigma_read_sr(devc, data, len);
291 static int sigma_read_pos(struct dev_context *devc,
292 uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
295 * Read 7 registers starting at trigger position LSB.
296 * Which yields two 24bit counter values, and mode flags.
298 const uint8_t buf[] = {
299 /* Setup first register address. */
300 REG_ADDR_LOW | READ_TRIGGER_POS_LOW,
301 /* Retrieve trigger position. */
302 REG_READ_ADDR | REG_ADDR_INC,
303 REG_READ_ADDR | REG_ADDR_INC,
304 REG_READ_ADDR | REG_ADDR_INC,
305 /* Retrieve stop position. */
306 REG_READ_ADDR | REG_ADDR_INC,
307 REG_READ_ADDR | REG_ADDR_INC,
308 REG_READ_ADDR | REG_ADDR_INC,
309 /* Retrieve mode register. */
310 REG_READ_ADDR | REG_ADDR_INC,
317 ret = sigma_write_sr(devc, buf, sizeof(buf));
321 ret = sigma_read_sr(devc, result, sizeof(result));
326 v32 = read_u24le_inc(&rdptr);
329 v32 = read_u24le_inc(&rdptr);
332 v8 = read_u8_inc(&rdptr);
337 * These positions consist of "the memory row" in the MSB fields,
338 * and "an event index" within the row in the LSB fields. Part
339 * of the memory row's content is sample data, another part is
342 * The retrieved register values point to after the captured
343 * position. So they need to get decremented, and adjusted to
344 * cater for the timestamps when the decrement carries over to
345 * a different memory row.
347 if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
348 *stoppos -= CLUSTERS_PER_ROW;
349 if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
350 *triggerpos -= CLUSTERS_PER_ROW;
355 static int sigma_read_dram(struct dev_context *devc,
356 uint16_t startchunk, size_t numchunks, uint8_t *data)
358 uint8_t buf[128], *wrptr;
363 if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
364 sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
368 /* Communicate DRAM start address (memory row, aka samples line). */
370 write_u8_inc(&wrptr, startchunk >> 8);
371 write_u8_inc(&wrptr, startchunk & 0xff);
372 ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
377 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
378 * then transfer via USB. Interleave the FPGA's DRAM access and
379 * USB transfer, use alternating buffers (0/1) in the process.
382 write_u8_inc(&wrptr, REG_DRAM_BLOCK);
383 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
384 for (chunk = 0; chunk < numchunks; chunk++) {
386 is_last = chunk == numchunks - 1;
388 write_u8_inc(&wrptr, REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel));
389 write_u8_inc(&wrptr, REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel));
391 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
393 ret = sigma_write_sr(devc, buf, wrptr - buf);
397 return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
400 /* Upload trigger look-up tables to Sigma. */
401 SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
402 struct triggerlut *lut)
407 uint8_t buf[6], *wrptr, regval;
410 /* Transpose the table and send to Sigma. */
411 for (i = 0; i < 16; i++) {
416 if (lut->m2d[0] & bit)
418 if (lut->m2d[1] & bit)
420 if (lut->m2d[2] & bit)
422 if (lut->m2d[3] & bit)
432 if (lut->m0d[0] & bit)
434 if (lut->m0d[1] & bit)
436 if (lut->m0d[2] & bit)
438 if (lut->m0d[3] & bit)
441 if (lut->m1d[0] & bit)
443 if (lut->m1d[1] & bit)
445 if (lut->m1d[2] & bit)
447 if (lut->m1d[3] & bit)
451 * This logic seems redundant, but separates the value
452 * determination from the wire format, and is useful
453 * during future maintenance and research.
456 write_u8_inc(&wrptr, tmp[0]);
457 write_u8_inc(&wrptr, tmp[1]);
458 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
461 ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, 0x30 | i);
466 /* Send the parameters */
469 regval |= lut->params.selc << 6;
470 regval |= lut->params.selpresc << 0;
471 write_u8_inc(&wrptr, regval);
473 regval |= lut->params.selinc << 6;
474 regval |= lut->params.selres << 4;
475 regval |= lut->params.sela << 2;
476 regval |= lut->params.selb << 0;
477 write_u8_inc(&wrptr, regval);
478 write_u16le_inc(&wrptr, lut->params.cmpb);
479 write_u16le_inc(&wrptr, lut->params.cmpa);
480 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
488 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
489 * uses FTDI bitbang mode for netlist download in slave serial mode.
490 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
491 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
492 * compatible bitbang mode? For maximum code re-use and reduced libftdi
493 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
494 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
496 * 750kbps rate (four times the speed of sigmalogan) works well for
497 * netlist download. All pins except INIT_B are output pins during
498 * configuration download.
500 * Some pins are inverted as a byproduct of level shifting circuitry.
501 * That's why high CCLK level (from the cable's point of view) is idle
502 * from the FPGA's perspective.
504 * The vendor's literature discusses a "suicide sequence" which ends
505 * regular FPGA execution and should be sent before entering bitbang
506 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
509 #define BB_PIN_CCLK (1 << 0) /* D0, CCLK */
510 #define BB_PIN_PROG (1 << 1) /* D1, PROG */
511 #define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */
512 #define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */
513 #define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */
514 #define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */
515 #define BB_PIN_DIN (1 << 6) /* D6, DIN */
516 #define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */
518 #define BB_BITRATE (750 * 1000)
519 #define BB_PINMASK (0xff & ~BB_PIN_INIT)
522 * Initiate slave serial mode for configuration download. Which is done
523 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
524 * initiating the configuration download.
526 * Run a "suicide sequence" first to terminate the regular FPGA operation
527 * before reconfiguration. The FTDI cable is single channel, and shares
528 * pins which are used for data communication in FIFO mode with pins that
529 * are used for FPGA configuration in bitbang mode. Hardware defaults for
530 * unconfigured hardware, and runtime conditions after FPGA configuration
531 * need to cooperate such that re-configuration of the FPGA can start.
533 static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
535 const uint8_t suicide[] = {
536 BB_PIN_D7 | BB_PIN_D2,
537 BB_PIN_D7 | BB_PIN_D2,
538 BB_PIN_D7 | BB_PIN_D3,
539 BB_PIN_D7 | BB_PIN_D2,
540 BB_PIN_D7 | BB_PIN_D3,
541 BB_PIN_D7 | BB_PIN_D2,
542 BB_PIN_D7 | BB_PIN_D3,
543 BB_PIN_D7 | BB_PIN_D2,
545 const uint8_t init_array[] = {
547 BB_PIN_CCLK | BB_PIN_PROG,
548 BB_PIN_CCLK | BB_PIN_PROG,
560 /* Section 2. part 1), do the FPGA suicide. */
562 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
563 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
564 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
565 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
570 /* Section 2. part 2), pulse PROG. */
571 ret = sigma_write_sr(devc, init_array, sizeof(init_array));
575 ftdi_usb_purge_buffers(&devc->ftdi.ctx);
578 * Wait until the FPGA asserts INIT_B. Check in a maximum number
579 * of bursts with a given delay between them. Read as many pin
580 * capture results as the combination of FTDI chip and FTID lib
581 * may provide. Cope with absence of pin capture data in a cycle.
582 * This approach shall result in fast reponse in case of success,
583 * low cost of execution during wait, reliable error handling in
584 * the transport layer, and robust response to failure or absence
585 * of result data (hardware inactivity after stimulus).
590 ret = sigma_read_raw(devc, &data, sizeof(data));
593 if (ret == sizeof(data) && (data & BB_PIN_INIT))
595 } while (ret == sizeof(data));
600 return SR_ERR_TIMEOUT;
604 * This is belt and braces. Re-run the bitbang initiation sequence a few
605 * times should first attempts fail. Failure is rare but can happen (was
606 * observed during driver development).
608 static int sigma_fpga_init_bitbang(struct dev_context *devc)
615 ret = sigma_fpga_init_bitbang_once(devc);
618 if (ret != SR_ERR_TIMEOUT)
625 * Configure the FPGA for logic-analyzer mode.
627 static int sigma_fpga_init_la(struct dev_context *devc)
629 uint8_t buf[16], *wrptr;
630 uint8_t data_55, data_aa, mode;
632 const uint8_t *rdptr;
637 /* Read ID register. */
638 write_u8_inc(&wrptr, REG_ADDR_LOW | (READ_ID & 0xf));
639 write_u8_inc(&wrptr, REG_ADDR_HIGH | (READ_ID >> 4));
640 write_u8_inc(&wrptr, REG_READ_ADDR);
642 /* Write 0x55 to scratch register, read back. */
644 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
645 write_u8_inc(&wrptr, REG_DATA_LOW | (data_55 & 0xf));
646 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_55 >> 4));
647 write_u8_inc(&wrptr, REG_READ_ADDR);
649 /* Write 0xaa to scratch register, read back. */
651 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
652 write_u8_inc(&wrptr, REG_DATA_LOW | (data_aa & 0xf));
653 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_aa >> 4));
654 write_u8_inc(&wrptr, REG_READ_ADDR);
656 /* Initiate SDRAM initialization in mode register. */
657 mode = WMR_SDRAMINIT;
658 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_MODE & 0xf));
659 write_u8_inc(&wrptr, REG_DATA_LOW | (mode & 0xf));
660 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (mode >> 4));
663 * Send the command sequence which contains 3 READ requests.
664 * Expect to see the corresponding 3 response bytes.
666 ret = sigma_write_sr(devc, buf, wrptr - buf);
668 sr_err("Could not request LA start response.");
671 ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
673 sr_err("Could not receive LA start response.");
677 if (read_u8_inc(&rdptr) != 0xa6) {
678 sr_err("Unexpected ID response.");
681 if (read_u8_inc(&rdptr) != data_55) {
682 sr_err("Unexpected scratch read-back (55).");
685 if (read_u8_inc(&rdptr) != data_aa) {
686 sr_err("Unexpected scratch read-back (aa).");
694 * Read the firmware from a file and transform it into a series of bitbang
695 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
696 * by the caller of this function.
698 static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
699 uint8_t **bb_cmd, gsize *bb_cmd_size)
707 uint8_t *bb_stream, *bbs, byte, mask, v;
709 /* Retrieve the on-disk firmware file content. */
710 firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
711 &file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
715 /* Unscramble the file content (XOR with "random" sequence). */
720 imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
725 * Generate a sequence of bitbang samples. With two samples per
726 * FPGA configuration bit, providing the level for the DIN signal
727 * as well as two edges for CCLK. See Xilinx UG332 for details
728 * ("slave serial" mode).
730 * Note that CCLK is inverted in hardware. That's why the
731 * respective bit is first set and then cleared in the bitbang
732 * sample sets. So that the DIN level will be stable when the
733 * data gets sampled at the rising CCLK edge, and the signals'
734 * setup time constraint will be met.
736 * The caller will put the FPGA into download mode, will send
737 * the bitbang samples, and release the allocated memory.
739 bb_size = file_size * 8 * 2;
740 bb_stream = g_try_malloc(bb_size);
742 sr_err("Memory allocation failed during firmware upload.");
744 return SR_ERR_MALLOC;
753 v = (byte & mask) ? BB_PIN_DIN : 0;
755 *bbs++ = v | BB_PIN_CCLK;
761 /* The transformation completed successfully, return the result. */
763 *bb_cmd_size = bb_size;
768 static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
769 enum sigma_firmware_idx firmware_idx)
775 const char *firmware;
777 /* Check for valid firmware file selection. */
778 if (firmware_idx >= ARRAY_SIZE(firmware_files))
780 firmware = firmware_files[firmware_idx];
781 if (!firmware || !*firmware)
784 /* Avoid downloading the same firmware multiple times. */
785 if (devc->firmware_idx == firmware_idx) {
786 sr_info("Not uploading firmware file '%s' again.", firmware);
790 devc->state.state = SIGMA_CONFIG;
792 /* Set the cable to bitbang mode. */
793 ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
795 sr_err("Could not setup cable mode for upload: %s",
796 ftdi_get_error_string(&devc->ftdi.ctx));
799 ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
801 sr_err("Could not setup bitrate for upload: %s",
802 ftdi_get_error_string(&devc->ftdi.ctx));
806 /* Initiate FPGA configuration mode. */
807 ret = sigma_fpga_init_bitbang(devc);
809 sr_err("Could not initiate firmware upload to hardware");
813 /* Prepare wire format of the firmware image. */
814 ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
816 sr_err("Could not prepare file %s for upload.", firmware);
820 /* Write the FPGA netlist to the cable. */
821 sr_info("Uploading firmware file '%s'.", firmware);
822 ret = sigma_write_sr(devc, buf, buf_size);
825 sr_err("Could not upload firmware file '%s'.", firmware);
829 /* Leave bitbang mode and discard pending input data. */
830 ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
832 sr_err("Could not setup cable mode after upload: %s",
833 ftdi_get_error_string(&devc->ftdi.ctx));
836 ftdi_usb_purge_buffers(&devc->ftdi.ctx);
837 while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
840 /* Initialize the FPGA for logic-analyzer mode. */
841 ret = sigma_fpga_init_la(devc);
843 sr_err("Hardware response after firmware upload failed.");
847 /* Keep track of successful firmware download completion. */
848 devc->state.state = SIGMA_IDLE;
849 devc->firmware_idx = firmware_idx;
850 sr_info("Firmware uploaded.");
856 * The driver supports user specified time or sample count limits. The
857 * device's hardware supports neither, and hardware compression prevents
858 * reliable detection of "fill levels" (currently reached sample counts)
859 * from register values during acquisition. That's why the driver needs
860 * to apply some heuristics:
862 * - The (optional) sample count limit and the (normalized) samplerate
863 * get mapped to an estimated duration for these samples' acquisition.
864 * - The (optional) time limit gets checked as well. The lesser of the
865 * two limits will terminate the data acquisition phase. The exact
866 * sample count limit gets enforced in session feed submission paths.
867 * - Some slack needs to be given to account for hardware pipelines as
868 * well as late storage of last chunks after compression thresholds
869 * are tripped. The resulting data set will span at least the caller
870 * specified period of time, which shall be perfectly acceptable.
872 * With RLE compression active, up to 64K sample periods can pass before
873 * a cluster accumulates. Which translates to 327ms at 200kHz. Add two
874 * times that period for good measure, one is not enough to flush the
875 * hardware pipeline (observation from an earlier experiment).
877 SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
881 uint64_t user_count, user_msecs;
882 uint64_t worst_cluster_time_ms;
883 uint64_t count_msecs, acquire_msecs;
885 sr_sw_limits_init(&devc->acq_limits);
887 /* Get sample count limit, convert to msecs. */
888 ret = sr_sw_limits_config_get(&devc->cfg_limits,
889 SR_CONF_LIMIT_SAMPLES, &data);
892 user_count = g_variant_get_uint64(data);
893 g_variant_unref(data);
896 count_msecs = 1000 * user_count / devc->samplerate + 1;
898 /* Get time limit, which is in msecs. */
899 ret = sr_sw_limits_config_get(&devc->cfg_limits,
900 SR_CONF_LIMIT_MSEC, &data);
903 user_msecs = g_variant_get_uint64(data);
904 g_variant_unref(data);
906 /* Get the lesser of them, with both being optional. */
907 acquire_msecs = ~0ull;
908 if (user_count && count_msecs < acquire_msecs)
909 acquire_msecs = count_msecs;
910 if (user_msecs && user_msecs < acquire_msecs)
911 acquire_msecs = user_msecs;
912 if (acquire_msecs == ~0ull)
915 /* Add some slack, and use that timeout for acquisition. */
916 worst_cluster_time_ms = 1000 * 65536 / devc->samplerate;
917 acquire_msecs += 2 * worst_cluster_time_ms;
918 data = g_variant_new_uint64(acquire_msecs);
919 ret = sr_sw_limits_config_set(&devc->acq_limits,
920 SR_CONF_LIMIT_MSEC, data);
921 g_variant_unref(data);
925 sr_sw_limits_acquisition_start(&devc->acq_limits);
930 * Check whether a caller specified samplerate matches the device's
931 * hardware constraints (can be used for acquisition). Optionally yield
932 * a value that approximates the original spec.
934 * This routine assumes that input specs are in the 200kHz to 200MHz
935 * range of supported rates, and callers typically want to normalize a
936 * given value to the hardware capabilities. Values in the 50MHz range
937 * get rounded up by default, to avoid a more expensive check for the
938 * closest match, while higher sampling rate is always desirable during
939 * measurement. Input specs which exactly match hardware capabilities
940 * remain unaffected. Because 100/200MHz rates also limit the number of
941 * available channels, they are not suggested by this routine, instead
942 * callers need to pick them consciously.
944 SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
948 /* Accept exact matches for 100/200MHz. */
949 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
951 *have_rate = want_rate;
955 /* Accept 200kHz to 50MHz range, and map to near value. */
956 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
957 div = SR_MHZ(50) / want_rate;
958 rate = SR_MHZ(50) / div;
967 SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
969 struct dev_context *devc;
970 struct drv_context *drvc;
976 drvc = sdi->driver->context;
978 /* Accept any caller specified rate which the hardware supports. */
979 ret = sigma_normalize_samplerate(devc->samplerate, &samplerate);
984 * Depending on the samplerates of 200/100/50- MHz, specific
985 * firmware is required and higher rates might limit the set
986 * of available channels.
988 num_channels = devc->num_channels;
989 if (samplerate <= SR_MHZ(50)) {
990 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
992 } else if (samplerate == SR_MHZ(100)) {
993 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
995 } else if (samplerate == SR_MHZ(200)) {
996 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
1001 * The samplerate affects the number of available logic channels
1002 * as well as a sample memory layout detail (the number of samples
1003 * which the device will communicate within an "event").
1006 devc->num_channels = num_channels;
1007 devc->samples_per_event = 16 / devc->num_channels;
1014 * Arrange for a session feed submit buffer. A queue where a number of
1015 * samples gets accumulated to reduce the number of send calls. Which
1016 * also enforces an optional sample count limit for data acquisition.
1018 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
1019 * driver provides a fixed channel layout regardless of samplerate).
1022 #define CHUNK_SIZE (4 * 1024 * 1024)
1024 struct submit_buffer {
1026 size_t max_samples, curr_samples;
1027 uint8_t *sample_data;
1028 uint8_t *write_pointer;
1029 struct sr_dev_inst *sdi;
1030 struct sr_datafeed_packet packet;
1031 struct sr_datafeed_logic logic;
1034 static int alloc_submit_buffer(struct sr_dev_inst *sdi)
1036 struct dev_context *devc;
1037 struct submit_buffer *buffer;
1042 buffer = g_malloc0(sizeof(*buffer));
1043 devc->buffer = buffer;
1045 buffer->unit_size = sizeof(uint16_t);
1047 size /= buffer->unit_size;
1048 buffer->max_samples = size;
1049 size *= buffer->unit_size;
1050 buffer->sample_data = g_try_malloc0(size);
1051 if (!buffer->sample_data)
1052 return SR_ERR_MALLOC;
1053 buffer->write_pointer = buffer->sample_data;
1054 sr_sw_limits_init(&devc->feed_limits);
1057 memset(&buffer->logic, 0, sizeof(buffer->logic));
1058 buffer->logic.unitsize = buffer->unit_size;
1059 buffer->logic.data = buffer->sample_data;
1060 memset(&buffer->packet, 0, sizeof(buffer->packet));
1061 buffer->packet.type = SR_DF_LOGIC;
1062 buffer->packet.payload = &buffer->logic;
1067 static int setup_submit_limit(struct dev_context *devc)
1069 struct sr_sw_limits *limits;
1074 limits = &devc->feed_limits;
1076 ret = sr_sw_limits_config_get(&devc->cfg_limits,
1077 SR_CONF_LIMIT_SAMPLES, &data);
1080 total = g_variant_get_uint64(data);
1081 g_variant_unref(data);
1083 sr_sw_limits_init(limits);
1085 data = g_variant_new_uint64(total);
1086 ret = sr_sw_limits_config_set(limits,
1087 SR_CONF_LIMIT_SAMPLES, data);
1088 g_variant_unref(data);
1093 sr_sw_limits_acquisition_start(limits);
1098 static void free_submit_buffer(struct dev_context *devc)
1100 struct submit_buffer *buffer;
1105 buffer = devc->buffer;
1108 devc->buffer = NULL;
1110 g_free(buffer->sample_data);
1114 static int flush_submit_buffer(struct dev_context *devc)
1116 struct submit_buffer *buffer;
1119 buffer = devc->buffer;
1121 /* Is queued sample data available? */
1122 if (!buffer->curr_samples)
1125 /* Submit to the session feed. */
1126 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
1127 ret = sr_session_send(buffer->sdi, &buffer->packet);
1131 /* Rewind queue position. */
1132 buffer->curr_samples = 0;
1133 buffer->write_pointer = buffer->sample_data;
1138 static int addto_submit_buffer(struct dev_context *devc,
1139 uint16_t sample, size_t count)
1141 struct submit_buffer *buffer;
1142 struct sr_sw_limits *limits;
1145 buffer = devc->buffer;
1146 limits = &devc->feed_limits;
1147 if (sr_sw_limits_check(limits))
1151 * Individually accumulate and check each sample, such that
1152 * accumulation between flushes won't exceed local storage, and
1153 * enforcement of user specified limits is exact.
1156 write_u16le_inc(&buffer->write_pointer, sample);
1157 buffer->curr_samples++;
1158 if (buffer->curr_samples == buffer->max_samples) {
1159 ret = flush_submit_buffer(devc);
1163 sr_sw_limits_update_samples_read(limits, 1);
1164 if (sr_sw_limits_check(limits))
1172 * In 100 and 200 MHz mode, only a single pin rising/falling can be
1173 * set as trigger. In other modes, two rising/falling triggers can be set,
1174 * in addition to value/mask trigger for any number of channels.
1176 * The Sigma supports complex triggers using boolean expressions, but this
1177 * has not been implemented yet.
1179 SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
1181 struct dev_context *devc;
1182 struct sr_trigger *trigger;
1183 struct sr_trigger_stage *stage;
1184 struct sr_trigger_match *match;
1185 const GSList *l, *m;
1186 int channelbit, trigger_set;
1189 memset(&devc->trigger, 0, sizeof(devc->trigger));
1190 trigger = sr_session_trigger_get(sdi->session);
1195 for (l = trigger->stages; l; l = l->next) {
1197 for (m = stage->matches; m; m = m->next) {
1199 /* Ignore disabled channels with a trigger. */
1200 if (!match->channel->enabled)
1202 channelbit = 1 << match->channel->index;
1203 if (devc->samplerate >= SR_MHZ(100)) {
1204 /* Fast trigger support. */
1206 sr_err("100/200MHz modes limited to single trigger pin.");
1209 if (match->match == SR_TRIGGER_FALLING) {
1210 devc->trigger.fallingmask |= channelbit;
1211 } else if (match->match == SR_TRIGGER_RISING) {
1212 devc->trigger.risingmask |= channelbit;
1214 sr_err("100/200MHz modes limited to edge trigger.");
1220 /* Simple trigger support (event). */
1221 if (match->match == SR_TRIGGER_ONE) {
1222 devc->trigger.simplevalue |= channelbit;
1223 devc->trigger.simplemask |= channelbit;
1224 } else if (match->match == SR_TRIGGER_ZERO) {
1225 devc->trigger.simplevalue &= ~channelbit;
1226 devc->trigger.simplemask |= channelbit;
1227 } else if (match->match == SR_TRIGGER_FALLING) {
1228 devc->trigger.fallingmask |= channelbit;
1230 } else if (match->match == SR_TRIGGER_RISING) {
1231 devc->trigger.risingmask |= channelbit;
1236 * Actually, Sigma supports 2 rising/falling triggers,
1237 * but they are ORed and the current trigger syntax
1238 * does not permit ORed triggers.
1240 if (trigger_set > 1) {
1241 sr_err("Limited to 1 edge trigger.");
1251 /* Software trigger to determine exact trigger position. */
1252 static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
1253 struct sigma_trigger *t)
1255 const uint8_t *rdptr;
1261 for (i = 0; i < 8; i++) {
1263 last_sample = sample;
1264 sample = read_u16le_inc(&rdptr);
1266 /* Simple triggers. */
1267 if ((sample & t->simplemask) != t->simplevalue)
1271 if (((last_sample & t->risingmask) != 0) ||
1272 ((sample & t->risingmask) != t->risingmask))
1276 if ((last_sample & t->fallingmask) != t->fallingmask ||
1277 (sample & t->fallingmask) != 0)
1283 /* If we did not match, return original trigger pos. */
1287 static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
1290 * Check whether the combination of this very sample and the
1291 * previous state match the configured trigger condition. This
1292 * improves the resolution of the trigger marker's position.
1293 * The hardware provided position is coarse, and may point to
1294 * a position before the actual match.
1296 * See the previous get_trigger_offset() implementation. This
1297 * code needs to get re-used here.
1301 (void)get_trigger_offset;
1306 static int check_and_submit_sample(struct dev_context *devc,
1307 uint16_t sample, size_t count, gboolean check_trigger)
1312 triggered = check_trigger && sample_matches_trigger(devc, sample);
1314 ret = flush_submit_buffer(devc);
1317 ret = std_session_send_df_trigger(devc->buffer->sdi);
1322 ret = addto_submit_buffer(devc, sample, count);
1330 * Return the timestamp of "DRAM cluster".
1332 static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1334 return read_u16le((const uint8_t *)&cluster->timestamp);
1338 * Return one 16bit data entity of a DRAM cluster at the specified index.
1340 static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1342 return read_u16le((const uint8_t *)&cl->samples[idx]);
/*
 * Deinterlace sample data that was retrieved at 100MHz samplerate.
 * One 16bit item contains two samples of 8bits each. The bits of
 * multiple samples are interleaved: sample bit N of sample 'idx'
 * (0..1) resides at input bit position 2*N+idx.
 */
static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx)
{
	uint16_t outdata;

	indata >>= idx;
	outdata = 0;
	outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
	outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
	outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
	outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
	outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
	outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
	outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
	outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
	return outdata;
}
/*
 * Deinterlace sample data that was retrieved at 200MHz samplerate.
 * One 16bit item contains four samples of 4bits each. The bits of
 * multiple samples are interleaved: sample bit N of sample 'idx'
 * (0..3) resides at input bit position 4*N+idx.
 */
static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx)
{
	uint16_t outdata;

	indata >>= idx;
	outdata = 0;
	outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
	outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
	outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
	outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
	return outdata;
}
1385 static void sigma_decode_dram_cluster(struct dev_context *devc,
1386 struct sigma_dram_cluster *dram_cluster,
1387 size_t events_in_cluster, gboolean triggered)
1389 struct sigma_state *ss;
1390 uint16_t tsdiff, ts, sample, item16;
1393 if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
1397 * If this cluster is not adjacent to the previously received
1398 * cluster, then send the appropriate number of samples with the
1399 * previous values to the sigrok session. This "decodes RLE".
1401 * These samples cannot match the trigger since they just repeat
1402 * the previously submitted data pattern. (This assumption holds
1403 * for simple level and edge triggers. It would not for timed or
1404 * counted conditions, which currently are not supported.)
1407 ts = sigma_dram_cluster_ts(dram_cluster);
1408 tsdiff = ts - ss->lastts;
1411 sample = ss->lastsample;
1412 count = tsdiff * devc->samples_per_event;
1413 (void)check_and_submit_sample(devc, sample, count, FALSE);
1415 ss->lastts = ts + EVENTS_PER_CLUSTER;
1418 * Grab sample data from the current cluster and prepare their
1419 * submission to the session feed. Handle samplerate dependent
1420 * memory layout of sample data. Accumulation of data chunks
1421 * before submission is transparent to this code path, specific
1422 * buffer depth is neither assumed nor required here.
1425 for (i = 0; i < events_in_cluster; i++) {
1426 item16 = sigma_dram_cluster_data(dram_cluster, i);
1427 if (devc->samplerate == SR_MHZ(200)) {
1428 sample = sigma_deinterlace_200mhz_data(item16, 0);
1429 check_and_submit_sample(devc, sample, 1, triggered);
1430 sample = sigma_deinterlace_200mhz_data(item16, 1);
1431 check_and_submit_sample(devc, sample, 1, triggered);
1432 sample = sigma_deinterlace_200mhz_data(item16, 2);
1433 check_and_submit_sample(devc, sample, 1, triggered);
1434 sample = sigma_deinterlace_200mhz_data(item16, 3);
1435 check_and_submit_sample(devc, sample, 1, triggered);
1436 } else if (devc->samplerate == SR_MHZ(100)) {
1437 sample = sigma_deinterlace_100mhz_data(item16, 0);
1438 check_and_submit_sample(devc, sample, 1, triggered);
1439 sample = sigma_deinterlace_100mhz_data(item16, 1);
1440 check_and_submit_sample(devc, sample, 1, triggered);
1443 check_and_submit_sample(devc, sample, 1, triggered);
1446 ss->lastsample = sample;
1450 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
1451 * Each event is 20ns apart, and can contain multiple samples.
1453 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
1454 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
1455 * For 50 MHz and below, events contain one sample for each channel,
1456 * spread 20 ns apart.
1458 static int decode_chunk_ts(struct dev_context *devc,
1459 struct sigma_dram_line *dram_line,
1460 size_t events_in_line, size_t trigger_event)
1462 struct sigma_dram_cluster *dram_cluster;
1463 unsigned int clusters_in_line;
1464 unsigned int events_in_cluster;
1466 uint32_t trigger_cluster;
1468 clusters_in_line = events_in_line;
1469 clusters_in_line += EVENTS_PER_CLUSTER - 1;
1470 clusters_in_line /= EVENTS_PER_CLUSTER;
1471 trigger_cluster = ~0;
1473 /* Check if trigger is in this chunk. */
1474 if (trigger_event < EVENTS_PER_ROW) {
1475 if (devc->samplerate <= SR_MHZ(50)) {
1476 trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
1480 /* Find in which cluster the trigger occurred. */
1481 trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;
1484 /* For each full DRAM cluster. */
1485 for (i = 0; i < clusters_in_line; i++) {
1486 dram_cluster = &dram_line->cluster[i];
1488 /* The last cluster might not be full. */
1489 if ((i == clusters_in_line - 1) &&
1490 (events_in_line % EVENTS_PER_CLUSTER)) {
1491 events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
1493 events_in_cluster = EVENTS_PER_CLUSTER;
1496 sigma_decode_dram_cluster(devc, dram_cluster,
1497 events_in_cluster, i == trigger_cluster);
1503 static int download_capture(struct sr_dev_inst *sdi)
1505 const uint32_t chunks_per_read = 32;
1507 struct dev_context *devc;
1508 struct sigma_dram_line *dram_line;
1509 uint32_t stoppos, triggerpos;
1512 uint32_t dl_lines_total, dl_lines_curr, dl_lines_done;
1513 uint32_t dl_first_line, dl_line;
1514 uint32_t dl_events_in_line, trigger_event;
1515 uint32_t trg_line, trg_event;
1520 sr_info("Downloading sample data.");
1521 devc->state.state = SIGMA_DOWNLOAD;
1524 * Ask the hardware to stop data acquisition. Reception of the
1525 * FORCESTOP request makes the hardware "disable RLE" (store
1526 * clusters to DRAM regardless of whether pin state changes) and
1527 * raise the POSTTRIGGERED flag.
1529 modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
1530 ret = sigma_set_register(devc, WRITE_MODE, modestatus);
1534 ret = sigma_read_register(devc, READ_MODE,
1535 &modestatus, sizeof(modestatus));
1537 sr_err("Could not poll for post-trigger state.");
1540 } while (!(modestatus & RMR_POSTTRIGGERED));
1542 /* Set SDRAM Read Enable. */
1543 ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
1547 /* Get the current position. Check if trigger has fired. */
1548 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
1550 sr_err("Could not query capture positions/state.");
1555 if (modestatus & RMR_TRIGGERED) {
1556 trg_line = triggerpos >> ROW_SHIFT;
1557 trg_event = triggerpos & ROW_MASK;
1561 * Determine how many "DRAM lines" of 1024 bytes each we need to
1562 * retrieve from the Sigma hardware, so that we have a complete
1563 * set of samples. Note that the last line need not contain 64
1564 * clusters, it might be partially filled only.
1566 * When RMR_ROUND is set, the circular buffer in DRAM has wrapped
1567 * around. Since the status of the very next line is uncertain in
1568 * that case, we skip it and start reading from the next line.
1571 dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
1572 if (modestatus & RMR_ROUND) {
1573 dl_first_line = dl_lines_total + 1;
1574 dl_lines_total = ROW_COUNT - 2;
1576 dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
1579 ret = alloc_submit_buffer(sdi);
1582 ret = setup_submit_limit(devc);
1586 while (dl_lines_total > dl_lines_done) {
1587 /* We can download only up-to 32 DRAM lines in one go! */
1588 dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
1590 dl_line = dl_first_line + dl_lines_done;
1591 dl_line %= ROW_COUNT;
1592 ret = sigma_read_dram(devc, dl_line, dl_lines_curr,
1593 (uint8_t *)dram_line);
1597 /* This is the first DRAM line, so find the initial timestamp. */
1598 if (dl_lines_done == 0) {
1599 devc->state.lastts =
1600 sigma_dram_cluster_ts(&dram_line[0].cluster[0]);
1601 devc->state.lastsample = 0;
1604 for (i = 0; i < dl_lines_curr; i++) {
1605 /* The last "DRAM line" need not span its full length. */
1606 dl_events_in_line = EVENTS_PER_ROW;
1607 if (dl_lines_done + i == dl_lines_total - 1)
1608 dl_events_in_line = stoppos & ROW_MASK;
1610 /* Test if the trigger happened on this line. */
1612 if (dl_lines_done + i == trg_line)
1613 trigger_event = trg_event;
1615 decode_chunk_ts(devc, dram_line + i,
1616 dl_events_in_line, trigger_event);
1619 dl_lines_done += dl_lines_curr;
1621 flush_submit_buffer(devc);
1622 free_submit_buffer(devc);
1625 std_session_send_df_end(sdi);
1627 devc->state.state = SIGMA_IDLE;
1628 sr_dev_acquisition_stop(sdi);
1634 * Periodically check the Sigma status when in CAPTURE mode. This routine
1635 * checks whether the configured sample count or sample time have passed,
1636 * and will stop acquisition and download the acquired samples.
1638 static int sigma_capture_mode(struct sr_dev_inst *sdi)
1640 struct dev_context *devc;
1643 if (sr_sw_limits_check(&devc->acq_limits))
1644 return download_capture(sdi);
1649 SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
1651 struct sr_dev_inst *sdi;
1652 struct dev_context *devc;
1660 if (devc->state.state == SIGMA_IDLE)
1664 * When the application has requested to stop the acquisition,
1665 * then immediately start downloading sample data. Otherwise
1666 * keep checking configured limits which will terminate the
1667 * acquisition and initiate download.
1669 if (devc->state.state == SIGMA_STOPPING)
1670 return download_capture(sdi);
1671 if (devc->state.state == SIGMA_CAPTURE)
1672 return sigma_capture_mode(sdi);
/*
 * Build a LUT entry used by the trigger functions. For each of the
 * four "quad channels" (groups of four input channels), compute the
 * 16bit truth table whose bit 'j' is set when the 4bit input pattern
 * 'j' satisfies the value/mask condition for that quad.
 */
static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
{
	int i, j, k, bit;

	/* For each quad channel. */
	for (i = 0; i < 4; i++) {
		/* Start from "all patterns match". */
		entry[i] = 0xffff;

		/* For each bit in LUT. */
		for (j = 0; j < 16; j++) {

			/* For each channel in quad. */
			for (k = 0; k < 4; k++) {
				bit = 1 << (i * 4 + k);

				/* Clear bit when pattern contradicts value under mask. */
				if ((mask & bit) && ((!(value & bit)) !=
				    (!(j & (1 << k)))))
					entry[i] &= ~(1 << j);
			}
		}
	}
}
1702 /* Add a logical function to LUT mask. */
1703 static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
1704 int index, int neg, uint16_t *mask)
1707 int x[2][2], tmp, a, b, aset, bset, rset;
1709 memset(x, 0, sizeof(x));
1711 /* Trigger detect condition. */
1741 case OP_NOTRISEFALL:
1747 /* Transpose if neg is set. */
1749 for (i = 0; i < 2; i++) {
1750 for (j = 0; j < 2; j++) {
1752 x[i][j] = x[1 - i][1 - j];
1753 x[1 - i][1 - j] = tmp;
1758 /* Update mask with function. */
1759 for (i = 0; i < 16; i++) {
1760 a = (i >> (2 * index + 0)) & 1;
1761 b = (i >> (2 * index + 1)) & 1;
1763 aset = (*mask >> i) & 1;
1767 if (func == FUNC_AND || func == FUNC_NAND)
1769 else if (func == FUNC_OR || func == FUNC_NOR)
1771 else if (func == FUNC_XOR || func == FUNC_NXOR)
1774 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
1785 * Build trigger LUTs used by 50 MHz and lower sample rates for supporting
1786 * simple pin change and state triggers. Only two transitions (rise/fall) can be
1787 * set at any time, but a full mask and value can be set (0/1).
1789 SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
1790 struct triggerlut *lut)
1795 memset(lut, 0, sizeof(*lut));
1796 memset(&masks, 0, sizeof(masks));
1798 /* Constant for simple triggers. */
1801 /* Value/mask trigger support. */
1802 build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
1805 /* Rise/fall trigger support. */
1806 for (i = 0, j = 0; i < 16; i++) {
1807 if (devc->trigger.risingmask & (1 << i) ||
1808 devc->trigger.fallingmask & (1 << i))
1809 masks[j++] = 1 << i;
1812 build_lut_entry(masks[0], masks[0], lut->m0d);
1813 build_lut_entry(masks[1], masks[1], lut->m1d);
1815 /* Add glue logic */
1816 if (masks[0] || masks[1]) {
1817 /* Transition trigger. */
1818 if (masks[0] & devc->trigger.risingmask)
1819 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
1820 if (masks[0] & devc->trigger.fallingmask)
1821 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
1822 if (masks[1] & devc->trigger.risingmask)
1823 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
1824 if (masks[1] & devc->trigger.fallingmask)
1825 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
1827 /* Only value/mask trigger. */
1831 /* Triggertype: event. */
1832 lut->params.selres = 3;