2 * This file is part of the libsigrok project.
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 * ASIX SIGMA/SIGMA2 logic analyzer driver
31 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
32 * (by means of separate firmware images). As well as 50MHz divided by
33 * an integer divider in the 1..256 range (by the "typical" firmware).
34 * Which translates to a strict lower boundary of around 195kHz.
36 * This driver "suggests" a subset of the available rates by listing a
37 * few discrete values, while setter routines accept any user specified
38 * rate that is supported by the hardware.
/*
 * Samplerates which the driver suggests to applications. This is a
 * subset for display purposes; setter routines accept any rate the
 * hardware supports (see file header comment).
 */
static const uint64_t samplerates[] = {
	/* 50MHz and integer divider. 1/2/5 steps (where possible). */
	SR_KHZ(200), SR_KHZ(500),
	SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
	SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
	/* 100MHz/200MHz, fixed rates in special firmware. */
	SR_MHZ(100), SR_MHZ(200),
/* Return the suggested samplerates as a GVariant for the config API. */
SR_PRIV GVariant *sigma_get_samplerates_list(void)
	return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates));
/* Netlist file names, indexed by enum sigma_firmware_idx. */
static const char *firmware_files[] = {
	[SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
	[SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
	[SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
	[SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
	[SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */

/* Upper bound for on-disk firmware image size (sanity check on load). */
#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
/*
 * Open the FTDI device which is associated with the sigrok device
 * instance. No-op when already open. Validates VID/PID/serial before
 * initializing the libftdi context and opening by description/serial.
 * Sets devc->ftdi.is_open on success.
 * NOTE(review): this extract elides several lines (devc/vid/pid setup,
 * returns, braces); code kept verbatim, comments only.
 */
static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
	struct dev_context *devc;
	/* Idempotence: nothing to do when already open. */
	if (devc->ftdi.is_open)
	serno = sdi->serial_num;
	/* Require complete, non-empty identification before opening. */
	if (!vid || !pid || !serno || !*serno)
	ret = ftdi_init(&devc->ftdi.ctx);
		sr_err("Cannot initialize FTDI context (%d): %s.",
			ret, ftdi_get_error_string(&devc->ftdi.ctx));
	ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
		vid, pid, NULL, serno, 0);
		sr_err("Cannot open device (%d): %s.",
			ret, ftdi_get_error_string(&devc->ftdi.ctx));
	/* Track open state for later balanced close. */
	devc->ftdi.is_open = TRUE;
/*
 * Close the FTDI device and release the libftdi context. Clears the
 * is_open/must_close tracking flags unconditionally. Maps the libftdi
 * return code to SR_OK/SR_ERR_IO.
 */
static int sigma_ftdi_close(struct dev_context *devc)
	ret = ftdi_usb_close(&devc->ftdi.ctx);
	devc->ftdi.is_open = FALSE;
	devc->ftdi.must_close = FALSE;
	ftdi_deinit(&devc->ftdi.ctx);

	return ret == 0 ? SR_OK : SR_ERR_IO;
/*
 * Open the device if it is not open yet. When this call did the open,
 * flag must_close so a later sigma_check_close() balances it; an
 * already-open device is left alone.
 */
SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
	struct dev_context *devc;
	/* Already open? Then this check-open call owes no close. */
	if (devc->ftdi.is_open)
	ret = sigma_ftdi_open(sdi);
	/* This call opened the device, so it must close it again. */
	devc->ftdi.must_close = TRUE;
/*
 * Close the device, but only when the matching sigma_check_open() was
 * the one that opened it (must_close flag set). Counterpart of
 * sigma_check_open().
 */
SR_PRIV int sigma_check_close(struct dev_context *devc)
	if (devc->ftdi.must_close) {
		ret = sigma_ftdi_close(devc);
		devc->ftdi.must_close = FALSE;
/*
 * Unconditionally open the device and keep it open (must_close is
 * cleared so check_close() won't close it). Counterpart of
 * sigma_force_close().
 */
SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
	struct dev_context *devc;
	ret = sigma_ftdi_open(sdi);
	/* Keep the device open across subsequent check-open/close pairs. */
	devc->ftdi.must_close = FALSE;
/* Unconditionally close the device. Counterpart of sigma_force_open(). */
SR_PRIV int sigma_force_close(struct dev_context *devc)
	return sigma_ftdi_close(devc);
 * BEWARE! Error propagation is important, as are kinds of return values.
 * - Raw USB transport communicates the number of sent or received bytes,
 *   or negative error codes in the external library's(!) range of codes.
 * - Internal routines at the "sigrok driver level" communicate success
 *   or failure in terms of SR_OK et al error codes.
 * - Main loop style receive callbacks communicate booleans which arrange
 *   for repeated calls to drive progress during acquisition.
 * Careful consideration by maintainers is essential, because all of the
 * above kinds of values are assignment compatible from the compiler's
 * point of view. Implementation errors will go unnoticed at build time.
/*
 * Raw USB read. Returns libftdi's result: the number of bytes read, or
 * a negative library error code (logged here).
 */
static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
	ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
		sr_err("USB data read failed: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
/*
 * Raw USB write. Returns libftdi's result (bytes written or negative
 * error code). Logs library errors as well as short writes.
 */
static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
	ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
		sr_err("USB data write failed: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
	} else if ((size_t)ret != size) {
		/* Partial write counts as failure for this driver's purposes. */
		sr_err("USB data write length mismatch.");
/*
 * USB read wrapper with sigrok error semantics: maps byte counts and
 * library error codes to SR_OK et al. A short read is an error.
 */
static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
	ret = sigma_read_raw(devc, buf, size);
	if (ret < 0 || (size_t)ret != size)
/*
 * USB write wrapper with sigrok error semantics: maps byte counts and
 * library error codes to SR_OK et al. A short write is an error.
 */
static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
	ret = sigma_write_raw(devc, buf, size);
	if (ret < 0 || (size_t)ret != size)
/*
 * Implementor's note: The local write buffer's size shall suffice for
 * any known FPGA register transaction that is involved in the supported
 * feature set of this sigrok device driver. If the length check trips,
 * that's a programmer's error and needs adjustment in the complete call
 * stack of the respective code path.
 */
#define SIGMA_MAX_REG_DEPTH 32

/*
 * Implementor's note: The FPGA command set supports register access
 * with automatic address adjustment. This operation is documented to
 * wrap within a 16-address range, it cannot cross boundaries where the
 * register address' nibble overflows. An internal helper assumes that
 * callers remain within this auto-adjustment range, and thus multi
 * register access requests can never exceed that count.
 */
#define SIGMA_MAX_REG_COUNT 16
/*
 * Write a sequence of bytes to an FPGA register. The wire format sends
 * the register address as two nibbles, then each data byte as a
 * low/high nibble pair. Rejects lengths beyond the local buffer
 * (programmer's error, see SIGMA_MAX_REG_DEPTH note above).
 */
SR_PRIV int sigma_write_register(struct dev_context *devc,
	uint8_t reg, uint8_t *data, size_t len)
	uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr;

	if (len > SIGMA_MAX_REG_DEPTH) {
		sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
	/* Register address, communicated as two nibbles. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
	for (idx = 0; idx < len; idx++) {
		/* Each data byte also goes out as a low/high nibble pair. */
		write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx]));
		write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx]));

	return sigma_write_sr(devc, buf, wrptr - buf);
/* Convenience wrapper: write a single byte to an FPGA register. */
SR_PRIV int sigma_set_register(struct dev_context *devc,
	uint8_t reg, uint8_t value)
	return sigma_write_register(devc, reg, &value, sizeof(value));
/*
 * Read 'len' bytes from an FPGA register: send the address nibbles and
 * a read request, then fetch the response bytes over USB.
 */
static int sigma_read_register(struct dev_context *devc,
	uint8_t reg, uint8_t *data, size_t len)
	uint8_t buf[3], *wrptr;

	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
	write_u8_inc(&wrptr, REG_READ_ADDR);
	ret = sigma_write_sr(devc, buf, wrptr - buf);

	return sigma_read_sr(devc, data, len);
/* Convenience wrapper: read a single byte from an FPGA register. */
static int sigma_get_register(struct dev_context *devc,
	uint8_t reg, uint8_t *data)
	return sigma_read_register(devc, reg, data, sizeof(*data));
/*
 * Read multiple consecutive registers, using the FPGA's auto-increment
 * read command. Callers must stay within the 16-address auto-adjust
 * window (see SIGMA_MAX_REG_COUNT note above).
 */
static int sigma_get_registers(struct dev_context *devc,
	uint8_t reg, uint8_t *data, size_t count)
	uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr;

	if (count > SIGMA_MAX_REG_COUNT) {
		sr_err("Short command buffer for %zu reg reads at %u.", count, reg);
	/* Start address, then one auto-incrementing read per register. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
	for (idx = 0; idx < count; idx++)
		write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC);
	ret = sigma_write_sr(devc, buf, wrptr - buf);

	return sigma_read_sr(devc, data, count);
/*
 * Read the hardware's capture position registers: stop position,
 * trigger position (both 24bit LE counters), and mode flags. Output
 * pointers are optional. Positions get normalized, see comments below.
 */
static int sigma_read_pos(struct dev_context *devc,
	uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
	const uint8_t *rdptr;
	/*
	 * Read 7 registers starting at trigger position LSB.
	 * Which yields two 24bit counter values, and mode flags.
	 */
	ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW,
		result, sizeof(result));
	v32 = read_u24le_inc(&rdptr);
	v32 = read_u24le_inc(&rdptr);
	v8 = read_u8_inc(&rdptr);
	/*
	 * These positions consist of "the memory row" in the MSB fields,
	 * and "an event index" within the row in the LSB fields. Part
	 * of the memory row's content is sample data, another part is
	 * The retrieved register values point to after the captured
	 * position. So they need to get decremented, and adjusted to
	 * cater for the timestamps when the decrement carries over to
	 * a different memory row.
	 */
	if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
		*stoppos -= CLUSTERS_PER_ROW;
	if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
		*triggerpos -= CLUSTERS_PER_ROW;
/*
 * Read 'numchunks' DRAM rows starting at row 'startchunk' into 'data'
 * (ROW_LENGTH_BYTES per row). Sets the start row, then issues an
 * interleaved fetch/transfer command sequence before reading back the
 * whole payload in one go.
 */
static int sigma_read_dram(struct dev_context *devc,
	size_t startchunk, size_t numchunks, uint8_t *data)
	uint8_t buf[128], *wrptr, regval;

	/* 2 setup bytes plus up to 3 command bytes per chunk must fit. */
	if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
		sr_err("Short write buffer for %zu DRAM row reads.", numchunks);

	/* Communicate DRAM start address (memory row, aka samples line). */
	write_u16be_inc(&wrptr, startchunk);
	ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);

	/*
	 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
	 * then transfer via USB. Interleave the FPGA's DRAM access and
	 * USB transfer, use alternating buffers (0/1) in the process.
	 */
	write_u8_inc(&wrptr, REG_DRAM_BLOCK);
	write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	for (chunk = 0; chunk < numchunks; chunk++) {
		is_last = chunk == numchunks - 1;
		/* Prefetch the *other* buffer while draining this one. */
		regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
		write_u8_inc(&wrptr, regval);
		regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
		write_u8_inc(&wrptr, regval);
		write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	ret = sigma_write_sr(devc, buf, wrptr - buf);

	return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
/* Upload trigger look-up tables to Sigma. */
SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
	struct triggerlut *lut)
	uint8_t m3d, m2d, m1d, m0d;
	uint8_t buf[6], *wrptr;
	uint16_t lutreg, selreg;

	/*
	 * Translate the LUT part of the trigger configuration from the
	 * application's perspective to the hardware register's bitfield
	 * layout. Send the LUT to the device. This configures the logic
	 * which combines pin levels or edges.
	 */
	for (lut_addr = 0; lut_addr < 16; lut_addr++) {
		/*
		 * For each of the 16 LUT addresses, gather one bit from
		 * every mux table into per-mux nibbles (m3d..m0d).
		 */
		/* M2D3 M2D2 M2D1 M2D0 */
		if (lut->m2d[3] & bit)
		if (lut->m2d[2] & bit)
		if (lut->m2d[1] & bit)
		if (lut->m2d[0] & bit)
		/* M1D3 M1D2 M1D1 M1D0 */
		if (lut->m1d[3] & bit)
		if (lut->m1d[2] & bit)
		if (lut->m1d[1] & bit)
		if (lut->m1d[0] & bit)
		/* M0D3 M0D2 M0D1 M0D0 */
		if (lut->m0d[3] & bit)
		if (lut->m0d[2] & bit)
		if (lut->m0d[1] & bit)
		if (lut->m0d[0] & bit)
		/*
		 * Send 16bits with M3D/M2D and M1D/M0D bit masks to the
		 * TriggerSelect register, then strobe the LUT write by
		 * passing A3-A0 to TriggerSelect2. Hold RESET during LUT
		 */
		/* Pack the four nibbles MSB first into one 16bit word. */
		lutreg <<= 4; lutreg |= m3d;
		lutreg <<= 4; lutreg |= m2d;
		lutreg <<= 4; lutreg |= m1d;
		lutreg <<= 4; lutreg |= m0d;
		write_u16be_inc(&wrptr, lutreg);
		ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT,
		trgsel2 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE |
			(lut_addr & TRGSEL2_LUT_ADDR_MASK);
		ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, trgsel2);

	/*
	 * Send the parameters. This covers counters and durations.
	 */
	selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT;
	selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT;
	selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT;
	selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT;
	selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT;
	selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT;
	write_u16be_inc(&wrptr, selreg);
	write_u16be_inc(&wrptr, lut->params.cmpb);
	write_u16be_inc(&wrptr, lut->params.cmpa);
	ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
/*
 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
 * uses FTDI bitbang mode for netlist download in slave serial mode.
 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
 * compatible bitbang mode? For maximum code re-use and reduced libftdi
 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
 *
 * 750kbps rate (four times the speed of sigmalogan) works well for
 * netlist download. All pins except INIT_B are output pins during
 * configuration download.
 *
 * Some pins are inverted as a byproduct of level shifting circuitry.
 * That's why high CCLK level (from the cable's point of view) is idle
 * from the FPGA's perspective.
 *
 * The vendor's literature discusses a "suicide sequence" which ends
 * regular FPGA execution and should be sent before entering bitbang
 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
 */
#define BB_PIN_CCLK BIT(0) /* D0, CCLK */
#define BB_PIN_PROG BIT(1) /* D1, PROG */
#define BB_PIN_D2 BIT(2) /* D2, (part of) SUICIDE */
#define BB_PIN_D3 BIT(3) /* D3, (part of) SUICIDE */
#define BB_PIN_D4 BIT(4) /* D4, (part of) SUICIDE (unused?) */
#define BB_PIN_INIT BIT(5) /* D5, INIT, input pin */
#define BB_PIN_DIN BIT(6) /* D6, DIN */
#define BB_PIN_D7 BIT(7) /* D7, (part of) SUICIDE */

#define BB_BITRATE (750 * 1000)
#define BB_PINMASK (0xff & ~BB_PIN_INIT)

/*
 * Initiate slave serial mode for configuration download. Which is done
 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
 * initiating the configuration download.
 *
 * Run a "suicide sequence" first to terminate the regular FPGA operation
 * before reconfiguration. The FTDI cable is single channel, and shares
 * pins which are used for data communication in FIFO mode with pins that
 * are used for FPGA configuration in bitbang mode. Hardware defaults for
 * unconfigured hardware, and runtime conditions after FPGA configuration
 * need to cooperate such that re-configuration of the FPGA can start.
 */
static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
	/* Toggle D2/D3 with D7 held high, ending regular FPGA operation. */
	const uint8_t suicide[] = {
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
	/* Pulse PROG with CCLK held at (inverted) idle level. */
	const uint8_t init_array[] = {
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK | BB_PIN_PROG,

	/* Section 2. part 1), do the FPGA suicide. */
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));

	/* Section 2. part 2), pulse PROG. */
	ret = sigma_write_sr(devc, init_array, sizeof(init_array));
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);

	/*
	 * Wait until the FPGA asserts INIT_B. Check in a maximum number
	 * of bursts with a given delay between them. Read as many pin
	 * capture results as the combination of FTDI chip and FTDI lib
	 * may provide. Cope with absence of pin capture data in a cycle.
	 * This approach shall result in fast response in case of success,
	 * low cost of execution during wait, reliable error handling in
	 * the transport layer, and robust response to failure or absence
	 * of result data (hardware inactivity after stimulus).
	 */
			ret = sigma_read_raw(devc, &data, sizeof(data));
			if (ret == sizeof(data) && (data & BB_PIN_INIT))
		} while (ret == sizeof(data));

	return SR_ERR_TIMEOUT;
/*
 * This is belt and braces. Re-run the bitbang initiation sequence a few
 * times should first attempts fail. Failure is rare but can happen (was
 * observed during driver development).
 */
static int sigma_fpga_init_bitbang(struct dev_context *devc)
		ret = sigma_fpga_init_bitbang_once(devc);
		/* Only a timeout warrants a retry; other errors are final. */
		if (ret != SR_ERR_TIMEOUT)
/*
 * Configure the FPGA for logic-analyzer mode. Sends one combined
 * command sequence (ID read, two scratch write/read-backs, SDRAM init),
 * then verifies the three expected response bytes.
 */
static int sigma_fpga_init_la(struct dev_context *devc)
	uint8_t buf[20], *wrptr;
	uint8_t data_55, data_aa, mode;
	const uint8_t *rdptr;

	/* Read ID register. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0x55 to scratch register, read back. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0xaa to scratch register, read back. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Initiate SDRAM initialization in mode register. */
	mode = WMR_SDRAMINIT;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode));

	/*
	 * Send the command sequence which contains 3 READ requests.
	 * Expect to see the corresponding 3 response bytes.
	 */
	ret = sigma_write_sr(devc, buf, wrptr - buf);
		sr_err("Could not request LA start response.");
	ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
		sr_err("Could not receive LA start response.");
	/* 0xa6 is the expected device ID byte. */
	if (read_u8_inc(&rdptr) != 0xa6) {
		sr_err("Unexpected ID response.");
	if (read_u8_inc(&rdptr) != data_55) {
		sr_err("Unexpected scratch read-back (55).");
	if (read_u8_inc(&rdptr) != data_aa) {
		sr_err("Unexpected scratch read-back (aa).");
/*
 * Read the firmware from a file and transform it into a series of bitbang
 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
 * by the caller of this function.
 */
static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
	uint8_t **bb_cmd, size_t *bb_cmd_size)
	uint8_t *bb_stream, *bbs, byte, mask, v;

	/* Retrieve the on-disk firmware file content. */
	firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
		&file_size, SIGMA_FIRMWARE_SIZE_LIMIT);

	/* Unscramble the file content (XOR with "random" sequence). */
		imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);

	/*
	 * Generate a sequence of bitbang samples. With two samples per
	 * FPGA configuration bit, providing the level for the DIN signal
	 * as well as two edges for CCLK. See Xilinx UG332 for details
	 * ("slave serial" mode).
	 *
	 * Note that CCLK is inverted in hardware. That's why the
	 * respective bit is first set and then cleared in the bitbang
	 * sample sets. So that the DIN level will be stable when the
	 * data gets sampled at the rising CCLK edge, and the signals'
	 * setup time constraint will be met.
	 *
	 * The caller will put the FPGA into download mode, will send
	 * the bitbang samples, and release the allocated memory.
	 */
	bb_size = file_size * 8 * 2;
	bb_stream = g_try_malloc(bb_size);
		sr_err("Memory allocation failed during firmware upload.");
		return SR_ERR_MALLOC;
			v = (byte & mask) ? BB_PIN_DIN : 0;
			/* CCLK set first, then cleared (inverted in hardware). */
			*bbs++ = v | BB_PIN_CCLK;

	/* The transformation completed successfully, return the result. */
	*bb_cmd_size = bb_size;
/*
 * Download the netlist for the given firmware selection to the FPGA.
 * Skips the upload when the requested image is already active. Puts the
 * cable into bitbang mode, runs the FPGA config initiation, streams the
 * bitbang-converted image, then returns to FIFO mode and checks the
 * logic-analyzer init handshake. Updates devc->state/firmware_idx.
 */
static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
	enum sigma_firmware_idx firmware_idx)
	const char *firmware;

	/* Check for valid firmware file selection. */
	if (firmware_idx >= ARRAY_SIZE(firmware_files))
	firmware = firmware_files[firmware_idx];
	if (!firmware || !*firmware)

	/* Avoid downloading the same firmware multiple times. */
	if (devc->firmware_idx == firmware_idx) {
		sr_info("Not uploading firmware file '%s' again.", firmware);

	devc->state = SIGMA_CONFIG;

	/* Set the cable to bitbang mode. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
		sr_err("Could not setup cable mode for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
	ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
		sr_err("Could not setup bitrate for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));

	/* Initiate FPGA configuration mode. */
	ret = sigma_fpga_init_bitbang(devc);
		sr_err("Could not initiate firmware upload to hardware");

	/* Prepare wire format of the firmware image. */
	ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
		sr_err("Could not prepare file %s for upload.", firmware);

	/* Write the FPGA netlist to the cable. */
	sr_info("Uploading firmware file '%s'.", firmware);
	ret = sigma_write_sr(devc, buf, buf_size);
		sr_err("Could not upload firmware file '%s'.", firmware);

	/* Leave bitbang mode and discard pending input data. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
		sr_err("Could not setup cable mode after upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);
	/* Drain any leftover capture bytes from the bitbang phase. */
	while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)

	/* Initialize the FPGA for logic-analyzer mode. */
	ret = sigma_fpga_init_la(devc);
		sr_err("Hardware response after firmware upload failed.");

	/* Keep track of successful firmware download completion. */
	devc->state = SIGMA_IDLE;
	devc->firmware_idx = firmware_idx;
	sr_info("Firmware uploaded.");
/*
 * The driver supports user specified time or sample count limits. The
 * device's hardware supports neither, and hardware compression prevents
 * reliable detection of "fill levels" (currently reached sample counts)
 * from register values during acquisition. That's why the driver needs
 * to apply some heuristics:
 *
 * - The (optional) sample count limit and the (normalized) samplerate
 *   get mapped to an estimated duration for these samples' acquisition.
 * - The (optional) time limit gets checked as well. The lesser of the
 *   two limits will terminate the data acquisition phase. The exact
 *   sample count limit gets enforced in session feed submission paths.
 * - Some slack needs to be given to account for hardware pipelines as
 *   well as late storage of last chunks after compression thresholds
 *   are tripped. The resulting data set will span at least the caller
 *   specified period of time, which shall be perfectly acceptable.
 *
 * With RLE compression active, up to 64K sample periods can pass before
 * a cluster accumulates. Which translates to 327ms at 200kHz. Add two
 * times that period for good measure, one is not enough to flush the
 * hardware pipeline (observation from an earlier experiment).
 */
SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
	uint64_t user_count, user_msecs;
	uint64_t worst_cluster_time_ms;
	uint64_t count_msecs, acquire_msecs;

	sr_sw_limits_init(&devc->limit.acquire);

	/* Get sample count limit, convert to msecs. */
	ret = sr_sw_limits_config_get(&devc->limit.config,
		SR_CONF_LIMIT_SAMPLES, &data);
	user_count = g_variant_get_uint64(data);
	g_variant_unref(data);
		/* +1 to round up, a too short timeout must not truncate data. */
		count_msecs = 1000 * user_count / devc->clock.samplerate + 1;

	/* Get time limit, which is in msecs. */
	ret = sr_sw_limits_config_get(&devc->limit.config,
		SR_CONF_LIMIT_MSEC, &data);
	user_msecs = g_variant_get_uint64(data);
	g_variant_unref(data);

	/* Get the lesser of them, with both being optional. */
	acquire_msecs = ~0ull;
	if (user_count && count_msecs < acquire_msecs)
		acquire_msecs = count_msecs;
	if (user_msecs && user_msecs < acquire_msecs)
		acquire_msecs = user_msecs;
	/* Neither limit was specified: no timeout to arrange. */
	if (acquire_msecs == ~0ull)

	/* Add some slack, and use that timeout for acquisition. */
	worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate;
	acquire_msecs += 2 * worst_cluster_time_ms;
	data = g_variant_new_uint64(acquire_msecs);
	ret = sr_sw_limits_config_set(&devc->limit.acquire,
		SR_CONF_LIMIT_MSEC, data);
	g_variant_unref(data);

	sr_sw_limits_acquisition_start(&devc->limit.acquire);
/*
 * Check whether a caller specified samplerate matches the device's
 * hardware constraints (can be used for acquisition). Optionally yield
 * a value that approximates the original spec.
 *
 * This routine assumes that input specs are in the 200kHz to 200MHz
 * range of supported rates, and callers typically want to normalize a
 * given value to the hardware capabilities. Values in the 50MHz range
 * get rounded up by default, to avoid a more expensive check for the
 * closest match, while higher sampling rate is always desirable during
 * measurement. Input specs which exactly match hardware capabilities
 * remain unaffected. Because 100/200MHz rates also limit the number of
 * available channels, they are not suggested by this routine, instead
 * callers need to pick them consciously.
 */
SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
	/* Accept exact matches for 100/200MHz. */
	if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
		*have_rate = want_rate;

	/* Accept 200kHz to 50MHz range, and map to near value. */
	if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
		/* Integer division rounds the divider down, the rate up. */
		div = SR_MHZ(50) / want_rate;
		rate = SR_MHZ(50) / div;
1023 /* Gets called at probe time. Can seed software settings from hardware state. */
1024 SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi)
1026 struct dev_context *devc;
1028 uint8_t regaddr, regval;
1034 /* Seed configuration values from defaults. */
1035 devc->firmware_idx = SIGMA_FW_NONE;
1036 devc->clock.samplerate = samplerates[0];
1039 * Ideally the device driver could retrieve recently stored
1040 * details from hardware registers, thus re-use user specified
1041 * configuration values across sigrok sessions. Which could
1042 * avoid repeated expensive though unnecessary firmware uploads,
1043 * improve performance and usability. Unfortunately it appears
1044 * that the registers range which is documented as available for
1045 * application use keeps providing 0xff data content. At least
1046 * with the netlist version which ships with sigrok. The same
1047 * was observed with unused registers in the first page.
1051 /* This is for research, currently does not work yet. */
1052 ret = sigma_check_open(sdi);
1055 ret = sigma_set_register(devc, regaddr, 'F');
1056 ret = sigma_get_register(devc, regaddr, ®val);
1057 sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret);
1058 ret = sigma_check_close(devc);
/* Gets called after successful (volatile) hardware configuration. */
SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi)
	/* TODO See above, registers seem to not hold written data. */
/*
 * Apply the samplerate stored in devc->clock.samplerate to the device:
 * normalize the rate, upload the matching firmware image (50/100/200MHz
 * variants), and derive the channel count and samples-per-event layout
 * detail that follow from the chosen rate.
 */
SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
	struct dev_context *devc;
	struct drv_context *drvc;
	uint64_t samplerate;
	size_t num_channels;

	drvc = sdi->driver->context;

	/* Accept any caller specified rate which the hardware supports. */
	ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate);

	/*
	 * Depending on the samplerates of 200/100/50- MHz, specific
	 * firmware is required and higher rates might limit the set
	 * of available channels.
	 */
	num_channels = devc->interp.num_channels;
	if (samplerate <= SR_MHZ(50)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
	} else if (samplerate == SR_MHZ(100)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
	} else if (samplerate == SR_MHZ(200)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);

	/*
	 * The samplerate affects the number of available logic channels
	 * as well as a sample memory layout detail (the number of samples
	 * which the device will communicate within an "event").
	 */
	devc->interp.num_channels = num_channels;
	/* 16bit events carry 16/num_channels samples each. */
	devc->interp.samples_per_event = 16 / devc->interp.num_channels;

	/*
	 * Store the firmware type and most recently configured samplerate
	 * in hardware, such that subsequent sessions can start from there.
	 * This is a "best effort" approach. Failure is non-fatal.
	 */
	(void)sigma_store_hw_config(sdi);
/*
 * Arrange for a session feed submit buffer. A queue where a number of
 * samples gets accumulated to reduce the number of send calls. Which
 * also enforces an optional sample count limit for data acquisition.
 *
 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
 * driver provides a fixed channel layout regardless of samplerate).
 */
#define CHUNK_SIZE (4 * 1024 * 1024)

/* Accumulator for SR_DF_LOGIC session feed packets. */
struct submit_buffer {
	size_t max_samples, curr_samples; /* Capacity and fill level. */
	uint8_t *sample_data; /* Backing storage, CHUNK_SIZE bytes. */
	uint8_t *write_pointer; /* Next write position in sample_data. */
	struct sr_dev_inst *sdi;
	struct sr_datafeed_packet packet; /* Reusable packet ... */
	struct sr_datafeed_logic logic; /* ... and its logic payload. */
/*
 * Allocate and initialize the session feed submit buffer, including
 * the pre-filled datafeed packet/logic structs which get reused for
 * every submission.
 */
static int alloc_submit_buffer(struct sr_dev_inst *sdi)
	struct dev_context *devc;
	struct submit_buffer *buffer;

	buffer = g_malloc0(sizeof(*buffer));
	devc->buffer = buffer;
	/* Fixed 16bit sample layout regardless of samplerate. */
	buffer->unit_size = sizeof(uint16_t);
	/* Round the byte capacity down to a whole number of samples. */
	size /= buffer->unit_size;
	buffer->max_samples = size;
	size *= buffer->unit_size;
	buffer->sample_data = g_try_malloc0(size);
	if (!buffer->sample_data)
		return SR_ERR_MALLOC;
	buffer->write_pointer = buffer->sample_data;
	sr_sw_limits_init(&devc->limit.submit);

	/* Pre-fill the reusable packet and payload descriptors. */
	memset(&buffer->logic, 0, sizeof(buffer->logic));
	buffer->logic.unitsize = buffer->unit_size;
	buffer->logic.data = buffer->sample_data;
	memset(&buffer->packet, 0, sizeof(buffer->packet));
	buffer->packet.type = SR_DF_LOGIC;
	buffer->packet.payload = &buffer->logic;
/*
 * Copy the user specified sample count limit from the config limits
 * into the submit path's limit tracker, and start its accounting.
 */
static int setup_submit_limit(struct dev_context *devc)
	struct sr_sw_limits *limits;

	limits = &devc->limit.submit;

	ret = sr_sw_limits_config_get(&devc->limit.config,
		SR_CONF_LIMIT_SAMPLES, &data);
	total = g_variant_get_uint64(data);
	g_variant_unref(data);

	sr_sw_limits_init(limits);
	data = g_variant_new_uint64(total);
	ret = sr_sw_limits_config_set(limits,
		SR_CONF_LIMIT_SAMPLES, data);
	g_variant_unref(data);

	sr_sw_limits_acquisition_start(limits);
/* Release the submit buffer and its sample storage. Idempotent-ish:
 * clears devc->buffer before freeing members. */
static void free_submit_buffer(struct dev_context *devc)
	struct submit_buffer *buffer;

	buffer = devc->buffer;
	devc->buffer = NULL;
	g_free(buffer->sample_data);
/*
 * Send all currently queued samples to the session feed, then rewind
 * the queue. A no-op when the queue is empty.
 */
static int flush_submit_buffer(struct dev_context *devc)
	struct submit_buffer *buffer;

	buffer = devc->buffer;

	/* Is queued sample data available? */
	if (!buffer->curr_samples)

	/* Submit to the session feed. */
	buffer->logic.length = buffer->curr_samples * buffer->unit_size;
	ret = sr_session_send(buffer->sdi, &buffer->packet);

	/* Rewind queue position. */
	buffer->curr_samples = 0;
	buffer->write_pointer = buffer->sample_data;
/*
 * Queue 'count' repetitions of a 16bit sample for session feed
 * submission. Flushes when the local buffer fills, and stops early
 * once the user specified sample count limit is reached.
 */
static int addto_submit_buffer(struct dev_context *devc,
	uint16_t sample, size_t count)
	struct submit_buffer *buffer;
	struct sr_sw_limits *limits;

	buffer = devc->buffer;
	limits = &devc->limit.submit;
	/* Limit already reached? Then drop the samples. */
	if (sr_sw_limits_check(limits))

	/*
	 * Individually accumulate and check each sample, such that
	 * accumulation between flushes won't exceed local storage, and
	 * enforcement of user specified limits is exact.
	 */
		write_u16le_inc(&buffer->write_pointer, sample);
		buffer->curr_samples++;
		if (buffer->curr_samples == buffer->max_samples) {
			ret = flush_submit_buffer(devc);
		sr_sw_limits_update_samples_read(limits, 1);
		if (sr_sw_limits_check(limits))
/*
 * Break the raw (linear) sample memory position down into line,
 * cluster, and event indices. The add-then-modulo keeps 'line'
 * within the ROW_COUNT range when the raw position wrapped.
 */
static void sigma_location_break_down(struct sigma_location *loc)
	loc->line = loc->raw / ROW_LENGTH_U16;
	loc->line += ROW_COUNT;
	loc->line %= ROW_COUNT;
	/* Remainder within the line: split into cluster and event. */
	loc->cluster = loc->raw % ROW_LENGTH_U16;
	loc->event = loc->cluster % EVENTS_PER_CLUSTER;
	loc->cluster = loc->cluster / EVENTS_PER_CLUSTER;
/*
 * Compare two broken-down locations for equality. Event granularity
 * is optional: with 'with_event' FALSE only line and cluster fields
 * are compared.
 */
static gboolean sigma_location_is_eq(struct sigma_location *loc1,
	struct sigma_location *loc2, gboolean with_event)
	if (loc1->line != loc2->line)
	if (loc1->cluster != loc2->cluster)
	if (with_event && loc1->event != loc2->event)
/* Decrement the broken-down location fields (leave 'raw' as is). */
static void sigma_location_decrement(struct sigma_location *loc,
	gboolean with_event)
	/* Wrap values, used when a field underflows during decrement. */
	loc->event = EVENTS_PER_CLUSTER - 1;
	loc->cluster = CLUSTERS_PER_ROW - 1;
	loc->line = ROW_COUNT - 1;
/*
 * Advance the broken-down location by one event. Carry propagates
 * from event to cluster to line upon overflow of each field.
 */
static void sigma_location_increment(struct sigma_location *loc)
	if (++loc->event < EVENTS_PER_CLUSTER)
	if (++loc->cluster < CLUSTERS_PER_ROW)
	if (++loc->line < ROW_COUNT)
/*
 * Determine the position where to open the period of trigger match
 * checks. Setup an "impossible" location when triggers are not used.
 * Start from the hardware provided 'trig' position otherwise, and
 * go back a few clusters, but don't go before the 'start' position.
 */
static void rewind_trig_arm_pos(struct dev_context *devc, size_t count)
	struct sigma_sample_interp *interp;

	interp = &devc->interp;
	/* No triggers? Park the arm position where iteration never gets. */
	if (!devc->use_triggers) {
		interp->trig_arm.raw = ~0;
		sigma_location_break_down(&interp->trig_arm);

	/* Step back from the hardware position, clamped at 'start'. */
	interp->trig_arm = interp->trig;
	if (sigma_location_is_eq(&interp->trig_arm, &interp->start, TRUE))
	sigma_location_decrement(&interp->trig_arm, TRUE);
/*
 * Derive the start/stop/trigger/iteration positions for sample memory
 * interpretation from the hardware provided positions, and allocate
 * the receive buffer for chunked DRAM line downloads.
 */
static int alloc_sample_buffer(struct dev_context *devc,
	size_t stop_pos, size_t trig_pos, uint8_t mode)
	struct sigma_sample_interp *interp;

	interp = &devc->interp;

	/*
	 * Either fetch sample memory from absolute start of DRAM to the
	 * current write position. Or from after the current write position
	 * to before the current write position, if the write pointer has
	 * wrapped around at the upper DRAM boundary. Assume that the line
	 * which most recently got written to is of unknown state, ignore
	 * its content in the "wrapped" case.
	 */
	wrapped = mode & RMR_ROUND;
	/* Non-wrapped: fetch from DRAM start up to the write position. */
	interp->start.raw = 0;
	interp->stop.raw = stop_pos;
	/* Wrapped: start at the line after the write position ... */
	interp->start.raw = stop_pos;
	interp->start.raw >>= ROW_SHIFT;
	interp->start.raw++;
	interp->start.raw <<= ROW_SHIFT;
	/* ... and stop at the start of the most recently written line. */
	interp->stop.raw = stop_pos;
	interp->stop.raw >>= ROW_SHIFT;
	interp->stop.raw <<= ROW_SHIFT;
	interp->trig.raw = trig_pos;
	interp->iter.raw = 0;

	/* Break down raw values to line, cluster, event fields. */
	sigma_location_break_down(&interp->start);
	sigma_location_break_down(&interp->stop);
	sigma_location_break_down(&interp->trig);
	sigma_location_break_down(&interp->iter);

	/*
	 * The hardware provided trigger location "is late" because of
	 * latency in hardware pipelines. It points to after the trigger
	 * condition match. Arrange for a software check of sample data
	 * matches starting just a little before the hardware provided
	 * location. The "4 clusters" distance is an arbitrary choice.
	 */
	rewind_trig_arm_pos(devc, 4 * EVENTS_PER_CLUSTER);

	/* Determine which DRAM lines to fetch from the device. */
	memset(&interp->fetch, 0, sizeof(interp->fetch));
	interp->fetch.lines_total = interp->stop.line + 1;
	interp->fetch.lines_total -= interp->start.line;
	/* Modulo arithmetic handles wrap around the ROW_COUNT boundary. */
	interp->fetch.lines_total += ROW_COUNT;
	interp->fetch.lines_total %= ROW_COUNT;
	interp->fetch.lines_done = 0;

	/* Arrange for chunked download, N lines per USB request. */
	interp->fetch.lines_per_read = 32;
	alloc_size = sizeof(devc->interp.fetch.rcvd_lines[0]);
	alloc_size *= devc->interp.fetch.lines_per_read;
	devc->interp.fetch.rcvd_lines = g_try_malloc0(alloc_size);
	if (!devc->interp.fetch.rcvd_lines)
		return SR_ERR_MALLOC;
/* Forward declarations, the deinterlace helpers are defined below. */
static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx);
static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx);
/*
 * Read the next chunk of DRAM lines from the device into the receive
 * buffer. On the first invocation, also seed the iteration position
 * as well as the initial timestamp and sample value.
 */
static int fetch_sample_buffer(struct dev_context *devc)
	struct sigma_sample_interp *interp;
	const uint8_t *rdptr;

	interp = &devc->interp;

	/* First invocation? Seed the iteration position. */
	if (!interp->fetch.lines_done) {
		interp->iter = interp->start;

	/* Get another set of DRAM lines in one read call. */
	count = interp->fetch.lines_total - interp->fetch.lines_done;
	if (count > interp->fetch.lines_per_read)
		count = interp->fetch.lines_per_read;
	ret = sigma_read_dram(devc, interp->iter.line, count,
		(uint8_t *)interp->fetch.rcvd_lines);
	interp->fetch.lines_rcvd = count;
	interp->fetch.curr_line = &interp->fetch.rcvd_lines[0];

	/* First invocation? Get initial timestamp and sample data. */
	if (!interp->fetch.lines_done) {
		rdptr = (void *)interp->fetch.curr_line;
		ts = read_u16le_inc(&rdptr);
		data = read_u16le_inc(&rdptr);
		/* Reduce interleaved data to the event's first sample. */
		if (interp->samples_per_event == 4) {
			data = sigma_deinterlace_data_4x4(data, 0);
		} else if (interp->samples_per_event == 2) {
			data = sigma_deinterlace_data_2x8(data, 0);
		interp->last.ts = ts;
		interp->last.sample = data;
1492 static void free_sample_buffer(struct dev_context *devc)
1494 g_free(devc->interp.fetch.rcvd_lines);
1495 devc->interp.fetch.rcvd_lines = NULL;
/*
 * In 100 and 200 MHz mode, only a single pin rising/falling can be
 * set as trigger. In other modes, two rising/falling triggers can be set,
 * in addition to value/mask trigger for any number of channels.
 *
 * The Sigma supports complex triggers using boolean expressions, but this
 * has not been implemented yet.
 */
SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
	struct dev_context *devc;
	struct sr_trigger *trigger;
	struct sr_trigger_stage *stage;
	struct sr_trigger_match *match;
	const GSList *l, *m;
	uint16_t channelbit;

	/* Start from a clean slate: no trigger spec, triggers unused. */
	memset(&devc->trigger, 0, sizeof(devc->trigger));
	devc->use_triggers = FALSE;
	trigger = sr_session_trigger_get(sdi->session);

	if (!ASIX_SIGMA_WITH_TRIGGER) {
		sr_warn("Trigger support is not implemented. Ignoring the spec.");

	/* Translate the session's trigger spec to driver internal state. */
	for (l = trigger->stages; l; l = l->next) {
		for (m = stage->matches; m; m = m->next) {
			/* Ignore disabled channels with a trigger. */
			if (!match->channel->enabled)
			channelbit = BIT(match->channel->index);
			if (devc->clock.samplerate >= SR_MHZ(100)) {
				/* Fast trigger support. */
					sr_err("100/200MHz modes limited to single trigger pin.");
				if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
					sr_err("100/200MHz modes limited to edge trigger.");
				/* Simple trigger support (event). */
				if (match->match == SR_TRIGGER_ONE) {
					devc->trigger.simplevalue |= channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_ZERO) {
					devc->trigger.simplevalue &= ~channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;

					/*
					 * Actually, Sigma supports 2 rising/falling triggers,
					 * but they are ORed and the current trigger syntax
					 * does not permit ORed triggers.
					 */
					if (trigger_set > 1) {
						sr_err("Limited to 1 edge trigger.");

	/* Keep track whether triggers are involved during acquisition. */
	devc->use_triggers = TRUE;
/* Software trigger to determine exact trigger position. */
static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
	struct sigma_trigger *t)
	const uint8_t *rdptr;

	/* Walk a window of samples, test each against the trigger spec. */
	for (i = 0; i < 8; i++) {
		last_sample = sample;
		sample = read_u16le_inc(&rdptr);

		/* Simple triggers. */
		if ((sample & t->simplemask) != t->simplevalue)

		/* Rising edge: previous sample low, current sample high. */
		if (((last_sample & t->risingmask) != 0) ||
		    ((sample & t->risingmask) != t->risingmask))

		/* Falling edge: previous sample high, current sample low. */
		if ((last_sample & t->fallingmask) != t->fallingmask ||
		    (sample & t->fallingmask) != 0)

	/* If we did not match, return original trigger pos. */
/*
 * Check whether a sample matches the configured trigger condition.
 * Always reports "no match" when triggers are not in use.
 */
static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
	/*
	 * Check whether the combination of this very sample and the
	 * previous state match the configured trigger condition. This
	 * improves the resolution of the trigger marker's position.
	 * The hardware provided position is coarse, and may point to
	 * a position before the actual match.
	 *
	 * See the previous get_trigger_offset() implementation. This
	 * code needs to get re-used here.
	 */
	if (!devc->use_triggers)

	/* Reference the helper, silences the unused-function warning. */
	(void)get_trigger_offset;
/* Flush queued samples, then emit an SR_DF_TRIGGER marker packet. */
static int send_trigger_marker(struct dev_context *devc)
	/* Samples before the trigger must reach the session feed first. */
	ret = flush_submit_buffer(devc);
	ret = std_session_send_df_trigger(devc->buffer->sdi);
/*
 * Submit one sample value 'count' times to the session feed. Emits
 * a trigger marker first when the sample matches the trigger spec
 * while software trigger checks are armed.
 */
static int check_and_submit_sample(struct dev_context *devc,
	uint16_t sample, size_t count, gboolean check_trigger)
	/*
	 * Ignore the condition provided by the "inner loop" logic of
	 * sample memory iteration. Instead use device context status
	 * for the period with software trigger match checks.
	 */
	check_trigger = devc->interp.trig_chk.armed;

	triggered = check_trigger && sample_matches_trigger(devc, sample);
	/* The trigger marker precedes the matching sample's submission. */
	send_trigger_marker(devc);

	ret = addto_submit_buffer(devc, sample, count);
/*
 * Supervise the software trigger check period: arm the check when
 * the iteration position reaches the prepared 'trig_arm' location,
 * disarm after a fixed number of events or upon a match, and force
 * a trigger marker when the hardware provided position is reached
 * without any software match.
 */
static void sigma_location_check(struct dev_context *devc)
	struct sigma_sample_interp *interp;

	interp = &devc->interp;

	/*
	 * Manage the period of trigger match checks in software.
	 * Start supervision somewhere before the hardware provided
	 * location. Stop supervision after an arbitrary amount of
	 * event slots, or when a match was found.
	 */
	if (interp->trig_chk.armed) {
		interp->trig_chk.evt_remain--;
		if (!interp->trig_chk.evt_remain || interp->trig_chk.matched)
			interp->trig_chk.armed = FALSE;
	if (!interp->trig_chk.armed && !interp->trig_chk.matched) {
		if (sigma_location_is_eq(&interp->iter, &interp->trig_arm, TRUE)) {
			interp->trig_chk.armed = TRUE;
			interp->trig_chk.matched = FALSE;
			interp->trig_chk.evt_remain = 8 * EVENTS_PER_CLUSTER;

	/*
	 * Force a trigger marker when the software check found no match
	 * yet while the hardware provided position was reached. This
	 * very probably is a user initiated button press.
	 */
	if (interp->trig_chk.armed) {
		if (sigma_location_is_eq(&interp->iter, &interp->trig, TRUE)) {
			(void)send_trigger_marker(devc);
			interp->trig_chk.matched = TRUE;
1724 * Return the timestamp of "DRAM cluster".
1726 static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1728 return read_u16le((const uint8_t *)&cluster->timestamp);
1732 * Return one 16bit data entity of a DRAM cluster at the specified index.
1734 static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1736 return read_u16le((const uint8_t *)&cl->samples[idx]);
/*
 * Deinterlace sample data that was retrieved at 100MHz samplerate.
 * One 16bit item contains two samples of 8bits each. The bits of
 * multiple samples are interleaved: after shifting by 'idx', every
 * second bit (positions 0, 2, 4, ...) belongs to the requested
 * sample and gets compacted into the low byte of the result.
 *
 * @param indata Interleaved 16bit item from sample memory.
 * @param idx Which of the two samples to extract (0 or 1).
 * @return The extracted 8bit sample value.
 */
static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx)
{
	uint16_t outdata;
	int bit;

	indata >>= idx;
	outdata = 0;
	/*
	 * Gather every second input bit into contiguous low bits. This
	 * replaces the former unrolled "(indata >> (n * 2 - n))" terms
	 * with the equivalent, but more obvious, bit gather loop.
	 */
	for (bit = 0; bit < 8; bit++)
		outdata |= ((indata >> (2 * bit)) & 1) << bit;

	return outdata;
}
/*
 * Deinterlace sample data that was retrieved at 200MHz samplerate.
 * One 16bit item contains four samples of 4bits each. The bits of
 * multiple samples are interleaved: after shifting by 'idx', every
 * fourth bit (positions 0, 4, 8, 12) belongs to the requested
 * sample and gets compacted into the low nibble of the result.
 *
 * @param indata Interleaved 16bit item from sample memory.
 * @param idx Which of the four samples to extract (0 to 3).
 * @return The extracted 4bit sample value.
 */
static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx)
{
	uint16_t outdata;
	int bit;

	indata >>= idx;
	outdata = 0;
	/*
	 * Gather every fourth input bit into contiguous low bits. This
	 * replaces the former unrolled "(indata >> (n * 4 - n))" terms
	 * with the equivalent, but more obvious, bit gather loop.
	 */
	for (bit = 0; bit < 4; bit++)
		outdata |= ((indata >> (4 * bit)) & 1) << bit;

	return outdata;
}
/*
 * Decode one DRAM cluster: first undo RLE compression by repeating
 * the previous sample value for skipped timestamps, then extract
 * and submit the samples of each event slot in the cluster.
 */
static void sigma_decode_dram_cluster(struct dev_context *devc,
	struct sigma_dram_cluster *dram_cluster,
	size_t events_in_cluster, gboolean triggered)
	uint16_t tsdiff, ts, sample, item16;

	/* Without trigger support, never treat a cluster as triggered. */
	if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)

	/*
	 * If this cluster is not adjacent to the previously received
	 * cluster, then send the appropriate number of samples with the
	 * previous values to the sigrok session. This "decodes RLE".
	 *
	 * These samples cannot match the trigger since they just repeat
	 * the previously submitted data pattern. (This assumption holds
	 * for simple level and edge triggers. It would not for timed or
	 * counted conditions, which currently are not supported.)
	 */
	ts = sigma_dram_cluster_ts(dram_cluster);
	tsdiff = ts - devc->interp.last.ts;
	sample = devc->interp.last.sample;
	count = tsdiff * devc->interp.samples_per_event;
	(void)check_and_submit_sample(devc, sample, count, FALSE);
	/* Advance the timestamp by one cluster's worth of events. */
	devc->interp.last.ts = ts + EVENTS_PER_CLUSTER;

	/*
	 * Grab sample data from the current cluster and prepare their
	 * submission to the session feed. Handle samplerate dependent
	 * memory layout of sample data. Accumulation of data chunks
	 * before submission is transparent to this code path, specific
	 * buffer depth is neither assumed nor required here.
	 */
	for (evt = 0; evt < events_in_cluster; evt++) {
		item16 = sigma_dram_cluster_data(dram_cluster, evt);
		if (devc->interp.samples_per_event == 4) {
			/* 200MHz: four interleaved 4bit samples per item. */
			sample = sigma_deinterlace_data_4x4(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
			sample = sigma_deinterlace_data_4x4(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
			sample = sigma_deinterlace_data_4x4(item16, 2);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
			sample = sigma_deinterlace_data_4x4(item16, 3);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
		} else if (devc->interp.samples_per_event == 2) {
			/* 100MHz: two interleaved 8bit samples per item. */
			sample = sigma_deinterlace_data_2x8(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
			sample = sigma_deinterlace_data_2x8(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
			/* 50MHz and below: one sample per 16bit item. */
			check_and_submit_sample(devc, sample, 1, triggered);
			devc->interp.last.sample = sample;
		sigma_location_increment(&devc->interp.iter);
		sigma_location_check(devc);
/*
 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
 * Each event is 20ns apart, and can contain multiple samples.
 *
 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
 * For 50 MHz and below, events contain one sample for each channel,
 * spread 20 ns apart.
 */
static int decode_chunk_ts(struct dev_context *devc,
	struct sigma_dram_line *dram_line,
	size_t events_in_line, size_t trigger_event)
	struct sigma_dram_cluster *dram_cluster;
	size_t clusters_in_line;
	size_t events_in_cluster;
	size_t trigger_cluster;

	/* Count the (partial) clusters which span the line's events. */
	clusters_in_line = events_in_line;
	clusters_in_line += EVENTS_PER_CLUSTER - 1;
	clusters_in_line /= EVENTS_PER_CLUSTER;

	/* Check if trigger is in this chunk. */
	trigger_cluster = ~UINT64_C(0);
	if (trigger_event < EVENTS_PER_ROW) {
		if (devc->clock.samplerate <= SR_MHZ(50)) {
			trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,

		/* Find in which cluster the trigger occurred. */
		trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;

	/* For each full DRAM cluster. */
	for (cluster = 0; cluster < clusters_in_line; cluster++) {
		dram_cluster = &dram_line->cluster[cluster];

		/* The last cluster might not be full. */
		if ((cluster == clusters_in_line - 1) &&
		    (events_in_line % EVENTS_PER_CLUSTER)) {
			events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
			events_in_cluster = EVENTS_PER_CLUSTER;

		sigma_decode_dram_cluster(devc, dram_cluster,
			events_in_cluster, cluster == trigger_cluster);
/*
 * Stop the hardware acquisition, download the sample memory content
 * in chunks of DRAM lines, interpret the data, and feed the result
 * to the sigrok session. Ends the acquisition when done.
 */
static int download_capture(struct sr_dev_inst *sdi)
	struct dev_context *devc;
	struct sigma_sample_interp *interp;
	uint32_t stoppos, triggerpos;

	interp = &devc->interp;

	sr_info("Downloading sample data.");
	devc->state = SIGMA_DOWNLOAD;

	/*
	 * Ask the hardware to stop data acquisition. Reception of the
	 * FORCESTOP request makes the hardware "disable RLE" (store
	 * clusters to DRAM regardless of whether pin state changes) and
	 * raise the POSTTRIGGERED flag.
	 *
	 * Then switch the hardware from DRAM write (data acquisition)
	 * to DRAM read (sample memory download).
	 */
	modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
	ret = sigma_set_register(devc, WRITE_MODE, modestatus);
		/* Poll until the hardware raises the post-trigger flag. */
		ret = sigma_get_register(devc, READ_MODE, &modestatus);
			sr_err("Could not poll for post-trigger state.");
	} while (!(modestatus & RMR_POSTTRIGGERED));
	ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);

	/*
	 * Get the current positions (acquisition write pointer, and
	 * trigger match location). With disabled triggers, use a value
	 * for the location that will never match during interpretation.
	 */
	ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
		sr_err("Could not query capture positions/state.");
	if (!devc->use_triggers)
	if (!(modestatus & RMR_TRIGGERED))

	/*
	 * Determine which area of the sample memory to retrieve,
	 * allocate a receive buffer, and setup counters/pointers.
	 */
	ret = alloc_sample_buffer(devc, stoppos, triggerpos, modestatus);
	ret = alloc_submit_buffer(sdi);
	ret = setup_submit_limit(devc);

	/* Fetch and interpret sample memory, several lines at a time. */
	while (interp->fetch.lines_done < interp->fetch.lines_total) {
		size_t dl_events_in_line, trigger_event;

		/* Read another chunk of sample memory (several lines). */
		ret = fetch_sample_buffer(devc);

		/* Process lines of sample data. Last line may be short. */
		while (interp->fetch.lines_rcvd--) {
			dl_events_in_line = EVENTS_PER_ROW;
			if (interp->iter.line == interp->stop.line) {
				dl_events_in_line = interp->stop.raw & ROW_MASK;
			trigger_event = ~UINT64_C(0);
			if (interp->iter.line == interp->trig.line) {
				trigger_event = interp->trig.raw & ROW_MASK;
			decode_chunk_ts(devc, interp->fetch.curr_line,
				dl_events_in_line, trigger_event);
			interp->fetch.curr_line++;
			interp->fetch.lines_done++;

	/* Flush pending samples, release buffers, end the session feed. */
	flush_submit_buffer(devc);
	free_submit_buffer(devc);
	free_sample_buffer(devc);

	std_session_send_df_end(sdi);

	devc->state = SIGMA_IDLE;
	sr_dev_acquisition_stop(sdi);
/*
 * Periodically check the Sigma status when in CAPTURE mode. This routine
 * checks whether the configured sample count or sample time have passed,
 * and will stop acquisition and download the acquired samples.
 */
static int sigma_capture_mode(struct sr_dev_inst *sdi)
	struct dev_context *devc;

	/* Limits reached? Stop capturing and start the download. */
	if (sr_sw_limits_check(&devc->limit.acquire))
		return download_capture(sdi);
/*
 * Periodic main loop callback. Dispatches on the current driver
 * state: do nothing while idle, keep checking limits while
 * capturing, or start the sample download.
 */
SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
	struct sr_dev_inst *sdi;
	struct dev_context *devc;

	/* Nothing to do while idle. */
	if (devc->state == SIGMA_IDLE)

	/*
	 * When the application has requested to stop the acquisition,
	 * then immediately start downloading sample data. Otherwise
	 * keep checking configured limits which will terminate the
	 * acquisition and initiate download.
	 */
	if (devc->state == SIGMA_STOPPING)
		return download_capture(sdi);
	if (devc->state == SIGMA_CAPTURE)
		return sigma_capture_mode(sdi);
/* Build a LUT entry used by the trigger functions. */
static void build_lut_entry(uint16_t *lut_entry,
	uint16_t spec_value, uint16_t spec_mask)
	size_t quad, bitidx, ch;
	uint16_t quadmask, bitmask;
	gboolean spec_value_low, bit_idx_low;

	/*
	 * For each quad-channel-group, for each bit in the LUT (each
	 * bit pattern of the channel signals, aka LUT address), for
	 * each channel in the quad, setup the bit in the LUT entry.
	 *
	 * Start from all-ones in the LUT (true, always matches), then
	 * "pessimize the truthness" for specified conditions.
	 */
	for (quad = 0; quad < 4; quad++) {
		lut_entry[quad] = ~0;
		for (bitidx = 0; bitidx < 16; bitidx++) {
			for (ch = 0; ch < 4; ch++) {
				bitmask = quadmask << (quad * 4);
				/* Channels outside the spec never pessimize. */
				if (!(spec_mask & bitmask))
				/*
				 * This bit is part of the spec. The
				 * condition which gets checked here
				 * (got checked in all implementations
				 * so far) is uncertain. A bit position
				 * in the current index' number(!) is
				 */
				spec_value_low = !(spec_value & bitmask);
				bit_idx_low = !(bitidx & quadmask);
				if (spec_value_low == bit_idx_low)
				/* Clear the LUT bit upon a mismatch. */
				lut_entry[quad] &= ~BIT(bitidx);
2092 /* Add a logical function to LUT mask. */
2093 static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
2094 size_t index, gboolean neg, uint16_t *mask)
2096 int x[2][2], a, b, aset, bset, rset;
2100 * Beware! The x, a, b, aset, bset, rset variables strictly
2101 * require the limited 0..1 range. They are not interpreted
2102 * as logically true, instead bit arith is done on them.
2105 /* Construct a pattern which detects the condition. */
2106 memset(x, 0, sizeof(x));
2136 case OP_NOTRISEFALL:
2142 /* Transpose the pattern if the condition is negated. */
2147 for (i = 0; i < 2; i++) {
2148 for (j = 0; j < 2; j++) {
2150 x[i][j] = x[1 - i][1 - j];
2151 x[1 - i][1 - j] = tmp;
2156 /* Update the LUT mask with the function's condition. */
2157 for (bitidx = 0; bitidx < 16; bitidx++) {
2158 a = (bitidx & BIT(2 * index + 0)) ? 1 : 0;
2159 b = (bitidx & BIT(2 * index + 1)) ? 1 : 0;
2161 aset = (*mask & BIT(bitidx)) ? 1 : 0;
2164 if (func == FUNC_AND || func == FUNC_NAND)
2166 else if (func == FUNC_OR || func == FUNC_NOR)
2168 else if (func == FUNC_XOR || func == FUNC_NXOR)
2173 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
2177 *mask |= BIT(bitidx);
2179 *mask &= ~BIT(bitidx);
2184 * Build trigger LUTs used by 50 MHz and lower sample rates for supporting
2185 * simple pin change and state triggers. Only two transitions (rise/fall) can be
2186 * set at any time, but a full mask and value can be set (0/1).
2188 SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
2189 struct triggerlut *lut)
2192 size_t bitidx, condidx;
2193 uint16_t value, mask;
2195 /* Setup something that "won't match" in the absence of a spec. */
2196 memset(lut, 0, sizeof(*lut));
2197 if (!devc->use_triggers)
2200 /* Start assuming simple triggers. Edges are handled below. */
2204 /* Process value/mask triggers. */
2205 value = devc->trigger.simplevalue;
2206 mask = devc->trigger.simplemask;
2207 build_lut_entry(lut->m2d, value, mask);
2209 /* Scan for and process rise/fall triggers. */
2210 memset(&masks, 0, sizeof(masks));
2212 for (bitidx = 0; bitidx < 16; bitidx++) {
2214 value = devc->trigger.risingmask | devc->trigger.fallingmask;
2215 if (!(value & mask))
2218 build_lut_entry(lut->m0d, mask, mask);
2220 build_lut_entry(lut->m1d, mask, mask);
2221 masks[condidx++] = mask;
2222 if (condidx == ARRAY_SIZE(masks))
2226 /* Add glue logic for rise/fall triggers. */
2227 if (masks[0] || masks[1]) {
2229 if (masks[0] & devc->trigger.risingmask)
2230 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q);
2231 if (masks[0] & devc->trigger.fallingmask)
2232 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q);
2233 if (masks[1] & devc->trigger.risingmask)
2234 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q);
2235 if (masks[1] & devc->trigger.fallingmask)
2236 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q);
2239 /* Triggertype: event. */
2240 lut->params.selres = TRGSEL_SELCODE_NEVER;
2241 lut->params.selinc = TRGSEL_SELCODE_LEVEL;
2242 lut->params.sela = 0; /* Counter >= CMPA && LEVEL */
2243 lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */