X-Git-Url: https://sigrok.org/gitweb/?a=blobdiff_plain;f=src%2Fhardware%2Fasix-sigma%2Fprotocol.c;h=2c00d74dd266612e0d4fcf882f7efc4c81af8164;hb=8a72362505408849fb0d04d7df22a3c54a1aee73;hp=9a45a2d95c4ae8355f99b410edb198ec35d10766;hpb=9b4d261fabf7f9fd70ccd7514ecdadb8e87a7490;p=libsigrok.git diff --git a/src/hardware/asix-sigma/protocol.c b/src/hardware/asix-sigma/protocol.c index 9a45a2d9..2c00d74d 100644 --- a/src/hardware/asix-sigma/protocol.c +++ b/src/hardware/asix-sigma/protocol.c @@ -4,6 +4,7 @@ * Copyright (C) 2010-2012 HÃ¥vard Espeland , * Copyright (C) 2010 Martin StensgÃ¥rd * Copyright (C) 2010 Carl Henrik Lunde + * Copyright (C) 2020 Gerhard Sittig * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -36,7 +37,7 @@ * few discrete values, while setter routines accept any user specified * rate that is supported by the hardware. */ -SR_PRIV const uint64_t samplerates[] = { +static const uint64_t samplerates[] = { /* 50MHz and integer divider. 1/2/5 steps (where possible). */ SR_KHZ(200), SR_KHZ(500), SR_MHZ(1), SR_MHZ(2), SR_MHZ(5), @@ -45,7 +46,10 @@ SR_PRIV const uint64_t samplerates[] = { SR_MHZ(100), SR_MHZ(200), }; -SR_PRIV const size_t samplerates_count = ARRAY_SIZE(samplerates); +SR_PRIV GVariant *sigma_get_samplerates_list(void) +{ + return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates)); +} static const char *firmware_files[] = { [SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */ @@ -57,58 +61,223 @@ static const char *firmware_files[] = { #define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024) -static int sigma_read(struct dev_context *devc, void *buf, size_t size) +static int sigma_ftdi_open(const struct sr_dev_inst *sdi) { + struct dev_context *devc; + int vid, pid; + const char *serno; int ret; - ret = ftdi_read_data(&devc->ftdic, (unsigned char *)buf, size); + devc = sdi->priv; + if (!devc) + return SR_ERR_ARG; + + if (devc->ftdi.is_open) + return SR_OK; + + vid = devc->id.vid; + pid = devc->id.pid; + serno = sdi->serial_num; + if (!vid || !pid || !serno || !*serno) + return SR_ERR_ARG; + + ret = ftdi_init(&devc->ftdi.ctx); + if (ret < 0) { + sr_err("Cannot initialize FTDI context (%d): %s.", + ret, ftdi_get_error_string(&devc->ftdi.ctx)); + return SR_ERR_IO; + } + ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx, + vid, pid, NULL, serno, 0); if (ret < 0) { - sr_err("ftdi_read_data failed: %s", - ftdi_get_error_string(&devc->ftdic)); + sr_err("Cannot open device (%d): %s.", + ret, ftdi_get_error_string(&devc->ftdi.ctx)); + return SR_ERR_IO; } + devc->ftdi.is_open = TRUE; + + return SR_OK; +} + +static int sigma_ftdi_close(struct dev_context *devc) +{ + int ret; + + ret = ftdi_usb_close(&devc->ftdi.ctx); + devc->ftdi.is_open = FALSE; + devc->ftdi.must_close = FALSE; + ftdi_deinit(&devc->ftdi.ctx); + + return ret == 0 ? 
SR_OK : SR_ERR_IO; +} + +SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi) +{ + struct dev_context *devc; + int ret; + + if (!sdi) + return SR_ERR_ARG; + devc = sdi->priv; + if (!devc) + return SR_ERR_ARG; + + if (devc->ftdi.is_open) + return SR_OK; + + ret = sigma_ftdi_open(sdi); + if (ret != SR_OK) + return ret; + devc->ftdi.must_close = TRUE; return ret; } -static int sigma_write(struct dev_context *devc, void *buf, size_t size) +SR_PRIV int sigma_check_close(struct dev_context *devc) { int ret; - ret = ftdi_write_data(&devc->ftdic, (unsigned char *)buf, size); - if (ret < 0) - sr_err("ftdi_write_data failed: %s", - ftdi_get_error_string(&devc->ftdic)); - else if ((size_t) ret != size) - sr_err("ftdi_write_data did not complete write."); + if (!devc) + return SR_ERR_ARG; + + if (devc->ftdi.must_close) { + ret = sigma_ftdi_close(devc); + if (ret != SR_OK) + return ret; + devc->ftdi.must_close = FALSE; + } + + return SR_OK; +} + +SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi) +{ + struct dev_context *devc; + int ret; + + if (!sdi) + return SR_ERR_ARG; + devc = sdi->priv; + if (!devc) + return SR_ERR_ARG; + + ret = sigma_ftdi_open(sdi); + if (ret != SR_OK) + return ret; + devc->ftdi.must_close = FALSE; + + return SR_OK; +} + +SR_PRIV int sigma_force_close(struct dev_context *devc) +{ + return sigma_ftdi_close(devc); +} + +/* + * BEWARE! Error propagation is important, as are kinds of return values. + * + * - Raw USB tranport communicates the number of sent or received bytes, + * or negative error codes in the external library's(!) range of codes. + * - Internal routines at the "sigrok driver level" communicate success + * or failure in terms of SR_OK et al error codes. + * - Main loop style receive callbacks communicate booleans which arrange + * for repeated calls to drive progress during acquisition. + * + * Careful consideration by maintainers is essential, because all of the + * above kinds of values are assignment compatbile from the compiler's + * point of view. Implementation errors will go unnoticed at build time. + */ + +static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size) +{ + int ret; + + ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size); + if (ret < 0) { + sr_err("USB data read failed: %s", + ftdi_get_error_string(&devc->ftdi.ctx)); + } return ret; } +static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size) +{ + int ret; + + ret = ftdi_write_data(&devc->ftdi.ctx, buf, size); + if (ret < 0) { + sr_err("USB data write failed: %s", + ftdi_get_error_string(&devc->ftdi.ctx)); + } else if ((size_t)ret != size) { + sr_err("USB data write length mismatch."); + } + + return ret; +} + +static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size) +{ + int ret; + + ret = sigma_read_raw(devc, buf, size); + if (ret < 0 || (size_t)ret != size) + return SR_ERR_IO; + + return SR_OK; +} + +static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size) +{ + int ret; + + ret = sigma_write_raw(devc, buf, size); + if (ret < 0 || (size_t)ret != size) + return SR_ERR_IO; + + return SR_OK; +} + +/* + * Implementor's note: The local write buffer's size shall suffice for + * any know FPGA register transaction that is involved in the supported + * feature set of this sigrok device driver. If the length check trips, + * that's a programmer's error and needs adjustment in the complete call + * stack of the respective code path. 
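+ *
+ * For example, a write of LEN register bytes expands to 2 + 2 * LEN
+ * command bytes: two register address select bytes up front, then a
+ * low/high nibble pair per data byte. With the depth limit of 32
+ * below, the worst case is 66 command bytes, which the local write
+ * buffer in sigma_write_register() covers.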
+ */ +#define SIGMA_MAX_REG_DEPTH 32 + /* - * NOTE: We chose the buffer size to be large enough to hold any write to the - * device. We still print a message just in case. + * Implementor's note: The FPGA command set supports register access + * with automatic address adjustment. This operation is documented to + * wrap within a 16-address range, it cannot cross boundaries where the + * register address' nibble overflows. An internal helper assumes that + * callers remain within this auto-adjustment range, and thus multi + * register access requests can never exceed that count. */ +#define SIGMA_MAX_REG_COUNT 16 + SR_PRIV int sigma_write_register(struct dev_context *devc, uint8_t reg, uint8_t *data, size_t len) { - size_t i; - uint8_t buf[80]; - int idx = 0; + uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr; + size_t idx; - if ((2 * len + 2) > sizeof(buf)) { - sr_err("Write buffer too small to write %zu bytes.", len); + if (len > SIGMA_MAX_REG_DEPTH) { + sr_err("Short write buffer for %zu bytes to reg %u.", len, reg); return SR_ERR_BUG; } - buf[idx++] = REG_ADDR_LOW | (reg & 0xf); - buf[idx++] = REG_ADDR_HIGH | (reg >> 4); - - for (i = 0; i < len; i++) { - buf[idx++] = REG_DATA_LOW | (data[i] & 0xf); - buf[idx++] = REG_DATA_HIGH_WRITE | (data[i] >> 4); + wrptr = buf; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); + for (idx = 0; idx < len; idx++) { + write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx])); + write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx])); } - return sigma_write(devc, buf, idx); + return sigma_write_sr(devc, buf, wrptr - buf); } SR_PRIV int sigma_set_register(struct dev_context *devc, @@ -120,159 +289,249 @@ SR_PRIV int sigma_set_register(struct dev_context *devc, static int sigma_read_register(struct dev_context *devc, uint8_t reg, uint8_t *data, size_t len) { - uint8_t buf[3]; + uint8_t buf[3], *wrptr; + int ret; + + wrptr = buf; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); + write_u8_inc(&wrptr, REG_READ_ADDR); + ret = sigma_write_sr(devc, buf, wrptr - buf); + if (ret != SR_OK) + return ret; + + return sigma_read_sr(devc, data, len); +} + +static int sigma_get_register(struct dev_context *devc, + uint8_t reg, uint8_t *data) +{ + return sigma_read_register(devc, reg, data, sizeof(*data)); +} + +static int sigma_get_registers(struct dev_context *devc, + uint8_t reg, uint8_t *data, size_t count) +{ + uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr; + size_t idx; + int ret; - buf[0] = REG_ADDR_LOW | (reg & 0xf); - buf[1] = REG_ADDR_HIGH | (reg >> 4); - buf[2] = REG_READ_ADDR; + if (count > SIGMA_MAX_REG_COUNT) { + sr_err("Short command buffer for %zu reg reads at %u.", count, reg); + return SR_ERR_BUG; + } - sigma_write(devc, buf, sizeof(buf)); + wrptr = buf; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); + for (idx = 0; idx < count; idx++) + write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC); + ret = sigma_write_sr(devc, buf, wrptr - buf); + if (ret != SR_OK) + return ret; - return sigma_read(devc, data, len); + return sigma_read_sr(devc, data, count); } static int sigma_read_pos(struct dev_context *devc, - uint32_t *stoppos, uint32_t *triggerpos) + uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode) { + uint8_t result[7]; + const uint8_t *rdptr; + uint32_t v32; + uint8_t v8; + int ret; + /* - * Read 6 registers starting at trigger position LSB. - * Which yields two 24bit counter values. 
+ * Read 7 registers starting at trigger position LSB. + * Which yields two 24bit counter values, and mode flags. */ - uint8_t buf[] = { - REG_ADDR_LOW | READ_TRIGGER_POS_LOW, - REG_READ_ADDR | REG_ADDR_INC, - REG_READ_ADDR | REG_ADDR_INC, - REG_READ_ADDR | REG_ADDR_INC, - REG_READ_ADDR | REG_ADDR_INC, - REG_READ_ADDR | REG_ADDR_INC, - REG_READ_ADDR | REG_ADDR_INC, - }; - uint8_t result[6]; - - sigma_write(devc, buf, sizeof(buf)); - - sigma_read(devc, result, sizeof(result)); + ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW, + result, sizeof(result)); + if (ret != SR_OK) + return ret; - *triggerpos = result[0] | (result[1] << 8) | (result[2] << 16); - *stoppos = result[3] | (result[4] << 8) | (result[5] << 16); + rdptr = &result[0]; + v32 = read_u24le_inc(&rdptr); + if (triggerpos) + *triggerpos = v32; + v32 = read_u24le_inc(&rdptr); + if (stoppos) + *stoppos = v32; + v8 = read_u8_inc(&rdptr); + if (mode) + *mode = v8; /* - * These "position" values point to after the event (end of - * capture data, trigger condition matched). This is why they - * get decremented here. Sample memory consists of 512-byte - * chunks with meta data in the upper 64 bytes. Thus when the - * decrements takes us into this upper part of the chunk, then - * further move backwards to the end of the chunk's data part. + * These positions consist of "the memory row" in the MSB fields, + * and "an event index" within the row in the LSB fields. Part + * of the memory row's content is sample data, another part is + * timestamps. * - * TODO Re-consider the above comment's validity. It's true - * that a 1024byte row contains 512 u16 entities, of which 64 - * are timestamps and 448 are events with sample data. It's not - * true that 64bytes of metadata reside at the top of a 512byte - * block in a row. - * - * TODO Use ROW_MASK and CLUSTERS_PER_ROW here? + * The retrieved register values point to after the captured + * position. So they need to get decremented, and adjusted to + * cater for the timestamps when the decrement carries over to + * a different memory row. */ - if ((--*stoppos & 0x1ff) == 0x1ff) - *stoppos -= 64; - if ((--*triggerpos & 0x1ff) == 0x1ff) - *triggerpos -= 64; + if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK) + *stoppos -= CLUSTERS_PER_ROW; + if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK) + *triggerpos -= CLUSTERS_PER_ROW; - return 1; + return SR_OK; } static int sigma_read_dram(struct dev_context *devc, - uint16_t startchunk, size_t numchunks, uint8_t *data) + size_t startchunk, size_t numchunks, uint8_t *data) { - uint8_t buf[4096]; - int idx; + uint8_t buf[128], *wrptr, regval; size_t chunk; - int sel; + int sel, ret; gboolean is_last; + if (2 + 3 * numchunks > ARRAY_SIZE(buf)) { + sr_err("Short write buffer for %zu DRAM row reads.", numchunks); + return SR_ERR_BUG; + } + /* Communicate DRAM start address (memory row, aka samples line). */ - idx = 0; - buf[idx++] = startchunk >> 8; - buf[idx++] = startchunk & 0xff; - sigma_write_register(devc, WRITE_MEMROW, buf, idx); + wrptr = buf; + write_u16be_inc(&wrptr, startchunk); + ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf); + if (ret != SR_OK) + return ret; /* * Access DRAM content. Fetch from DRAM to FPGA's internal RAM, * then transfer via USB. Interleave the FPGA's DRAM access and * USB transfer, use alternating buffers (0/1) in the process. 
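 *
 * For example, a read of three rows roughly results in the command
 * stream: BLOCK(0) WAIT, then BLOCK(1) DATA(0) WAIT, then BLOCK(0)
 * DATA(1) WAIT, then DATA(0). Each DATA readout of one buffer
 * overlaps the DRAM fetch into the other buffer.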
*/ - idx = 0; - buf[idx++] = REG_DRAM_BLOCK; - buf[idx++] = REG_DRAM_WAIT_ACK; + wrptr = buf; + write_u8_inc(&wrptr, REG_DRAM_BLOCK); + write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK); for (chunk = 0; chunk < numchunks; chunk++) { sel = chunk % 2; is_last = chunk == numchunks - 1; + if (!is_last) { + regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel); + write_u8_inc(&wrptr, regval); + } + regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel); + write_u8_inc(&wrptr, regval); if (!is_last) - buf[idx++] = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel); - buf[idx++] = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel); - if (!is_last) - buf[idx++] = REG_DRAM_WAIT_ACK; + write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK); } - sigma_write(devc, buf, idx); + ret = sigma_write_sr(devc, buf, wrptr - buf); + if (ret != SR_OK) + return ret; - return sigma_read(devc, data, numchunks * ROW_LENGTH_BYTES); + return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES); } /* Upload trigger look-up tables to Sigma. */ SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc, struct triggerlut *lut) { - int i; - uint8_t tmp[2]; + size_t lut_addr; uint16_t bit; + uint8_t m3d, m2d, m1d, m0d; + uint8_t buf[6], *wrptr; + uint8_t trgsel2; + uint16_t lutreg, selreg; + int ret; - /* Transpose the table and send to Sigma. */ - for (i = 0; i < 16; i++) { - bit = 1 << i; - - tmp[0] = tmp[1] = 0; - - if (lut->m2d[0] & bit) - tmp[0] |= 0x01; - if (lut->m2d[1] & bit) - tmp[0] |= 0x02; - if (lut->m2d[2] & bit) - tmp[0] |= 0x04; - if (lut->m2d[3] & bit) - tmp[0] |= 0x08; + /* + * Translate the LUT part of the trigger configuration from the + * application's perspective to the hardware register's bitfield + * layout. Send the LUT to the device. This configures the logic + * which combines pin levels or edges. + */ + for (lut_addr = 0; lut_addr < 16; lut_addr++) { + bit = BIT(lut_addr); - if (lut->m3 & bit) - tmp[0] |= 0x10; - if (lut->m3s & bit) - tmp[0] |= 0x20; + /* - M4 M3S M3Q */ + m3d = 0; if (lut->m4 & bit) - tmp[0] |= 0x40; + m3d |= BIT(2); + if (lut->m3s & bit) + m3d |= BIT(1); + if (lut->m3q & bit) + m3d |= BIT(0); - if (lut->m0d[0] & bit) - tmp[1] |= 0x01; - if (lut->m0d[1] & bit) - tmp[1] |= 0x02; - if (lut->m0d[2] & bit) - tmp[1] |= 0x04; - if (lut->m0d[3] & bit) - tmp[1] |= 0x08; + /* M2D3 M2D2 M2D1 M2D0 */ + m2d = 0; + if (lut->m2d[3] & bit) + m2d |= BIT(3); + if (lut->m2d[2] & bit) + m2d |= BIT(2); + if (lut->m2d[1] & bit) + m2d |= BIT(1); + if (lut->m2d[0] & bit) + m2d |= BIT(0); - if (lut->m1d[0] & bit) - tmp[1] |= 0x10; - if (lut->m1d[1] & bit) - tmp[1] |= 0x20; - if (lut->m1d[2] & bit) - tmp[1] |= 0x40; + /* M1D3 M1D2 M1D1 M1D0 */ + m1d = 0; if (lut->m1d[3] & bit) - tmp[1] |= 0x80; + m1d |= BIT(3); + if (lut->m1d[2] & bit) + m1d |= BIT(2); + if (lut->m1d[1] & bit) + m1d |= BIT(1); + if (lut->m1d[0] & bit) + m1d |= BIT(0); - sigma_write_register(devc, WRITE_TRIGGER_SELECT, - tmp, sizeof(tmp)); - sigma_set_register(devc, WRITE_TRIGGER_SELECT2, 0x30 | i); + /* M0D3 M0D2 M0D1 M0D0 */ + m0d = 0; + if (lut->m0d[3] & bit) + m0d |= BIT(3); + if (lut->m0d[2] & bit) + m0d |= BIT(2); + if (lut->m0d[1] & bit) + m0d |= BIT(1); + if (lut->m0d[0] & bit) + m0d |= BIT(0); + + /* + * Send 16bits with M3D/M2D and M1D/M0D bit masks to the + * TriggerSelect register, then strobe the LUT write by + * passing A3-A0 to TriggerSelect2. Hold RESET during LUT + * programming. 
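+ *
+ * As a sketch of the layout which the code below implements: the
+ * 16bit TriggerSelect value carries m3d in bits 15:12, m2d in bits
+ * 11:8, m1d in bits 7:4, and m0d in bits 3:0, and gets transmitted
+ * in big endian byte order.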
+ */ + wrptr = buf; + lutreg = 0; + lutreg <<= 4; lutreg |= m3d; + lutreg <<= 4; lutreg |= m2d; + lutreg <<= 4; lutreg |= m1d; + lutreg <<= 4; lutreg |= m0d; + write_u16be_inc(&wrptr, lutreg); + ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, + buf, wrptr - buf); + if (ret != SR_OK) + return ret; + trgsel2 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE | + (lut_addr & TRGSEL2_LUT_ADDR_MASK); + ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, trgsel2); + if (ret != SR_OK) + return ret; } - /* Send the parameters */ - sigma_write_register(devc, WRITE_TRIGGER_SELECT, - (uint8_t *)&lut->params, sizeof(lut->params)); + /* + * Send the parameters. This covers counters and durations. + */ + wrptr = buf; + selreg = 0; + selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT; + selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT; + selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT; + selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT; + selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT; + selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT; + write_u16be_inc(&wrptr, selreg); + write_u16be_inc(&wrptr, lut->params.cmpb); + write_u16be_inc(&wrptr, lut->params.cmpa); + ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf); + if (ret != SR_OK) + return ret; return SR_OK; } @@ -299,14 +558,14 @@ SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc, * mode and sending configuration data. Set D7 and toggle D2, D3, D4 * a few times. */ -#define BB_PIN_CCLK (1 << 0) /* D0, CCLK */ -#define BB_PIN_PROG (1 << 1) /* D1, PROG */ -#define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */ -#define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */ -#define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */ -#define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */ -#define BB_PIN_DIN (1 << 6) /* D6, DIN */ -#define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */ +#define BB_PIN_CCLK BIT(0) /* D0, CCLK */ +#define BB_PIN_PROG BIT(1) /* D1, PROG */ +#define BB_PIN_D2 BIT(2) /* D2, (part of) SUICIDE */ +#define BB_PIN_D3 BIT(3) /* D3, (part of) SUICIDE */ +#define BB_PIN_D4 BIT(4) /* D4, (part of) SUICIDE (unused?) */ +#define BB_PIN_INIT BIT(5) /* D5, INIT, input pin */ +#define BB_PIN_DIN BIT(6) /* D6, DIN */ +#define BB_PIN_D7 BIT(7) /* D7, (part of) SUICIDE */ #define BB_BITRATE (750 * 1000) #define BB_PINMASK (0xff & ~BB_PIN_INIT) @@ -325,7 +584,7 @@ SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc, */ static int sigma_fpga_init_bitbang_once(struct dev_context *devc) { - uint8_t suicide[] = { + const uint8_t suicide[] = { BB_PIN_D7 | BB_PIN_D2, BB_PIN_D7 | BB_PIN_D2, BB_PIN_D7 | BB_PIN_D3, @@ -335,7 +594,7 @@ static int sigma_fpga_init_bitbang_once(struct dev_context *devc) BB_PIN_D7 | BB_PIN_D3, BB_PIN_D7 | BB_PIN_D2, }; - uint8_t init_array[] = { + const uint8_t init_array[] = { BB_PIN_CCLK, BB_PIN_CCLK | BB_PIN_PROG, BB_PIN_CCLK | BB_PIN_PROG, @@ -347,30 +606,48 @@ static int sigma_fpga_init_bitbang_once(struct dev_context *devc) BB_PIN_CCLK, BB_PIN_CCLK, }; - int retries, ret; + size_t retries; + int ret; uint8_t data; /* Section 2. part 1), do the FPGA suicide. 
*/ - sigma_write(devc, suicide, sizeof(suicide)); - sigma_write(devc, suicide, sizeof(suicide)); - sigma_write(devc, suicide, sizeof(suicide)); - sigma_write(devc, suicide, sizeof(suicide)); + ret = SR_OK; + ret |= sigma_write_sr(devc, suicide, sizeof(suicide)); + ret |= sigma_write_sr(devc, suicide, sizeof(suicide)); + ret |= sigma_write_sr(devc, suicide, sizeof(suicide)); + ret |= sigma_write_sr(devc, suicide, sizeof(suicide)); + if (ret != SR_OK) + return SR_ERR_IO; g_usleep(10 * 1000); /* Section 2. part 2), pulse PROG. */ - sigma_write(devc, init_array, sizeof(init_array)); + ret = sigma_write_sr(devc, init_array, sizeof(init_array)); + if (ret != SR_OK) + return ret; g_usleep(10 * 1000); - ftdi_usb_purge_buffers(&devc->ftdic); + ftdi_usb_purge_buffers(&devc->ftdi.ctx); - /* Wait until the FPGA asserts INIT_B. */ + /* + * Wait until the FPGA asserts INIT_B. Check in a maximum number + * of bursts with a given delay between them. Read as many pin + * capture results as the combination of FTDI chip and FTID lib + * may provide. Cope with absence of pin capture data in a cycle. + * This approach shall result in fast reponse in case of success, + * low cost of execution during wait, reliable error handling in + * the transport layer, and robust response to failure or absence + * of result data (hardware inactivity after stimulus). + */ retries = 10; while (retries--) { - ret = sigma_read(devc, &data, 1); - if (ret < 0) - return ret; - if (data & BB_PIN_INIT) - return SR_OK; - g_usleep(10 * 1000); + do { + ret = sigma_read_raw(devc, &data, sizeof(data)); + if (ret < 0) + return SR_ERR_IO; + if (ret == sizeof(data) && (data & BB_PIN_INIT)) + return SR_OK; + } while (ret == sizeof(data)); + if (retries) + g_usleep(10 * 1000); } return SR_ERR_TIMEOUT; @@ -402,52 +679,71 @@ static int sigma_fpga_init_bitbang(struct dev_context *devc) */ static int sigma_fpga_init_la(struct dev_context *devc) { - /* - * TODO Construct the sequence at runtime? Such that request data - * and response check values will match more apparently? - */ - uint8_t mode_regval = WMR_SDRAMINIT; - uint8_t logic_mode_start[] = { - /* Read ID register. */ - REG_ADDR_LOW | (READ_ID & 0xf), - REG_ADDR_HIGH | (READ_ID >> 4), - REG_READ_ADDR, - - /* Write 0x55 to scratch register, read back. */ - REG_ADDR_LOW | (WRITE_TEST & 0xf), - REG_DATA_LOW | 0x5, - REG_DATA_HIGH_WRITE | 0x5, - REG_READ_ADDR, - - /* Write 0xaa to scratch register, read back. */ - REG_DATA_LOW | 0xa, - REG_DATA_HIGH_WRITE | 0xa, - REG_READ_ADDR, - - /* Initiate SDRAM initialization in mode register. */ - REG_ADDR_LOW | (WRITE_MODE & 0xf), - REG_DATA_LOW | (mode_regval & 0xf), - REG_DATA_HIGH_WRITE | (mode_regval >> 4), - }; + uint8_t buf[20], *wrptr; + uint8_t data_55, data_aa, mode; uint8_t result[3]; + const uint8_t *rdptr; int ret; + wrptr = buf; + + /* Read ID register. */ + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID)); + write_u8_inc(&wrptr, REG_READ_ADDR); + + /* Write 0x55 to scratch register, read back. */ + data_55 = 0x55; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST)); + write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55)); + write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55)); + write_u8_inc(&wrptr, REG_READ_ADDR); + + /* Write 0xaa to scratch register, read back. 
*/ + data_aa = 0xaa; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST)); + write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa)); + write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa)); + write_u8_inc(&wrptr, REG_READ_ADDR); + + /* Initiate SDRAM initialization in mode register. */ + mode = WMR_SDRAMINIT; + write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE)); + write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE)); + write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode)); + write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode)); + /* * Send the command sequence which contains 3 READ requests. * Expect to see the corresponding 3 response bytes. */ - sigma_write(devc, logic_mode_start, sizeof(logic_mode_start)); - ret = sigma_read(devc, result, ARRAY_SIZE(result)); - if (ret != ARRAY_SIZE(result)) - goto err; - if (result[0] != 0xa6 || result[1] != 0x55 || result[2] != 0xaa) - goto err; + ret = sigma_write_sr(devc, buf, wrptr - buf); + if (ret != SR_OK) { + sr_err("Could not request LA start response."); + return ret; + } + ret = sigma_read_sr(devc, result, ARRAY_SIZE(result)); + if (ret != SR_OK) { + sr_err("Could not receive LA start response."); + return SR_ERR_IO; + } + rdptr = result; + if (read_u8_inc(&rdptr) != 0xa6) { + sr_err("Unexpected ID response."); + return SR_ERR_DATA; + } + if (read_u8_inc(&rdptr) != data_55) { + sr_err("Unexpected scratch read-back (55)."); + return SR_ERR_DATA; + } + if (read_u8_inc(&rdptr) != data_aa) { + sr_err("Unexpected scratch read-back (aa)."); + return SR_ERR_DATA; + } return SR_OK; - -err: - sr_err("Configuration failed. Invalid reply received."); - return SR_ERR; } /* @@ -456,7 +752,7 @@ err: * by the caller of this function. */ static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name, - uint8_t **bb_cmd, gsize *bb_cmd_size) + uint8_t **bb_cmd, size_t *bb_cmd_size) { uint8_t *firmware; size_t file_size; @@ -499,7 +795,7 @@ static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name, bb_size = file_size * 8 * 2; bb_stream = g_try_malloc(bb_size); if (!bb_stream) { - sr_err("%s: Failed to allocate bitbang stream", __func__); + sr_err("Memory allocation failed during firmware upload."); g_free(firmware); return SR_ERR_MALLOC; } @@ -529,8 +825,8 @@ static int upload_firmware(struct sr_context *ctx, struct dev_context *devc, enum sigma_firmware_idx firmware_idx) { int ret; - unsigned char *buf; - unsigned char pins; + uint8_t *buf; + uint8_t pins; size_t buf_size; const char *firmware; @@ -547,58 +843,65 @@ static int upload_firmware(struct sr_context *ctx, struct dev_context *devc, return SR_OK; } - devc->state.state = SIGMA_CONFIG; + devc->state = SIGMA_CONFIG; /* Set the cable to bitbang mode. */ - ret = ftdi_set_bitmode(&devc->ftdic, BB_PINMASK, BITMODE_BITBANG); + ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG); if (ret < 0) { - sr_err("ftdi_set_bitmode failed: %s", - ftdi_get_error_string(&devc->ftdic)); + sr_err("Could not setup cable mode for upload: %s", + ftdi_get_error_string(&devc->ftdi.ctx)); return SR_ERR; } - ret = ftdi_set_baudrate(&devc->ftdic, BB_BITRATE); + ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE); if (ret < 0) { - sr_err("ftdi_set_baudrate failed: %s", - ftdi_get_error_string(&devc->ftdic)); + sr_err("Could not setup bitrate for upload: %s", + ftdi_get_error_string(&devc->ftdi.ctx)); return SR_ERR; } /* Initiate FPGA configuration mode. 
*/ ret = sigma_fpga_init_bitbang(devc); - if (ret) + if (ret) { + sr_err("Could not initiate firmware upload to hardware"); return ret; + } /* Prepare wire format of the firmware image. */ ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size); if (ret != SR_OK) { - sr_err("Could not prepare file %s for download.", firmware); + sr_err("Could not prepare file %s for upload.", firmware); return ret; } /* Write the FPGA netlist to the cable. */ sr_info("Uploading firmware file '%s'.", firmware); - sigma_write(devc, buf, buf_size); - + ret = sigma_write_sr(devc, buf, buf_size); g_free(buf); + if (ret != SR_OK) { + sr_err("Could not upload firmware file '%s'.", firmware); + return ret; + } /* Leave bitbang mode and discard pending input data. */ - ret = ftdi_set_bitmode(&devc->ftdic, 0, BITMODE_RESET); + ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET); if (ret < 0) { - sr_err("ftdi_set_bitmode failed: %s", - ftdi_get_error_string(&devc->ftdic)); + sr_err("Could not setup cable mode after upload: %s", + ftdi_get_error_string(&devc->ftdi.ctx)); return SR_ERR; } - ftdi_usb_purge_buffers(&devc->ftdic); - while (sigma_read(devc, &pins, 1) == 1) + ftdi_usb_purge_buffers(&devc->ftdi.ctx); + while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0) ; /* Initialize the FPGA for logic-analyzer mode. */ ret = sigma_fpga_init_la(devc); - if (ret != SR_OK) + if (ret != SR_OK) { + sr_err("Hardware response after firmware upload failed."); return ret; + } /* Keep track of successful firmware download completion. */ - devc->state.state = SIGMA_IDLE; + devc->state = SIGMA_IDLE; devc->firmware_idx = firmware_idx; sr_info("Firmware uploaded."); @@ -635,10 +938,10 @@ SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc) uint64_t worst_cluster_time_ms; uint64_t count_msecs, acquire_msecs; - sr_sw_limits_init(&devc->acq_limits); + sr_sw_limits_init(&devc->limit.acquire); /* Get sample count limit, convert to msecs. */ - ret = sr_sw_limits_config_get(&devc->cfg_limits, + ret = sr_sw_limits_config_get(&devc->limit.config, SR_CONF_LIMIT_SAMPLES, &data); if (ret != SR_OK) return ret; @@ -646,10 +949,10 @@ SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc) g_variant_unref(data); count_msecs = 0; if (user_count) - count_msecs = 1000 * user_count / devc->samplerate + 1; + count_msecs = 1000 * user_count / devc->clock.samplerate + 1; /* Get time limit, which is in msecs. */ - ret = sr_sw_limits_config_get(&devc->cfg_limits, + ret = sr_sw_limits_config_get(&devc->limit.config, SR_CONF_LIMIT_MSEC, &data); if (ret != SR_OK) return ret; @@ -666,16 +969,16 @@ SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc) return SR_OK; /* Add some slack, and use that timeout for acquisition. */ - worst_cluster_time_ms = 1000 * 65536 / devc->samplerate; + worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate; acquire_msecs += 2 * worst_cluster_time_ms; data = g_variant_new_uint64(acquire_msecs); - ret = sr_sw_limits_config_set(&devc->acq_limits, + ret = sr_sw_limits_config_set(&devc->limit.acquire, SR_CONF_LIMIT_MSEC, data); g_variant_unref(data); if (ret != SR_OK) return ret; - sr_sw_limits_acquisition_start(&devc->acq_limits); + sr_sw_limits_acquisition_start(&devc->limit.acquire); return SR_OK; } @@ -717,19 +1020,66 @@ SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate) return SR_ERR_ARG; } +/* Gets called at probe time. Can seed software settings from hardware state. 
*/ +SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi) +{ + struct dev_context *devc; + int ret; + uint8_t regaddr, regval; + + devc = sdi->priv; + if (!devc) + return SR_ERR_ARG; + + /* Seed configuration values from defaults. */ + devc->firmware_idx = SIGMA_FW_NONE; + devc->clock.samplerate = samplerates[0]; + + /* TODO + * Ideally the device driver could retrieve recently stored + * details from hardware registers, thus re-use user specified + * configuration values across sigrok sessions. Which could + * avoid repeated expensive though unnecessary firmware uploads, + * improve performance and usability. Unfortunately it appears + * that the registers range which is documented as available for + * application use keeps providing 0xff data content. At least + * with the netlist version which ships with sigrok. The same + * was observed with unused registers in the first page. + */ + return SR_ERR_NA; + + /* This is for research, currently does not work yet. */ + ret = sigma_check_open(sdi); + regaddr = 16; + regaddr = 14; + ret = sigma_set_register(devc, regaddr, 'F'); + ret = sigma_get_register(devc, regaddr, ®val); + sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret); + ret = sigma_check_close(devc); + return ret; +} + +/* Gets called after successful (volatile) hardware configuration. */ +SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi) +{ + /* TODO See above, registers seem to not hold written data. */ + (void)sdi; + return SR_ERR_NA; +} + SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi) { struct dev_context *devc; struct drv_context *drvc; uint64_t samplerate; int ret; - int num_channels; + size_t num_channels; devc = sdi->priv; drvc = sdi->driver->context; /* Accept any caller specified rate which the hardware supports. */ - ret = sigma_normalize_samplerate(devc->samplerate, &samplerate); + ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate); if (ret != SR_OK) return ret; @@ -738,7 +1088,7 @@ SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi) * firmware is required and higher rates might limit the set * of available channels. */ - num_channels = devc->num_channels; + num_channels = devc->interp.num_channels; if (samplerate <= SR_MHZ(50)) { ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ); num_channels = 16; @@ -756,10 +1106,18 @@ SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi) * which the device will communicate within an "event"). */ if (ret == SR_OK) { - devc->num_channels = num_channels; - devc->samples_per_event = 16 / devc->num_channels; + devc->interp.num_channels = num_channels; + devc->interp.samples_per_event = 16 / devc->interp.num_channels; } + /* + * Store the firmware type and most recently configured samplerate + * in hardware, such that subsequent sessions can start from there. + * This is a "best effort" approach. Failure is non-fatal. 
+ */ + if (ret == SR_OK) + (void)sigma_store_hw_config(sdi); + return ret; } @@ -804,7 +1162,7 @@ static int alloc_submit_buffer(struct sr_dev_inst *sdi) if (!buffer->sample_data) return SR_ERR_MALLOC; buffer->write_pointer = buffer->sample_data; - sr_sw_limits_init(&devc->feed_limits); + sr_sw_limits_init(&devc->limit.submit); buffer->sdi = sdi; memset(&buffer->logic, 0, sizeof(buffer->logic)); @@ -824,9 +1182,9 @@ static int setup_submit_limit(struct dev_context *devc) GVariant *data; uint64_t total; - limits = &devc->feed_limits; + limits = &devc->limit.submit; - ret = sr_sw_limits_config_get(&devc->cfg_limits, + ret = sr_sw_limits_config_get(&devc->limit.config, SR_CONF_LIMIT_SAMPLES, &data); if (ret != SR_OK) return ret; @@ -896,8 +1254,8 @@ static int addto_submit_buffer(struct dev_context *devc, int ret; buffer = devc->buffer; - limits = &devc->feed_limits; - if (sr_sw_limits_check(limits)) + limits = &devc->limit.submit; + if (!devc->use_triggers && sr_sw_limits_check(limits)) count = 0; /* @@ -906,8 +1264,7 @@ static int addto_submit_buffer(struct dev_context *devc, * enforcement of user specified limits is exact. */ while (count--) { - WL16(buffer->write_pointer, sample); - buffer->write_pointer += buffer->unit_size; + write_u16le_inc(&buffer->write_pointer, sample); buffer->curr_samples++; if (buffer->curr_samples == buffer->max_samples) { ret = flush_submit_buffer(devc); @@ -915,13 +1272,230 @@ static int addto_submit_buffer(struct dev_context *devc, return ret; } sr_sw_limits_update_samples_read(limits, 1); - if (sr_sw_limits_check(limits)) + if (!devc->use_triggers && sr_sw_limits_check(limits)) break; } return SR_OK; } +static void sigma_location_break_down(struct sigma_location *loc) +{ + + loc->line = loc->raw / ROW_LENGTH_U16; + loc->line += ROW_COUNT; + loc->line %= ROW_COUNT; + loc->cluster = loc->raw % ROW_LENGTH_U16; + loc->event = loc->cluster % EVENTS_PER_CLUSTER; + loc->cluster = loc->cluster / EVENTS_PER_CLUSTER; +} + +static gboolean sigma_location_is_eq(struct sigma_location *loc1, + struct sigma_location *loc2, gboolean with_event) +{ + + if (!loc1 || !loc2) + return FALSE; + + if (loc1->line != loc2->line) + return FALSE; + if (loc1->cluster != loc2->cluster) + return FALSE; + + if (with_event && loc1->event != loc2->event) + return FALSE; + + return TRUE; +} + +/* Decrement the broken-down location fields (leave 'raw' as is). */ +static void sigma_location_decrement(struct sigma_location *loc, + gboolean with_event) +{ + + if (!loc) + return; + + if (with_event) { + if (loc->event--) + return; + loc->event = EVENTS_PER_CLUSTER - 1; + } + + if (loc->cluster--) + return; + loc->cluster = CLUSTERS_PER_ROW - 1; + + if (loc->line--) + return; + loc->line = ROW_COUNT - 1; +} + +static void sigma_location_increment(struct sigma_location *loc) +{ + + if (!loc) + return; + + if (++loc->event < EVENTS_PER_CLUSTER) + return; + loc->event = 0; + if (++loc->cluster < CLUSTERS_PER_ROW) + return; + loc->cluster = 0; + if (++loc->line < ROW_COUNT) + return; + loc->line = 0; +} + +/* + * Determine the position where to open the period of trigger match + * checks. Setup an "impossible" location when triggers are not used. + * Start from the hardware provided 'trig' position otherwise, and + * go back a few clusters, but don't go before the 'start' position. 
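+ * (The caller below passes 4 * EVENTS_PER_CLUSTER, which arms the
+ * software trigger check roughly four clusters before the hardware
+ * provided match position.)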
+ */ +static void rewind_trig_arm_pos(struct dev_context *devc, size_t count) +{ + struct sigma_sample_interp *interp; + + if (!devc) + return; + interp = &devc->interp; + + if (!devc->use_triggers) { + interp->trig_arm.raw = ~0; + sigma_location_break_down(&interp->trig_arm); + return; + } + + interp->trig_arm = interp->trig; + while (count--) { + if (sigma_location_is_eq(&interp->trig_arm, &interp->start, TRUE)) + break; + sigma_location_decrement(&interp->trig_arm, TRUE); + } +} + +static int alloc_sample_buffer(struct dev_context *devc, + size_t stop_pos, size_t trig_pos, uint8_t mode) +{ + struct sigma_sample_interp *interp; + gboolean wrapped; + size_t alloc_size; + + interp = &devc->interp; + + /* + * Either fetch sample memory from absolute start of DRAM to the + * current write position. Or from after the current write position + * to before the current write position, if the write pointer has + * wrapped around at the upper DRAM boundary. Assume that the line + * which most recently got written to is of unknown state, ignore + * its content in the "wrapped" case. + */ + wrapped = mode & RMR_ROUND; + interp->start.raw = 0; + interp->stop.raw = stop_pos; + if (wrapped) { + interp->start.raw = stop_pos; + interp->start.raw >>= ROW_SHIFT; + interp->start.raw++; + interp->start.raw <<= ROW_SHIFT; + interp->stop.raw = stop_pos; + interp->stop.raw >>= ROW_SHIFT; + interp->stop.raw--; + interp->stop.raw <<= ROW_SHIFT; + } + interp->trig.raw = trig_pos; + interp->iter.raw = 0; + + /* Break down raw values to line, cluster, event fields. */ + sigma_location_break_down(&interp->start); + sigma_location_break_down(&interp->stop); + sigma_location_break_down(&interp->trig); + sigma_location_break_down(&interp->iter); + + /* + * The hardware provided trigger location "is late" because of + * latency in hardware pipelines. It points to after the trigger + * condition match. Arrange for a software check of sample data + * matches starting just a little before the hardware provided + * location. The "4 clusters" distance is an arbitrary choice. + */ + rewind_trig_arm_pos(devc, 4 * EVENTS_PER_CLUSTER); + memset(&interp->trig_chk, 0, sizeof(interp->trig_chk)); + + /* Determine which DRAM lines to fetch from the device. */ + memset(&interp->fetch, 0, sizeof(interp->fetch)); + interp->fetch.lines_total = interp->stop.line + 1; + interp->fetch.lines_total -= interp->start.line; + interp->fetch.lines_total += ROW_COUNT; + interp->fetch.lines_total %= ROW_COUNT; + interp->fetch.lines_done = 0; + + /* Arrange for chunked download, N lines per USB request. */ + interp->fetch.lines_per_read = 32; + alloc_size = sizeof(devc->interp.fetch.rcvd_lines[0]); + alloc_size *= devc->interp.fetch.lines_per_read; + devc->interp.fetch.rcvd_lines = g_try_malloc0(alloc_size); + if (!devc->interp.fetch.rcvd_lines) + return SR_ERR_MALLOC; + + return SR_OK; +} + +static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx); +static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx); + +static int fetch_sample_buffer(struct dev_context *devc) +{ + struct sigma_sample_interp *interp; + size_t count; + int ret; + const uint8_t *rdptr; + uint16_t ts, data; + + interp = &devc->interp; + + /* First invocation? Seed the iteration position. */ + if (!interp->fetch.lines_done) { + interp->iter = interp->start; + } + + /* Get another set of DRAM lines in one read call. 
*/ + count = interp->fetch.lines_total - interp->fetch.lines_done; + if (count > interp->fetch.lines_per_read) + count = interp->fetch.lines_per_read; + ret = sigma_read_dram(devc, interp->iter.line, count, + (uint8_t *)interp->fetch.rcvd_lines); + if (ret != SR_OK) + return ret; + interp->fetch.lines_rcvd = count; + interp->fetch.curr_line = &interp->fetch.rcvd_lines[0]; + + /* First invocation? Get initial timestamp and sample data. */ + if (!interp->fetch.lines_done) { + rdptr = (void *)interp->fetch.curr_line; + ts = read_u16le_inc(&rdptr); + data = read_u16le_inc(&rdptr); + if (interp->samples_per_event == 4) { + data = sigma_deinterlace_data_4x4(data, 0); + } else if (interp->samples_per_event == 2) { + data = sigma_deinterlace_data_2x8(data, 0); + } + interp->last.ts = ts; + interp->last.sample = data; + } + + return SR_OK; +} + +static void free_sample_buffer(struct dev_context *devc) +{ + g_free(devc->interp.fetch.rcvd_lines); + devc->interp.fetch.rcvd_lines = NULL; +} + /* * In 100 and 200 MHz mode, only a single pin rising/falling can be * set as trigger. In other modes, two rising/falling triggers can be set, @@ -937,13 +1511,21 @@ SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi) struct sr_trigger_stage *stage; struct sr_trigger_match *match; const GSList *l, *m; - int channelbit, trigger_set; + uint16_t channelbit; + size_t trigger_set; devc = sdi->priv; - memset(&devc->trigger, 0, sizeof(struct sigma_trigger)); - if (!(trigger = sr_session_trigger_get(sdi->session))) + memset(&devc->trigger, 0, sizeof(devc->trigger)); + devc->use_triggers = FALSE; + trigger = sr_session_trigger_get(sdi->session); + if (!trigger) return SR_OK; + if (!ASIX_SIGMA_WITH_TRIGGER) { + sr_warn("Trigger support is not implemented. Ignoring the spec."); + return SR_OK; + } + trigger_set = 0; for (l = trigger->stages; l; l = l->next) { stage = l->data; @@ -952,21 +1534,19 @@ SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi) /* Ignore disabled channels with a trigger. */ if (!match->channel->enabled) continue; - channelbit = 1 << (match->channel->index); - if (devc->samplerate >= SR_MHZ(100)) { + channelbit = BIT(match->channel->index); + if (devc->clock.samplerate >= SR_MHZ(100)) { /* Fast trigger support. */ if (trigger_set) { - sr_err("Only a single pin trigger is " - "supported in 100 and 200MHz mode."); + sr_err("100/200MHz modes limited to single trigger pin."); return SR_ERR; } - if (match->match == SR_TRIGGER_FALLING) + if (match->match == SR_TRIGGER_FALLING) { devc->trigger.fallingmask |= channelbit; - else if (match->match == SR_TRIGGER_RISING) + } else if (match->match == SR_TRIGGER_RISING) { devc->trigger.risingmask |= channelbit; - else { - sr_err("Only rising/falling trigger is " - "supported in 100 and 200MHz mode."); + } else { + sr_err("100/200MHz modes limited to edge trigger."); return SR_ERR; } @@ -993,82 +1573,83 @@ SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi) * does not permit ORed triggers. */ if (trigger_set > 1) { - sr_err("Only 1 rising/falling trigger is supported."); + sr_err("Limited to 1 edge trigger."); return SR_ERR; } } } } + /* Keep track whether triggers are involved during acquisition. */ + devc->use_triggers = TRUE; + return SR_OK; } -/* Software trigger to determine exact trigger position. 
*/ -static int get_trigger_offset(uint8_t *samples, uint16_t last_sample, - struct sigma_trigger *t) +static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample) { - int i; - uint16_t sample = 0; - - for (i = 0; i < 8; i++) { - if (i > 0) - last_sample = sample; - sample = samples[2 * i] | (samples[2 * i + 1] << 8); - - /* Simple triggers. */ - if ((sample & t->simplemask) != t->simplevalue) - continue; - - /* Rising edge. */ - if (((last_sample & t->risingmask) != 0) || - ((sample & t->risingmask) != t->risingmask)) - continue; - - /* Falling edge. */ - if ((last_sample & t->fallingmask) != t->fallingmask || - (sample & t->fallingmask) != 0) - continue; + struct sigma_sample_interp *interp; + uint16_t last_sample; + struct sigma_trigger *t; + gboolean simple_match, rising_match, falling_match; + gboolean matched; - break; - } + /* + * This logic is about improving the precision of the hardware + * provided trigger match position. Software checks are only + * required for a short range of samples, and only when a user + * specified trigger condition was involved during acquisition. + */ + if (!devc) + return FALSE; + if (!devc->use_triggers) + return FALSE; + interp = &devc->interp; + if (!interp->trig_chk.armed) + return FALSE; - /* If we did not match, return original trigger pos. */ - return i & 0x7; + /* + * Check if the current sample and its most recent transition + * match the initially provided trigger condition. The data + * must not fail either of the individual checks. Unused + * trigger features remain neutral in the summary expression. + */ + last_sample = interp->last.sample; + t = &devc->trigger; + simple_match = (sample & t->simplemask) == t->simplevalue; + rising_match = ((last_sample & t->risingmask) == 0) && + ((sample & t->risingmask) == t->risingmask); + falling_match = ((last_sample & t->fallingmask) == t->fallingmask) && + ((sample & t->fallingmask) == 0); + matched = simple_match && rising_match && falling_match; + + return matched; } -static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample) +static int send_trigger_marker(struct dev_context *devc) { - /* TODO - * Check whether the combination of this very sample and the - * previous state match the configured trigger condition. This - * improves the resolution of the trigger marker's position. - * The hardware provided position is coarse, and may point to - * a position before the actual match. - * - * See the previous get_trigger_offset() implementation. This - * code needs to get re-used here. 
- */ - (void)devc; - (void)sample; - (void)get_trigger_offset; + int ret; - return FALSE; + ret = flush_submit_buffer(devc); + if (ret != SR_OK) + return ret; + ret = std_session_send_df_trigger(devc->buffer->sdi); + if (ret != SR_OK) + return ret; + + return SR_OK; } static int check_and_submit_sample(struct dev_context *devc, - uint16_t sample, size_t count, gboolean check_trigger) + uint16_t sample, size_t count) { gboolean triggered; int ret; - triggered = check_trigger && sample_matches_trigger(devc, sample); + triggered = sample_matches_trigger(devc, sample); if (triggered) { - ret = flush_submit_buffer(devc); - if (ret != SR_OK) - return ret; - ret = std_session_send_df_trigger(devc->buffer->sdi); - if (ret != SR_OK) - return ret; + send_trigger_marker(devc); + devc->interp.trig_chk.matched = TRUE; } ret = addto_submit_buffer(devc, sample, count); @@ -1078,12 +1659,52 @@ static int check_and_submit_sample(struct dev_context *devc, return SR_OK; } +static void sigma_location_check(struct dev_context *devc) +{ + struct sigma_sample_interp *interp; + + if (!devc) + return; + interp = &devc->interp; + + /* + * Manage the period of trigger match checks in software. + * Start supervision somewhere before the hardware provided + * location. Stop supervision after an arbitrary amount of + * event slots, or when a match was found. + */ + if (interp->trig_chk.armed) { + interp->trig_chk.evt_remain--; + if (!interp->trig_chk.evt_remain || interp->trig_chk.matched) + interp->trig_chk.armed = FALSE; + } + if (!interp->trig_chk.armed && !interp->trig_chk.matched) { + if (sigma_location_is_eq(&interp->iter, &interp->trig_arm, TRUE)) { + interp->trig_chk.armed = TRUE; + interp->trig_chk.matched = FALSE; + interp->trig_chk.evt_remain = 8 * EVENTS_PER_CLUSTER; + } + } + + /* + * Force a trigger marker when the software check found no match + * yet while the hardware provided position was reached. This + * very probably is a user initiated button press. + */ + if (interp->trig_chk.armed) { + if (sigma_location_is_eq(&interp->iter, &interp->trig, TRUE)) { + (void)send_trigger_marker(devc); + interp->trig_chk.matched = TRUE; + } + } +} + /* * Return the timestamp of "DRAM cluster". */ static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster) { - return (cluster->timestamp_hi << 8) | cluster->timestamp_lo; + return read_u16le((const uint8_t *)&cluster->timestamp); } /* @@ -1091,13 +1712,7 @@ static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster) */ static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx) { - uint16_t sample; - - sample = 0; - sample |= cl->samples[idx].sample_lo << 0; - sample |= cl->samples[idx].sample_hi << 8; - sample = (sample >> 8) | (sample << 8); - return sample; + return read_u16le((const uint8_t *)&cl->samples[idx]); } /* @@ -1105,7 +1720,7 @@ static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx) * One 16bit item contains two samples of 8bits each. The bits of * multiple samples are interleaved. */ -static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx) +static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx) { uint16_t outdata; @@ -1127,7 +1742,7 @@ static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx) * One 16bit item contains four samples of 4bits each. The bits of * multiple samples are interleaved. 
*/ -static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx) +static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx) { uint16_t outdata; @@ -1144,9 +1759,9 @@ static void sigma_decode_dram_cluster(struct dev_context *devc, struct sigma_dram_cluster *dram_cluster, size_t events_in_cluster, gboolean triggered) { - struct sigma_state *ss; uint16_t tsdiff, ts, sample, item16; - unsigned int i; + size_t count; + size_t evt; if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER) triggered = FALSE; @@ -1161,16 +1776,14 @@ static void sigma_decode_dram_cluster(struct dev_context *devc, * for simple level and edge triggers. It would not for timed or * counted conditions, which currently are not supported.) */ - ss = &devc->state; ts = sigma_dram_cluster_ts(dram_cluster); - tsdiff = ts - ss->lastts; + tsdiff = ts - devc->interp.last.ts; if (tsdiff > 0) { - size_t count; - sample = ss->lastsample; - count = tsdiff * devc->samples_per_event; - (void)check_and_submit_sample(devc, sample, count, FALSE); + sample = devc->interp.last.sample; + count = tsdiff * devc->interp.samples_per_event; + (void)check_and_submit_sample(devc, sample, count); } - ss->lastts = ts + EVENTS_PER_CLUSTER; + devc->interp.last.ts = ts + EVENTS_PER_CLUSTER; /* * Grab sample data from the current cluster and prepare their @@ -1180,28 +1793,36 @@ static void sigma_decode_dram_cluster(struct dev_context *devc, * buffer depth is neither assumed nor required here. */ sample = 0; - for (i = 0; i < events_in_cluster; i++) { - item16 = sigma_dram_cluster_data(dram_cluster, i); - if (devc->samplerate == SR_MHZ(200)) { - sample = sigma_deinterlace_200mhz_data(item16, 0); - check_and_submit_sample(devc, sample, 1, triggered); - sample = sigma_deinterlace_200mhz_data(item16, 1); - check_and_submit_sample(devc, sample, 1, triggered); - sample = sigma_deinterlace_200mhz_data(item16, 2); - check_and_submit_sample(devc, sample, 1, triggered); - sample = sigma_deinterlace_200mhz_data(item16, 3); - check_and_submit_sample(devc, sample, 1, triggered); - } else if (devc->samplerate == SR_MHZ(100)) { - sample = sigma_deinterlace_100mhz_data(item16, 0); - check_and_submit_sample(devc, sample, 1, triggered); - sample = sigma_deinterlace_100mhz_data(item16, 1); - check_and_submit_sample(devc, sample, 1, triggered); + for (evt = 0; evt < events_in_cluster; evt++) { + item16 = sigma_dram_cluster_data(dram_cluster, evt); + if (devc->interp.samples_per_event == 4) { + sample = sigma_deinterlace_data_4x4(item16, 0); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; + sample = sigma_deinterlace_data_4x4(item16, 1); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; + sample = sigma_deinterlace_data_4x4(item16, 2); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; + sample = sigma_deinterlace_data_4x4(item16, 3); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; + } else if (devc->interp.samples_per_event == 2) { + sample = sigma_deinterlace_data_2x8(item16, 0); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; + sample = sigma_deinterlace_data_2x8(item16, 1); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; } else { sample = item16; - check_and_submit_sample(devc, sample, 1, triggered); + check_and_submit_sample(devc, sample, 1); + devc->interp.last.sample = sample; } + sigma_location_increment(&devc->interp.iter); + sigma_location_check(devc); } - 
ss->lastsample = sample; } /* @@ -1218,19 +1839,19 @@ static int decode_chunk_ts(struct dev_context *devc, size_t events_in_line, size_t trigger_event) { struct sigma_dram_cluster *dram_cluster; - unsigned int clusters_in_line; - unsigned int events_in_cluster; - unsigned int i; - uint32_t trigger_cluster; + size_t clusters_in_line; + size_t events_in_cluster; + size_t cluster; + size_t trigger_cluster; clusters_in_line = events_in_line; clusters_in_line += EVENTS_PER_CLUSTER - 1; clusters_in_line /= EVENTS_PER_CLUSTER; - trigger_cluster = ~0; /* Check if trigger is in this chunk. */ + trigger_cluster = ~UINT64_C(0); if (trigger_event < EVENTS_PER_ROW) { - if (devc->samplerate <= SR_MHZ(50)) { + if (devc->clock.samplerate <= SR_MHZ(50)) { trigger_event -= MIN(EVENTS_PER_CLUSTER - 1, trigger_event); } @@ -1240,11 +1861,11 @@ static int decode_chunk_ts(struct dev_context *devc, } /* For each full DRAM cluster. */ - for (i = 0; i < clusters_in_line; i++) { - dram_cluster = &dram_line->cluster[i]; + for (cluster = 0; cluster < clusters_in_line; cluster++) { + dram_cluster = &dram_line->cluster[cluster]; /* The last cluster might not be full. */ - if ((i == clusters_in_line - 1) && + if ((cluster == clusters_in_line - 1) && (events_in_line % EVENTS_PER_CLUSTER)) { events_in_cluster = events_in_line % EVENTS_PER_CLUSTER; } else { @@ -1252,7 +1873,7 @@ static int decode_chunk_ts(struct dev_context *devc, } sigma_decode_dram_cluster(devc, dram_cluster, - events_in_cluster, i == trigger_cluster); + events_in_cluster, cluster == trigger_cluster); } return SR_OK; @@ -1260,125 +1881,102 @@ static int decode_chunk_ts(struct dev_context *devc, static int download_capture(struct sr_dev_inst *sdi) { - const uint32_t chunks_per_read = 32; - struct dev_context *devc; - struct sigma_dram_line *dram_line; - int bufsz; + struct sigma_sample_interp *interp; uint32_t stoppos, triggerpos; uint8_t modestatus; - uint32_t i; - uint32_t dl_lines_total, dl_lines_curr, dl_lines_done; - uint32_t dl_first_line, dl_line; - uint32_t dl_events_in_line; - uint32_t trg_line, trg_event; int ret; devc = sdi->priv; - dl_events_in_line = EVENTS_PER_ROW; + interp = &devc->interp; sr_info("Downloading sample data."); - devc->state.state = SIGMA_DOWNLOAD; + devc->state = SIGMA_DOWNLOAD; /* * Ask the hardware to stop data acquisition. Reception of the * FORCESTOP request makes the hardware "disable RLE" (store * clusters to DRAM regardless of whether pin state changes) and * raise the POSTTRIGGERED flag. + * + * Then switch the hardware from DRAM write (data acquisition) + * to DRAM read (sample memory download). */ - sigma_set_register(devc, WRITE_MODE, WMR_FORCESTOP | WMR_SDRAMWRITEEN); + modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN; + ret = sigma_set_register(devc, WRITE_MODE, modestatus); + if (ret != SR_OK) + return ret; do { - if (sigma_read_register(devc, READ_MODE, &modestatus, 1) != 1) { - sr_err("failed while waiting for RMR_POSTTRIGGERED bit"); + ret = sigma_get_register(devc, READ_MODE, &modestatus); + if (ret != SR_OK) { + sr_err("Could not poll for post-trigger state."); return FALSE; } } while (!(modestatus & RMR_POSTTRIGGERED)); + ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN); + if (ret != SR_OK) + return ret; - /* Set SDRAM Read Enable. */ - sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN); - - /* Get the current position. */ - sigma_read_pos(devc, &stoppos, &triggerpos); - - /* Check if trigger has fired. 
*/ - if (sigma_read_register(devc, READ_MODE, &modestatus, 1) != 1) { - sr_err("failed to read READ_MODE register"); + /* + * Get the current positions (acquisition write pointer, and + * trigger match location). With disabled triggers, use a value + * for the location that will never match during interpretation. + */ + ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus); + if (ret != SR_OK) { + sr_err("Could not query capture positions/state."); return FALSE; } - trg_line = ~0; - trg_event = ~0; - if (modestatus & RMR_TRIGGERED) { - trg_line = triggerpos >> 9; - trg_event = triggerpos & 0x1ff; - } + if (!devc->use_triggers) + triggerpos = ~0; + if (!(modestatus & RMR_TRIGGERED)) + triggerpos = ~0; /* - * Determine how many "DRAM lines" of 1024 bytes each we need to - * retrieve from the Sigma hardware, so that we have a complete - * set of samples. Note that the last line need not contain 64 - * clusters, it might be partially filled only. - * - * When RMR_ROUND is set, the circular buffer in DRAM has wrapped - * around. Since the status of the very next line is uncertain in - * that case, we skip it and start reading from the next line. + * Determine which area of the sample memory to retrieve, + * allocate a receive buffer, and setup counters/pointers. */ - dl_first_line = 0; - dl_lines_total = (stoppos >> ROW_SHIFT) + 1; - if (modestatus & RMR_ROUND) { - dl_first_line = dl_lines_total + 1; - dl_lines_total = ROW_COUNT - 2; - } - dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line)); - if (!dram_line) + ret = alloc_sample_buffer(devc, stoppos, triggerpos, modestatus); + if (ret != SR_OK) return FALSE; + ret = alloc_submit_buffer(sdi); if (ret != SR_OK) return FALSE; ret = setup_submit_limit(devc); if (ret != SR_OK) return FALSE; - dl_lines_done = 0; - while (dl_lines_total > dl_lines_done) { - /* We can download only up-to 32 DRAM lines in one go! */ - dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done); - - dl_line = dl_first_line + dl_lines_done; - dl_line %= ROW_COUNT; - bufsz = sigma_read_dram(devc, dl_line, dl_lines_curr, - (uint8_t *)dram_line); - /* TODO: Check bufsz. For now, just avoid compiler warnings. */ - (void)bufsz; - - /* This is the first DRAM line, so find the initial timestamp. */ - if (dl_lines_done == 0) { - devc->state.lastts = - sigma_dram_cluster_ts(&dram_line[0].cluster[0]); - devc->state.lastsample = 0; - } + while (interp->fetch.lines_done < interp->fetch.lines_total) { + size_t dl_events_in_line, trigger_event; - for (i = 0; i < dl_lines_curr; i++) { - uint32_t trigger_event = ~0; - /* The last "DRAM line" can be only partially full. */ - if (dl_lines_done + i == dl_lines_total - 1) - dl_events_in_line = stoppos & 0x1ff; - - /* Test if the trigger happened on this line. */ - if (dl_lines_done + i == trg_line) - trigger_event = trg_event; + /* Read another chunk of sample memory (several lines). */ + ret = fetch_sample_buffer(devc); + if (ret != SR_OK) + return FALSE; - decode_chunk_ts(devc, dram_line + i, + /* Process lines of sample data. Last line may be short. 
@@ -1394,7 +1992,7 @@ static int sigma_capture_mode(struct sr_dev_inst *sdi)
         struct dev_context *devc;
 
         devc = sdi->priv;
-        if (sr_sw_limits_check(&devc->acq_limits))
+        if (sr_sw_limits_check(&devc->limit.acquire))
                 return download_capture(sdi);
 
         return TRUE;
@@ -1411,7 +2009,7 @@ SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
         sdi = cb_data;
         devc = sdi->priv;
 
-        if (devc->state.state == SIGMA_IDLE)
+        if (devc->state == SIGMA_IDLE)
                 return TRUE;
 
         /*
@@ -1420,48 +2018,71 @@ SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
          * keep checking configured limits which will terminate the
          * acquisition and initiate download.
          */
-        if (devc->state.state == SIGMA_STOPPING)
+        if (devc->state == SIGMA_STOPPING)
                 return download_capture(sdi);
-        if (devc->state.state == SIGMA_CAPTURE)
+        if (devc->state == SIGMA_CAPTURE)
                 return sigma_capture_mode(sdi);
 
         return TRUE;
 }
 
 /* Build a LUT entry used by the trigger functions. */
-static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
+static void build_lut_entry(uint16_t *lut_entry,
+        uint16_t spec_value, uint16_t spec_mask)
 {
-        int i, j, k, bit;
-
-        /* For each quad channel. */
-        for (i = 0; i < 4; i++) {
-                entry[i] = 0xffff;
+        size_t quad, bitidx, ch;
+        uint16_t quadmask, bitmask;
+        gboolean spec_value_low, bit_idx_low;
 
-                /* For each bit in LUT. */
-                for (j = 0; j < 16; j++)
-
-                        /* For each channel in quad. */
-                        for (k = 0; k < 4; k++) {
-                                bit = 1 << (i * 4 + k);
-
-                                /* Set bit in entry */
-                                if ((mask & bit) && ((!(value & bit)) !=
-                                    (!(j & (1 << k)))))
-                                        entry[i] &= ~(1 << j);
+        /*
+         * For each quad-channel-group, for each bit in the LUT (each
+         * bit pattern of the channel signals, aka LUT address), for
+         * each channel in the quad, setup the bit in the LUT entry.
+         *
+         * Start from all-ones in the LUT (true, always matches), then
+         * "pessimize the truthness" for specified conditions.
+         */
+        for (quad = 0; quad < 4; quad++) {
+                lut_entry[quad] = ~0;
+                for (bitidx = 0; bitidx < 16; bitidx++) {
+                        for (ch = 0; ch < 4; ch++) {
+                                quadmask = BIT(ch);
+                                bitmask = quadmask << (quad * 4);
+                                if (!(spec_mask & bitmask))
+                                        continue;
+                                /*
+                                 * This bit is part of the spec. The
+                                 * condition which gets checked here
+                                 * (got checked in all implementations
+                                 * so far) is uncertain. A bit position
+                                 * in the current index' number(!) is
+                                 * checked?
+                                 */
+                                spec_value_low = !(spec_value & bitmask);
+                                bit_idx_low = !(bitidx & quadmask);
+                                if (spec_value_low == bit_idx_low)
+                                        continue;
+                                lut_entry[quad] &= ~BIT(bitidx);
                         }
+                }
         }
 }
 
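/*
 * Illustration only (not part of the diff): a standalone re-statement
 * of the LUT entry construction above, for a single 4-channel group.
 * For a "channel 0 must be low" condition (value 0x0000, mask 0x0001)
 * it yields 0x5555, i.e. only LUT addresses with bit 0 cleared still
 * match. Names are local to this sketch.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t sketch_lut_quad(uint16_t value, uint16_t mask, size_t quad)
{
	uint16_t entry;
	size_t addr, ch;

	entry = 0xffff;	/* Start from "always matches". */
	for (addr = 0; addr < 16; addr++) {
		for (ch = 0; ch < 4; ch++) {
			uint16_t bit = 1u << (quad * 4 + ch);
			if (!(mask & bit))
				continue;
			/* Clear addresses which contradict the spec. */
			if (!!(value & bit) != !!(addr & (1u << ch)))
				entry &= ~(1u << addr);
		}
	}

	return entry;
}

int main(void)
{
	printf("0x%04x\n", sketch_lut_quad(0x0000, 0x0001, 0));	/* 0x5555 */
	return 0;
}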
 /* Add a logical function to LUT mask. */
 static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
-        int index, int neg, uint16_t *mask)
+        size_t index, gboolean neg, uint16_t *mask)
 {
-        int i, j;
-        int x[2][2], tmp, a, b, aset, bset, rset;
+        int x[2][2], a, b, aset, bset, rset;
+        size_t bitidx;
 
-        memset(x, 0, 4 * sizeof(int));
+        /*
+         * Beware! The x, a, b, aset, bset, rset variables strictly
+         * require the limited 0..1 range. They are not interpreted
+         * as logically true, instead bit arith is done on them.
+         */
 
-        /* Trigger detect condition. */
+        /* Construct a pattern which detects the condition. */
+        memset(x, 0, sizeof(x));
         switch (oper) {
         case OP_LEVEL:
                 x[0][1] = 1;
@@ -1497,8 +2118,11 @@ static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
                 break;
         }
 
-        /* Transpose if neg is set. */
+        /* Transpose the pattern if the condition is negated. */
         if (neg) {
+                size_t i, j;
+                int tmp;
+
                 for (i = 0; i < 2; i++) {
                         for (j = 0; j < 2; j++) {
                                 tmp = x[i][j];
@@ -1508,29 +2132,30 @@ static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
                 }
         }
 
-        /* Update mask with function. */
-        for (i = 0; i < 16; i++) {
-                a = (i >> (2 * index + 0)) & 1;
-                b = (i >> (2 * index + 1)) & 1;
+        /* Update the LUT mask with the function's condition. */
+        for (bitidx = 0; bitidx < 16; bitidx++) {
+                a = (bitidx & BIT(2 * index + 0)) ? 1 : 0;
+                b = (bitidx & BIT(2 * index + 1)) ? 1 : 0;
 
-                aset = (*mask >> i) & 1;
+                aset = (*mask & BIT(bitidx)) ? 1 : 0;
                 bset = x[b][a];
 
-                rset = 0;
                 if (func == FUNC_AND || func == FUNC_NAND)
                         rset = aset & bset;
                 else if (func == FUNC_OR || func == FUNC_NOR)
                         rset = aset | bset;
                 else if (func == FUNC_XOR || func == FUNC_NXOR)
                         rset = aset ^ bset;
+                else
+                        rset = 0;
 
                 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
-                        rset = !rset;
-
-                *mask &= ~(1 << i);
+                        rset = 1 - rset;
                 if (rset)
-                        *mask |= 1 << i;
+                        *mask |= BIT(bitidx);
+                else
+                        *mask &= ~BIT(bitidx);
         }
 }
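/*
 * Illustration only (not part of the diff): the 16-bit trigger mask is
 * a truth table over the LUT address bits, and add_trigger_function()
 * above folds a 2x2 detection pattern into it. The pattern used here
 * (true only for b == 0, a == 1, mirroring the single OP_LEVEL
 * assignment that is visible above) and the OR combination are
 * assumptions for this standalone sketch; with an empty mask and
 * condition slot 0 they yield 0x2222.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t sketch_fold_or(uint16_t mask, size_t index)
{
	int x[2][2] = { { 0, 1 }, { 0, 0 } };	/* x[b][a], only x[0][1] set. */
	size_t addr;

	for (addr = 0; addr < 16; addr++) {
		int a = (addr >> (2 * index + 0)) & 1;
		int b = (addr >> (2 * index + 1)) & 1;
		int aset = (mask >> addr) & 1;
		int rset = aset | x[b][a];	/* FUNC_OR combination. */

		if (rset)
			mask |= 1u << addr;
		else
			mask &= ~(1u << addr);
	}

	return mask;
}

int main(void)
{
	printf("0x%04x\n", sketch_fold_or(0, 0));	/* 0x2222 */
	return 0;
}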
@@ -1542,46 +2167,59 @@ static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
 SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
         struct triggerlut *lut)
 {
-        int i,j;
-        uint16_t masks[2] = { 0, 0 };
+        uint16_t masks[2];
+        size_t bitidx, condidx;
+        uint16_t value, mask;
 
-        memset(lut, 0, sizeof(struct triggerlut));
+        /* Setup something that "won't match" in the absence of a spec. */
+        memset(lut, 0, sizeof(*lut));
+        if (!devc->use_triggers)
+                return SR_OK;
 
-        /* Constant for simple triggers. */
+        /* Start assuming simple triggers. Edges are handled below. */
         lut->m4 = 0xa000;
-
-        /* Value/mask trigger support. */
-        build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
-                lut->m2d);
-
-        /* Rise/fall trigger support. */
-        for (i = 0, j = 0; i < 16; i++) {
-                if (devc->trigger.risingmask & (1 << i) ||
-                    devc->trigger.fallingmask & (1 << i))
-                        masks[j++] = 1 << i;
+        lut->m3q = 0xffff;
+
+        /* Process value/mask triggers. */
+        value = devc->trigger.simplevalue;
+        mask = devc->trigger.simplemask;
+        build_lut_entry(lut->m2d, value, mask);
+
+        /* Scan for and process rise/fall triggers. */
+        memset(&masks, 0, sizeof(masks));
+        condidx = 0;
+        for (bitidx = 0; bitidx < 16; bitidx++) {
+                mask = BIT(bitidx);
+                value = devc->trigger.risingmask | devc->trigger.fallingmask;
+                if (!(value & mask))
+                        continue;
+                if (condidx == 0)
+                        build_lut_entry(lut->m0d, mask, mask);
+                if (condidx == 1)
+                        build_lut_entry(lut->m1d, mask, mask);
+                masks[condidx++] = mask;
+                if (condidx == ARRAY_SIZE(masks))
+                        break;
         }
-        build_lut_entry(masks[0], masks[0], lut->m0d);
-        build_lut_entry(masks[1], masks[1], lut->m1d);
-
-        /* Add glue logic */
+        /* Add glue logic for rise/fall triggers. */
         if (masks[0] || masks[1]) {
-                /* Transition trigger. */
+                lut->m3q = 0;
                 if (masks[0] & devc->trigger.risingmask)
-                        add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
+                        add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q);
                 if (masks[0] & devc->trigger.fallingmask)
-                        add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
+                        add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q);
                 if (masks[1] & devc->trigger.risingmask)
-                        add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
+                        add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q);
                 if (masks[1] & devc->trigger.fallingmask)
-                        add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
-        } else {
-                /* Only value/mask trigger. */
-                lut->m3 = 0xffff;
+                        add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q);
         }
 
         /* Triggertype: event. */
-        lut->params.selres = 3;
+        lut->params.selres = TRGSEL_SELCODE_NEVER;
+        lut->params.selinc = TRGSEL_SELCODE_LEVEL;
+        lut->params.sela = 0; /* Counter >= CMPA && LEVEL */
+        lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */
 
         return SR_OK;
 }
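/*
 * Illustration only (not part of the diff): the rise/fall scan in
 * sigma_build_basic_trigger() above assigns the first (at most two)
 * edge-enabled channels to the two available condition slots. A
 * standalone sketch of that assignment, with local names:
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static size_t sketch_assign_edge_slots(uint16_t rising, uint16_t falling,
	uint16_t slots[2])
{
	uint16_t edges;
	size_t bit, count;

	edges = rising | falling;
	slots[0] = slots[1] = 0;
	count = 0;
	for (bit = 0; bit < 16 && count < 2; bit++) {
		if (edges & (1u << bit))
			slots[count++] = 1u << bit;
	}

	return count;	/* Number of condition slots in use. */
}

int main(void)
{
	uint16_t slots[2];
	size_t count;

	count = sketch_assign_edge_slots(0x0005, 0x0000, slots);
	printf("count=%zu slots=0x%04x,0x%04x\n", count, slots[0], slots[1]);
	return 0;
}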