* Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
* Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
* Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
+ * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include "protocol.h"
/*
- * The ASIX Sigma supports arbitrary integer frequency divider in
- * the 50MHz mode. The divider is in range 1...256 , allowing for
- * very precise sampling rate selection. This driver supports only
- * a subset of the sampling rates.
+ * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
+ * (by means of separate firmware images). As well as 50MHz divided by
+ * an integer divider in the 1..256 range (by the "typical" firmware).
+ * Which translates to a strict lower boundary of around 195kHz.
+ *
+ * This driver "suggests" a subset of the available rates by listing a
+ * few discrete values, while setter routines accept any user specified
+ * rate that is supported by the hardware.
*/
-SR_PRIV const uint64_t samplerates[] = {
- SR_KHZ(200), /* div=250 */
- SR_KHZ(250), /* div=200 */
- SR_KHZ(500), /* div=100 */
- SR_MHZ(1), /* div=50 */
- SR_MHZ(5), /* div=10 */
- SR_MHZ(10), /* div=5 */
- SR_MHZ(25), /* div=2 */
- SR_MHZ(50), /* div=1 */
- SR_MHZ(100), /* Special FW needed */
- SR_MHZ(200), /* Special FW needed */
+static const uint64_t samplerates[] = {
+ /* 50MHz and integer divider. 1/2/5 steps (where possible). */
+ SR_KHZ(200), SR_KHZ(500),
+ SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
+ SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
+ /* 100MHz/200MHz, fixed rates in special firmware. */
+ SR_MHZ(100), SR_MHZ(200),
};
-SR_PRIV const size_t samplerates_count = ARRAY_SIZE(samplerates);
-
-static const char firmware_files[][24] = {
- /* 50 MHz, supports 8 bit fractions */
- "asix-sigma-50.fw",
- /* 100 MHz */
- "asix-sigma-100.fw",
- /* 200 MHz */
- "asix-sigma-200.fw",
- /* Synchronous clock from pin */
- "asix-sigma-50sync.fw",
- /* Frequency counter */
- "asix-sigma-phasor.fw",
+SR_PRIV GVariant *sigma_get_samplerates_list(void)
+{
+ return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates));
+}
+
+static const char *firmware_files[] = {
+ [SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
+ [SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
+ [SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
+ [SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
+ [SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
};
-static int sigma_read(void *buf, size_t size, struct dev_context *devc)
+#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
+
+static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
{
+ struct dev_context *devc;
+ int vid, pid;
+ const char *serno;
int ret;
- ret = ftdi_read_data(&devc->ftdic, (unsigned char *)buf, size);
+ devc = sdi->priv;
+ if (!devc)
+ return SR_ERR_ARG;
+
+ if (devc->ftdi.is_open)
+ return SR_OK;
+
+ vid = devc->id.vid;
+ pid = devc->id.pid;
+ serno = sdi->serial_num;
+ if (!vid || !pid || !serno || !*serno)
+ return SR_ERR_ARG;
+
+ ret = ftdi_init(&devc->ftdi.ctx);
if (ret < 0) {
- sr_err("ftdi_read_data failed: %s",
- ftdi_get_error_string(&devc->ftdic));
+ sr_err("Cannot initialize FTDI context (%d): %s.",
+ ret, ftdi_get_error_string(&devc->ftdi.ctx));
+ return SR_ERR_IO;
}
+ ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
+ vid, pid, NULL, serno, 0);
+ if (ret < 0) {
+ sr_err("Cannot open device (%d): %s.",
+ ret, ftdi_get_error_string(&devc->ftdi.ctx));
+ return SR_ERR_IO;
+ }
+ devc->ftdi.is_open = TRUE;
- return ret;
+ return SR_OK;
}
-static int sigma_write(void *buf, size_t size, struct dev_context *devc)
+static int sigma_ftdi_close(struct dev_context *devc)
{
int ret;
- ret = ftdi_write_data(&devc->ftdic, (unsigned char *)buf, size);
- if (ret < 0)
- sr_err("ftdi_write_data failed: %s",
- ftdi_get_error_string(&devc->ftdic));
- else if ((size_t) ret != size)
- sr_err("ftdi_write_data did not complete write.");
+ ret = ftdi_usb_close(&devc->ftdi.ctx);
+ devc->ftdi.is_open = FALSE;
+ devc->ftdi.must_close = FALSE;
+ ftdi_deinit(&devc->ftdi.ctx);
+
+ return ret == 0 ? SR_OK : SR_ERR_IO;
+}
+
+SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc;
+ int ret;
+
+ if (!sdi)
+ return SR_ERR_ARG;
+ devc = sdi->priv;
+ if (!devc)
+ return SR_ERR_ARG;
+
+ if (devc->ftdi.is_open)
+ return SR_OK;
+
+ ret = sigma_ftdi_open(sdi);
+ if (ret != SR_OK)
+ return ret;
+ devc->ftdi.must_close = TRUE;
return ret;
}
+SR_PRIV int sigma_check_close(struct dev_context *devc)
+{
+ int ret;
+
+ if (!devc)
+ return SR_ERR_ARG;
+
+ if (devc->ftdi.must_close) {
+ ret = sigma_ftdi_close(devc);
+ if (ret != SR_OK)
+ return ret;
+ devc->ftdi.must_close = FALSE;
+ }
+
+ return SR_OK;
+}
+
+SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc;
+ int ret;
+
+ if (!sdi)
+ return SR_ERR_ARG;
+ devc = sdi->priv;
+ if (!devc)
+ return SR_ERR_ARG;
+
+ ret = sigma_ftdi_open(sdi);
+ if (ret != SR_OK)
+ return ret;
+ devc->ftdi.must_close = FALSE;
+
+ return SR_OK;
+}
+
+SR_PRIV int sigma_force_close(struct dev_context *devc)
+{
+ return sigma_ftdi_close(devc);
+}
+
/*
- * NOTE: We chose the buffer size to be large enough to hold any write to the
- * device. We still print a message just in case.
+ * BEWARE! Error propagation is important, as are kinds of return values.
+ *
+ * - Raw USB transport communicates the number of sent or received bytes,
+ * or negative error codes in the external library's(!) range of codes.
+ * - Internal routines at the "sigrok driver level" communicate success
+ * or failure in terms of SR_OK et al error codes.
+ * - Main loop style receive callbacks communicate booleans which arrange
+ * for repeated calls to drive progress during acquisition.
+ *
+ * Careful consideration by maintainers is essential, because all of the
+ * above kinds of values are assignment compatible from the compiler's
+ * point of view. Implementation errors will go unnoticed at build time.
*/
-SR_PRIV int sigma_write_register(uint8_t reg, uint8_t *data, size_t len,
- struct dev_context *devc)
+
+static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
{
- size_t i;
- uint8_t buf[80];
- int idx = 0;
+ int ret;
- if ((2 * len + 2) > sizeof(buf)) {
- sr_err("Attempted to write %zu bytes, but buffer is too small.",
- len);
- return SR_ERR_BUG;
+ ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
+ if (ret < 0) {
+ sr_err("USB data read failed: %s",
+ ftdi_get_error_string(&devc->ftdi.ctx));
}
- buf[idx++] = REG_ADDR_LOW | (reg & 0xf);
- buf[idx++] = REG_ADDR_HIGH | (reg >> 4);
+ return ret;
+}
+
+static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
+{
+ int ret;
- for (i = 0; i < len; i++) {
- buf[idx++] = REG_DATA_LOW | (data[i] & 0xf);
- buf[idx++] = REG_DATA_HIGH_WRITE | (data[i] >> 4);
+ ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
+ if (ret < 0) {
+ sr_err("USB data write failed: %s",
+ ftdi_get_error_string(&devc->ftdi.ctx));
+ } else if ((size_t)ret != size) {
+ sr_err("USB data write length mismatch.");
}
- return sigma_write(buf, idx, devc);
+ return ret;
+}
+
+static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
+{
+ int ret;
+
+ ret = sigma_read_raw(devc, buf, size);
+ if (ret < 0 || (size_t)ret != size)
+ return SR_ERR_IO;
+
+ return SR_OK;
}
-SR_PRIV int sigma_set_register(uint8_t reg, uint8_t value, struct dev_context *devc)
+static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
{
- return sigma_write_register(reg, &value, 1, devc);
+ int ret;
+
+ ret = sigma_write_raw(devc, buf, size);
+ if (ret < 0 || (size_t)ret != size)
+ return SR_ERR_IO;
+
+ return SR_OK;
}
-static int sigma_read_register(uint8_t reg, uint8_t *data, size_t len,
- struct dev_context *devc)
+/*
+ * Implementor's note: The local write buffer's size shall suffice for
+ * any known FPGA register transaction that is involved in the supported
+ * feature set of this sigrok device driver. If the length check trips,
+ * that's a programmer's error and needs adjustment in the complete call
+ * stack of the respective code path.
+ */
+#define SIGMA_MAX_REG_DEPTH 32
+
+/*
+ * Implementor's note: The FPGA command set supports register access
+ * with automatic address adjustment. This operation is documented to
+ * wrap within a 16-address range, it cannot cross boundaries where the
+ * register address' nibble overflows. An internal helper assumes that
+ * callers remain within this auto-adjustment range, and thus
+ * multi-register access requests can never exceed that count.
+ */
+#define SIGMA_MAX_REG_COUNT 16
+
+SR_PRIV int sigma_write_register(struct dev_context *devc,
+ uint8_t reg, uint8_t *data, size_t len)
{
- uint8_t buf[3];
+ uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr;
+ size_t idx;
+
+ if (len > SIGMA_MAX_REG_DEPTH) {
+ sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
+ return SR_ERR_BUG;
+ }
- buf[0] = REG_ADDR_LOW | (reg & 0xf);
- buf[1] = REG_ADDR_HIGH | (reg >> 4);
- buf[2] = REG_READ_ADDR;
+ wrptr = buf;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
+ for (idx = 0; idx < len; idx++) {
+ write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx]));
+ write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx]));
+ }
- sigma_write(buf, sizeof(buf), devc);
+ return sigma_write_sr(devc, buf, wrptr - buf);
+}
- return sigma_read(data, len, devc);
+SR_PRIV int sigma_set_register(struct dev_context *devc,
+ uint8_t reg, uint8_t value)
+{
+ return sigma_write_register(devc, reg, &value, sizeof(value));
}
-static int sigma_read_pos(uint32_t *stoppos, uint32_t *triggerpos,
- struct dev_context *devc)
+static int sigma_read_register(struct dev_context *devc,
+ uint8_t reg, uint8_t *data, size_t len)
{
- uint8_t buf[] = {
- REG_ADDR_LOW | READ_TRIGGER_POS_LOW,
-
- REG_READ_ADDR | NEXT_REG,
- REG_READ_ADDR | NEXT_REG,
- REG_READ_ADDR | NEXT_REG,
- REG_READ_ADDR | NEXT_REG,
- REG_READ_ADDR | NEXT_REG,
- REG_READ_ADDR | NEXT_REG,
- };
- uint8_t result[6];
+ uint8_t buf[3], *wrptr;
+ int ret;
- sigma_write(buf, sizeof(buf), devc);
+ wrptr = buf;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
+ write_u8_inc(&wrptr, REG_READ_ADDR);
+ ret = sigma_write_sr(devc, buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
- sigma_read(result, sizeof(result), devc);
+ return sigma_read_sr(devc, data, len);
+}
- *triggerpos = result[0] | (result[1] << 8) | (result[2] << 16);
- *stoppos = result[3] | (result[4] << 8) | (result[5] << 16);
+static int sigma_get_register(struct dev_context *devc,
+ uint8_t reg, uint8_t *data)
+{
+ return sigma_read_register(devc, reg, data, sizeof(*data));
+}
- /*
- * These "position" values point to after the event (end of
- * capture data, trigger condition matched). This is why they
- * get decremented here. Sample memory consists of 512-byte
- * chunks with meta data in the upper 64 bytes. Thus when the
- * decrements takes us into this upper part of the chunk, then
- * further move backwards to the end of the chunk's data part.
- */
- if ((--*stoppos & 0x1ff) == 0x1ff)
- *stoppos -= 64;
- if ((--*triggerpos & 0x1ff) == 0x1ff)
- *triggerpos -= 64;
+static int sigma_get_registers(struct dev_context *devc,
+ uint8_t reg, uint8_t *data, size_t count)
+{
+ uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr;
+ size_t idx;
+ int ret;
+
+ if (count > SIGMA_MAX_REG_COUNT) {
+ sr_err("Short command buffer for %zu reg reads at %u.", count, reg);
+ return SR_ERR_BUG;
+ }
+
+ wrptr = buf;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
+ for (idx = 0; idx < count; idx++)
+ write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC);
+ ret = sigma_write_sr(devc, buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
- return 1;
+ return sigma_read_sr(devc, data, count);
}
-static int sigma_read_dram(uint16_t startchunk, size_t numchunks,
- uint8_t *data, struct dev_context *devc)
+static int sigma_read_pos(struct dev_context *devc,
+ uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
{
- size_t i;
- uint8_t buf[4096];
- int idx;
+ uint8_t result[7];
+ const uint8_t *rdptr;
+ uint32_t v32;
+ uint8_t v8;
+ int ret;
+
+ /*
+ * Read 7 registers starting at trigger position LSB.
+ * Which yields two 24bit counter values, and mode flags.
+ */
+ ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW,
+ result, sizeof(result));
+ if (ret != SR_OK)
+ return ret;
- /* Send the startchunk. Index start with 1. */
- idx = 0;
- buf[idx++] = startchunk >> 8;
- buf[idx++] = startchunk & 0xff;
- sigma_write_register(WRITE_MEMROW, buf, idx, devc);
+ rdptr = &result[0];
+ v32 = read_u24le_inc(&rdptr);
+ if (triggerpos)
+ *triggerpos = v32;
+ v32 = read_u24le_inc(&rdptr);
+ if (stoppos)
+ *stoppos = v32;
+ v8 = read_u8_inc(&rdptr);
+ if (mode)
+ *mode = v8;
- /* Read the DRAM. */
- idx = 0;
- buf[idx++] = REG_DRAM_BLOCK;
- buf[idx++] = REG_DRAM_WAIT_ACK;
+ /*
+ * These positions consist of "the memory row" in the MSB fields,
+ * and "an event index" within the row in the LSB fields. Part
+ * of the memory row's content is sample data, another part is
+ * timestamps.
+ *
+ * The retrieved register values point to after the captured
+ * position. So they need to get decremented, and adjusted to
+ * cater for the timestamps when the decrement carries over to
+ * a different memory row.
+ */
+ if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
+ *stoppos -= CLUSTERS_PER_ROW;
+ if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
+ *triggerpos -= CLUSTERS_PER_ROW;
- for (i = 0; i < numchunks; i++) {
- /* Alternate bit to copy from DRAM to cache. */
- if (i != (numchunks - 1))
- buf[idx++] = REG_DRAM_BLOCK | (((i + 1) % 2) << 4);
+ return SR_OK;
+}
- buf[idx++] = REG_DRAM_BLOCK_DATA | ((i % 2) << 4);
+static int sigma_read_dram(struct dev_context *devc,
+ size_t startchunk, size_t numchunks, uint8_t *data)
+{
+ uint8_t buf[128], *wrptr, regval;
+ size_t chunk;
+ int sel, ret;
+ gboolean is_last;
- if (i != (numchunks - 1))
- buf[idx++] = REG_DRAM_WAIT_ACK;
+ if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
+ sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
+ return SR_ERR_BUG;
}
- sigma_write(buf, idx, devc);
+ /* Communicate DRAM start address (memory row, aka samples line). */
+ wrptr = buf;
+ write_u16be_inc(&wrptr, startchunk);
+ ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
- return sigma_read(data, numchunks * CHUNK_SIZE, devc);
+ /*
+ * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
+ * then transfer via USB. Interleave the FPGA's DRAM access and
+ * USB transfer, use alternating buffers (0/1) in the process.
+ */
+ wrptr = buf;
+ write_u8_inc(&wrptr, REG_DRAM_BLOCK);
+ write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
+ for (chunk = 0; chunk < numchunks; chunk++) {
+ sel = chunk % 2;
+ is_last = chunk == numchunks - 1;
+ if (!is_last) {
+ regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
+ write_u8_inc(&wrptr, regval);
+ }
+ regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
+ write_u8_inc(&wrptr, regval);
+ if (!is_last)
+ write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
+ }
+ ret = sigma_write_sr(devc, buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
+
+ return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
}
/* Upload trigger look-up tables to Sigma. */
-SR_PRIV int sigma_write_trigger_lut(struct triggerlut *lut, struct dev_context *devc)
+SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
+ struct triggerlut *lut)
{
- int i;
- uint8_t tmp[2];
+ size_t lut_addr;
uint16_t bit;
+ uint8_t m3d, m2d, m1d, m0d;
+ uint8_t buf[6], *wrptr, v8;
+ uint16_t selreg;
+ int ret;
- /* Transpose the table and send to Sigma. */
- for (i = 0; i < 16; i++) {
- bit = 1 << i;
+ /*
+ * Translate the LUT part of the trigger configuration from the
+ * application's perspective to the hardware register's bitfield
+ * layout. Send the LUT to the device. This configures the logic
+ * which combines pin levels or edges.
+ */
+ for (lut_addr = 0; lut_addr < 16; lut_addr++) {
+ bit = BIT(lut_addr);
- tmp[0] = tmp[1] = 0;
+ /* - M4 M3S M3Q */
+ m3d = 0;
+ if (lut->m4 & bit)
+ m3d |= BIT(2);
+ if (lut->m3s & bit)
+ m3d |= BIT(1);
+ if (lut->m3q & bit)
+ m3d |= BIT(0);
- if (lut->m2d[0] & bit)
- tmp[0] |= 0x01;
- if (lut->m2d[1] & bit)
- tmp[0] |= 0x02;
- if (lut->m2d[2] & bit)
- tmp[0] |= 0x04;
+ /* M2D3 M2D2 M2D1 M2D0 */
+ m2d = 0;
if (lut->m2d[3] & bit)
- tmp[0] |= 0x08;
+ m2d |= BIT(3);
+ if (lut->m2d[2] & bit)
+ m2d |= BIT(2);
+ if (lut->m2d[1] & bit)
+ m2d |= BIT(1);
+ if (lut->m2d[0] & bit)
+ m2d |= BIT(0);
- if (lut->m3 & bit)
- tmp[0] |= 0x10;
- if (lut->m3s & bit)
- tmp[0] |= 0x20;
- if (lut->m4 & bit)
- tmp[0] |= 0x40;
+ /* M1D3 M1D2 M1D1 M1D0 */
+ m1d = 0;
+ if (lut->m1d[3] & bit)
+ m1d |= BIT(3);
+ if (lut->m1d[2] & bit)
+ m1d |= BIT(2);
+ if (lut->m1d[1] & bit)
+ m1d |= BIT(1);
+ if (lut->m1d[0] & bit)
+ m1d |= BIT(0);
- if (lut->m0d[0] & bit)
- tmp[1] |= 0x01;
- if (lut->m0d[1] & bit)
- tmp[1] |= 0x02;
- if (lut->m0d[2] & bit)
- tmp[1] |= 0x04;
+ /* M0D3 M0D2 M0D1 M0D0 */
+ m0d = 0;
if (lut->m0d[3] & bit)
- tmp[1] |= 0x08;
-
- if (lut->m1d[0] & bit)
- tmp[1] |= 0x10;
- if (lut->m1d[1] & bit)
- tmp[1] |= 0x20;
- if (lut->m1d[2] & bit)
- tmp[1] |= 0x40;
- if (lut->m1d[3] & bit)
- tmp[1] |= 0x80;
+ m0d |= BIT(3);
+ if (lut->m0d[2] & bit)
+ m0d |= BIT(2);
+ if (lut->m0d[1] & bit)
+ m0d |= BIT(1);
+ if (lut->m0d[0] & bit)
+ m0d |= BIT(0);
- sigma_write_register(WRITE_TRIGGER_SELECT0, tmp, sizeof(tmp),
- devc);
- sigma_set_register(WRITE_TRIGGER_SELECT1, 0x30 | i, devc);
+ /*
+ * Send 16bits with M3D/M2D and M1D/M0D bit masks to the
+ * TriggerSelect register, then strobe the LUT write by
+ * passing A3-A0 to TriggerSelect2. Hold RESET during LUT
+ * programming.
+ */
+ wrptr = buf;
+ write_u8_inc(&wrptr, (m3d << 4) | (m2d << 0));
+ write_u8_inc(&wrptr, (m1d << 4) | (m0d << 0));
+ ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT,
+ buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
+ v8 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE |
+ (lut_addr & TRGSEL2_LUT_ADDR_MASK);
+ ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, v8);
+ if (ret != SR_OK)
+ return ret;
}
- /* Send the parameters */
- sigma_write_register(WRITE_TRIGGER_SELECT0, (uint8_t *) &lut->params,
- sizeof(lut->params), devc);
+ /*
+ * Send the parameters. This covers counters and durations.
+ */
+ wrptr = buf;
+ selreg = 0;
+ selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT;
+ selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT;
+ selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT;
+ selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT;
+ selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT;
+ selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT;
+ write_u16be_inc(&wrptr, selreg);
+ write_u16be_inc(&wrptr, lut->params.cmpb);
+ write_u16be_inc(&wrptr, lut->params.cmpa);
+ ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
+ if (ret != SR_OK)
+ return ret;
return SR_OK;
}
/*
- * Configure the FPGA for bitbang mode.
- * This sequence is documented in section 2. of the ASIX Sigma programming
- * manual. This sequence is necessary to configure the FPGA in the Sigma
- * into Bitbang mode, in which it can be programmed with the firmware.
+ * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
+ * uses FTDI bitbang mode for netlist download in slave serial mode.
+ * (LATER: The OMEGA device's cable contains a more capable FTDI chip
+ * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
+ * compatible bitbang mode? For maximum code re-use and reduced libftdi
+ * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
+ * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
+ *
+ * 750kbps rate (four times the speed of sigmalogan) works well for
+ * netlist download. All pins except INIT_B are output pins during
+ * configuration download.
+ *
+ * Some pins are inverted as a byproduct of level shifting circuitry.
+ * That's why high CCLK level (from the cable's point of view) is idle
+ * from the FPGA's perspective.
+ *
+ * The vendor's literature discusses a "suicide sequence" which ends
+ * regular FPGA execution and should be sent before entering bitbang
+ * mode and sending configuration data. Set D7 and toggle D2, D3, D4
+ * a few times.
*/
-static int sigma_fpga_init_bitbang(struct dev_context *devc)
+#define BB_PIN_CCLK BIT(0) /* D0, CCLK */
+#define BB_PIN_PROG BIT(1) /* D1, PROG */
+#define BB_PIN_D2 BIT(2) /* D2, (part of) SUICIDE */
+#define BB_PIN_D3 BIT(3) /* D3, (part of) SUICIDE */
+#define BB_PIN_D4 BIT(4) /* D4, (part of) SUICIDE (unused?) */
+#define BB_PIN_INIT BIT(5) /* D5, INIT, input pin */
+#define BB_PIN_DIN BIT(6) /* D6, DIN */
+#define BB_PIN_D7 BIT(7) /* D7, (part of) SUICIDE */
+
+#define BB_BITRATE (750 * 1000)
+#define BB_PINMASK (0xff & ~BB_PIN_INIT)
+
+/*
+ * Initiate slave serial mode for configuration download. Which is done
+ * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
+ * initiating the configuration download.
+ *
+ * Run a "suicide sequence" first to terminate the regular FPGA operation
+ * before reconfiguration. The FTDI cable is single channel, and shares
+ * pins which are used for data communication in FIFO mode with pins that
+ * are used for FPGA configuration in bitbang mode. Hardware defaults for
+ * unconfigured hardware, and runtime conditions after FPGA configuration
+ * need to cooperate such that re-configuration of the FPGA can start.
+ */
+static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
{
- uint8_t suicide[] = {
- 0x84, 0x84, 0x88, 0x84, 0x88, 0x84, 0x88, 0x84,
+ const uint8_t suicide[] = {
+ BB_PIN_D7 | BB_PIN_D2,
+ BB_PIN_D7 | BB_PIN_D2,
+ BB_PIN_D7 | BB_PIN_D3,
+ BB_PIN_D7 | BB_PIN_D2,
+ BB_PIN_D7 | BB_PIN_D3,
+ BB_PIN_D7 | BB_PIN_D2,
+ BB_PIN_D7 | BB_PIN_D3,
+ BB_PIN_D7 | BB_PIN_D2,
};
- uint8_t init_array[] = {
- 0x01, 0x03, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01,
+ const uint8_t init_array[] = {
+ BB_PIN_CCLK,
+ BB_PIN_CCLK | BB_PIN_PROG,
+ BB_PIN_CCLK | BB_PIN_PROG,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
+ BB_PIN_CCLK,
};
- int i, ret, timeout = (10 * 1000);
+ size_t retries;
+ int ret;
uint8_t data;
/* Section 2. part 1), do the FPGA suicide. */
- sigma_write(suicide, sizeof(suicide), devc);
- sigma_write(suicide, sizeof(suicide), devc);
- sigma_write(suicide, sizeof(suicide), devc);
- sigma_write(suicide, sizeof(suicide), devc);
-
- /* Section 2. part 2), do pulse on D1. */
- sigma_write(init_array, sizeof(init_array), devc);
- ftdi_usb_purge_buffers(&devc->ftdic);
-
- /* Wait until the FPGA asserts D6/INIT_B. */
- for (i = 0; i < timeout; i++) {
- ret = sigma_read(&data, 1, devc);
- if (ret < 0)
- return ret;
- /* Test if pin D6 got asserted. */
- if (data & (1 << 5))
- return 0;
- /* The D6 was not asserted yet, wait a bit. */
- g_usleep(10 * 1000);
+ ret = SR_OK;
+ ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
+ ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
+ ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
+ ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
+ if (ret != SR_OK)
+ return SR_ERR_IO;
+ g_usleep(10 * 1000);
+
+ /* Section 2. part 2), pulse PROG. */
+ ret = sigma_write_sr(devc, init_array, sizeof(init_array));
+ if (ret != SR_OK)
+ return ret;
+ g_usleep(10 * 1000);
+ ftdi_usb_purge_buffers(&devc->ftdi.ctx);
+
+ /*
+ * Wait until the FPGA asserts INIT_B. Check in a maximum number
+ * of bursts with a given delay between them. Read as many pin
+ * capture results as the combination of FTDI chip and FTDI lib
+ * may provide. Cope with absence of pin capture data in a cycle.
+ * This approach shall result in fast response in case of success,
+ * low cost of execution during wait, reliable error handling in
+ * the transport layer, and robust response to failure or absence
+ * of result data (hardware inactivity after stimulus).
+ */
+ retries = 10;
+ while (retries--) {
+ do {
+ ret = sigma_read_raw(devc, &data, sizeof(data));
+ if (ret < 0)
+ return SR_ERR_IO;
+ if (ret == sizeof(data) && (data & BB_PIN_INIT))
+ return SR_OK;
+ } while (ret == sizeof(data));
+ if (retries)
+ g_usleep(10 * 1000);
}
return SR_ERR_TIMEOUT;
}
+/*
+ * This is belt and braces. Re-run the bitbang initiation sequence a few
+ * times should first attempts fail. Failure is rare but can happen (was
+ * observed during driver development).
+ */
+static int sigma_fpga_init_bitbang(struct dev_context *devc)
+{
+ size_t retries;
+ int ret;
+
+ retries = 10;
+ while (retries--) {
+ ret = sigma_fpga_init_bitbang_once(devc);
+ if (ret == SR_OK)
+ return ret;
+ if (ret != SR_ERR_TIMEOUT)
+ return ret;
+ }
+ return ret;
+}
+
/*
* Configure the FPGA for logic-analyzer mode.
*/
static int sigma_fpga_init_la(struct dev_context *devc)
{
- /* Initialize the logic analyzer mode. */
- uint8_t mode_regval = WMR_SDRAMINIT;
- uint8_t logic_mode_start[] = {
- REG_ADDR_LOW | (READ_ID & 0xf),
- REG_ADDR_HIGH | (READ_ID >> 4),
- REG_READ_ADDR, /* Read ID register. */
-
- REG_ADDR_LOW | (WRITE_TEST & 0xf),
- REG_DATA_LOW | 0x5,
- REG_DATA_HIGH_WRITE | 0x5,
- REG_READ_ADDR, /* Read scratch register. */
-
- REG_DATA_LOW | 0xa,
- REG_DATA_HIGH_WRITE | 0xa,
- REG_READ_ADDR, /* Read scratch register. */
-
- REG_ADDR_LOW | (WRITE_MODE & 0xf),
- REG_DATA_LOW | (mode_regval & 0xf),
- REG_DATA_HIGH_WRITE | (mode_regval >> 4),
- };
-
+ uint8_t buf[20], *wrptr;
+ uint8_t data_55, data_aa, mode;
uint8_t result[3];
+ const uint8_t *rdptr;
int ret;
- /* Initialize the logic analyzer mode. */
- sigma_write(logic_mode_start, sizeof(logic_mode_start), devc);
-
- /* Expect a 3 byte reply since we issued three READ requests. */
- ret = sigma_read(result, 3, devc);
- if (ret != 3)
- goto err;
+ wrptr = buf;
+
+ /* Read ID register. */
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID));
+ write_u8_inc(&wrptr, REG_READ_ADDR);
+
+ /* Write 0x55 to scratch register, read back. */
+ data_55 = 0x55;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
+ write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55));
+ write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55));
+ write_u8_inc(&wrptr, REG_READ_ADDR);
+
+ /* Write 0xaa to scratch register, read back. */
+ data_aa = 0xaa;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
+ write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa));
+ write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa));
+ write_u8_inc(&wrptr, REG_READ_ADDR);
+
+ /* Initiate SDRAM initialization in mode register. */
+ mode = WMR_SDRAMINIT;
+ write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE));
+ write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE));
+ write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode));
+ write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode));
- if (result[0] != 0xa6 || result[1] != 0x55 || result[2] != 0xaa)
- goto err;
+ /*
+ * Send the command sequence which contains 3 READ requests.
+ * Expect to see the corresponding 3 response bytes.
+ */
+ ret = sigma_write_sr(devc, buf, wrptr - buf);
+ if (ret != SR_OK) {
+ sr_err("Could not request LA start response.");
+ return ret;
+ }
+ ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
+ if (ret != SR_OK) {
+ sr_err("Could not receive LA start response.");
+ return SR_ERR_IO;
+ }
+ rdptr = result;
+ if (read_u8_inc(&rdptr) != 0xa6) {
+ sr_err("Unexpected ID response.");
+ return SR_ERR_DATA;
+ }
+ if (read_u8_inc(&rdptr) != data_55) {
+ sr_err("Unexpected scratch read-back (55).");
+ return SR_ERR_DATA;
+ }
+ if (read_u8_inc(&rdptr) != data_aa) {
+ sr_err("Unexpected scratch read-back (aa).");
+ return SR_ERR_DATA;
+ }
return SR_OK;
-err:
- sr_err("Configuration failed. Invalid reply received.");
- return SR_ERR;
}
/*
* by the caller of this function.
*/
static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
- uint8_t **bb_cmd, gsize *bb_cmd_size)
+ uint8_t **bb_cmd, size_t *bb_cmd_size)
{
- size_t i, file_size, bb_size;
- char *firmware;
- uint8_t *bb_stream, *bbs;
+ uint8_t *firmware;
+ size_t file_size;
+ uint8_t *p;
+ size_t l;
uint32_t imm;
- int bit, v;
- int ret = SR_OK;
+ size_t bb_size;
+ uint8_t *bb_stream, *bbs, byte, mask, v;
/* Retrieve the on-disk firmware file content. */
- firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE,
- name, &file_size, 256 * 1024);
+ firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
+ &file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
if (!firmware)
- return SR_ERR;
+ return SR_ERR_IO;
/* Unscramble the file content (XOR with "random" sequence). */
+ p = firmware;
+ l = file_size;
imm = 0x3f6df2ab;
- for (i = 0; i < file_size; i++) {
+ while (l--) {
imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
- firmware[i] ^= imm & 0xff;
+ *p++ ^= imm & 0xff;
}
/*
* the bitbang samples, and release the allocated memory.
*/
bb_size = file_size * 8 * 2;
- bb_stream = (uint8_t *)g_try_malloc(bb_size);
+ bb_stream = g_try_malloc(bb_size);
if (!bb_stream) {
- sr_err("%s: Failed to allocate bitbang stream", __func__);
- ret = SR_ERR_MALLOC;
- goto exit;
+ sr_err("Memory allocation failed during firmware upload.");
+ g_free(firmware);
+ return SR_ERR_MALLOC;
}
bbs = bb_stream;
- for (i = 0; i < file_size; i++) {
- for (bit = 7; bit >= 0; bit--) {
- v = (firmware[i] & (1 << bit)) ? 0x40 : 0x00;
- *bbs++ = v | 0x01;
+ p = firmware;
+ l = file_size;
+ while (l--) {
+ byte = *p++;
+ mask = 0x80;
+ while (mask) {
+ v = (byte & mask) ? BB_PIN_DIN : 0;
+ mask >>= 1;
+ *bbs++ = v | BB_PIN_CCLK;
*bbs++ = v;
}
}
+ g_free(firmware);
/* The transformation completed successfully, return the result. */
*bb_cmd = bb_stream;
*bb_cmd_size = bb_size;
-exit:
- g_free(firmware);
- return ret;
+ return SR_OK;
}
-static int upload_firmware(struct sr_context *ctx,
- int firmware_idx, struct dev_context *devc)
+static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
+ enum sigma_firmware_idx firmware_idx)
{
int ret;
- unsigned char *buf;
- unsigned char pins;
+ uint8_t *buf;
+ uint8_t pins;
size_t buf_size;
const char *firmware;
- /* Avoid downloading the same firmware multiple times. */
+ /* Check for valid firmware file selection. */
+ if (firmware_idx >= ARRAY_SIZE(firmware_files))
+ return SR_ERR_ARG;
firmware = firmware_files[firmware_idx];
- if (devc->cur_firmware == firmware_idx) {
+ if (!firmware || !*firmware)
+ return SR_ERR_ARG;
+
+ /* Avoid downloading the same firmware multiple times. */
+ if (devc->firmware_idx == firmware_idx) {
sr_info("Not uploading firmware file '%s' again.", firmware);
return SR_OK;
}
- ret = ftdi_set_bitmode(&devc->ftdic, 0xdf, BITMODE_BITBANG);
+ devc->state.state = SIGMA_CONFIG;
+
+ /* Set the cable to bitbang mode. */
+ ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
if (ret < 0) {
- sr_err("ftdi_set_bitmode failed: %s",
- ftdi_get_error_string(&devc->ftdic));
+ sr_err("Could not setup cable mode for upload: %s",
+ ftdi_get_error_string(&devc->ftdi.ctx));
return SR_ERR;
}
-
- /* Four times the speed of sigmalogan - Works well. */
- ret = ftdi_set_baudrate(&devc->ftdic, 750 * 1000);
+ ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
if (ret < 0) {
- sr_err("ftdi_set_baudrate failed: %s",
- ftdi_get_error_string(&devc->ftdic));
+ sr_err("Could not setup bitrate for upload: %s",
+ ftdi_get_error_string(&devc->ftdi.ctx));
return SR_ERR;
}
- /* Initialize the FPGA for firmware upload. */
+ /* Initiate FPGA configuration mode. */
ret = sigma_fpga_init_bitbang(devc);
- if (ret)
+ if (ret) {
+ sr_err("Could not initiate firmware upload to hardware");
return ret;
+ }
- /* Prepare firmware. */
+ /* Prepare wire format of the firmware image. */
ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
if (ret != SR_OK) {
- sr_err("An error occurred while reading the firmware: %s",
- firmware);
+ sr_err("Could not prepare file %s for upload.", firmware);
return ret;
}
- /* Upload firmware. */
+ /* Write the FPGA netlist to the cable. */
sr_info("Uploading firmware file '%s'.", firmware);
- sigma_write(buf, buf_size, devc);
-
+ ret = sigma_write_sr(devc, buf, buf_size);
g_free(buf);
+ if (ret != SR_OK) {
+ sr_err("Could not upload firmware file '%s'.", firmware);
+ return ret;
+ }
- ret = ftdi_set_bitmode(&devc->ftdic, 0x00, BITMODE_RESET);
+ /* Leave bitbang mode and discard pending input data. */
+ ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
if (ret < 0) {
- sr_err("ftdi_set_bitmode failed: %s",
- ftdi_get_error_string(&devc->ftdic));
+ sr_err("Could not setup cable mode after upload: %s",
+ ftdi_get_error_string(&devc->ftdi.ctx));
return SR_ERR;
}
-
- ftdi_usb_purge_buffers(&devc->ftdic);
-
- /* Discard garbage. */
- while (sigma_read(&pins, 1, devc) == 1)
+ ftdi_usb_purge_buffers(&devc->ftdi.ctx);
+ while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
;
/* Initialize the FPGA for logic-analyzer mode. */
ret = sigma_fpga_init_la(devc);
- if (ret != SR_OK)
+ if (ret != SR_OK) {
+ sr_err("Hardware response after firmware upload failed.");
return ret;
+ }
- devc->cur_firmware = firmware_idx;
-
+ /* Keep track of successful firmware download completion. */
+ devc->state.state = SIGMA_IDLE;
+ devc->firmware_idx = firmware_idx;
sr_info("Firmware uploaded.");
return SR_OK;
}
/*
- * Sigma doesn't support limiting the number of samples, so we have to
- * translate the number and the samplerate to an elapsed time.
+ * The driver supports user specified time or sample count limits. The
+ * device's hardware supports neither, and hardware compression prevents
+ * reliable detection of "fill levels" (currently reached sample counts)
+ * from register values during acquisition. That's why the driver needs
+ * to apply some heuristics:
*
- * In addition we need to ensure that the last data cluster has passed
- * the hardware pipeline, and became available to the PC side. With RLE
- * compression up to 327ms could pass before another cluster accumulates
- * at 200kHz samplerate when input pins don't change.
+ * - The (optional) sample count limit and the (normalized) samplerate
+ * get mapped to an estimated duration for these samples' acquisition.
+ * - The (optional) time limit gets checked as well. The lesser of the
+ * two limits will terminate the data acquisition phase. The exact
+ * sample count limit gets enforced in session feed submission paths.
+ * - Some slack needs to be given to account for hardware pipelines as
+ * well as late storage of last chunks after compression thresholds
+ * are tripped. The resulting data set will span at least the caller
+ * specified period of time, which shall be perfectly acceptable.
+ *
+ * With RLE compression active, up to 64K sample periods can pass before
+ * a cluster accumulates. Which translates to 327ms at 200kHz. Add two
+ * times that period for good measure, one is not enough to flush the
+ * hardware pipeline (observation from an earlier experiment).
*/
-SR_PRIV uint64_t sigma_limit_samples_to_msec(const struct dev_context *devc,
- uint64_t limit_samples)
+SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
{
- uint64_t limit_msec;
+ int ret;
+ GVariant *data;
+ uint64_t user_count, user_msecs;
uint64_t worst_cluster_time_ms;
+ uint64_t count_msecs, acquire_msecs;
- limit_msec = limit_samples * 1000 / devc->cur_samplerate;
- worst_cluster_time_ms = 65536 * 1000 / devc->cur_samplerate;
- /*
- * One cluster time is not enough to flush pipeline when sampling
- * grounded pins with 1 sample limit at 200kHz. Hence the 2* fix.
+ sr_sw_limits_init(&devc->limit.acquire);
+
+ /* Get sample count limit, convert to msecs. */
+ ret = sr_sw_limits_config_get(&devc->limit.config,
+ SR_CONF_LIMIT_SAMPLES, &data);
+ if (ret != SR_OK)
+ return ret;
+ user_count = g_variant_get_uint64(data);
+ g_variant_unref(data);
+ count_msecs = 0;
+ if (user_count)
+ count_msecs = 1000 * user_count / devc->clock.samplerate + 1;
+
+ /* Get time limit, which is in msecs. */
+ ret = sr_sw_limits_config_get(&devc->limit.config,
+ SR_CONF_LIMIT_MSEC, &data);
+ if (ret != SR_OK)
+ return ret;
+ user_msecs = g_variant_get_uint64(data);
+ g_variant_unref(data);
+
+ /* Get the lesser of them, with both being optional. */
+ acquire_msecs = ~0ull;
+ if (user_count && count_msecs < acquire_msecs)
+ acquire_msecs = count_msecs;
+ if (user_msecs && user_msecs < acquire_msecs)
+ acquire_msecs = user_msecs;
+ if (acquire_msecs == ~0ull)
+ return SR_OK;
+
+ /* Add some slack, and use that timeout for acquisition. */
+ worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate;
+ acquire_msecs += 2 * worst_cluster_time_ms;
+ data = g_variant_new_uint64(acquire_msecs);
+ ret = sr_sw_limits_config_set(&devc->limit.acquire,
+ SR_CONF_LIMIT_MSEC, data);
+ g_variant_unref(data);
+ if (ret != SR_OK)
+ return ret;
+
+ sr_sw_limits_acquisition_start(&devc->limit.acquire);
+ return SR_OK;
+}
+
+/*
+ * Check whether a caller specified samplerate matches the device's
+ * hardware constraints (can be used for acquisition). Optionally yield
+ * a value that approximates the original spec.
+ *
+ * This routine assumes that input specs are in the 200kHz to 200MHz
+ * range of supported rates, and callers typically want to normalize a
+ * given value to the hardware capabilities. Values in the 50MHz range
+ * get rounded up by default, to avoid a more expensive check for the
+ * closest match, while higher sampling rate is always desirable during
+ * measurement. Input specs which exactly match hardware capabilities
+ * remain unaffected. Because 100/200MHz rates also limit the number of
+ * available channels, they are not suggested by this routine; instead,
+ * callers need to pick them consciously.
+ */
+SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
+{
+ uint64_t div, rate;
+
+ /* Accept exact matches for 100/200MHz. */
+ if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
+ if (have_rate)
+ *have_rate = want_rate;
+ return SR_OK;
+ }
+
+ /* Accept 200kHz to 50MHz range, and map to near value. */
+ if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
+ div = SR_MHZ(50) / want_rate;
+ rate = SR_MHZ(50) / div;
+ if (have_rate)
+ *have_rate = rate;
+ return SR_OK;
+ }
+
+ return SR_ERR_ARG;
+}
+
+/* Gets called at probe time. Can seed software settings from hardware state. */
+SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc;
+ int ret;
+ uint8_t regaddr, regval;
+
+ devc = sdi->priv;
+ if (!devc)
+ return SR_ERR_ARG;
+
+ /* Seed configuration values from defaults. */
+ devc->firmware_idx = SIGMA_FW_NONE;
+ devc->clock.samplerate = samplerates[0];
+
+ /* TODO
+ * Ideally the device driver could retrieve recently stored
+ * details from hardware registers, thus re-use user specified
+ * configuration values across sigrok sessions. Which could
+ * avoid repeated expensive though unnecessary firmware uploads,
+ * improve performance and usability. Unfortunately it appears
+ * that the registers range which is documented as available for
+ * application use keeps providing 0xff data content. At least
+ * with the netlist version which ships with sigrok. The same
+ * was observed with unused registers in the first page.
*/
- return limit_msec + 2 * worst_cluster_time_ms;
+ return SR_ERR_NA;
+
+ /* This is for research, currently does not work yet. */
+ ret = sigma_check_open(sdi);
+ regaddr = 16;
+ regaddr = 14;
+ ret = sigma_set_register(devc, regaddr, 'F');
+ ret = sigma_get_register(devc, regaddr, ®val);
+ sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret);
+ ret = sigma_check_close(devc);
+ return ret;
+}
+
+/* Gets called after successful (volatile) hardware configuration. */
+SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi)
+{
+ /* TODO See above, registers seem to not hold written data. */
+ (void)sdi;
+ return SR_ERR_NA;
}
-SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi, uint64_t samplerate)
+SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
{
struct dev_context *devc;
struct drv_context *drvc;
- size_t i;
+ uint64_t samplerate;
int ret;
- int num_channels;
+ size_t num_channels;
devc = sdi->priv;
drvc = sdi->driver->context;
- ret = SR_OK;
- /* Reject rates that are not in the list of supported rates. */
- for (i = 0; i < samplerates_count; i++) {
- if (samplerates[i] == samplerate)
- break;
- }
- if (i >= samplerates_count || samplerates[i] == 0)
- return SR_ERR_SAMPLERATE;
+ /* Accept any caller specified rate which the hardware supports. */
+ ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate);
+ if (ret != SR_OK)
+ return ret;
/*
* Depending on the samplerates of 200/100/50- MHz, specific
*/
num_channels = devc->num_channels;
if (samplerate <= SR_MHZ(50)) {
- ret = upload_firmware(drvc->sr_ctx, 0, devc);
+ ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
num_channels = 16;
} else if (samplerate == SR_MHZ(100)) {
- ret = upload_firmware(drvc->sr_ctx, 1, devc);
+ ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
num_channels = 8;
} else if (samplerate == SR_MHZ(200)) {
- ret = upload_firmware(drvc->sr_ctx, 2, devc);
+ ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
num_channels = 4;
}
/*
- * Derive the sample period from the sample rate as well as the
- * number of samples that the device will communicate within
- * an "event" (memory organization internal to the device).
+ * The samplerate affects the number of available logic channels
+ * as well as a sample memory layout detail (the number of samples
+ * which the device will communicate within an "event").
*/
if (ret == SR_OK) {
devc->num_channels = num_channels;
- devc->cur_samplerate = samplerate;
devc->samples_per_event = 16 / devc->num_channels;
- devc->state.state = SIGMA_IDLE;
}
/*
- * Support for "limit_samples" is implemented by stopping
- * acquisition after a corresponding period of time.
- * Re-calculate that period of time, in case the limit is
- * set first and the samplerate gets (re-)configured later.
+ * Store the firmware type and most recently configured samplerate
+ * in hardware, such that subsequent sessions can start from there.
+ * This is a "best effort" approach. Failure is non-fatal.
*/
- if (ret == SR_OK && devc->limit_samples) {
- uint64_t msecs;
- msecs = sigma_limit_samples_to_msec(devc, devc->limit_samples);
- devc->limit_msec = msecs;
- }
+ if (ret == SR_OK)
+ (void)sigma_store_hw_config(sdi);
return ret;
}
+/*
+ * Arrange for a session feed submit buffer. A queue where a number of
+ * samples gets accumulated to reduce the number of send calls. Which
+ * also enforces an optional sample count limit for data acquisition.
+ *
+ * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
+ * driver provides a fixed channel layout regardless of samplerate).
+ */
+
+#define CHUNK_SIZE (4 * 1024 * 1024)
+
+struct submit_buffer {
+ size_t unit_size;
+ size_t max_samples, curr_samples;
+ uint8_t *sample_data;
+ uint8_t *write_pointer;
+ struct sr_dev_inst *sdi;
+ struct sr_datafeed_packet packet;
+ struct sr_datafeed_logic logic;
+};
+
+static int alloc_submit_buffer(struct sr_dev_inst *sdi)
+{
+ struct dev_context *devc;
+ struct submit_buffer *buffer;
+ size_t size;
+
+ devc = sdi->priv;
+
+ buffer = g_malloc0(sizeof(*buffer));
+ devc->buffer = buffer;
+
+ buffer->unit_size = sizeof(uint16_t);
+ size = CHUNK_SIZE;
+ size /= buffer->unit_size;
+ buffer->max_samples = size;
+ size *= buffer->unit_size;
+ buffer->sample_data = g_try_malloc0(size);
+ if (!buffer->sample_data)
+ return SR_ERR_MALLOC;
+ buffer->write_pointer = buffer->sample_data;
+ sr_sw_limits_init(&devc->limit.submit);
+
+ buffer->sdi = sdi;
+ memset(&buffer->logic, 0, sizeof(buffer->logic));
+ buffer->logic.unitsize = buffer->unit_size;
+ buffer->logic.data = buffer->sample_data;
+ memset(&buffer->packet, 0, sizeof(buffer->packet));
+ buffer->packet.type = SR_DF_LOGIC;
+ buffer->packet.payload = &buffer->logic;
+
+ return SR_OK;
+}
+
+static int setup_submit_limit(struct dev_context *devc)
+{
+ struct sr_sw_limits *limits;
+ int ret;
+ GVariant *data;
+ uint64_t total;
+
+ limits = &devc->limit.submit;
+
+ ret = sr_sw_limits_config_get(&devc->limit.config,
+ SR_CONF_LIMIT_SAMPLES, &data);
+ if (ret != SR_OK)
+ return ret;
+ total = g_variant_get_uint64(data);
+ g_variant_unref(data);
+
+ sr_sw_limits_init(limits);
+ if (total) {
+ data = g_variant_new_uint64(total);
+ ret = sr_sw_limits_config_set(limits,
+ SR_CONF_LIMIT_SAMPLES, data);
+ g_variant_unref(data);
+ if (ret != SR_OK)
+ return ret;
+ }
+
+ sr_sw_limits_acquisition_start(limits);
+
+ return SR_OK;
+}
+
+static void free_submit_buffer(struct dev_context *devc)
+{
+ struct submit_buffer *buffer;
+
+ if (!devc)
+ return;
+
+ buffer = devc->buffer;
+ if (!buffer)
+ return;
+ devc->buffer = NULL;
+
+ g_free(buffer->sample_data);
+ g_free(buffer);
+}
+
+static int flush_submit_buffer(struct dev_context *devc)
+{
+ struct submit_buffer *buffer;
+ int ret;
+
+ buffer = devc->buffer;
+
+ /* Is queued sample data available? */
+ if (!buffer->curr_samples)
+ return SR_OK;
+
+ /* Submit to the session feed. */
+ buffer->logic.length = buffer->curr_samples * buffer->unit_size;
+ ret = sr_session_send(buffer->sdi, &buffer->packet);
+ if (ret != SR_OK)
+ return ret;
+
+ /* Rewind queue position. */
+ buffer->curr_samples = 0;
+ buffer->write_pointer = buffer->sample_data;
+
+ return SR_OK;
+}
+
+static int addto_submit_buffer(struct dev_context *devc,
+ uint16_t sample, size_t count)
+{
+ struct submit_buffer *buffer;
+ struct sr_sw_limits *limits;
+ int ret;
+
+ buffer = devc->buffer;
+ limits = &devc->limit.submit;
+ if (sr_sw_limits_check(limits))
+ count = 0;
+
+ /*
+ * Individually accumulate and check each sample, such that
+ * accumulation between flushes won't exceed local storage, and
+ * enforcement of user specified limits is exact.
+ */
+ while (count--) {
+ write_u16le_inc(&buffer->write_pointer, sample);
+ buffer->curr_samples++;
+ if (buffer->curr_samples == buffer->max_samples) {
+ ret = flush_submit_buffer(devc);
+ if (ret != SR_OK)
+ return ret;
+ }
+ sr_sw_limits_update_samples_read(limits, 1);
+ if (sr_sw_limits_check(limits))
+ break;
+ }
+
+ return SR_OK;
+}
+
/*
* In 100 and 200 MHz mode, only a single pin rising/falling can be
* set as trigger. In other modes, two rising/falling triggers can be set,
struct sr_trigger_stage *stage;
struct sr_trigger_match *match;
const GSList *l, *m;
- int channelbit, trigger_set;
+ uint16_t channelbit;
+ size_t trigger_set;
devc = sdi->priv;
- memset(&devc->trigger, 0, sizeof(struct sigma_trigger));
- if (!(trigger = sr_session_trigger_get(sdi->session)))
+ memset(&devc->trigger, 0, sizeof(devc->trigger));
+ devc->use_triggers = FALSE;
+ trigger = sr_session_trigger_get(sdi->session);
+ if (!trigger)
+ return SR_OK;
+
+ if (!ASIX_SIGMA_WITH_TRIGGER) {
+ sr_warn("Trigger support is not implemented. Ignoring the spec.");
return SR_OK;
+ }
trigger_set = 0;
for (l = trigger->stages; l; l = l->next) {
stage = l->data;
for (m = stage->matches; m; m = m->next) {
match = m->data;
+ /* Ignore disabled channels with a trigger. */
if (!match->channel->enabled)
- /* Ignore disabled channels with a trigger. */
continue;
- channelbit = 1 << (match->channel->index);
- if (devc->cur_samplerate >= SR_MHZ(100)) {
+ channelbit = BIT(match->channel->index);
+ if (devc->clock.samplerate >= SR_MHZ(100)) {
/* Fast trigger support. */
if (trigger_set) {
- sr_err("Only a single pin trigger is "
- "supported in 100 and 200MHz mode.");
+ sr_err("100/200MHz modes limited to single trigger pin.");
return SR_ERR;
}
- if (match->match == SR_TRIGGER_FALLING)
+ if (match->match == SR_TRIGGER_FALLING) {
devc->trigger.fallingmask |= channelbit;
- else if (match->match == SR_TRIGGER_RISING)
+ } else if (match->match == SR_TRIGGER_RISING) {
devc->trigger.risingmask |= channelbit;
- else {
- sr_err("Only rising/falling trigger is "
- "supported in 100 and 200MHz mode.");
+ } else {
+ sr_err("100/200MHz modes limited to edge trigger.");
return SR_ERR;
}
* does not permit ORed triggers.
*/
if (trigger_set > 1) {
- sr_err("Only 1 rising/falling trigger "
- "is supported.");
+ sr_err("Limited to 1 edge trigger.");
return SR_ERR;
}
}
}
}
+	/* Keep track of whether triggers are involved during acquisition. */
+ devc->use_triggers = TRUE;
+
return SR_OK;
}
/* Software trigger to determine exact trigger position. */
static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
- struct sigma_trigger *t)
+ struct sigma_trigger *t)
{
- int i;
- uint16_t sample = 0;
+ const uint8_t *rdptr;
+ size_t i;
+ uint16_t sample;
+ rdptr = samples;
+ sample = 0;
for (i = 0; i < 8; i++) {
if (i > 0)
last_sample = sample;
- sample = samples[2 * i] | (samples[2 * i + 1] << 8);
+ sample = read_u16le_inc(&rdptr);
/* Simple triggers. */
if ((sample & t->simplemask) != t->simplevalue)
return i & 0x7;
}
+static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
+{
+ /* TODO
+ * Check whether the combination of this very sample and the
+ * previous state match the configured trigger condition. This
+ * improves the resolution of the trigger marker's position.
+ * The hardware provided position is coarse, and may point to
+ * a position before the actual match.
+ *
+ * See the previous get_trigger_offset() implementation. This
+ * code needs to get re-used here.
+ */
+ if (!devc->use_triggers)
+ return FALSE;
+
+ (void)sample;
+ (void)get_trigger_offset;
+
+ return FALSE;
+}
+
+static int check_and_submit_sample(struct dev_context *devc,
+ uint16_t sample, size_t count, gboolean check_trigger)
+{
+ gboolean triggered;
+ int ret;
+
+ triggered = check_trigger && sample_matches_trigger(devc, sample);
+ if (triggered) {
+ ret = flush_submit_buffer(devc);
+ if (ret != SR_OK)
+ return ret;
+ ret = std_session_send_df_trigger(devc->buffer->sdi);
+ if (ret != SR_OK)
+ return ret;
+ }
+
+ ret = addto_submit_buffer(devc, sample, count);
+ if (ret != SR_OK)
+ return ret;
+
+ return SR_OK;
+}
+
/*
* Return the timestamp of "DRAM cluster".
*/
static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
{
- return (cluster->timestamp_hi << 8) | cluster->timestamp_lo;
+ return read_u16le((const uint8_t *)&cluster->timestamp);
}
/*
*/
static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
{
- uint16_t sample;
-
- sample = 0;
- sample |= cl->samples[idx].sample_lo << 0;
- sample |= cl->samples[idx].sample_hi << 8;
- sample = (sample >> 8) | (sample << 8);
- return sample;
+ return read_u16le((const uint8_t *)&cl->samples[idx]);
}
/*
return outdata;
}
-static void store_sr_sample(uint8_t *samples, int idx, uint16_t data)
-{
- samples[2 * idx + 0] = (data >> 0) & 0xff;
- samples[2 * idx + 1] = (data >> 8) & 0xff;
-}
-
-/*
- * Local wrapper around sr_session_send() calls. Make sure to not send
- * more samples to the session's datafeed than what was requested by a
- * previously configured (optional) sample count.
- */
-static void sigma_session_send(struct sr_dev_inst *sdi,
- struct sr_datafeed_packet *packet)
-{
- struct dev_context *devc;
- struct sr_datafeed_logic *logic;
- uint64_t send_now;
-
- devc = sdi->priv;
- if (devc->limit_samples) {
- logic = (void *)packet->payload;
- send_now = logic->length / logic->unitsize;
- if (devc->sent_samples + send_now > devc->limit_samples) {
- send_now = devc->limit_samples - devc->sent_samples;
- logic->length = send_now * logic->unitsize;
- }
- if (!send_now)
- return;
- devc->sent_samples += send_now;
- }
-
- sr_session_send(sdi, packet);
-}
-
-/*
- * This size translates to: event count (1K events per cluster), times
- * the sample width (unitsize, 16bits per event), times the maximum
- * number of samples per event.
- */
-#define SAMPLES_BUFFER_SIZE (1024 * 2 * 4)
-
-static void sigma_decode_dram_cluster(struct sigma_dram_cluster *dram_cluster,
- unsigned int events_in_cluster,
- unsigned int triggered,
- struct sr_dev_inst *sdi)
+static void sigma_decode_dram_cluster(struct dev_context *devc,
+ struct sigma_dram_cluster *dram_cluster,
+ size_t events_in_cluster, gboolean triggered)
{
- struct dev_context *devc = sdi->priv;
- struct sigma_state *ss = &devc->state;
- struct sr_datafeed_packet packet;
- struct sr_datafeed_logic logic;
+ struct sigma_state *ss;
uint16_t tsdiff, ts, sample, item16;
- uint8_t samples[SAMPLES_BUFFER_SIZE];
- uint8_t *send_ptr;
- size_t send_count, trig_count;
- unsigned int i;
- int j;
+ size_t count;
+ size_t evt;
- ts = sigma_dram_cluster_ts(dram_cluster);
- tsdiff = ts - ss->lastts;
- ss->lastts = ts + EVENTS_PER_CLUSTER;
-
- packet.type = SR_DF_LOGIC;
- packet.payload = &logic;
- logic.unitsize = 2;
- logic.data = samples;
+ if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
+ triggered = FALSE;
/*
* If this cluster is not adjacent to the previously received
* cluster, then send the appropriate number of samples with the
* previous values to the sigrok session. This "decodes RLE".
+ *
+ * These samples cannot match the trigger since they just repeat
+ * the previously submitted data pattern. (This assumption holds
+ * for simple level and edge triggers. It would not for timed or
+ * counted conditions, which currently are not supported.)
*/
- for (ts = 0; ts < tsdiff; ts++) {
- i = ts % 1024;
- store_sr_sample(samples, i, ss->lastsample);
-
- /*
- * If we have 1024 samples ready or we're at the
- * end of submitting the padding samples, submit
- * the packet to Sigrok. Since constant data is
- * sent, duplication of data for rates above 50MHz
- * is simple.
- */
- if ((i == 1023) || (ts == tsdiff - 1)) {
- logic.length = (i + 1) * logic.unitsize;
- for (j = 0; j < devc->samples_per_event; j++)
- sigma_session_send(sdi, &packet);
- }
+ ss = &devc->state;
+ ts = sigma_dram_cluster_ts(dram_cluster);
+ tsdiff = ts - ss->lastts;
+ if (tsdiff > 0) {
+ sample = ss->lastsample;
+ count = tsdiff * devc->samples_per_event;
+ (void)check_and_submit_sample(devc, sample, count, FALSE);
}
+ ss->lastts = ts + EVENTS_PER_CLUSTER;
/*
- * Parse the samples in current cluster and prepare them
- * to be submitted to Sigrok. Cope with memory layouts that
- * vary with the samplerate.
+ * Grab sample data from the current cluster and prepare their
+ * submission to the session feed. Handle samplerate dependent
+ * memory layout of sample data. Accumulation of data chunks
+ * before submission is transparent to this code path, specific
+ * buffer depth is neither assumed nor required here.
*/
- send_ptr = &samples[0];
- send_count = 0;
sample = 0;
- for (i = 0; i < events_in_cluster; i++) {
- item16 = sigma_dram_cluster_data(dram_cluster, i);
- if (devc->cur_samplerate == SR_MHZ(200)) {
+ for (evt = 0; evt < events_in_cluster; evt++) {
+ item16 = sigma_dram_cluster_data(dram_cluster, evt);
+ if (devc->clock.samplerate == SR_MHZ(200)) {
sample = sigma_deinterlace_200mhz_data(item16, 0);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 1);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 2);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_200mhz_data(item16, 3);
- store_sr_sample(samples, send_count++, sample);
- } else if (devc->cur_samplerate == SR_MHZ(100)) {
+ check_and_submit_sample(devc, sample, 1, triggered);
+ } else if (devc->clock.samplerate == SR_MHZ(100)) {
sample = sigma_deinterlace_100mhz_data(item16, 0);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
sample = sigma_deinterlace_100mhz_data(item16, 1);
- store_sr_sample(samples, send_count++, sample);
+ check_and_submit_sample(devc, sample, 1, triggered);
} else {
sample = item16;
- store_sr_sample(samples, send_count++, sample);
- }
- }
-
- /*
- * If a trigger position applies, then provide the datafeed with
- * the first part of data up to that position, then send the
- * trigger marker.
- */
- int trigger_offset = 0;
- if (triggered) {
- /*
- * Trigger is not always accurate to sample because of
- * pipeline delay. However, it always triggers before
- * the actual event. We therefore look at the next
- * samples to pinpoint the exact position of the trigger.
- */
- trigger_offset = get_trigger_offset(samples,
- ss->lastsample, &devc->trigger);
-
- if (trigger_offset > 0) {
- trig_count = trigger_offset * devc->samples_per_event;
- packet.type = SR_DF_LOGIC;
- logic.length = trig_count * logic.unitsize;
- sigma_session_send(sdi, &packet);
- send_ptr += trig_count * logic.unitsize;
- send_count -= trig_count;
+ check_and_submit_sample(devc, sample, 1, triggered);
}
-
- /* Only send trigger if explicitly enabled. */
- if (devc->use_triggers)
- std_session_send_df_trigger(sdi);
- }
-
- /*
- * Send the data after the trigger, or all of the received data
- * if no trigger position applies.
- */
- if (send_count) {
- packet.type = SR_DF_LOGIC;
- logic.length = send_count * logic.unitsize;
- logic.data = send_ptr;
- sigma_session_send(sdi, &packet);
}
-
ss->lastsample = sample;
}
* For 50 MHz and below, events contain one sample for each channel,
* spread 20 ns apart.
*/
-static int decode_chunk_ts(struct sigma_dram_line *dram_line,
- uint16_t events_in_line,
- uint32_t trigger_event,
- struct sr_dev_inst *sdi)
+static int decode_chunk_ts(struct dev_context *devc,
+ struct sigma_dram_line *dram_line,
+ size_t events_in_line, size_t trigger_event)
{
struct sigma_dram_cluster *dram_cluster;
- struct dev_context *devc;
- unsigned int clusters_in_line;
- unsigned int events_in_cluster;
- unsigned int i;
- uint32_t trigger_cluster, triggered;
+ size_t clusters_in_line;
+ size_t events_in_cluster;
+ size_t cluster;
+ size_t trigger_cluster;
- devc = sdi->priv;
clusters_in_line = events_in_line;
clusters_in_line += EVENTS_PER_CLUSTER - 1;
clusters_in_line /= EVENTS_PER_CLUSTER;
- trigger_cluster = ~0;
- triggered = 0;
/* Check if trigger is in this chunk. */
- if (trigger_event < (64 * 7)) {
- if (devc->cur_samplerate <= SR_MHZ(50)) {
+ trigger_cluster = ~0UL;
+ if (trigger_event < EVENTS_PER_ROW) {
+ if (devc->clock.samplerate <= SR_MHZ(50)) {
trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
trigger_event);
}
}
/* For each full DRAM cluster. */
- for (i = 0; i < clusters_in_line; i++) {
- dram_cluster = &dram_line->cluster[i];
+ for (cluster = 0; cluster < clusters_in_line; cluster++) {
+ dram_cluster = &dram_line->cluster[cluster];
/* The last cluster might not be full. */
- if ((i == clusters_in_line - 1) &&
+ if ((cluster == clusters_in_line - 1) &&
(events_in_line % EVENTS_PER_CLUSTER)) {
events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
} else {
events_in_cluster = EVENTS_PER_CLUSTER;
}
- triggered = (i == trigger_cluster);
- sigma_decode_dram_cluster(dram_cluster, events_in_cluster,
- triggered, sdi);
+ sigma_decode_dram_cluster(devc, dram_cluster,
+ events_in_cluster, cluster == trigger_cluster);
}
return SR_OK;
struct dev_context *devc;
struct sigma_dram_line *dram_line;
- int bufsz;
uint32_t stoppos, triggerpos;
uint8_t modestatus;
- uint32_t i;
- uint32_t dl_lines_total, dl_lines_curr, dl_lines_done;
- uint32_t dl_first_line, dl_line;
- uint32_t dl_events_in_line;
- uint32_t trg_line, trg_event;
+ size_t line_idx;
+ size_t dl_lines_total, dl_lines_curr, dl_lines_done;
+ size_t dl_first_line, dl_line;
+ size_t dl_events_in_line, trigger_event;
+ size_t trg_line, trg_event;
+ int ret;
devc = sdi->priv;
- dl_events_in_line = 64 * 7;
sr_info("Downloading sample data.");
devc->state.state = SIGMA_DOWNLOAD;
* clusters to DRAM regardless of whether pin state changes) and
* raise the POSTTRIGGERED flag.
*/
- sigma_set_register(WRITE_MODE, WMR_FORCESTOP | WMR_SDRAMWRITEEN, devc);
+ modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
+ ret = sigma_set_register(devc, WRITE_MODE, modestatus);
+ if (ret != SR_OK)
+ return ret;
do {
- if (sigma_read_register(READ_MODE, &modestatus, 1, devc) != 1) {
- sr_err("failed while waiting for RMR_POSTTRIGGERED bit");
+ ret = sigma_get_register(devc, READ_MODE, &modestatus);
+ if (ret != SR_OK) {
+ sr_err("Could not poll for post-trigger state.");
return FALSE;
}
} while (!(modestatus & RMR_POSTTRIGGERED));
/* Set SDRAM Read Enable. */
- sigma_set_register(WRITE_MODE, WMR_SDRAMREADEN, devc);
-
- /* Get the current position. */
- sigma_read_pos(&stoppos, &triggerpos, devc);
+ ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
+ if (ret != SR_OK)
+ return ret;
- /* Check if trigger has fired. */
- if (sigma_read_register(READ_MODE, &modestatus, 1, devc) != 1) {
- sr_err("failed to read READ_MODE register");
+ /* Get the current position. Check if trigger has fired. */
+ ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
+ if (ret != SR_OK) {
+ sr_err("Could not query capture positions/state.");
return FALSE;
}
- trg_line = ~0;
- trg_event = ~0;
+ if (!devc->use_triggers)
+ triggerpos = ~0;
+ trg_line = ~0UL;
+ trg_event = ~0UL;
if (modestatus & RMR_TRIGGERED) {
- trg_line = triggerpos >> 9;
- trg_event = triggerpos & 0x1ff;
+ trg_line = triggerpos >> ROW_SHIFT;
+ trg_event = triggerpos & ROW_MASK;
}
- devc->sent_samples = 0;
-
/*
* Determine how many "DRAM lines" of 1024 bytes each we need to
* retrieve from the Sigma hardware, so that we have a complete
*
* When RMR_ROUND is set, the circular buffer in DRAM has wrapped
* around. Since the status of the very next line is uncertain in
- * that case, we skip it and start reading from the next line. The
- * circular buffer has 32K lines (0x8000).
+ * that case, we skip it and start reading from the next line.
*/
- dl_lines_total = (stoppos >> 9) + 1;
+ dl_first_line = 0;
+ dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
if (modestatus & RMR_ROUND) {
dl_first_line = dl_lines_total + 1;
- dl_lines_total = 0x8000 - 2;
- } else {
- dl_first_line = 0;
+ dl_lines_total = ROW_COUNT - 2;
}
dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
if (!dram_line)
return FALSE;
+ ret = alloc_submit_buffer(sdi);
+ if (ret != SR_OK)
+ return FALSE;
+ ret = setup_submit_limit(devc);
+ if (ret != SR_OK)
+ return FALSE;
dl_lines_done = 0;
while (dl_lines_total > dl_lines_done) {
/* We can download only up-to 32 DRAM lines in one go! */
dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
dl_line = dl_first_line + dl_lines_done;
- dl_line %= 0x8000;
- bufsz = sigma_read_dram(dl_line, dl_lines_curr,
- (uint8_t *)dram_line, devc);
- /* TODO: Check bufsz. For now, just avoid compiler warnings. */
- (void)bufsz;
+ dl_line %= ROW_COUNT;
+ ret = sigma_read_dram(devc, dl_line, dl_lines_curr,
+ (uint8_t *)dram_line);
+ if (ret != SR_OK)
+ return FALSE;
/* This is the first DRAM line, so find the initial timestamp. */
if (dl_lines_done == 0) {
devc->state.lastsample = 0;
}
- for (i = 0; i < dl_lines_curr; i++) {
- uint32_t trigger_event = ~0;
- /* The last "DRAM line" can be only partially full. */
- if (dl_lines_done + i == dl_lines_total - 1)
- dl_events_in_line = stoppos & 0x1ff;
+ for (line_idx = 0; line_idx < dl_lines_curr; line_idx++) {
+ /* The last "DRAM line" need not span its full length. */
+ dl_events_in_line = EVENTS_PER_ROW;
+ if (dl_lines_done + line_idx == dl_lines_total - 1)
+ dl_events_in_line = stoppos & ROW_MASK;
/* Test if the trigger happened on this line. */
- if (dl_lines_done + i == trg_line)
+ trigger_event = ~0UL;
+ if (dl_lines_done + line_idx == trg_line)
trigger_event = trg_event;
- decode_chunk_ts(dram_line + i, dl_events_in_line,
- trigger_event, sdi);
+ decode_chunk_ts(devc, dram_line + line_idx,
+ dl_events_in_line, trigger_event);
}
dl_lines_done += dl_lines_curr;
}
+ flush_submit_buffer(devc);
+ free_submit_buffer(devc);
g_free(dram_line);
std_session_send_df_end(sdi);
/*
 * Periodic check that runs while an acquisition is in progress. When
 * a user specified limit (sample count or capture duration) has been
 * reached, stop the hardware and download the sample memory content.
 *
 * Returns TRUE (keep the receive callback registered) while capturing,
 * or the result of download_capture() once a limit was reached.
 */
static int sigma_capture_mode(struct sr_dev_inst *sdi)
{
	struct dev_context *devc;

	devc = sdi->priv;

	/*
	 * Sample count limits are covered by the acquisition's software
	 * limits as well (they were translated to a timeout at start).
	 */
	if (sr_sw_limits_check(&devc->limit.acquire))
		return download_capture(sdi);

	return TRUE;
}
/* Build a LUT entry used by the trigger functions. */
-static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
+static void build_lut_entry(uint16_t *lut_entry,
+ uint16_t spec_value, uint16_t spec_mask)
{
- int i, j, k, bit;
-
- /* For each quad channel. */
- for (i = 0; i < 4; i++) {
- entry[i] = 0xffff;
+ size_t quad, bitidx, ch;
+ uint16_t quadmask, bitmask;
+ gboolean spec_value_low, bit_idx_low;
- /* For each bit in LUT. */
- for (j = 0; j < 16; j++)
-
- /* For each channel in quad. */
- for (k = 0; k < 4; k++) {
- bit = 1 << (i * 4 + k);
-
- /* Set bit in entry */
- if ((mask & bit) && ((!(value & bit)) !=
- (!(j & (1 << k)))))
- entry[i] &= ~(1 << j);
+ /*
+ * For each quad-channel-group, for each bit in the LUT (each
+ * bit pattern of the channel signals, aka LUT address), for
+ * each channel in the quad, setup the bit in the LUT entry.
+ *
+ * Start from all-ones in the LUT (true, always matches), then
+ * "pessimize the truthness" for specified conditions.
+ */
+ for (quad = 0; quad < 4; quad++) {
+ lut_entry[quad] = ~0;
+ for (bitidx = 0; bitidx < 16; bitidx++) {
+ for (ch = 0; ch < 4; ch++) {
+ quadmask = BIT(ch);
+ bitmask = quadmask << (quad * 4);
+ if (!(spec_mask & bitmask))
+ continue;
+ /*
+ * This bit is part of the spec. The
+ * condition which gets checked here
+ * (got checked in all implementations
+ * so far) is uncertain. A bit position
+ * in the current index' number(!) is
+ * checked?
+ */
+ spec_value_low = !(spec_value & bitmask);
+ bit_idx_low = !(bitidx & quadmask);
+ if (spec_value_low == bit_idx_low)
+ continue;
+ lut_entry[quad] &= ~BIT(bitidx);
}
+ }
}
}
/* Add a logical function to LUT mask. */
static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
- int index, int neg, uint16_t *mask)
+ size_t index, gboolean neg, uint16_t *mask)
{
- int i, j;
+ size_t i, j;
int x[2][2], tmp, a, b, aset, bset, rset;
- memset(x, 0, 4 * sizeof(int));
+ memset(x, 0, sizeof(x));
/* Trigger detect condition. */
switch (oper) {
if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
rset = !rset;
- *mask &= ~(1 << i);
+ *mask &= ~BIT(i);
if (rset)
- *mask |= 1 << i;
+ *mask |= BIT(i);
}
}
* simple pin change and state triggers. Only two transitions (rise/fall) can be
* set at any time, but a full mask and value can be set (0/1).
*/
-SR_PRIV int sigma_build_basic_trigger(struct triggerlut *lut, struct dev_context *devc)
+SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
+ struct triggerlut *lut)
{
- int i,j;
- uint16_t masks[2] = { 0, 0 };
+ uint16_t masks[2];
+ size_t bitidx, condidx;
+ uint16_t value, mask;
- memset(lut, 0, sizeof(struct triggerlut));
+ /* Setup something that "won't match" in the absence of a spec. */
+ memset(lut, 0, sizeof(*lut));
+ if (!devc->use_triggers)
+ return SR_OK;
- /* Constant for simple triggers. */
+ /* Start assuming simple triggers. Edges are handled below. */
lut->m4 = 0xa000;
-
- /* Value/mask trigger support. */
- build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
- lut->m2d);
-
- /* Rise/fall trigger support. */
- for (i = 0, j = 0; i < 16; i++) {
- if (devc->trigger.risingmask & (1 << i) ||
- devc->trigger.fallingmask & (1 << i))
- masks[j++] = 1 << i;
+ lut->m3q = 0xffff;
+
+ /* Process value/mask triggers. */
+ value = devc->trigger.simplevalue;
+ mask = devc->trigger.simplemask;
+ build_lut_entry(lut->m2d, value, mask);
+
+ /* Scan for and process rise/fall triggers. */
+ memset(&masks, 0, sizeof(masks));
+ condidx = 0;
+ for (bitidx = 0; bitidx < 16; bitidx++) {
+ mask = BIT(bitidx);
+ value = devc->trigger.risingmask | devc->trigger.fallingmask;
+ if (!(value & mask))
+ continue;
+ if (condidx == 0)
+ build_lut_entry(lut->m0d, mask, mask);
+ if (condidx == 1)
+ build_lut_entry(lut->m1d, mask, mask);
+ masks[condidx++] = mask;
+ if (condidx == ARRAY_SIZE(masks))
+ break;
}
- build_lut_entry(masks[0], masks[0], lut->m0d);
- build_lut_entry(masks[1], masks[1], lut->m1d);
-
- /* Add glue logic */
+ /* Add glue logic for rise/fall triggers. */
if (masks[0] || masks[1]) {
- /* Transition trigger. */
+ lut->m3q = 0;
if (masks[0] & devc->trigger.risingmask)
- add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
+ add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q);
if (masks[0] & devc->trigger.fallingmask)
- add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
+ add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q);
if (masks[1] & devc->trigger.risingmask)
- add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
+ add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q);
if (masks[1] & devc->trigger.fallingmask)
- add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
- } else {
- /* Only value/mask trigger. */
- lut->m3 = 0xffff;
+ add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q);
}
/* Triggertype: event. */
- lut->params.selres = 3;
+ lut->params.selres = TRGSEL_SELCODE_NEVER;
+ lut->params.selinc = TRGSEL_SELCODE_LEVEL;
+ lut->params.sela = 0; /* Counter >= CMPA && LEVEL */
+ lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */
return SR_OK;
}