asix-sigma: improve error propagation, increase robustness
[libsigrok.git] src/hardware/asix-sigma/protocol.c
1/*
2 * This file is part of the libsigrok project.
3 *
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23/*
24 * ASIX SIGMA/SIGMA2 logic analyzer driver
25 */
26
27#include <config.h>
28#include "protocol.h"
29
30/*
31 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
32 * (by means of separate firmware images), as well as 50MHz divided by
33 * an integer divider in the 1..256 range (with the "typical" firmware),
34 * which translates to a strict lower boundary of around 195kHz.
35 *
36 * This driver "suggests" a subset of the available rates by listing a
37 * few discrete values, while setter routines accept any user specified
38 * rate that is supported by the hardware.
39 */
40SR_PRIV const uint64_t samplerates[] = {
41 /* 50MHz and integer divider. 1/2/5 steps (where possible). */
42 SR_KHZ(200), SR_KHZ(500),
43 SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
44 SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
45 /* 100MHz/200MHz, fixed rates in special firmware. */
46 SR_MHZ(100), SR_MHZ(200),
47};
48
49SR_PRIV const size_t samplerates_count = ARRAY_SIZE(samplerates);
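
/*
 * Worked example for the rate scheme described above (illustration
 * only, the actual mapping is implemented in
 * sigma_normalize_samplerate() further down): the "typical" firmware
 * derives its rate as 50MHz / divider with the divider in 1..256, so
 * the largest divider yields 50MHz / 256 = 195312Hz, which is the
 * "around 195kHz" lower boundary.
 */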
50
51static const char *firmware_files[] = {
52 [SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
53 [SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
54 [SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
55 [SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
56 [SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
57};
58
59#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
60
61/*
62 * BEWARE! Error propagation is important, as are the kinds of return values.
63 *
64 * - Raw USB transport communicates the number of sent or received bytes,
65 * or negative error codes in the external library's(!) range of codes.
66 * - Internal routines at the "sigrok driver level" communicate success
67 * or failure in terms of SR_OK et al error codes.
68 * - Main loop style receive callbacks communicate booleans which arrange
69 * for repeated calls to drive progress during acquisition.
70 *
71 * Careful consideration by maintainers is essential, because all of the
72 * above kinds of values are assignment compatible from the compiler's
73 * point of view. Implementation errors will go unnoticed at build time.
74 */
75
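/*
 * Quick reference for the above, in terms of this file's routines:
 * sigma_read_raw() and sigma_write_raw() return libftdi style byte
 * counts or negative error codes, sigma_read_sr() and sigma_write_sr()
 * return SR_OK or SR_ERR_* codes, and sigma_receive_data() returns the
 * TRUE/FALSE booleans which main loop style receive callbacks use.
 */
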
76static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
77{
78 int ret;
79
80 ret = ftdi_read_data(&devc->ftdic, (unsigned char *)buf, size);
81 if (ret < 0) {
82 sr_err("USB data read failed: %s",
83 ftdi_get_error_string(&devc->ftdic));
84 }
85
86 return ret;
87}
88
89static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
90{
91 int ret;
92
93 ret = ftdi_write_data(&devc->ftdic, buf, size);
94 if (ret < 0) {
95 sr_err("USB data write failed: %s",
96 ftdi_get_error_string(&devc->ftdic));
97 } else if ((size_t)ret != size) {
98 sr_err("USB data write length mismatch.");
99 }
100
101 return ret;
102}
103
104static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
105{
106 int ret;
107
108 ret = sigma_read_raw(devc, buf, size);
109 if (ret < 0 || (size_t)ret != size)
110 return SR_ERR_IO;
111
112 return SR_OK;
113}
114
115static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
116{
117 int ret;
118
119 ret = sigma_write_raw(devc, buf, size);
120 if (ret < 0 || (size_t)ret != size)
121 return SR_ERR_IO;
122
123 return SR_OK;
124}
125
126/*
127 * Implementor's note: The local write buffer's size shall suffice for
128 * any known FPGA register transaction that is involved in the supported
129 * feature set of this sigrok device driver. If the length check trips,
130 * that's a programmer's error and needs adjustment in the complete call
131 * stack of the respective code path.
132 */
133SR_PRIV int sigma_write_register(struct dev_context *devc,
134 uint8_t reg, uint8_t *data, size_t len)
135{
136 uint8_t buf[80], *wrptr;
137 size_t idx;
138
139 if (2 + 2 * len > sizeof(buf)) {
140 sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
141 return SR_ERR_BUG;
142 }
143
144 wrptr = buf;
145 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
146 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
147 for (idx = 0; idx < len; idx++) {
148 write_u8_inc(&wrptr, REG_DATA_LOW | (data[idx] & 0xf));
149 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data[idx] >> 4));
150 }
151
152 return sigma_write_sr(devc, buf, wrptr - buf);
153}
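
/*
 * Illustration of the wire format which sigma_write_register() emits
 * (symbolic, the exact bit values live in protocol.h): writing one
 * data byte 0x5a to register 0x12 results in four command bytes,
 * REG_ADDR_LOW | 0x2, REG_ADDR_HIGH | 0x1, REG_DATA_LOW | 0xa, and
 * REG_DATA_HIGH_WRITE | 0x5. Address setup costs two bytes, and every
 * data byte costs another two (low nibble first, then high nibble).
 */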
154
155SR_PRIV int sigma_set_register(struct dev_context *devc,
156 uint8_t reg, uint8_t value)
157{
158 return sigma_write_register(devc, reg, &value, sizeof(value));
159}
160
161static int sigma_read_register(struct dev_context *devc,
162 uint8_t reg, uint8_t *data, size_t len)
163{
164 uint8_t buf[3], *wrptr;
165 int ret;
166
167 wrptr = buf;
168 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
169 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
170 write_u8_inc(&wrptr, REG_READ_ADDR);
171 ret = sigma_write_sr(devc, buf, wrptr - buf);
172 if (ret != SR_OK)
173 return ret;
174
175 return sigma_read_sr(devc, data, len);
176}
177
178static int sigma_read_pos(struct dev_context *devc,
179 uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
180{
181 /*
182 * Read 7 registers starting at trigger position LSB.
183 * Which yields two 24bit counter values, and mode flags.
184 */
185 const uint8_t buf[] = {
186 /* Setup first register address. */
187 REG_ADDR_LOW | READ_TRIGGER_POS_LOW,
188 /* Retrieve trigger position. */
189 REG_READ_ADDR | REG_ADDR_INC,
190 REG_READ_ADDR | REG_ADDR_INC,
191 REG_READ_ADDR | REG_ADDR_INC,
192 /* Retrieve stop position. */
193 REG_READ_ADDR | REG_ADDR_INC,
194 REG_READ_ADDR | REG_ADDR_INC,
195 REG_READ_ADDR | REG_ADDR_INC,
196 /* Retrieve mode register. */
197 REG_READ_ADDR | REG_ADDR_INC,
198 }, *rdptr;
199 uint8_t result[7];
200 uint32_t v32;
201 uint8_t v8;
202 int ret;
203
204 ret = sigma_write_sr(devc, buf, sizeof(buf));
205 if (ret != SR_OK)
206 return ret;
207
208 ret = sigma_read_sr(devc, result, sizeof(result));
209 if (ret != SR_OK)
210 return ret;
211
212 rdptr = &result[0];
213 v32 = read_u24le_inc(&rdptr);
214 if (triggerpos)
215 *triggerpos = v32;
216 v32 = read_u24le_inc(&rdptr);
217 if (stoppos)
218 *stoppos = v32;
219 v8 = read_u8_inc(&rdptr);
220 if (mode)
221 *mode = v8;
222
223 /*
224 * These positions consist of "the memory row" in the MSB fields,
225 * and "an event index" within the row in the LSB fields. Part
226 * of the memory row's content is sample data, another part is
227 * timestamps.
228 *
229 * The retrieved register values point to after the captured
230 * position. So they need to get decremented, and adjusted to
231 * cater for the timestamps when the decrement carries over to
232 * a different memory row.
233 */
234 if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
235 *stoppos -= CLUSTERS_PER_ROW;
236 if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
237 *triggerpos -= CLUSTERS_PER_ROW;
238
239 return SR_OK;
240}
241
242static int sigma_read_dram(struct dev_context *devc,
243 uint16_t startchunk, size_t numchunks, uint8_t *data)
244{
245 uint8_t buf[128], *wrptr;
246 size_t chunk;
247 int sel, ret;
248 gboolean is_last;
249
250 if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
251 sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
252 return SR_ERR_BUG;
253 }
254
255 /* Communicate DRAM start address (memory row, aka samples line). */
256 wrptr = buf;
257 write_u8_inc(&wrptr, startchunk >> 8);
258 write_u8_inc(&wrptr, startchunk & 0xff);
259 ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
260 if (ret != SR_OK)
261 return ret;
262
263 /*
264 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
265 * then transfer via USB. Interleave the FPGA's DRAM access and
266 * USB transfer, use alternating buffers (0/1) in the process.
267 */
268 wrptr = buf;
269 write_u8_inc(&wrptr, REG_DRAM_BLOCK);
270 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
271 for (chunk = 0; chunk < numchunks; chunk++) {
272 sel = chunk % 2;
273 is_last = chunk == numchunks - 1;
274 if (!is_last)
275 write_u8_inc(&wrptr, REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel));
276 write_u8_inc(&wrptr, REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel));
277 if (!is_last)
278 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
279 }
280 ret = sigma_write_sr(devc, buf, wrptr - buf);
281 if (ret != SR_OK)
282 return ret;
283
284 return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
285}
286
287/* Upload trigger look-up tables to Sigma. */
288SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
289 struct triggerlut *lut)
290{
291 int i;
292 uint8_t tmp[2];
293 uint16_t bit;
294 uint8_t buf[6], *wrptr, regval;
295 int ret;
296
297 /* Transpose the table and send to Sigma. */
298 for (i = 0; i < 16; i++) {
299 bit = 1 << i;
300
301 tmp[0] = tmp[1] = 0;
302
303 if (lut->m2d[0] & bit)
304 tmp[0] |= 0x01;
305 if (lut->m2d[1] & bit)
306 tmp[0] |= 0x02;
307 if (lut->m2d[2] & bit)
308 tmp[0] |= 0x04;
309 if (lut->m2d[3] & bit)
310 tmp[0] |= 0x08;
311
312 if (lut->m3 & bit)
313 tmp[0] |= 0x10;
314 if (lut->m3s & bit)
315 tmp[0] |= 0x20;
316 if (lut->m4 & bit)
317 tmp[0] |= 0x40;
318
319 if (lut->m0d[0] & bit)
320 tmp[1] |= 0x01;
321 if (lut->m0d[1] & bit)
322 tmp[1] |= 0x02;
323 if (lut->m0d[2] & bit)
324 tmp[1] |= 0x04;
325 if (lut->m0d[3] & bit)
326 tmp[1] |= 0x08;
327
328 if (lut->m1d[0] & bit)
329 tmp[1] |= 0x10;
330 if (lut->m1d[1] & bit)
331 tmp[1] |= 0x20;
332 if (lut->m1d[2] & bit)
333 tmp[1] |= 0x40;
334 if (lut->m1d[3] & bit)
335 tmp[1] |= 0x80;
336
337 /*
338 * This logic seems redundant, but separates the value
339 * determination from the wire format, and is useful
340 * during future maintenance and research.
341 */
342 wrptr = buf;
343 write_u8_inc(&wrptr, tmp[0]);
344 write_u8_inc(&wrptr, tmp[1]);
345 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
346 if (ret != SR_OK)
347 return ret;
348 ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, 0x30 | i);
349 if (ret != SR_OK)
350 return ret;
351 }
352
353 /* Send the parameters */
354 wrptr = buf;
355 regval = 0;
356 regval |= lut->params.selc << 6;
357 regval |= lut->params.selpresc << 0;
358 write_u8_inc(&wrptr, regval);
359 regval = 0;
360 regval |= lut->params.selinc << 6;
361 regval |= lut->params.selres << 4;
362 regval |= lut->params.sela << 2;
363 regval |= lut->params.selb << 0;
364 write_u8_inc(&wrptr, regval);
365 write_u16le_inc(&wrptr, lut->params.cmpb);
366 write_u16le_inc(&wrptr, lut->params.cmpa);
367 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
368 if (ret != SR_OK)
369 return ret;
370
371 return SR_OK;
372}
373
374/*
375 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
376 * uses FTDI bitbang mode for netlist download in slave serial mode.
377 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
378 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
379 * compatible bitbang mode? For maximum code re-use and reduced libftdi
380 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
381 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
382 *
383 * 750kbps rate (four times the speed of sigmalogan) works well for
384 * netlist download. All pins except INIT_B are output pins during
385 * configuration download.
386 *
387 * Some pins are inverted as a byproduct of level shifting circuitry.
388 * That's why high CCLK level (from the cable's point of view) is idle
389 * from the FPGA's perspective.
390 *
391 * The vendor's literature discusses a "suicide sequence" which ends
392 * regular FPGA execution and should be sent before entering bitbang
393 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
394 * a few times.
395 */
396#define BB_PIN_CCLK (1 << 0) /* D0, CCLK */
397#define BB_PIN_PROG (1 << 1) /* D1, PROG */
398#define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */
399#define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */
400#define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */
401#define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */
402#define BB_PIN_DIN (1 << 6) /* D6, DIN */
403#define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */
404
405#define BB_BITRATE (750 * 1000)
406#define BB_PINMASK (0xff & ~BB_PIN_INIT)
407
408/*
409 * Initiate slave serial mode for configuration download, which is done
410 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
411 * initiating the configuration download.
412 *
413 * Run a "suicide sequence" first to terminate the regular FPGA operation
414 * before reconfiguration. The FTDI cable is single channel, and shares
415 * pins which are used for data communication in FIFO mode with pins that
416 * are used for FPGA configuration in bitbang mode. Hardware defaults for
417 * unconfigured hardware, and runtime conditions after FPGA configuration
418 * need to cooperate such that re-configuration of the FPGA can start.
419 */
420static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
421{
422 const uint8_t suicide[] = {
423 BB_PIN_D7 | BB_PIN_D2,
424 BB_PIN_D7 | BB_PIN_D2,
425 BB_PIN_D7 | BB_PIN_D3,
426 BB_PIN_D7 | BB_PIN_D2,
427 BB_PIN_D7 | BB_PIN_D3,
428 BB_PIN_D7 | BB_PIN_D2,
429 BB_PIN_D7 | BB_PIN_D3,
430 BB_PIN_D7 | BB_PIN_D2,
431 };
432 const uint8_t init_array[] = {
433 BB_PIN_CCLK,
434 BB_PIN_CCLK | BB_PIN_PROG,
435 BB_PIN_CCLK | BB_PIN_PROG,
436 BB_PIN_CCLK,
437 BB_PIN_CCLK,
438 BB_PIN_CCLK,
439 BB_PIN_CCLK,
440 BB_PIN_CCLK,
441 BB_PIN_CCLK,
442 BB_PIN_CCLK,
443 };
444 int retries, ret;
445 uint8_t data;
446
447 /* Section 2. part 1), do the FPGA suicide. */
448 ret = SR_OK;
449 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
450 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
451 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
452 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
453 if (ret != SR_OK)
454 return SR_ERR_IO;
455 g_usleep(10 * 1000);
456
457 /* Section 2. part 2), pulse PROG. */
458 ret = sigma_write_sr(devc, init_array, sizeof(init_array));
459 if (ret != SR_OK)
460 return ret;
461 g_usleep(10 * 1000);
462 ftdi_usb_purge_buffers(&devc->ftdic);
463
464 /*
465 * Wait until the FPGA asserts INIT_B. Check in a maximum number
466 * of bursts with a given delay between them. Read as many pin
467 * capture results as the combination of FTDI chip and FTDI lib
468 * may provide. Cope with absence of pin capture data in a cycle.
469 * This approach shall result in fast response in case of success,
470 * low cost of execution during wait, reliable error handling in
471 * the transport layer, and robust response to failure or absence
472 * of result data (hardware inactivity after stimulus).
473 */
474 retries = 10;
475 while (retries--) {
476 do {
477 ret = sigma_read_raw(devc, &data, sizeof(data));
478 if (ret < 0)
479 return SR_ERR_IO;
480 if (ret == sizeof(data) && (data & BB_PIN_INIT))
481 return SR_OK;
482 } while (ret == sizeof(data));
483 if (retries)
484 g_usleep(10 * 1000);
485 }
486
487 return SR_ERR_TIMEOUT;
488}
489
490/*
491 * This is belt and braces. Re-run the bitbang initiation sequence a few
492 * times should first attempts fail. Failure is rare but can happen (was
493 * observed during driver development).
494 */
495static int sigma_fpga_init_bitbang(struct dev_context *devc)
496{
497 size_t retries;
498 int ret;
499
500 retries = 10;
501 while (retries--) {
502 ret = sigma_fpga_init_bitbang_once(devc);
503 if (ret == SR_OK)
504 return ret;
505 if (ret != SR_ERR_TIMEOUT)
506 return ret;
507 }
508 return ret;
509}
510
511/*
512 * Configure the FPGA for logic-analyzer mode.
513 */
514static int sigma_fpga_init_la(struct dev_context *devc)
515{
516 uint8_t buf[16], *wrptr;
517 uint8_t data_55, data_aa, mode;
518 uint8_t result[3];
519 const uint8_t *rdptr;
520 int ret;
521
522 wrptr = buf;
523
524 /* Read ID register. */
525 write_u8_inc(&wrptr, REG_ADDR_LOW | (READ_ID & 0xf));
526 write_u8_inc(&wrptr, REG_ADDR_HIGH | (READ_ID >> 4));
527 write_u8_inc(&wrptr, REG_READ_ADDR);
528
529 /* Write 0x55 to scratch register, read back. */
530 data_55 = 0x55;
531 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
532 write_u8_inc(&wrptr, REG_DATA_LOW | (data_55 & 0xf));
533 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_55 >> 4));
534 write_u8_inc(&wrptr, REG_READ_ADDR);
535
536 /* Write 0xaa to scratch register, read back. */
537 data_aa = 0xaa;
538 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
539 write_u8_inc(&wrptr, REG_DATA_LOW | (data_aa & 0xf));
540 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_aa >> 4));
541 write_u8_inc(&wrptr, REG_READ_ADDR);
542
543 /* Initiate SDRAM initialization in mode register. */
544 mode = WMR_SDRAMINIT;
545 write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_MODE & 0xf));
546 write_u8_inc(&wrptr, REG_DATA_LOW | (mode & 0xf));
547 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (mode >> 4));
548
549 /*
550 * Send the command sequence which contains 3 READ requests.
551 * Expect to see the corresponding 3 response bytes.
552 */
553 ret = sigma_write_sr(devc, buf, wrptr - buf);
554 if (ret != SR_OK) {
555 sr_err("Could not request LA start response.");
556 return ret;
557 }
558 ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
559 if (ret != SR_OK) {
560 sr_err("Could not receive LA start response.");
561 return SR_ERR_IO;
562 }
563 rdptr = result;
564 if (read_u8_inc(&rdptr) != 0xa6) {
565 sr_err("Unexpected ID response.");
566 return SR_ERR_DATA;
567 }
568 if (read_u8_inc(&rdptr) != data_55) {
569 sr_err("Unexpected scratch read-back (55).");
570 return SR_ERR_DATA;
571 }
572 if (read_u8_inc(&rdptr) != data_aa) {
573 sr_err("Unexpected scratch read-back (aa).");
574 return SR_ERR_DATA;
575 }
576
577 return SR_OK;
578}
579
580/*
581 * Read the firmware from a file and transform it into a series of bitbang
582 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
583 * by the caller of this function.
584 */
585static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
586 uint8_t **bb_cmd, gsize *bb_cmd_size)
587{
588 uint8_t *firmware;
589 size_t file_size;
590 uint8_t *p;
591 size_t l;
592 uint32_t imm;
593 size_t bb_size;
594 uint8_t *bb_stream, *bbs, byte, mask, v;
595
596 /* Retrieve the on-disk firmware file content. */
597 firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
598 &file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
599 if (!firmware)
600 return SR_ERR_IO;
601
602 /* Unscramble the file content (XOR with "random" sequence). */
603 p = firmware;
604 l = file_size;
605 imm = 0x3f6df2ab;
606 while (l--) {
607 imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
608 *p++ ^= imm & 0xff;
609 }
610
611 /*
612 * Generate a sequence of bitbang samples, with two samples per
613 * FPGA configuration bit, providing the level for the DIN signal
614 * as well as two edges for CCLK. See Xilinx UG332 for details
615 * ("slave serial" mode).
616 *
617 * Note that CCLK is inverted in hardware. That's why the
618 * respective bit is first set and then cleared in the bitbang
619 * sample sets, so that the DIN level will be stable when the
620 * data gets sampled at the rising CCLK edge, and the signals'
621 * setup time constraint will be met.
622 *
623 * The caller will put the FPGA into download mode, will send
624 * the bitbang samples, and release the allocated memory.
625 */
626 bb_size = file_size * 8 * 2;
627 bb_stream = g_try_malloc(bb_size);
628 if (!bb_stream) {
629 sr_err("Memory allocation failed during firmware upload.");
630 g_free(firmware);
631 return SR_ERR_MALLOC;
632 }
633 bbs = bb_stream;
634 p = firmware;
635 l = file_size;
636 while (l--) {
637 byte = *p++;
638 mask = 0x80;
639 while (mask) {
640 v = (byte & mask) ? BB_PIN_DIN : 0;
641 mask >>= 1;
642 *bbs++ = v | BB_PIN_CCLK;
643 *bbs++ = v;
644 }
645 }
646 g_free(firmware);
647
648 /* The transformation completed successfully, return the result. */
649 *bb_cmd = bb_stream;
650 *bb_cmd_size = bb_size;
651
652 return SR_OK;
653}
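
/*
 * Illustration of the expansion above: each firmware bit becomes two
 * bitbang samples. A '1' bit yields { BB_PIN_DIN | BB_PIN_CCLK,
 * BB_PIN_DIN } and a '0' bit yields { BB_PIN_CCLK, 0 }, with bits
 * taken MSB first. One 256KiB firmware image thus expands to at most
 * 4MiB of bitbang samples (SIGMA_FIRMWARE_SIZE_LIMIT * 8 * 2).
 */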
654
655static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
656 enum sigma_firmware_idx firmware_idx)
657{
658 int ret;
659 uint8_t *buf;
660 uint8_t pins;
661 size_t buf_size;
662 const char *firmware;
663
664 /* Check for valid firmware file selection. */
665 if (firmware_idx >= ARRAY_SIZE(firmware_files))
666 return SR_ERR_ARG;
667 firmware = firmware_files[firmware_idx];
668 if (!firmware || !*firmware)
669 return SR_ERR_ARG;
670
671 /* Avoid downloading the same firmware multiple times. */
672 if (devc->firmware_idx == firmware_idx) {
673 sr_info("Not uploading firmware file '%s' again.", firmware);
674 return SR_OK;
675 }
676
677 devc->state.state = SIGMA_CONFIG;
678
679 /* Set the cable to bitbang mode. */
680 ret = ftdi_set_bitmode(&devc->ftdic, BB_PINMASK, BITMODE_BITBANG);
681 if (ret < 0) {
682 sr_err("Could not setup cable mode for upload: %s",
683 ftdi_get_error_string(&devc->ftdic));
684 return SR_ERR;
685 }
686 ret = ftdi_set_baudrate(&devc->ftdic, BB_BITRATE);
687 if (ret < 0) {
688 sr_err("Could not setup bitrate for upload: %s",
689 ftdi_get_error_string(&devc->ftdic));
690 return SR_ERR;
691 }
692
693 /* Initiate FPGA configuration mode. */
694 ret = sigma_fpga_init_bitbang(devc);
695	if (ret != SR_OK) {
696		sr_err("Could not initiate firmware upload to hardware.");
697 return ret;
698 }
699
700 /* Prepare wire format of the firmware image. */
701 ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
702 if (ret != SR_OK) {
703 sr_err("Could not prepare file %s for upload.", firmware);
704 return ret;
705 }
706
707 /* Write the FPGA netlist to the cable. */
708 sr_info("Uploading firmware file '%s'.", firmware);
709 ret = sigma_write_sr(devc, buf, buf_size);
710 g_free(buf);
711 if (ret != SR_OK) {
712 sr_err("Could not upload firmware file '%s'.", firmware);
713 return ret;
714 }
715
716 /* Leave bitbang mode and discard pending input data. */
717 ret = ftdi_set_bitmode(&devc->ftdic, 0, BITMODE_RESET);
718 if (ret < 0) {
719 sr_err("Could not setup cable mode after upload: %s",
720 ftdi_get_error_string(&devc->ftdic));
721 return SR_ERR;
722 }
723 ftdi_usb_purge_buffers(&devc->ftdic);
724 while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
725 ;
726
727 /* Initialize the FPGA for logic-analyzer mode. */
728 ret = sigma_fpga_init_la(devc);
729 if (ret != SR_OK) {
730 sr_err("Hardware response after firmware upload failed.");
731 return ret;
732 }
733
734 /* Keep track of successful firmware download completion. */
735 devc->state.state = SIGMA_IDLE;
736 devc->firmware_idx = firmware_idx;
737 sr_info("Firmware uploaded.");
738
739 return SR_OK;
740}
741
742/*
743 * The driver supports user specified time or sample count limits. The
744 * device's hardware supports neither, and hardware compression prevents
745 * reliable detection of "fill levels" (currently reached sample counts)
746 * from register values during acquisition. That's why the driver needs
747 * to apply some heuristics:
748 *
749 * - The (optional) sample count limit and the (normalized) samplerate
750 * get mapped to an estimated duration for these samples' acquisition.
751 * - The (optional) time limit gets checked as well. The lesser of the
752 * two limits will terminate the data acquisition phase. The exact
753 * sample count limit gets enforced in session feed submission paths.
754 * - Some slack needs to be given to account for hardware pipelines as
755 * well as late storage of last chunks after compression thresholds
756 * are tripped. The resulting data set will span at least the caller
757 * specified period of time, which shall be perfectly acceptable.
758 *
759 * With RLE compression active, up to 64K sample periods can pass before
760 * a cluster accumulates, which translates to 327ms at 200kHz. Add two
761 * times that period for good measure; one is not enough to flush the
762 * hardware pipeline (observation from an earlier experiment).
763 */
764SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
765{
766 int ret;
767 GVariant *data;
768 uint64_t user_count, user_msecs;
769 uint64_t worst_cluster_time_ms;
770 uint64_t count_msecs, acquire_msecs;
771
772 sr_sw_limits_init(&devc->acq_limits);
773
774 /* Get sample count limit, convert to msecs. */
775 ret = sr_sw_limits_config_get(&devc->cfg_limits,
776 SR_CONF_LIMIT_SAMPLES, &data);
777 if (ret != SR_OK)
778 return ret;
779 user_count = g_variant_get_uint64(data);
780 g_variant_unref(data);
781 count_msecs = 0;
782 if (user_count)
783 count_msecs = 1000 * user_count / devc->samplerate + 1;
784
785 /* Get time limit, which is in msecs. */
786 ret = sr_sw_limits_config_get(&devc->cfg_limits,
787 SR_CONF_LIMIT_MSEC, &data);
788 if (ret != SR_OK)
789 return ret;
790 user_msecs = g_variant_get_uint64(data);
791 g_variant_unref(data);
792
793 /* Get the lesser of them, with both being optional. */
794 acquire_msecs = ~0ull;
795 if (user_count && count_msecs < acquire_msecs)
796 acquire_msecs = count_msecs;
797 if (user_msecs && user_msecs < acquire_msecs)
798 acquire_msecs = user_msecs;
799 if (acquire_msecs == ~0ull)
800 return SR_OK;
801
802 /* Add some slack, and use that timeout for acquisition. */
803 worst_cluster_time_ms = 1000 * 65536 / devc->samplerate;
804 acquire_msecs += 2 * worst_cluster_time_ms;
805 data = g_variant_new_uint64(acquire_msecs);
806 ret = sr_sw_limits_config_set(&devc->acq_limits,
807 SR_CONF_LIMIT_MSEC, data);
808 g_variant_unref(data);
809 if (ret != SR_OK)
810 return ret;
811
812 sr_sw_limits_acquisition_start(&devc->acq_limits);
813 return SR_OK;
814}
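
/*
 * Worked example for the timeout heuristics (the numbers follow
 * directly from the code above): a 1M sample limit at 1MHz maps to
 * 1000 * 1000000 / 1000000 + 1 = 1001ms, the worst case cluster time
 * is 1000 * 65536 / 1000000 = 65ms, so the acquisition runs for at
 * most 1001 + 2 * 65 = 1131ms before the download gets initiated.
 */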
815
816/*
817 * Check whether a caller specified samplerate matches the device's
818 * hardware constraints (can be used for acquisition). Optionally yield
819 * a value that approximates the original spec.
820 *
821 * This routine assumes that input specs are in the 200kHz to 200MHz
822 * range of supported rates, and callers typically want to normalize a
823 * given value to the hardware capabilities. Values in the 50MHz range
824 * get rounded up by default, to avoid a more expensive check for the
825 * closest match, while higher sampling rate is always desirable during
826 * measurement. Input specs which exactly match hardware capabilities
827 * remain unaffected. Because 100/200MHz rates also limit the number of
828 * available channels, they are not suggested by this routine, instead
829 * callers need to pick them consciously.
830 */
831SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
832{
833 uint64_t div, rate;
834
835 /* Accept exact matches for 100/200MHz. */
836 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
837 if (have_rate)
838 *have_rate = want_rate;
839 return SR_OK;
840 }
841
842 /* Accept 200kHz to 50MHz range, and map to near value. */
843 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
844 div = SR_MHZ(50) / want_rate;
845 rate = SR_MHZ(50) / div;
846 if (have_rate)
847 *have_rate = rate;
848 return SR_OK;
849 }
850
851 return SR_ERR_ARG;
852}
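
/*
 * Example usage (illustration only, not referenced by driver code):
 * a caller specified 3MHz is not an exact divider result and gets
 * normalized to the next supported rate above it.
 *
 *	uint64_t have_rate;
 *	(void)sigma_normalize_samplerate(SR_MHZ(3), &have_rate);
 *	   --> div = 50MHz / 3MHz = 16, have_rate = 50MHz / 16 = 3125000Hz
 */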
853
854SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
855{
856 struct dev_context *devc;
857 struct drv_context *drvc;
858 uint64_t samplerate;
859 int ret;
860 int num_channels;
861
862 devc = sdi->priv;
863 drvc = sdi->driver->context;
864
865 /* Accept any caller specified rate which the hardware supports. */
866 ret = sigma_normalize_samplerate(devc->samplerate, &samplerate);
867 if (ret != SR_OK)
868 return ret;
869
870 /*
871 * Depending on the samplerate (200MHz, 100MHz, or 50MHz and below),
872 * specific firmware is required, and higher rates might limit the
873 * set of available channels.
874 */
875 num_channels = devc->num_channels;
876 if (samplerate <= SR_MHZ(50)) {
877 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
878 num_channels = 16;
879 } else if (samplerate == SR_MHZ(100)) {
880 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
881 num_channels = 8;
882 } else if (samplerate == SR_MHZ(200)) {
883 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
884 num_channels = 4;
885 }
886
887 /*
888 * The samplerate affects the number of available logic channels
889 * as well as a sample memory layout detail (the number of samples
890 * which the device will communicate within an "event").
891 */
892 if (ret == SR_OK) {
893 devc->num_channels = num_channels;
894 devc->samples_per_event = 16 / devc->num_channels;
895 }
896
897 return ret;
898}
899
900/*
901 * Arrange for a session feed submit buffer: a queue where a number of
902 * samples gets accumulated to reduce the number of send calls, and
903 * which also enforces an optional sample count limit for data acquisition.
904 *
905 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
906 * driver provides a fixed channel layout regardless of samplerate).
907 */
908
909#define CHUNK_SIZE (4 * 1024 * 1024)
910
911struct submit_buffer {
912 size_t unit_size;
913 size_t max_samples, curr_samples;
914 uint8_t *sample_data;
915 uint8_t *write_pointer;
916 struct sr_dev_inst *sdi;
917 struct sr_datafeed_packet packet;
918 struct sr_datafeed_logic logic;
919};
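
/*
 * With the 4MiB CHUNK_SIZE and the fixed 16bit unit size, up to
 * 2 * 1024 * 1024 samples accumulate in the submit buffer before a
 * flush to the session feed becomes necessary.
 */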
920
921static int alloc_submit_buffer(struct sr_dev_inst *sdi)
922{
923 struct dev_context *devc;
924 struct submit_buffer *buffer;
925 size_t size;
926
927 devc = sdi->priv;
928
929 buffer = g_malloc0(sizeof(*buffer));
930 devc->buffer = buffer;
931
932 buffer->unit_size = sizeof(uint16_t);
933 size = CHUNK_SIZE;
934 size /= buffer->unit_size;
935 buffer->max_samples = size;
936 size *= buffer->unit_size;
937 buffer->sample_data = g_try_malloc0(size);
938 if (!buffer->sample_data)
939 return SR_ERR_MALLOC;
940 buffer->write_pointer = buffer->sample_data;
941 sr_sw_limits_init(&devc->feed_limits);
942
943 buffer->sdi = sdi;
944 memset(&buffer->logic, 0, sizeof(buffer->logic));
945 buffer->logic.unitsize = buffer->unit_size;
946 buffer->logic.data = buffer->sample_data;
947 memset(&buffer->packet, 0, sizeof(buffer->packet));
948 buffer->packet.type = SR_DF_LOGIC;
949 buffer->packet.payload = &buffer->logic;
950
951 return SR_OK;
952}
953
954static int setup_submit_limit(struct dev_context *devc)
955{
956 struct sr_sw_limits *limits;
957 int ret;
958 GVariant *data;
959 uint64_t total;
960
961 limits = &devc->feed_limits;
962
963 ret = sr_sw_limits_config_get(&devc->cfg_limits,
964 SR_CONF_LIMIT_SAMPLES, &data);
965 if (ret != SR_OK)
966 return ret;
967 total = g_variant_get_uint64(data);
968 g_variant_unref(data);
969
970 sr_sw_limits_init(limits);
971 if (total) {
972 data = g_variant_new_uint64(total);
973 ret = sr_sw_limits_config_set(limits,
974 SR_CONF_LIMIT_SAMPLES, data);
975 g_variant_unref(data);
976 if (ret != SR_OK)
977 return ret;
978 }
979
980 sr_sw_limits_acquisition_start(limits);
981
982 return SR_OK;
983}
984
985static void free_submit_buffer(struct dev_context *devc)
986{
987 struct submit_buffer *buffer;
988
989 if (!devc)
990 return;
991
992 buffer = devc->buffer;
993 if (!buffer)
994 return;
995 devc->buffer = NULL;
996
997 g_free(buffer->sample_data);
998 g_free(buffer);
999}
1000
1001static int flush_submit_buffer(struct dev_context *devc)
1002{
1003 struct submit_buffer *buffer;
1004 int ret;
1005
1006 buffer = devc->buffer;
1007
1008 /* Is queued sample data available? */
1009 if (!buffer->curr_samples)
1010 return SR_OK;
1011
1012 /* Submit to the session feed. */
1013 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
1014 ret = sr_session_send(buffer->sdi, &buffer->packet);
1015 if (ret != SR_OK)
1016 return ret;
1017
1018 /* Rewind queue position. */
1019 buffer->curr_samples = 0;
1020 buffer->write_pointer = buffer->sample_data;
1021
1022 return SR_OK;
1023}
1024
1025static int addto_submit_buffer(struct dev_context *devc,
1026 uint16_t sample, size_t count)
1027{
1028 struct submit_buffer *buffer;
1029 struct sr_sw_limits *limits;
1030 int ret;
1031
1032 buffer = devc->buffer;
1033 limits = &devc->feed_limits;
1034 if (sr_sw_limits_check(limits))
1035 count = 0;
1036
1037 /*
1038 * Individually accumulate and check each sample, such that
1039 * accumulation between flushes won't exceed local storage, and
1040 * enforcement of user specified limits is exact.
1041 */
1042 while (count--) {
1043 write_u16le_inc(&buffer->write_pointer, sample);
1044 buffer->curr_samples++;
1045 if (buffer->curr_samples == buffer->max_samples) {
1046 ret = flush_submit_buffer(devc);
1047 if (ret != SR_OK)
1048 return ret;
1049 }
1050 sr_sw_limits_update_samples_read(limits, 1);
1051 if (sr_sw_limits_check(limits))
1052 break;
1053 }
1054
1055 return SR_OK;
1056}
1057
1058/*
1059 * In 100 and 200 MHz modes, only a single pin's rising or falling edge
1060 * can be set as trigger. In other modes, two rising/falling triggers
1061 * can be set, in addition to a value/mask trigger for any number of channels.
1062 *
1063 * The Sigma supports complex triggers using boolean expressions, but this
1064 * has not been implemented yet.
1065 */
1066SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
1067{
1068 struct dev_context *devc;
1069 struct sr_trigger *trigger;
1070 struct sr_trigger_stage *stage;
1071 struct sr_trigger_match *match;
1072 const GSList *l, *m;
1073 int channelbit, trigger_set;
1074
1075 devc = sdi->priv;
1076 memset(&devc->trigger, 0, sizeof(devc->trigger));
1077 trigger = sr_session_trigger_get(sdi->session);
1078 if (!trigger)
1079 return SR_OK;
1080
1081 trigger_set = 0;
1082 for (l = trigger->stages; l; l = l->next) {
1083 stage = l->data;
1084 for (m = stage->matches; m; m = m->next) {
1085 match = m->data;
1086 /* Ignore disabled channels with a trigger. */
1087 if (!match->channel->enabled)
1088 continue;
1089 channelbit = 1 << match->channel->index;
1090 if (devc->samplerate >= SR_MHZ(100)) {
1091 /* Fast trigger support. */
1092 if (trigger_set) {
1093 sr_err("100/200MHz modes limited to single trigger pin.");
1094 return SR_ERR;
1095 }
1096 if (match->match == SR_TRIGGER_FALLING) {
1097 devc->trigger.fallingmask |= channelbit;
1098 } else if (match->match == SR_TRIGGER_RISING) {
1099 devc->trigger.risingmask |= channelbit;
1100 } else {
1101 sr_err("100/200MHz modes limited to edge trigger.");
1102 return SR_ERR;
1103 }
1104
1105 trigger_set++;
1106 } else {
1107 /* Simple trigger support (event). */
1108 if (match->match == SR_TRIGGER_ONE) {
1109 devc->trigger.simplevalue |= channelbit;
1110 devc->trigger.simplemask |= channelbit;
1111 } else if (match->match == SR_TRIGGER_ZERO) {
1112 devc->trigger.simplevalue &= ~channelbit;
1113 devc->trigger.simplemask |= channelbit;
1114 } else if (match->match == SR_TRIGGER_FALLING) {
1115 devc->trigger.fallingmask |= channelbit;
1116 trigger_set++;
1117 } else if (match->match == SR_TRIGGER_RISING) {
1118 devc->trigger.risingmask |= channelbit;
1119 trigger_set++;
1120 }
1121
1122 /*
1123 * Actually, Sigma supports 2 rising/falling triggers,
1124 * but they are ORed and the current trigger syntax
1125 * does not permit ORed triggers.
1126 */
1127 if (trigger_set > 1) {
1128 sr_err("Limited to 1 edge trigger.");
1129 return SR_ERR;
1130 }
1131 }
1132 }
1133 }
1134
1135 return SR_OK;
1136}
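
/*
 * Example mapping (illustration only): a trigger spec of "channel 3
 * is high, channel 5 has a rising edge" at 50MHz or below results in
 * simplevalue = simplemask = 0x0008 for the level condition and
 * risingmask = 0x0020 for the edge condition, which the LUT setup
 * below translates to hardware register values.
 */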
1137
1138/* Software trigger to determine exact trigger position. */
1139static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
1140 struct sigma_trigger *t)
1141{
1142 const uint8_t *rdptr;
1143 int i;
1144 uint16_t sample;
1145
1146 rdptr = samples;
1147 sample = 0;
1148 for (i = 0; i < 8; i++) {
1149 if (i > 0)
1150 last_sample = sample;
1151 sample = read_u16le_inc(&rdptr);
1152
1153 /* Simple triggers. */
1154 if ((sample & t->simplemask) != t->simplevalue)
1155 continue;
1156
1157 /* Rising edge. */
1158 if (((last_sample & t->risingmask) != 0) ||
1159 ((sample & t->risingmask) != t->risingmask))
1160 continue;
1161
1162 /* Falling edge. */
1163 if ((last_sample & t->fallingmask) != t->fallingmask ||
1164 (sample & t->fallingmask) != 0)
1165 continue;
1166
1167 break;
1168 }
1169
1170 /* If we did not match, return original trigger pos. */
1171 return i & 0x7;
1172}
1173
1174static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
1175{
1176 /* TODO
1177 * Check whether the combination of this very sample and the
1178 * previous state match the configured trigger condition. This
1179 * improves the resolution of the trigger marker's position.
1180 * The hardware provided position is coarse, and may point to
1181 * a position before the actual match.
1182 *
1183 * See the previous get_trigger_offset() implementation. This
1184 * code needs to get re-used here.
1185 */
1186 (void)devc;
1187 (void)sample;
1188 (void)get_trigger_offset;
1189
1190 return FALSE;
1191}
1192
1193static int check_and_submit_sample(struct dev_context *devc,
1194 uint16_t sample, size_t count, gboolean check_trigger)
1195{
1196 gboolean triggered;
1197 int ret;
1198
1199 triggered = check_trigger && sample_matches_trigger(devc, sample);
1200 if (triggered) {
1201 ret = flush_submit_buffer(devc);
1202 if (ret != SR_OK)
1203 return ret;
1204 ret = std_session_send_df_trigger(devc->buffer->sdi);
1205 if (ret != SR_OK)
1206 return ret;
1207 }
1208
1209 ret = addto_submit_buffer(devc, sample, count);
1210 if (ret != SR_OK)
1211 return ret;
1212
1213 return SR_OK;
1214}
1215
1216/*
1217 * Return the timestamp of a DRAM cluster.
1218 */
1219static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1220{
1221 return read_u16le((const uint8_t *)&cluster->timestamp);
1222}
1223
1224/*
1225 * Return one 16bit data entity of a DRAM cluster at the specified index.
1226 */
1227static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1228{
1229 return read_u16le((const uint8_t *)&cl->samples[idx]);
1230}
1231
1232/*
1233 * Deinterlace sample data that was retrieved at 100MHz samplerate.
1234 * One 16bit item contains two samples of 8bits each. The bits of
1235 * multiple samples are interleaved.
1236 */
1237static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx)
1238{
1239 uint16_t outdata;
1240
1241 indata >>= idx;
1242 outdata = 0;
1243 outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
1244 outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
1245 outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
1246 outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
1247 outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
1248 outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
1249 outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
1250 outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
1251 return outdata;
1252}
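
/*
 * In other words (derived from the shifts above): at 100MHz, sample i
 * (i in 0..1) of a 16bit item occupies bits i, i+2, i+4, ..., i+14,
 * and sigma_deinterlace_100mhz_data(item, i) gathers them into a
 * contiguous 8bit sample.
 */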
1253
1254/*
1255 * Deinterlace sample data that was retrieved at 200MHz samplerate.
1256 * One 16bit item contains four samples of 4bits each. The bits of
1257 * multiple samples are interleaved.
1258 */
1259static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx)
1260{
1261 uint16_t outdata;
1262
1263 indata >>= idx;
1264 outdata = 0;
1265 outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
1266 outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
1267 outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
1268 outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
1269 return outdata;
1270}
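
/*
 * Correspondingly at 200MHz: sample i (i in 0..3) of a 16bit item
 * occupies bits i, i+4, i+8, and i+12, which the routine above
 * gathers into a contiguous 4bit sample.
 */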
1271
1272static void sigma_decode_dram_cluster(struct dev_context *devc,
1273 struct sigma_dram_cluster *dram_cluster,
1274 size_t events_in_cluster, gboolean triggered)
1275{
1276 struct sigma_state *ss;
1277 uint16_t tsdiff, ts, sample, item16;
1278 unsigned int i;
1279
1280 if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
1281 triggered = FALSE;
1282
1283 /*
1284 * If this cluster is not adjacent to the previously received
1285 * cluster, then send the appropriate number of samples with the
1286 * previous values to the sigrok session. This "decodes RLE".
1287 *
1288 * These samples cannot match the trigger since they just repeat
1289 * the previously submitted data pattern. (This assumption holds
1290 * for simple level and edge triggers. It would not for timed or
1291 * counted conditions, which currently are not supported.)
1292 */
1293 ss = &devc->state;
1294 ts = sigma_dram_cluster_ts(dram_cluster);
1295 tsdiff = ts - ss->lastts;
1296 if (tsdiff > 0) {
1297 size_t count;
1298 sample = ss->lastsample;
1299 count = tsdiff * devc->samples_per_event;
1300 (void)check_and_submit_sample(devc, sample, count, FALSE);
1301 }
1302 ss->lastts = ts + EVENTS_PER_CLUSTER;
1303
1304 /*
1305 * Grab sample data from the current cluster and prepare their
1306 * submission to the session feed. Handle samplerate dependent
1307 * memory layout of sample data. Accumulation of data chunks
1308 * before submission is transparent to this code path, specific
1309 * buffer depth is neither assumed nor required here.
1310 */
1311 sample = 0;
1312 for (i = 0; i < events_in_cluster; i++) {
1313 item16 = sigma_dram_cluster_data(dram_cluster, i);
1314 if (devc->samplerate == SR_MHZ(200)) {
1315 sample = sigma_deinterlace_200mhz_data(item16, 0);
1316 check_and_submit_sample(devc, sample, 1, triggered);
1317 sample = sigma_deinterlace_200mhz_data(item16, 1);
1318 check_and_submit_sample(devc, sample, 1, triggered);
1319 sample = sigma_deinterlace_200mhz_data(item16, 2);
1320 check_and_submit_sample(devc, sample, 1, triggered);
1321 sample = sigma_deinterlace_200mhz_data(item16, 3);
1322 check_and_submit_sample(devc, sample, 1, triggered);
1323 } else if (devc->samplerate == SR_MHZ(100)) {
1324 sample = sigma_deinterlace_100mhz_data(item16, 0);
1325 check_and_submit_sample(devc, sample, 1, triggered);
1326 sample = sigma_deinterlace_100mhz_data(item16, 1);
1327 check_and_submit_sample(devc, sample, 1, triggered);
1328 } else {
1329 sample = item16;
1330 check_and_submit_sample(devc, sample, 1, triggered);
1331 }
1332 }
1333 ss->lastsample = sample;
1334}
1335
1336/*
1337 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
1338 * Each event is 20ns apart, and can contain multiple samples.
1339 *
1340 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
1341 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
1342 * For 50 MHz and below, events contain one sample for each channel,
1343 * spread 20 ns apart.
1344 */
1345static int decode_chunk_ts(struct dev_context *devc,
1346 struct sigma_dram_line *dram_line,
1347 size_t events_in_line, size_t trigger_event)
1348{
1349 struct sigma_dram_cluster *dram_cluster;
1350 unsigned int clusters_in_line;
1351 unsigned int events_in_cluster;
1352 unsigned int i;
1353 uint32_t trigger_cluster;
1354
1355 clusters_in_line = events_in_line;
1356 clusters_in_line += EVENTS_PER_CLUSTER - 1;
1357 clusters_in_line /= EVENTS_PER_CLUSTER;
1358 trigger_cluster = ~0;
1359
1360 /* Check if trigger is in this chunk. */
1361 if (trigger_event < EVENTS_PER_ROW) {
1362 if (devc->samplerate <= SR_MHZ(50)) {
1363 trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
1364 trigger_event);
1365 }
1366
1367 /* Find in which cluster the trigger occurred. */
1368 trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;
1369 }
1370
1371 /* For each full DRAM cluster. */
1372 for (i = 0; i < clusters_in_line; i++) {
1373 dram_cluster = &dram_line->cluster[i];
1374
1375 /* The last cluster might not be full. */
1376 if ((i == clusters_in_line - 1) &&
1377 (events_in_line % EVENTS_PER_CLUSTER)) {
1378 events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
1379 } else {
1380 events_in_cluster = EVENTS_PER_CLUSTER;
1381 }
1382
1383 sigma_decode_dram_cluster(devc, dram_cluster,
1384 events_in_cluster, i == trigger_cluster);
1385 }
1386
1387 return SR_OK;
1388}
1389
1390static int download_capture(struct sr_dev_inst *sdi)
1391{
1392 const uint32_t chunks_per_read = 32;
1393
1394 struct dev_context *devc;
1395 struct sigma_dram_line *dram_line;
1396 uint32_t stoppos, triggerpos;
1397 uint8_t modestatus;
1398 uint32_t i;
1399 uint32_t dl_lines_total, dl_lines_curr, dl_lines_done;
1400 uint32_t dl_first_line, dl_line;
1401 uint32_t dl_events_in_line, trigger_event;
1402 uint32_t trg_line, trg_event;
1403 int ret;
1404
1405 devc = sdi->priv;
1406
1407 sr_info("Downloading sample data.");
1408 devc->state.state = SIGMA_DOWNLOAD;
1409
1410 /*
1411 * Ask the hardware to stop data acquisition. Reception of the
1412 * FORCESTOP request makes the hardware "disable RLE" (store
1413 * clusters to DRAM regardless of whether pin state changes) and
1414 * raise the POSTTRIGGERED flag.
1415 */
1416 modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
1417 ret = sigma_set_register(devc, WRITE_MODE, modestatus);
1418 if (ret != SR_OK)
1419 return ret;
1420 do {
1421 ret = sigma_read_register(devc, READ_MODE,
1422 &modestatus, sizeof(modestatus));
1423 if (ret != SR_OK) {
1424 sr_err("Could not poll for post-trigger state.");
1425 return FALSE;
1426 }
1427 } while (!(modestatus & RMR_POSTTRIGGERED));
1428
1429 /* Set SDRAM Read Enable. */
1430 ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
1431 if (ret != SR_OK)
1432 return ret;
1433
1434 /* Get the current position. Check if trigger has fired. */
1435 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
1436 if (ret != SR_OK) {
1437 sr_err("Could not query capture positions/state.");
1438 return FALSE;
1439 }
1440 trg_line = ~0;
1441 trg_event = ~0;
1442 if (modestatus & RMR_TRIGGERED) {
1443 trg_line = triggerpos >> ROW_SHIFT;
1444 trg_event = triggerpos & ROW_MASK;
1445 }
1446
1447 /*
1448 * Determine how many "DRAM lines" of 1024 bytes each we need to
1449 * retrieve from the Sigma hardware, so that we have a complete
1450 * set of samples. Note that the last line need not contain 64
1451 * clusters; it might only be partially filled.
1452 *
1453 * When RMR_ROUND is set, the circular buffer in DRAM has wrapped
1454 * around. Since the status of the very next line is uncertain in
1455 * that case, we skip it and start reading from the next line.
1456 */
1457 dl_first_line = 0;
1458 dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
1459 if (modestatus & RMR_ROUND) {
1460 dl_first_line = dl_lines_total + 1;
1461 dl_lines_total = ROW_COUNT - 2;
1462 }
1463 dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
1464 if (!dram_line)
1465 return FALSE;
1466 ret = alloc_submit_buffer(sdi);
1467 if (ret != SR_OK)
1468 return FALSE;
1469 ret = setup_submit_limit(devc);
1470 if (ret != SR_OK)
1471 return FALSE;
1472 dl_lines_done = 0;
1473 while (dl_lines_total > dl_lines_done) {
1474 /* We can download only up-to 32 DRAM lines in one go! */
1475 dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
1476
1477 dl_line = dl_first_line + dl_lines_done;
1478 dl_line %= ROW_COUNT;
1479 ret = sigma_read_dram(devc, dl_line, dl_lines_curr,
1480 (uint8_t *)dram_line);
1481 if (ret != SR_OK)
1482 return FALSE;
1483
1484 /* This is the first DRAM line, so find the initial timestamp. */
1485 if (dl_lines_done == 0) {
1486 devc->state.lastts =
1487 sigma_dram_cluster_ts(&dram_line[0].cluster[0]);
1488 devc->state.lastsample = 0;
1489 }
1490
1491 for (i = 0; i < dl_lines_curr; i++) {
1492 /* The last "DRAM line" need not span its full length. */
1493 dl_events_in_line = EVENTS_PER_ROW;
1494 if (dl_lines_done + i == dl_lines_total - 1)
1495 dl_events_in_line = stoppos & ROW_MASK;
1496
1497 /* Test if the trigger happened on this line. */
1498 trigger_event = ~0;
1499 if (dl_lines_done + i == trg_line)
1500 trigger_event = trg_event;
1501
1502 decode_chunk_ts(devc, dram_line + i,
1503 dl_events_in_line, trigger_event);
1504 }
1505
1506 dl_lines_done += dl_lines_curr;
1507 }
1508 flush_submit_buffer(devc);
1509 free_submit_buffer(devc);
1510 g_free(dram_line);
1511
1512 std_session_send_df_end(sdi);
1513
1514 devc->state.state = SIGMA_IDLE;
1515 sr_dev_acquisition_stop(sdi);
1516
1517 return TRUE;
1518}
1519
1520/*
1521 * Periodically check the Sigma status when in CAPTURE mode. This routine
1522 * checks whether the configured sample count or sample time have passed,
1523 * and will stop acquisition and download the acquired samples.
1524 */
1525static int sigma_capture_mode(struct sr_dev_inst *sdi)
1526{
1527 struct dev_context *devc;
1528
1529 devc = sdi->priv;
1530 if (sr_sw_limits_check(&devc->acq_limits))
1531 return download_capture(sdi);
1532
1533 return TRUE;
1534}
1535
1536SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
1537{
1538 struct sr_dev_inst *sdi;
1539 struct dev_context *devc;
1540
1541 (void)fd;
1542 (void)revents;
1543
1544 sdi = cb_data;
1545 devc = sdi->priv;
1546
1547 if (devc->state.state == SIGMA_IDLE)
1548 return TRUE;
1549
1550 /*
1551 * When the application has requested to stop the acquisition,
1552 * then immediately start downloading sample data. Otherwise
1553 * keep checking configured limits which will terminate the
1554 * acquisition and initiate download.
1555 */
1556 if (devc->state.state == SIGMA_STOPPING)
1557 return download_capture(sdi);
1558 if (devc->state.state == SIGMA_CAPTURE)
1559 return sigma_capture_mode(sdi);
1560
1561 return TRUE;
1562}
1563
1564/* Build a LUT entry used by the trigger functions. */
1565static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
1566{
1567 int i, j, k, bit;
1568
1569 /* For each quad channel. */
1570 for (i = 0; i < 4; i++) {
1571 entry[i] = 0xffff;
1572
1573 /* For each bit in LUT. */
1574 for (j = 0; j < 16; j++) {
1575
1576 /* For each channel in quad. */
1577 for (k = 0; k < 4; k++) {
1578 bit = 1 << (i * 4 + k);
1579
1580 /* Set bit in entry */
1581 if ((mask & bit) && ((!(value & bit)) !=
1582 (!(j & (1 << k)))))
1583 entry[i] &= ~(1 << j);
1584 }
1585 }
1586 }
1587}
1588
1589/* Add a logical function to LUT mask. */
1590static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
1591 int index, int neg, uint16_t *mask)
1592{
1593 int i, j;
1594 int x[2][2], tmp, a, b, aset, bset, rset;
1595
1596 memset(x, 0, sizeof(x));
1597
1598 /* Trigger detect condition. */
1599 switch (oper) {
1600 case OP_LEVEL:
1601 x[0][1] = 1;
1602 x[1][1] = 1;
1603 break;
1604 case OP_NOT:
1605 x[0][0] = 1;
1606 x[1][0] = 1;
1607 break;
1608 case OP_RISE:
1609 x[0][1] = 1;
1610 break;
1611 case OP_FALL:
1612 x[1][0] = 1;
1613 break;
1614 case OP_RISEFALL:
1615 x[0][1] = 1;
1616 x[1][0] = 1;
1617 break;
1618 case OP_NOTRISE:
1619 x[1][1] = 1;
1620 x[0][0] = 1;
1621 x[1][0] = 1;
1622 break;
1623 case OP_NOTFALL:
1624 x[1][1] = 1;
1625 x[0][0] = 1;
1626 x[0][1] = 1;
1627 break;
1628 case OP_NOTRISEFALL:
1629 x[1][1] = 1;
1630 x[0][0] = 1;
1631 break;
1632 }
1633
1634 /* Transpose if neg is set. */
1635 if (neg) {
1636 for (i = 0; i < 2; i++) {
1637 for (j = 0; j < 2; j++) {
1638 tmp = x[i][j];
1639 x[i][j] = x[1 - i][1 - j];
1640 x[1 - i][1 - j] = tmp;
1641 }
1642 }
1643 }
1644
1645 /* Update mask with function. */
1646 for (i = 0; i < 16; i++) {
1647 a = (i >> (2 * index + 0)) & 1;
1648 b = (i >> (2 * index + 1)) & 1;
1649
1650 aset = (*mask >> i) & 1;
1651 bset = x[b][a];
1652
1653 rset = 0;
1654 if (func == FUNC_AND || func == FUNC_NAND)
1655 rset = aset & bset;
1656 else if (func == FUNC_OR || func == FUNC_NOR)
1657 rset = aset | bset;
1658 else if (func == FUNC_XOR || func == FUNC_NXOR)
1659 rset = aset ^ bset;
1660
1661 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
1662 rset = !rset;
1663
1664 *mask &= ~(1 << i);
1665
1666 if (rset)
1667 *mask |= 1 << i;
1668 }
1669}
1670
1671/*
1672 * Build trigger LUTs used by 50 MHz and lower sample rates for supporting
1673 * simple pin change and state triggers. Only two transitions (rise/fall) can be
1674 * set at any time, but a full mask and value can be set (0/1).
1675 */
1676SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
1677 struct triggerlut *lut)
1678{
1679	int i, j;
1680 uint16_t masks[2];
1681
1682 memset(lut, 0, sizeof(*lut));
1683 memset(&masks, 0, sizeof(masks));
1684
1685 /* Constant for simple triggers. */
1686 lut->m4 = 0xa000;
1687
1688 /* Value/mask trigger support. */
1689 build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
1690 lut->m2d);
1691
1692 /* Rise/fall trigger support. */
1693 for (i = 0, j = 0; i < 16; i++) {
1694 if (devc->trigger.risingmask & (1 << i) ||
1695 devc->trigger.fallingmask & (1 << i))
1696 masks[j++] = 1 << i;
1697 }
1698
1699 build_lut_entry(masks[0], masks[0], lut->m0d);
1700 build_lut_entry(masks[1], masks[1], lut->m1d);
1701
1702 /* Add glue logic */
1703 if (masks[0] || masks[1]) {
1704 /* Transition trigger. */
1705 if (masks[0] & devc->trigger.risingmask)
1706 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
1707 if (masks[0] & devc->trigger.fallingmask)
1708 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
1709 if (masks[1] & devc->trigger.risingmask)
1710 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
1711 if (masks[1] & devc->trigger.fallingmask)
1712 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
1713 } else {
1714 /* Only value/mask trigger. */
1715 lut->m3 = 0xffff;
1716 }
1717
1718 /* Triggertype: event. */
1719 lut->params.selres = 3;
1720
1721 return SR_OK;
1722}