]> sigrok.org Git - libsigrok.git/blame_incremental - src/hardware/asix-sigma/protocol.c
asix-sigma: rephrase firmware dependent param upload at acquisition start
[libsigrok.git] / src / hardware / asix-sigma / protocol.c
... / ...
CommitLineData
1/*
2 * This file is part of the libsigrok project.
3 *
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23/*
24 * ASIX SIGMA/SIGMA2 logic analyzer driver
25 */
26
27#include <config.h>
28#include "protocol.h"
29
/*
 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
 * (by means of separate firmware images). As well as 50MHz divided by
 * an integer divider in the 1..256 range (by the "typical" firmware).
 * Which translates to a strict lower boundary of around 195kHz.
 *
 * This driver "suggests" a subset of the available rates by listing a
 * few discrete values, while setter routines accept any user specified
 * rate that is supported by the hardware. (Values are kept in ascending
 * order; see sigma_normalize_samplerate() for the acceptance logic.)
 */
static const uint64_t samplerates[] = {
	/* 50MHz and integer divider. 1/2/5 steps (where possible). */
	SR_KHZ(200), SR_KHZ(500),
	SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
	SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
	/* 100MHz/200MHz, fixed rates in special firmware. */
	SR_MHZ(100), SR_MHZ(200),
};
48
49SR_PRIV GVariant *sigma_get_samplerates_list(void)
50{
51 return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates));
52}
53
/*
 * On-disk firmware image names, indexed by enum sigma_firmware_idx.
 * Each image implements one mode of operation (see inline comments).
 */
static const char *firmware_files[] = {
	[SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
	[SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
	[SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
	[SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
	[SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
};
61
62#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
63
64static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
65{
66 struct dev_context *devc;
67 int vid, pid;
68 const char *serno;
69 int ret;
70
71 devc = sdi->priv;
72 if (!devc)
73 return SR_ERR_ARG;
74
75 if (devc->ftdi.is_open)
76 return SR_OK;
77
78 vid = devc->id.vid;
79 pid = devc->id.pid;
80 serno = sdi->serial_num;
81 if (!vid || !pid || !serno || !*serno)
82 return SR_ERR_ARG;
83
84 ret = ftdi_init(&devc->ftdi.ctx);
85 if (ret < 0) {
86 sr_err("Cannot initialize FTDI context (%d): %s.",
87 ret, ftdi_get_error_string(&devc->ftdi.ctx));
88 return SR_ERR_IO;
89 }
90 ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
91 vid, pid, NULL, serno, 0);
92 if (ret < 0) {
93 sr_err("Cannot open device (%d): %s.",
94 ret, ftdi_get_error_string(&devc->ftdi.ctx));
95 return SR_ERR_IO;
96 }
97 devc->ftdi.is_open = TRUE;
98
99 return SR_OK;
100}
101
102static int sigma_ftdi_close(struct dev_context *devc)
103{
104 int ret;
105
106 ret = ftdi_usb_close(&devc->ftdi.ctx);
107 devc->ftdi.is_open = FALSE;
108 devc->ftdi.must_close = FALSE;
109 ftdi_deinit(&devc->ftdi.ctx);
110
111 return ret == 0 ? SR_OK : SR_ERR_IO;
112}
113
114SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
115{
116 struct dev_context *devc;
117 int ret;
118
119 if (!sdi)
120 return SR_ERR_ARG;
121 devc = sdi->priv;
122 if (!devc)
123 return SR_ERR_ARG;
124
125 if (devc->ftdi.is_open)
126 return SR_OK;
127
128 ret = sigma_ftdi_open(sdi);
129 if (ret != SR_OK)
130 return ret;
131 devc->ftdi.must_close = TRUE;
132
133 return ret;
134}
135
136SR_PRIV int sigma_check_close(struct dev_context *devc)
137{
138 int ret;
139
140 if (!devc)
141 return SR_ERR_ARG;
142
143 if (devc->ftdi.must_close) {
144 ret = sigma_ftdi_close(devc);
145 if (ret != SR_OK)
146 return ret;
147 devc->ftdi.must_close = FALSE;
148 }
149
150 return SR_OK;
151}
152
153SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
154{
155 struct dev_context *devc;
156 int ret;
157
158 if (!sdi)
159 return SR_ERR_ARG;
160 devc = sdi->priv;
161 if (!devc)
162 return SR_ERR_ARG;
163
164 ret = sigma_ftdi_open(sdi);
165 if (ret != SR_OK)
166 return ret;
167 devc->ftdi.must_close = FALSE;
168
169 return SR_OK;
170}
171
/* Unconditionally close the cable, counterpart of sigma_force_open(). */
SR_PRIV int sigma_force_close(struct dev_context *devc)
{
	return sigma_ftdi_close(devc);
}
176
177/*
178 * BEWARE! Error propagation is important, as are kinds of return values.
179 *
 180 * - Raw USB transport communicates the number of sent or received bytes,
181 * or negative error codes in the external library's(!) range of codes.
182 * - Internal routines at the "sigrok driver level" communicate success
183 * or failure in terms of SR_OK et al error codes.
184 * - Main loop style receive callbacks communicate booleans which arrange
185 * for repeated calls to drive progress during acquisition.
186 *
187 * Careful consideration by maintainers is essential, because all of the
 188 * above kinds of values are assignment compatible from the compiler's
189 * point of view. Implementation errors will go unnoticed at build time.
190 */
191
192static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
193{
194 int ret;
195
196 ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
197 if (ret < 0) {
198 sr_err("USB data read failed: %s",
199 ftdi_get_error_string(&devc->ftdi.ctx));
200 }
201
202 return ret;
203}
204
205static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
206{
207 int ret;
208
209 ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
210 if (ret < 0) {
211 sr_err("USB data write failed: %s",
212 ftdi_get_error_string(&devc->ftdi.ctx));
213 } else if ((size_t)ret != size) {
214 sr_err("USB data write length mismatch.");
215 }
216
217 return ret;
218}
219
220static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
221{
222 int ret;
223
224 ret = sigma_read_raw(devc, buf, size);
225 if (ret < 0 || (size_t)ret != size)
226 return SR_ERR_IO;
227
228 return SR_OK;
229}
230
231static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
232{
233 int ret;
234
235 ret = sigma_write_raw(devc, buf, size);
236 if (ret < 0 || (size_t)ret != size)
237 return SR_ERR_IO;
238
239 return SR_OK;
240}
241
242/*
243 * Implementor's note: The local write buffer's size shall suffice for
 244 * any known FPGA register transaction that is involved in the supported
245 * feature set of this sigrok device driver. If the length check trips,
246 * that's a programmer's error and needs adjustment in the complete call
247 * stack of the respective code path.
248 */
249SR_PRIV int sigma_write_register(struct dev_context *devc,
250 uint8_t reg, uint8_t *data, size_t len)
251{
252 uint8_t buf[80], *wrptr;
253 size_t idx;
254
255 if (2 + 2 * len > sizeof(buf)) {
256 sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
257 return SR_ERR_BUG;
258 }
259
260 wrptr = buf;
261 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
262 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
263 for (idx = 0; idx < len; idx++) {
264 write_u8_inc(&wrptr, REG_DATA_LOW | (data[idx] & 0xf));
265 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data[idx] >> 4));
266 }
267
268 return sigma_write_sr(devc, buf, wrptr - buf);
269}
270
/* Convenience wrapper, write a single byte value to an FPGA register. */
SR_PRIV int sigma_set_register(struct dev_context *devc,
	uint8_t reg, uint8_t value)
{
	return sigma_write_register(devc, reg, &value, sizeof(value));
}
276
277static int sigma_read_register(struct dev_context *devc,
278 uint8_t reg, uint8_t *data, size_t len)
279{
280 uint8_t buf[3], *wrptr;
281 int ret;
282
283 wrptr = buf;
284 write_u8_inc(&wrptr, REG_ADDR_LOW | (reg & 0xf));
285 write_u8_inc(&wrptr, REG_ADDR_HIGH | (reg >> 4));
286 write_u8_inc(&wrptr, REG_READ_ADDR);
287 ret = sigma_write_sr(devc, buf, wrptr - buf);
288 if (ret != SR_OK)
289 return ret;
290
291 return sigma_read_sr(devc, data, len);
292}
293
/*
 * Read the current stop and trigger positions plus the mode register
 * from the hardware. All output parameters are optional, pass NULL to
 * ignore individual values. Returns SR_OK or an SR_ERR_* code.
 */
static int sigma_read_pos(struct dev_context *devc,
	uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
{
	/*
	 * Read 7 registers starting at trigger position LSB.
	 * Which yields two 24bit counter values, and mode flags.
	 */
	const uint8_t buf[] = {
		/* Setup first register address. */
		REG_ADDR_LOW | READ_TRIGGER_POS_LOW,
		/* Retrieve trigger position. */
		REG_READ_ADDR | REG_ADDR_INC,
		REG_READ_ADDR | REG_ADDR_INC,
		REG_READ_ADDR | REG_ADDR_INC,
		/* Retrieve stop position. */
		REG_READ_ADDR | REG_ADDR_INC,
		REG_READ_ADDR | REG_ADDR_INC,
		REG_READ_ADDR | REG_ADDR_INC,
		/* Retrieve mode register. */
		REG_READ_ADDR | REG_ADDR_INC,
	}, *rdptr;
	uint8_t result[7];
	uint32_t v32;
	uint8_t v8;
	int ret;

	/* Send all read requests in a single USB transfer. */
	ret = sigma_write_sr(devc, buf, sizeof(buf));
	if (ret != SR_OK)
		return ret;

	ret = sigma_read_sr(devc, result, sizeof(result));
	if (ret != SR_OK)
		return ret;

	/* Positions are 24bit little endian, mode is a single byte. */
	rdptr = &result[0];
	v32 = read_u24le_inc(&rdptr);
	if (triggerpos)
		*triggerpos = v32;
	v32 = read_u24le_inc(&rdptr);
	if (stoppos)
		*stoppos = v32;
	v8 = read_u8_inc(&rdptr);
	if (mode)
		*mode = v8;

	/*
	 * These positions consist of "the memory row" in the MSB fields,
	 * and "an event index" within the row in the LSB fields. Part
	 * of the memory row's content is sample data, another part is
	 * timestamps.
	 *
	 * The retrieved register values point to after the captured
	 * position. So they need to get decremented, and adjusted to
	 * cater for the timestamps when the decrement carries over to
	 * a different memory row.
	 */
	if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
		*stoppos -= CLUSTERS_PER_ROW;
	if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
		*triggerpos -= CLUSTERS_PER_ROW;

	return SR_OK;
}
357
/*
 * Retrieve a given number of DRAM rows ("chunks"), starting at the
 * given row, into the caller provided buffer. The 'data' buffer must
 * span numchunks * ROW_LENGTH_BYTES bytes.
 */
static int sigma_read_dram(struct dev_context *devc,
	uint16_t startchunk, size_t numchunks, uint8_t *data)
{
	uint8_t buf[128], *wrptr;
	size_t chunk;
	int sel, ret;
	gboolean is_last;

	/* Worst case: two setup bytes plus three command bytes per chunk. */
	if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
		sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
		return SR_ERR_BUG;
	}

	/* Communicate DRAM start address (memory row, aka samples line). */
	wrptr = buf;
	write_u8_inc(&wrptr, startchunk >> 8);
	write_u8_inc(&wrptr, startchunk & 0xff);
	ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	/*
	 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
	 * then transfer via USB. Interleave the FPGA's DRAM access and
	 * USB transfer, use alternating buffers (0/1) in the process.
	 * The last chunk only drains the final buffer, it issues no
	 * further fetch nor waits for another acknowledge.
	 */
	wrptr = buf;
	write_u8_inc(&wrptr, REG_DRAM_BLOCK);
	write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	for (chunk = 0; chunk < numchunks; chunk++) {
		sel = chunk % 2;
		is_last = chunk == numchunks - 1;
		if (!is_last)
			write_u8_inc(&wrptr, REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel));
		write_u8_inc(&wrptr, REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel));
		if (!is_last)
			write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	}
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
}
402
/*
 * Upload trigger look-up tables to Sigma. The 16 entries of the LUTs
 * get transposed into the hardware's wire format and written one LUT
 * address at a time, then the trigger parameters are sent in a single
 * register transfer.
 */
SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
	struct triggerlut *lut)
{
	int i;
	uint8_t tmp[2];
	uint16_t bit;
	uint8_t buf[6], *wrptr, regval;
	int ret;

	/* Transpose the table and send to Sigma. */
	for (i = 0; i < 16; i++) {
		bit = 1 << i;

		/* Collect the individual LUTs' output bits for address 'i'. */
		tmp[0] = tmp[1] = 0;

		if (lut->m2d[0] & bit)
			tmp[0] |= 0x01;
		if (lut->m2d[1] & bit)
			tmp[0] |= 0x02;
		if (lut->m2d[2] & bit)
			tmp[0] |= 0x04;
		if (lut->m2d[3] & bit)
			tmp[0] |= 0x08;

		if (lut->m3 & bit)
			tmp[0] |= 0x10;
		if (lut->m3s & bit)
			tmp[0] |= 0x20;
		if (lut->m4 & bit)
			tmp[0] |= 0x40;

		if (lut->m0d[0] & bit)
			tmp[1] |= 0x01;
		if (lut->m0d[1] & bit)
			tmp[1] |= 0x02;
		if (lut->m0d[2] & bit)
			tmp[1] |= 0x04;
		if (lut->m0d[3] & bit)
			tmp[1] |= 0x08;

		if (lut->m1d[0] & bit)
			tmp[1] |= 0x10;
		if (lut->m1d[1] & bit)
			tmp[1] |= 0x20;
		if (lut->m1d[2] & bit)
			tmp[1] |= 0x40;
		if (lut->m1d[3] & bit)
			tmp[1] |= 0x80;

		/*
		 * This logic seems redundant, but separates the value
		 * determination from the wire format, and is useful
		 * during future maintenance and research.
		 */
		wrptr = buf;
		write_u8_inc(&wrptr, tmp[0]);
		write_u8_inc(&wrptr, tmp[1]);
		ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT,
			buf, wrptr - buf);
		if (ret != SR_OK)
			return ret;
		/* Latch the value into LUT address 'i'. */
		ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2,
			TRGSEL2_RESET | TRGSEL2_LUT_WRITE |
			(i & TRGSEL2_LUT_ADDR_MASK));
		if (ret != SR_OK)
			return ret;
	}

	/* Send the parameters (prescaler, counter, comparator config). */
	wrptr = buf;
	regval = 0;
	regval |= lut->params.selc << 6;
	regval |= lut->params.selpresc << 0;
	write_u8_inc(&wrptr, regval);
	regval = 0;
	regval |= lut->params.selinc << 6;
	regval |= lut->params.selres << 4;
	regval |= lut->params.sela << 2;
	regval |= lut->params.selb << 0;
	write_u8_inc(&wrptr, regval);
	write_u16le_inc(&wrptr, lut->params.cmpb);
	write_u16le_inc(&wrptr, lut->params.cmpa);
	ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	return SR_OK;
}
492
493/*
494 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
495 * uses FTDI bitbang mode for netlist download in slave serial mode.
496 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
497 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
498 * compatible bitbang mode? For maximum code re-use and reduced libftdi
499 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
500 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
501 *
502 * 750kbps rate (four times the speed of sigmalogan) works well for
503 * netlist download. All pins except INIT_B are output pins during
504 * configuration download.
505 *
506 * Some pins are inverted as a byproduct of level shifting circuitry.
507 * That's why high CCLK level (from the cable's point of view) is idle
508 * from the FPGA's perspective.
509 *
510 * The vendor's literature discusses a "suicide sequence" which ends
511 * regular FPGA execution and should be sent before entering bitbang
512 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
513 * a few times.
514 */
515#define BB_PIN_CCLK (1 << 0) /* D0, CCLK */
516#define BB_PIN_PROG (1 << 1) /* D1, PROG */
517#define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */
518#define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */
519#define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */
520#define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */
521#define BB_PIN_DIN (1 << 6) /* D6, DIN */
522#define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */
523
524#define BB_BITRATE (750 * 1000)
525#define BB_PINMASK (0xff & ~BB_PIN_INIT)
526
527/*
528 * Initiate slave serial mode for configuration download. Which is done
529 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
530 * initiating the configuration download.
531 *
532 * Run a "suicide sequence" first to terminate the regular FPGA operation
533 * before reconfiguration. The FTDI cable is single channel, and shares
534 * pins which are used for data communication in FIFO mode with pins that
535 * are used for FPGA configuration in bitbang mode. Hardware defaults for
536 * unconfigured hardware, and runtime conditions after FPGA configuration
537 * need to cooperate such that re-configuration of the FPGA can start.
538 */
/*
 * One attempt at initiating FPGA configuration download mode. Runs the
 * "suicide sequence", pulses PROG, then waits for the FPGA to assert
 * INIT_B. Returns SR_OK, SR_ERR_IO, or SR_ERR_TIMEOUT (retryable).
 */
static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
{
	const uint8_t suicide[] = {
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
	};
	const uint8_t init_array[] = {
		BB_PIN_CCLK,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
	};
	int retries, ret;
	uint8_t data;

	/* Section 2. part 1), do the FPGA suicide. */
	ret = SR_OK;
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	if (ret != SR_OK)
		return SR_ERR_IO;
	g_usleep(10 * 1000);

	/* Section 2. part 2), pulse PROG. */
	ret = sigma_write_sr(devc, init_array, sizeof(init_array));
	if (ret != SR_OK)
		return ret;
	g_usleep(10 * 1000);
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);

	/*
	 * Wait until the FPGA asserts INIT_B. Check in a maximum number
	 * of bursts with a given delay between them. Read as many pin
	 * capture results as the combination of FTDI chip and FTDI lib
	 * may provide. Cope with absence of pin capture data in a cycle.
	 * This approach shall result in fast response in case of success,
	 * low cost of execution during wait, reliable error handling in
	 * the transport layer, and robust response to failure or absence
	 * of result data (hardware inactivity after stimulus).
	 */
	retries = 10;
	while (retries--) {
		do {
			ret = sigma_read_raw(devc, &data, sizeof(data));
			if (ret < 0)
				return SR_ERR_IO;
			if (ret == sizeof(data) && (data & BB_PIN_INIT))
				return SR_OK;
		} while (ret == sizeof(data));
		if (retries)
			g_usleep(10 * 1000);
	}

	return SR_ERR_TIMEOUT;
}
608
609/*
610 * This is belt and braces. Re-run the bitbang initiation sequence a few
611 * times should first attempts fail. Failure is rare but can happen (was
612 * observed during driver development).
613 */
614static int sigma_fpga_init_bitbang(struct dev_context *devc)
615{
616 size_t retries;
617 int ret;
618
619 retries = 10;
620 while (retries--) {
621 ret = sigma_fpga_init_bitbang_once(devc);
622 if (ret == SR_OK)
623 return ret;
624 if (ret != SR_ERR_TIMEOUT)
625 return ret;
626 }
627 return ret;
628}
629
/*
 * Configure the FPGA for logic-analyzer mode. Issues a batch of three
 * register reads (ID, and two scratch read-backs) plus the SDRAM init
 * request, then verifies the three response bytes. This doubles as a
 * communication sanity check after firmware download.
 */
static int sigma_fpga_init_la(struct dev_context *devc)
{
	uint8_t buf[16], *wrptr;
	uint8_t data_55, data_aa, mode;
	uint8_t result[3];
	const uint8_t *rdptr;
	int ret;

	wrptr = buf;

	/* Read ID register. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | (READ_ID & 0xf));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | (READ_ID >> 4));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0x55 to scratch register, read back. */
	data_55 = 0x55;
	write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
	write_u8_inc(&wrptr, REG_DATA_LOW | (data_55 & 0xf));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_55 >> 4));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0xaa to scratch register, read back. */
	data_aa = 0xaa;
	write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_TEST & 0xf));
	write_u8_inc(&wrptr, REG_DATA_LOW | (data_aa & 0xf));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (data_aa >> 4));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Initiate SDRAM initialization in mode register. */
	mode = WMR_SDRAMINIT;
	write_u8_inc(&wrptr, REG_ADDR_LOW | (WRITE_MODE & 0xf));
	write_u8_inc(&wrptr, REG_DATA_LOW | (mode & 0xf));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | (mode >> 4));

	/*
	 * Send the command sequence which contains 3 READ requests.
	 * Expect to see the corresponding 3 response bytes.
	 */
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK) {
		sr_err("Could not request LA start response.");
		return ret;
	}
	ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
	if (ret != SR_OK) {
		sr_err("Could not receive LA start response.");
		return SR_ERR_IO;
	}
	/* Check the ID byte and the two scratch register read-backs. */
	rdptr = result;
	if (read_u8_inc(&rdptr) != 0xa6) {
		sr_err("Unexpected ID response.");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_55) {
		sr_err("Unexpected scratch read-back (55).");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_aa) {
		sr_err("Unexpected scratch read-back (aa).");
		return SR_ERR_DATA;
	}

	return SR_OK;
}
698
/*
 * Read the firmware from a file and transform it into a series of bitbang
 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
 * by the caller of this function.
 *
 * Returns SR_OK and fills *bb_cmd / *bb_cmd_size on success, SR_ERR_IO
 * when the resource cannot be loaded, SR_ERR_MALLOC on allocation
 * failure.
 */
static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
	uint8_t **bb_cmd, gsize *bb_cmd_size)
{
	uint8_t *firmware;
	size_t file_size;
	uint8_t *p;
	size_t l;
	uint32_t imm;
	size_t bb_size;
	uint8_t *bb_stream, *bbs, byte, mask, v;

	/* Retrieve the on-disk firmware file content. */
	firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
		&file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
	if (!firmware)
		return SR_ERR_IO;

	/*
	 * Unscramble the file content (XOR with "random" sequence).
	 * The constants implement the vendor's obfuscation scheme and
	 * must not be altered.
	 */
	p = firmware;
	l = file_size;
	imm = 0x3f6df2ab;
	while (l--) {
		imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
		*p++ ^= imm & 0xff;
	}

	/*
	 * Generate a sequence of bitbang samples. With two samples per
	 * FPGA configuration bit, providing the level for the DIN signal
	 * as well as two edges for CCLK. See Xilinx UG332 for details
	 * ("slave serial" mode).
	 *
	 * Note that CCLK is inverted in hardware. That's why the
	 * respective bit is first set and then cleared in the bitbang
	 * sample sets. So that the DIN level will be stable when the
	 * data gets sampled at the rising CCLK edge, and the signals'
	 * setup time constraint will be met.
	 *
	 * The caller will put the FPGA into download mode, will send
	 * the bitbang samples, and release the allocated memory.
	 */
	bb_size = file_size * 8 * 2;
	bb_stream = g_try_malloc(bb_size);
	if (!bb_stream) {
		sr_err("Memory allocation failed during firmware upload.");
		g_free(firmware);
		return SR_ERR_MALLOC;
	}
	/* MSB first, two bitbang samples per configuration bit. */
	bbs = bb_stream;
	p = firmware;
	l = file_size;
	while (l--) {
		byte = *p++;
		mask = 0x80;
		while (mask) {
			v = (byte & mask) ? BB_PIN_DIN : 0;
			mask >>= 1;
			*bbs++ = v | BB_PIN_CCLK;
			*bbs++ = v;
		}
	}
	g_free(firmware);

	/* The transformation completed successfully, return the result. */
	*bb_cmd = bb_stream;
	*bb_cmd_size = bb_size;

	return SR_OK;
}
773
/*
 * Download the selected firmware image to the FPGA. Puts the cable in
 * bitbang mode, initiates configuration, streams the netlist, returns
 * the cable to FIFO mode, and verifies logic-analyzer operation.
 * Skips the download when the requested image already is active.
 */
static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
	enum sigma_firmware_idx firmware_idx)
{
	int ret;
	uint8_t *buf;
	uint8_t pins;
	size_t buf_size;
	const char *firmware;

	/* Check for valid firmware file selection. */
	if (firmware_idx >= ARRAY_SIZE(firmware_files))
		return SR_ERR_ARG;
	firmware = firmware_files[firmware_idx];
	if (!firmware || !*firmware)
		return SR_ERR_ARG;

	/* Avoid downloading the same firmware multiple times. */
	if (devc->firmware_idx == firmware_idx) {
		sr_info("Not uploading firmware file '%s' again.", firmware);
		return SR_OK;
	}

	devc->state.state = SIGMA_CONFIG;

	/* Set the cable to bitbang mode. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
	if (ret < 0) {
		sr_err("Could not setup cable mode for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
	if (ret < 0) {
		sr_err("Could not setup bitrate for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}

	/* Initiate FPGA configuration mode. */
	ret = sigma_fpga_init_bitbang(devc);
	if (ret) {
		sr_err("Could not initiate firmware upload to hardware");
		return ret;
	}

	/* Prepare wire format of the firmware image. */
	ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
	if (ret != SR_OK) {
		sr_err("Could not prepare file %s for upload.", firmware);
		return ret;
	}

	/* Write the FPGA netlist to the cable. */
	sr_info("Uploading firmware file '%s'.", firmware);
	ret = sigma_write_sr(devc, buf, buf_size);
	g_free(buf);
	if (ret != SR_OK) {
		sr_err("Could not upload firmware file '%s'.", firmware);
		return ret;
	}

	/* Leave bitbang mode and discard pending input data. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
	if (ret < 0) {
		sr_err("Could not setup cable mode after upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);
	/* Drain any stale pin capture data from the cable. */
	while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
		;

	/* Initialize the FPGA for logic-analyzer mode. */
	ret = sigma_fpga_init_la(devc);
	if (ret != SR_OK) {
		sr_err("Hardware response after firmware upload failed.");
		return ret;
	}

	/* Keep track of successful firmware download completion. */
	devc->state.state = SIGMA_IDLE;
	devc->firmware_idx = firmware_idx;
	sr_info("Firmware uploaded.");

	return SR_OK;
}
860
861/*
862 * The driver supports user specified time or sample count limits. The
863 * device's hardware supports neither, and hardware compression prevents
864 * reliable detection of "fill levels" (currently reached sample counts)
865 * from register values during acquisition. That's why the driver needs
866 * to apply some heuristics:
867 *
868 * - The (optional) sample count limit and the (normalized) samplerate
869 * get mapped to an estimated duration for these samples' acquisition.
870 * - The (optional) time limit gets checked as well. The lesser of the
871 * two limits will terminate the data acquisition phase. The exact
872 * sample count limit gets enforced in session feed submission paths.
873 * - Some slack needs to be given to account for hardware pipelines as
874 * well as late storage of last chunks after compression thresholds
875 * are tripped. The resulting data set will span at least the caller
876 * specified period of time, which shall be perfectly acceptable.
877 *
878 * With RLE compression active, up to 64K sample periods can pass before
879 * a cluster accumulates. Which translates to 327ms at 200kHz. Add two
880 * times that period for good measure, one is not enough to flush the
881 * hardware pipeline (observation from an earlier experiment).
882 */
/*
 * Derive the acquisition phase's timeout from the user's sample count
 * and/or time limits (see the heuristics discussion above). Initializes
 * and starts devc->acq_limits. Assumes devc->samplerate was normalized
 * before (nonzero); the divisions below would fault otherwise.
 */
SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
{
	int ret;
	GVariant *data;
	uint64_t user_count, user_msecs;
	uint64_t worst_cluster_time_ms;
	uint64_t count_msecs, acquire_msecs;

	sr_sw_limits_init(&devc->acq_limits);

	/* Get sample count limit, convert to msecs. */
	ret = sr_sw_limits_config_get(&devc->cfg_limits,
		SR_CONF_LIMIT_SAMPLES, &data);
	if (ret != SR_OK)
		return ret;
	user_count = g_variant_get_uint64(data);
	g_variant_unref(data);
	count_msecs = 0;
	if (user_count)
		count_msecs = 1000 * user_count / devc->samplerate + 1;

	/* Get time limit, which is in msecs. */
	ret = sr_sw_limits_config_get(&devc->cfg_limits,
		SR_CONF_LIMIT_MSEC, &data);
	if (ret != SR_OK)
		return ret;
	user_msecs = g_variant_get_uint64(data);
	g_variant_unref(data);

	/* Get the lesser of them, with both being optional. */
	acquire_msecs = ~0ull;
	if (user_count && count_msecs < acquire_msecs)
		acquire_msecs = count_msecs;
	if (user_msecs && user_msecs < acquire_msecs)
		acquire_msecs = user_msecs;
	if (acquire_msecs == ~0ull)
		return SR_OK;

	/* Add some slack, and use that timeout for acquisition. */
	worst_cluster_time_ms = 1000 * 65536 / devc->samplerate;
	acquire_msecs += 2 * worst_cluster_time_ms;
	data = g_variant_new_uint64(acquire_msecs);
	ret = sr_sw_limits_config_set(&devc->acq_limits,
		SR_CONF_LIMIT_MSEC, data);
	g_variant_unref(data);
	if (ret != SR_OK)
		return ret;

	sr_sw_limits_acquisition_start(&devc->acq_limits);
	return SR_OK;
}
934
935/*
936 * Check whether a caller specified samplerate matches the device's
937 * hardware constraints (can be used for acquisition). Optionally yield
938 * a value that approximates the original spec.
939 *
940 * This routine assumes that input specs are in the 200kHz to 200MHz
941 * range of supported rates, and callers typically want to normalize a
942 * given value to the hardware capabilities. Values in the 50MHz range
943 * get rounded up by default, to avoid a more expensive check for the
944 * closest match, while higher sampling rate is always desirable during
945 * measurement. Input specs which exactly match hardware capabilities
946 * remain unaffected. Because 100/200MHz rates also limit the number of
947 * available channels, they are not suggested by this routine, instead
948 * callers need to pick them consciously.
949 */
950SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
951{
952 uint64_t div, rate;
953
954 /* Accept exact matches for 100/200MHz. */
955 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
956 if (have_rate)
957 *have_rate = want_rate;
958 return SR_OK;
959 }
960
961 /* Accept 200kHz to 50MHz range, and map to near value. */
962 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
963 div = SR_MHZ(50) / want_rate;
964 rate = SR_MHZ(50) / div;
965 if (have_rate)
966 *have_rate = rate;
967 return SR_OK;
968 }
969
970 return SR_ERR_ARG;
971}
972
/*
 * Report the device's current samplerate. Currently a stub which
 * always yields the lowest suggested rate.
 */
SR_PRIV uint64_t sigma_get_samplerate(const struct sr_dev_inst *sdi)
{
	/* TODO Retrieve value from hardware. */
	(void)sdi;
	return samplerates[0];
}
979
/*
 * Apply the samplerate which was stored in devc->samplerate. Selects
 * and uploads the matching firmware image, and updates the channel
 * count plus samples-per-event which depend on the rate.
 */
SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	struct drv_context *drvc;
	uint64_t samplerate;
	int ret;
	int num_channels;

	devc = sdi->priv;
	drvc = sdi->driver->context;

	/* Accept any caller specified rate which the hardware supports. */
	ret = sigma_normalize_samplerate(devc->samplerate, &samplerate);
	if (ret != SR_OK)
		return ret;

	/*
	 * Depending on the samplerates of 200/100/50- MHz, specific
	 * firmware is required and higher rates might limit the set
	 * of available channels. (After normalization above, exactly
	 * one of the branches below applies, so 'ret' is always set.)
	 */
	num_channels = devc->num_channels;
	if (samplerate <= SR_MHZ(50)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
		num_channels = 16;
	} else if (samplerate == SR_MHZ(100)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
		num_channels = 8;
	} else if (samplerate == SR_MHZ(200)) {
		ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
		num_channels = 4;
	}

	/*
	 * The samplerate affects the number of available logic channels
	 * as well as a sample memory layout detail (the number of samples
	 * which the device will communicate within an "event").
	 */
	if (ret == SR_OK) {
		devc->num_channels = num_channels;
		devc->samples_per_event = 16 / devc->num_channels;
	}

	return ret;
}
1025
1026/*
1027 * Arrange for a session feed submit buffer. A queue where a number of
1028 * samples gets accumulated to reduce the number of send calls. Which
1029 * also enforces an optional sample count limit for data acquisition.
1030 *
1031 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
1032 * driver provides a fixed channel layout regardless of samplerate).
1033 */
1034
1035#define CHUNK_SIZE (4 * 1024 * 1024)
1036
struct submit_buffer {
	size_t unit_size; /* Bytes per sample (fixed 16bit layout). */
	size_t max_samples, curr_samples; /* Queue capacity and fill level. */
	uint8_t *sample_data; /* Accumulation buffer for sample values. */
	uint8_t *write_pointer; /* Next write position in sample_data. */
	struct sr_dev_inst *sdi; /* Device instance for session feed calls. */
	struct sr_datafeed_packet packet; /* Re-used SR_DF_LOGIC packet. */
	struct sr_datafeed_logic logic; /* Payload referencing sample_data. */
};
1046
1047static int alloc_submit_buffer(struct sr_dev_inst *sdi)
1048{
1049 struct dev_context *devc;
1050 struct submit_buffer *buffer;
1051 size_t size;
1052
1053 devc = sdi->priv;
1054
1055 buffer = g_malloc0(sizeof(*buffer));
1056 devc->buffer = buffer;
1057
1058 buffer->unit_size = sizeof(uint16_t);
1059 size = CHUNK_SIZE;
1060 size /= buffer->unit_size;
1061 buffer->max_samples = size;
1062 size *= buffer->unit_size;
1063 buffer->sample_data = g_try_malloc0(size);
1064 if (!buffer->sample_data)
1065 return SR_ERR_MALLOC;
1066 buffer->write_pointer = buffer->sample_data;
1067 sr_sw_limits_init(&devc->feed_limits);
1068
1069 buffer->sdi = sdi;
1070 memset(&buffer->logic, 0, sizeof(buffer->logic));
1071 buffer->logic.unitsize = buffer->unit_size;
1072 buffer->logic.data = buffer->sample_data;
1073 memset(&buffer->packet, 0, sizeof(buffer->packet));
1074 buffer->packet.type = SR_DF_LOGIC;
1075 buffer->packet.payload = &buffer->logic;
1076
1077 return SR_OK;
1078}
1079
1080static int setup_submit_limit(struct dev_context *devc)
1081{
1082 struct sr_sw_limits *limits;
1083 int ret;
1084 GVariant *data;
1085 uint64_t total;
1086
1087 limits = &devc->feed_limits;
1088
1089 ret = sr_sw_limits_config_get(&devc->cfg_limits,
1090 SR_CONF_LIMIT_SAMPLES, &data);
1091 if (ret != SR_OK)
1092 return ret;
1093 total = g_variant_get_uint64(data);
1094 g_variant_unref(data);
1095
1096 sr_sw_limits_init(limits);
1097 if (total) {
1098 data = g_variant_new_uint64(total);
1099 ret = sr_sw_limits_config_set(limits,
1100 SR_CONF_LIMIT_SAMPLES, data);
1101 g_variant_unref(data);
1102 if (ret != SR_OK)
1103 return ret;
1104 }
1105
1106 sr_sw_limits_acquisition_start(limits);
1107
1108 return SR_OK;
1109}
1110
1111static void free_submit_buffer(struct dev_context *devc)
1112{
1113 struct submit_buffer *buffer;
1114
1115 if (!devc)
1116 return;
1117
1118 buffer = devc->buffer;
1119 if (!buffer)
1120 return;
1121 devc->buffer = NULL;
1122
1123 g_free(buffer->sample_data);
1124 g_free(buffer);
1125}
1126
1127static int flush_submit_buffer(struct dev_context *devc)
1128{
1129 struct submit_buffer *buffer;
1130 int ret;
1131
1132 buffer = devc->buffer;
1133
1134 /* Is queued sample data available? */
1135 if (!buffer->curr_samples)
1136 return SR_OK;
1137
1138 /* Submit to the session feed. */
1139 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
1140 ret = sr_session_send(buffer->sdi, &buffer->packet);
1141 if (ret != SR_OK)
1142 return ret;
1143
1144 /* Rewind queue position. */
1145 buffer->curr_samples = 0;
1146 buffer->write_pointer = buffer->sample_data;
1147
1148 return SR_OK;
1149}
1150
1151static int addto_submit_buffer(struct dev_context *devc,
1152 uint16_t sample, size_t count)
1153{
1154 struct submit_buffer *buffer;
1155 struct sr_sw_limits *limits;
1156 int ret;
1157
1158 buffer = devc->buffer;
1159 limits = &devc->feed_limits;
1160 if (sr_sw_limits_check(limits))
1161 count = 0;
1162
1163 /*
1164 * Individually accumulate and check each sample, such that
1165 * accumulation between flushes won't exceed local storage, and
1166 * enforcement of user specified limits is exact.
1167 */
1168 while (count--) {
1169 write_u16le_inc(&buffer->write_pointer, sample);
1170 buffer->curr_samples++;
1171 if (buffer->curr_samples == buffer->max_samples) {
1172 ret = flush_submit_buffer(devc);
1173 if (ret != SR_OK)
1174 return ret;
1175 }
1176 sr_sw_limits_update_samples_read(limits, 1);
1177 if (sr_sw_limits_check(limits))
1178 break;
1179 }
1180
1181 return SR_OK;
1182}
1183
1184/*
1185 * In 100 and 200 MHz mode, only a single pin rising/falling can be
1186 * set as trigger. In other modes, two rising/falling triggers can be set,
1187 * in addition to value/mask trigger for any number of channels.
1188 *
1189 * The Sigma supports complex triggers using boolean expressions, but this
1190 * has not been implemented yet.
1191 */
SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	struct sr_trigger *trigger;
	struct sr_trigger_stage *stage;
	struct sr_trigger_match *match;
	const GSList *l, *m;
	int channelbit, trigger_set;

	/* Start from a clean trigger configuration. */
	devc = sdi->priv;
	memset(&devc->trigger, 0, sizeof(devc->trigger));
	/* Absence of a trigger spec means "free running" acquisition. */
	trigger = sr_session_trigger_get(sdi->session);
	if (!trigger)
		return SR_OK;

	/*
	 * Walk all stages and their match conditions. Accumulate level
	 * conditions in value/mask bit fields, and edge conditions in
	 * the rising/falling masks. trigger_set counts edge conditions,
	 * which the hardware only supports a limited number of.
	 */
	trigger_set = 0;
	for (l = trigger->stages; l; l = l->next) {
		stage = l->data;
		for (m = stage->matches; m; m = m->next) {
			match = m->data;
			/* Ignore disabled channels with a trigger. */
			if (!match->channel->enabled)
				continue;
			channelbit = 1 << match->channel->index;
			if (devc->samplerate >= SR_MHZ(100)) {
				/*
				 * Fast trigger support. The 100/200MHz
				 * modes only support a single edge on a
				 * single pin, no level conditions.
				 */
				if (trigger_set) {
					sr_err("100/200MHz modes limited to single trigger pin.");
					return SR_ERR;
				}
				if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
				} else {
					sr_err("100/200MHz modes limited to edge trigger.");
					return SR_ERR;
				}

				trigger_set++;
			} else {
				/*
				 * Simple trigger support (event). Levels
				 * accumulate in value/mask, edges count
				 * against the single-edge limit below.
				 */
				if (match->match == SR_TRIGGER_ONE) {
					devc->trigger.simplevalue |= channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_ZERO) {
					devc->trigger.simplevalue &= ~channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
					trigger_set++;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
					trigger_set++;
				}

				/*
				 * Actually, Sigma supports 2 rising/falling triggers,
				 * but they are ORed and the current trigger syntax
				 * does not permit ORed triggers.
				 */
				if (trigger_set > 1) {
					sr_err("Limited to 1 edge trigger.");
					return SR_ERR;
				}
			}
		}
	}

	return SR_OK;
}
1263
/*
 * Software trigger to determine the exact trigger position. Scans up
 * to eight consecutive 16bit samples (one event's worth of data) and
 * returns the index (0..7) of the first sample which satisfies the
 * trigger condition in 't'. 'last_sample' is the pin state before the
 * first buffered sample, which the edge checks compare against. When
 * no sample matches, i ends up at 8 and 'i & 0x7' wraps it back to 0,
 * i.e. the original (coarse) trigger position.
 */
static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
	struct sigma_trigger *t)
{
	const uint8_t *rdptr;
	int i;
	uint16_t sample;

	rdptr = samples;
	sample = 0;
	for (i = 0; i < 8; i++) {
		/* Remember the previous sample for edge detection. */
		if (i > 0)
			last_sample = sample;
		sample = read_u16le_inc(&rdptr);

		/* Simple triggers. */
		if ((sample & t->simplemask) != t->simplevalue)
			continue;

		/* Rising edge. */
		if (((last_sample & t->risingmask) != 0) ||
		    ((sample & t->risingmask) != t->risingmask))
			continue;

		/* Falling edge. */
		if ((last_sample & t->fallingmask) != t->fallingmask ||
		    (sample & t->fallingmask) != 0)
			continue;

		break;
	}

	/* If we did not match, return original trigger pos. */
	return i & 0x7;
}
1299
/*
 * Check whether a submitted sample matches the configured trigger.
 * Currently a stub which never reports a match; see the TODO below.
 */
static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
{
	/* TODO
	 * Check whether the combination of this very sample and the
	 * previous state match the configured trigger condition. This
	 * improves the resolution of the trigger marker's position.
	 * The hardware provided position is coarse, and may point to
	 * a position before the actual match.
	 *
	 * See the previous get_trigger_offset() implementation. This
	 * code needs to get re-used here.
	 */
	(void)devc;
	(void)sample;
	/* Referenced to silence "unused" warnings until it's re-used. */
	(void)get_trigger_offset;

	return FALSE;
}
1318
1319static int check_and_submit_sample(struct dev_context *devc,
1320 uint16_t sample, size_t count, gboolean check_trigger)
1321{
1322 gboolean triggered;
1323 int ret;
1324
1325 triggered = check_trigger && sample_matches_trigger(devc, sample);
1326 if (triggered) {
1327 ret = flush_submit_buffer(devc);
1328 if (ret != SR_OK)
1329 return ret;
1330 ret = std_session_send_df_trigger(devc->buffer->sdi);
1331 if (ret != SR_OK)
1332 return ret;
1333 }
1334
1335 ret = addto_submit_buffer(devc, sample, count);
1336 if (ret != SR_OK)
1337 return ret;
1338
1339 return SR_OK;
1340}
1341
1342/*
1343 * Return the timestamp of "DRAM cluster".
1344 */
1345static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1346{
1347 return read_u16le((const uint8_t *)&cluster->timestamp);
1348}
1349
1350/*
1351 * Return one 16bit data entity of a DRAM cluster at the specified index.
1352 */
1353static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1354{
1355 return read_u16le((const uint8_t *)&cl->samples[idx]);
1356}
1357
1358/*
1359 * Deinterlace sample data that was retrieved at 100MHz samplerate.
1360 * One 16bit item contains two samples of 8bits each. The bits of
1361 * multiple samples are interleaved.
1362 */
1363static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx)
1364{
1365 uint16_t outdata;
1366
1367 indata >>= idx;
1368 outdata = 0;
1369 outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
1370 outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
1371 outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
1372 outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
1373 outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
1374 outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
1375 outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
1376 outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
1377 return outdata;
1378}
1379
1380/*
1381 * Deinterlace sample data that was retrieved at 200MHz samplerate.
1382 * One 16bit item contains four samples of 4bits each. The bits of
1383 * multiple samples are interleaved.
1384 */
1385static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx)
1386{
1387 uint16_t outdata;
1388
1389 indata >>= idx;
1390 outdata = 0;
1391 outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
1392 outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
1393 outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
1394 outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
1395 return outdata;
1396}
1397
/*
 * Decode one DRAM cluster: first "decode RLE" by repeating the last
 * sample for the gap since the previous cluster, then submit the
 * cluster's own samples (deinterlacing them for the 100/200MHz modes).
 */
static void sigma_decode_dram_cluster(struct dev_context *devc,
	struct sigma_dram_cluster *dram_cluster,
	size_t events_in_cluster, gboolean triggered)
{
	struct sigma_state *ss;
	uint16_t tsdiff, ts, sample, item16;
	unsigned int i;

	/* Only check samples against triggers when triggers are in use. */
	if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
		triggered = FALSE;

	/*
	 * If this cluster is not adjacent to the previously received
	 * cluster, then send the appropriate number of samples with the
	 * previous values to the sigrok session. This "decodes RLE".
	 *
	 * These samples cannot match the trigger since they just repeat
	 * the previously submitted data pattern. (This assumption holds
	 * for simple level and edge triggers. It would not for timed or
	 * counted conditions, which currently are not supported.)
	 */
	ss = &devc->state;
	ts = sigma_dram_cluster_ts(dram_cluster);
	/* 16bit timestamp difference, intentionally wraps around. */
	tsdiff = ts - ss->lastts;
	if (tsdiff > 0) {
		size_t count;
		sample = ss->lastsample;
		count = tsdiff * devc->samples_per_event;
		/* Best effort, submission errors are not fatal here. */
		(void)check_and_submit_sample(devc, sample, count, FALSE);
	}
	ss->lastts = ts + EVENTS_PER_CLUSTER;

	/*
	 * Grab sample data from the current cluster and prepare their
	 * submission to the session feed. Handle samplerate dependent
	 * memory layout of sample data. Accumulation of data chunks
	 * before submission is transparent to this code path, specific
	 * buffer depth is neither assumed nor required here.
	 */
	sample = 0;
	for (i = 0; i < events_in_cluster; i++) {
		item16 = sigma_dram_cluster_data(dram_cluster, i);
		if (devc->samplerate == SR_MHZ(200)) {
			/* One event holds four interleaved 4bit samples. */
			sample = sigma_deinterlace_200mhz_data(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 2);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 3);
			check_and_submit_sample(devc, sample, 1, triggered);
		} else if (devc->samplerate == SR_MHZ(100)) {
			/* One event holds two interleaved 8bit samples. */
			sample = sigma_deinterlace_100mhz_data(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_100mhz_data(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
		} else {
			/* At 50MHz and below, one event is one sample. */
			sample = item16;
			check_and_submit_sample(devc, sample, 1, triggered);
		}
	}
	/* Remember the last pin state for future RLE decoding. */
	ss->lastsample = sample;
}
1461
1462/*
1463 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
1464 * Each event is 20ns apart, and can contain multiple samples.
1465 *
1466 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
1467 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
1468 * For 50 MHz and below, events contain one sample for each channel,
1469 * spread 20 ns apart.
1470 */
static int decode_chunk_ts(struct dev_context *devc,
	struct sigma_dram_line *dram_line,
	size_t events_in_line, size_t trigger_event)
{
	struct sigma_dram_cluster *dram_cluster;
	unsigned int clusters_in_line;
	unsigned int events_in_cluster;
	unsigned int i;
	uint32_t trigger_cluster;

	/* Number of clusters in this line, rounded up. */
	clusters_in_line = events_in_line;
	clusters_in_line += EVENTS_PER_CLUSTER - 1;
	clusters_in_line /= EVENTS_PER_CLUSTER;
	/* The ~0 sentinel means "no trigger within this line". */
	trigger_cluster = ~0;

	/* Check if trigger is in this chunk. */
	if (trigger_event < EVENTS_PER_ROW) {
		/*
		 * At 50MHz and below, move the trigger position up to
		 * the start of its cluster. NOTE(review): presumably
		 * compensates for the coarseness of the hardware's
		 * reported position -- confirm against vendor docs.
		 */
		if (devc->samplerate <= SR_MHZ(50)) {
			trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
				trigger_event);
		}

		/* Find in which cluster the trigger occurred. */
		trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;
	}

	/* For each full DRAM cluster. */
	for (i = 0; i < clusters_in_line; i++) {
		dram_cluster = &dram_line->cluster[i];

		/* The last cluster might not be full. */
		if ((i == clusters_in_line - 1) &&
		    (events_in_line % EVENTS_PER_CLUSTER)) {
			events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
		} else {
			events_in_cluster = EVENTS_PER_CLUSTER;
		}

		sigma_decode_dram_cluster(devc, dram_cluster,
			events_in_cluster, i == trigger_cluster);
	}

	return SR_OK;
}
1515
1516static int download_capture(struct sr_dev_inst *sdi)
1517{
1518 const uint32_t chunks_per_read = 32;
1519
1520 struct dev_context *devc;
1521 struct sigma_dram_line *dram_line;
1522 uint32_t stoppos, triggerpos;
1523 uint8_t modestatus;
1524 uint32_t i;
1525 uint32_t dl_lines_total, dl_lines_curr, dl_lines_done;
1526 uint32_t dl_first_line, dl_line;
1527 uint32_t dl_events_in_line, trigger_event;
1528 uint32_t trg_line, trg_event;
1529 int ret;
1530
1531 devc = sdi->priv;
1532
1533 sr_info("Downloading sample data.");
1534 devc->state.state = SIGMA_DOWNLOAD;
1535
1536 /*
1537 * Ask the hardware to stop data acquisition. Reception of the
1538 * FORCESTOP request makes the hardware "disable RLE" (store
1539 * clusters to DRAM regardless of whether pin state changes) and
1540 * raise the POSTTRIGGERED flag.
1541 */
1542 modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
1543 ret = sigma_set_register(devc, WRITE_MODE, modestatus);
1544 if (ret != SR_OK)
1545 return ret;
1546 do {
1547 ret = sigma_read_register(devc, READ_MODE,
1548 &modestatus, sizeof(modestatus));
1549 if (ret != SR_OK) {
1550 sr_err("Could not poll for post-trigger state.");
1551 return FALSE;
1552 }
1553 } while (!(modestatus & RMR_POSTTRIGGERED));
1554
1555 /* Set SDRAM Read Enable. */
1556 ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
1557 if (ret != SR_OK)
1558 return ret;
1559
1560 /* Get the current position. Check if trigger has fired. */
1561 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
1562 if (ret != SR_OK) {
1563 sr_err("Could not query capture positions/state.");
1564 return FALSE;
1565 }
1566 trg_line = ~0;
1567 trg_event = ~0;
1568 if (modestatus & RMR_TRIGGERED) {
1569 trg_line = triggerpos >> ROW_SHIFT;
1570 trg_event = triggerpos & ROW_MASK;
1571 }
1572
1573 /*
1574 * Determine how many "DRAM lines" of 1024 bytes each we need to
1575 * retrieve from the Sigma hardware, so that we have a complete
1576 * set of samples. Note that the last line need not contain 64
1577 * clusters, it might be partially filled only.
1578 *
1579 * When RMR_ROUND is set, the circular buffer in DRAM has wrapped
1580 * around. Since the status of the very next line is uncertain in
1581 * that case, we skip it and start reading from the next line.
1582 */
1583 dl_first_line = 0;
1584 dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
1585 if (modestatus & RMR_ROUND) {
1586 dl_first_line = dl_lines_total + 1;
1587 dl_lines_total = ROW_COUNT - 2;
1588 }
1589 dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
1590 if (!dram_line)
1591 return FALSE;
1592 ret = alloc_submit_buffer(sdi);
1593 if (ret != SR_OK)
1594 return FALSE;
1595 ret = setup_submit_limit(devc);
1596 if (ret != SR_OK)
1597 return FALSE;
1598 dl_lines_done = 0;
1599 while (dl_lines_total > dl_lines_done) {
1600 /* We can download only up-to 32 DRAM lines in one go! */
1601 dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
1602
1603 dl_line = dl_first_line + dl_lines_done;
1604 dl_line %= ROW_COUNT;
1605 ret = sigma_read_dram(devc, dl_line, dl_lines_curr,
1606 (uint8_t *)dram_line);
1607 if (ret != SR_OK)
1608 return FALSE;
1609
1610 /* This is the first DRAM line, so find the initial timestamp. */
1611 if (dl_lines_done == 0) {
1612 devc->state.lastts =
1613 sigma_dram_cluster_ts(&dram_line[0].cluster[0]);
1614 devc->state.lastsample = 0;
1615 }
1616
1617 for (i = 0; i < dl_lines_curr; i++) {
1618 /* The last "DRAM line" need not span its full length. */
1619 dl_events_in_line = EVENTS_PER_ROW;
1620 if (dl_lines_done + i == dl_lines_total - 1)
1621 dl_events_in_line = stoppos & ROW_MASK;
1622
1623 /* Test if the trigger happened on this line. */
1624 trigger_event = ~0;
1625 if (dl_lines_done + i == trg_line)
1626 trigger_event = trg_event;
1627
1628 decode_chunk_ts(devc, dram_line + i,
1629 dl_events_in_line, trigger_event);
1630 }
1631
1632 dl_lines_done += dl_lines_curr;
1633 }
1634 flush_submit_buffer(devc);
1635 free_submit_buffer(devc);
1636 g_free(dram_line);
1637
1638 std_session_send_df_end(sdi);
1639
1640 devc->state.state = SIGMA_IDLE;
1641 sr_dev_acquisition_stop(sdi);
1642
1643 return TRUE;
1644}
1645
1646/*
1647 * Periodically check the Sigma status when in CAPTURE mode. This routine
1648 * checks whether the configured sample count or sample time have passed,
1649 * and will stop acquisition and download the acquired samples.
1650 */
1651static int sigma_capture_mode(struct sr_dev_inst *sdi)
1652{
1653 struct dev_context *devc;
1654
1655 devc = sdi->priv;
1656 if (sr_sw_limits_check(&devc->acq_limits))
1657 return download_capture(sdi);
1658
1659 return TRUE;
1660}
1661
1662SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
1663{
1664 struct sr_dev_inst *sdi;
1665 struct dev_context *devc;
1666
1667 (void)fd;
1668 (void)revents;
1669
1670 sdi = cb_data;
1671 devc = sdi->priv;
1672
1673 if (devc->state.state == SIGMA_IDLE)
1674 return TRUE;
1675
1676 /*
1677 * When the application has requested to stop the acquisition,
1678 * then immediately start downloading sample data. Otherwise
1679 * keep checking configured limits which will terminate the
1680 * acquisition and initiate download.
1681 */
1682 if (devc->state.state == SIGMA_STOPPING)
1683 return download_capture(sdi);
1684 if (devc->state.state == SIGMA_CAPTURE)
1685 return sigma_capture_mode(sdi);
1686
1687 return TRUE;
1688}
1689
/*
 * Build a LUT entry used by the trigger functions. Four 16bit LUTs get
 * filled, one per group of four channels. Each LUT address encodes a
 * combination of the group's four pin levels; addresses whose levels
 * contradict the value/mask condition get cleared, all others remain
 * set ("match").
 */
static void build_lut_entry(uint16_t value, uint16_t mask, uint16_t *entry)
{
	int quad, lutbit, ch;
	uint16_t ch_bit;
	int want_high, sees_high;

	for (quad = 0; quad < 4; quad++) {
		/* Start out with "every LUT address matches". */
		entry[quad] = 0xffff;
		for (lutbit = 0; lutbit < 16; lutbit++) {
			for (ch = 0; ch < 4; ch++) {
				ch_bit = 1 << (quad * 4 + ch);
				/* Unselected channels never veto a match. */
				if (!(mask & ch_bit))
					continue;
				want_high = (value & ch_bit) ? 1 : 0;
				sees_high = (lutbit & (1 << ch)) ? 1 : 0;
				/* Clear LUT addresses which contradict. */
				if (want_high != sees_high)
					entry[quad] &= ~(1 << lutbit);
			}
		}
	}
}
1714
/*
 * Add a logical function to the LUT mask. 'oper' is the pin condition
 * to detect, 'func' combines the new condition with the mask's prior
 * content, 'index' selects which pair of LUT input bits to inspect,
 * and 'neg' inverts the condition.
 */
static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
	int index, int neg, uint16_t *mask)
{
	int i, j;
	int x[2][2], tmp, a, b, aset, bset, rset;

	memset(x, 0, sizeof(x));

	/*
	 * Trigger detect condition. x[][] is a truth table over the two
	 * inspected input bits; a set element marks a bit combination
	 * which satisfies 'oper' (e.g. OP_RISE sets exactly x[0][1]).
	 */
	switch (oper) {
	case OP_LEVEL:
		x[0][1] = 1;
		x[1][1] = 1;
		break;
	case OP_NOT:
		x[0][0] = 1;
		x[1][0] = 1;
		break;
	case OP_RISE:
		x[0][1] = 1;
		break;
	case OP_FALL:
		x[1][0] = 1;
		break;
	case OP_RISEFALL:
		x[0][1] = 1;
		x[1][0] = 1;
		break;
	case OP_NOTRISE:
		x[1][1] = 1;
		x[0][0] = 1;
		x[1][0] = 1;
		break;
	case OP_NOTFALL:
		x[1][1] = 1;
		x[0][0] = 1;
		x[0][1] = 1;
		break;
	case OP_NOTRISEFALL:
		x[1][1] = 1;
		x[0][0] = 1;
		break;
	}

	/* Transpose if neg is set. This inverts the detect condition. */
	if (neg) {
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 2; j++) {
				tmp = x[i][j];
				x[i][j] = x[1 - i][1 - j];
				x[1 - i][1 - j] = tmp;
			}
		}
	}

	/*
	 * Update mask with function. Each of the 16 LUT addresses 'i'
	 * encodes the inspected input pair as bits (a, b); combine the
	 * truth table's verdict with the mask's previous bit according
	 * to the requested (possibly negated) logic function.
	 */
	for (i = 0; i < 16; i++) {
		a = (i >> (2 * index + 0)) & 1;
		b = (i >> (2 * index + 1)) & 1;

		aset = (*mask >> i) & 1;
		bset = x[b][a];

		rset = 0;
		if (func == FUNC_AND || func == FUNC_NAND)
			rset = aset & bset;
		else if (func == FUNC_OR || func == FUNC_NOR)
			rset = aset | bset;
		else if (func == FUNC_XOR || func == FUNC_NXOR)
			rset = aset ^ bset;

		if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
			rset = !rset;

		*mask &= ~(1 << i);

		if (rset)
			*mask |= 1 << i;
	}
}
1796
1797/*
1798 * Build trigger LUTs used by 50 MHz and lower sample rates for supporting
1799 * simple pin change and state triggers. Only two transitions (rise/fall) can be
1800 * set at any time, but a full mask and value can be set (0/1).
1801 */
1802SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
1803 struct triggerlut *lut)
1804{
1805 int i, j;
1806 uint16_t masks[2];
1807
1808 memset(lut, 0, sizeof(*lut));
1809 memset(&masks, 0, sizeof(masks));
1810
1811 /* Constant for simple triggers. */
1812 lut->m4 = 0xa000;
1813
1814 /* Value/mask trigger support. */
1815 build_lut_entry(devc->trigger.simplevalue, devc->trigger.simplemask,
1816 lut->m2d);
1817
1818 /* Rise/fall trigger support. */
1819 for (i = 0, j = 0; i < 16; i++) {
1820 if (devc->trigger.risingmask & (1 << i) ||
1821 devc->trigger.fallingmask & (1 << i))
1822 masks[j++] = 1 << i;
1823 }
1824
1825 build_lut_entry(masks[0], masks[0], lut->m0d);
1826 build_lut_entry(masks[1], masks[1], lut->m1d);
1827
1828 /* Add glue logic */
1829 if (masks[0] || masks[1]) {
1830 /* Transition trigger. */
1831 if (masks[0] & devc->trigger.risingmask)
1832 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3);
1833 if (masks[0] & devc->trigger.fallingmask)
1834 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3);
1835 if (masks[1] & devc->trigger.risingmask)
1836 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3);
1837 if (masks[1] & devc->trigger.fallingmask)
1838 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3);
1839 } else {
1840 /* Only value/mask trigger. */
1841 lut->m3 = 0xffff;
1842 }
1843
1844 /* Triggertype: event. */
1845 lut->params.selres = 3;
1846
1847 return SR_OK;
1848}