]> sigrok.org Git - libsigrok.git/blame_incremental - src/hardware/asix-sigma/protocol.c
asix-sigma: track whether triggers were specified when acquisition started
[libsigrok.git] / src / hardware / asix-sigma / protocol.c
... / ...
CommitLineData
1/*
2 * This file is part of the libsigrok project.
3 *
4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23/*
24 * ASIX SIGMA/SIGMA2 logic analyzer driver
25 */
26
27#include <config.h>
28#include "protocol.h"
29
30/*
31 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
32 * (by means of separate firmware images). As well as 50MHz divided by
33 * an integer divider in the 1..256 range (by the "typical" firmware).
34 * Which translates to a strict lower boundary of around 195kHz.
35 *
36 * This driver "suggests" a subset of the available rates by listing a
37 * few discrete values, while setter routines accept any user specified
38 * rate that is supported by the hardware.
39 */
/*
 * Samplerates which the driver advertises to applications. The setter
 * routines accept more values than listed here (any result of 50MHz
 * divided by 1..256, plus the fixed 100/200MHz firmware rates).
 */
static const uint64_t samplerates[] = {
	/* 50MHz and integer divider. 1/2/5 steps (where possible). */
	SR_KHZ(200), SR_KHZ(500),
	SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
	SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
	/* 100MHz/200MHz, fixed rates in special firmware. */
	SR_MHZ(100), SR_MHZ(200),
};
48
/* Get the list of suggested samplerates in sigrok's GVariant format. */
SR_PRIV GVariant *sigma_get_samplerates_list(void)
{
	return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates));
}
53
/*
 * FPGA netlist file names, indexed by enum sigma_firmware_idx. Each
 * image implements one fixed operation mode of the hardware.
 */
static const char *firmware_files[] = {
	[SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
	[SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
	[SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
	[SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
	[SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
};

/* Upper limit for the accepted size of an on-disk firmware image. */
#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
64static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
65{
66 struct dev_context *devc;
67 int vid, pid;
68 const char *serno;
69 int ret;
70
71 devc = sdi->priv;
72 if (!devc)
73 return SR_ERR_ARG;
74
75 if (devc->ftdi.is_open)
76 return SR_OK;
77
78 vid = devc->id.vid;
79 pid = devc->id.pid;
80 serno = sdi->serial_num;
81 if (!vid || !pid || !serno || !*serno)
82 return SR_ERR_ARG;
83
84 ret = ftdi_init(&devc->ftdi.ctx);
85 if (ret < 0) {
86 sr_err("Cannot initialize FTDI context (%d): %s.",
87 ret, ftdi_get_error_string(&devc->ftdi.ctx));
88 return SR_ERR_IO;
89 }
90 ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
91 vid, pid, NULL, serno, 0);
92 if (ret < 0) {
93 sr_err("Cannot open device (%d): %s.",
94 ret, ftdi_get_error_string(&devc->ftdi.ctx));
95 return SR_ERR_IO;
96 }
97 devc->ftdi.is_open = TRUE;
98
99 return SR_OK;
100}
101
102static int sigma_ftdi_close(struct dev_context *devc)
103{
104 int ret;
105
106 ret = ftdi_usb_close(&devc->ftdi.ctx);
107 devc->ftdi.is_open = FALSE;
108 devc->ftdi.must_close = FALSE;
109 ftdi_deinit(&devc->ftdi.ctx);
110
111 return ret == 0 ? SR_OK : SR_ERR_IO;
112}
113
114SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
115{
116 struct dev_context *devc;
117 int ret;
118
119 if (!sdi)
120 return SR_ERR_ARG;
121 devc = sdi->priv;
122 if (!devc)
123 return SR_ERR_ARG;
124
125 if (devc->ftdi.is_open)
126 return SR_OK;
127
128 ret = sigma_ftdi_open(sdi);
129 if (ret != SR_OK)
130 return ret;
131 devc->ftdi.must_close = TRUE;
132
133 return ret;
134}
135
136SR_PRIV int sigma_check_close(struct dev_context *devc)
137{
138 int ret;
139
140 if (!devc)
141 return SR_ERR_ARG;
142
143 if (devc->ftdi.must_close) {
144 ret = sigma_ftdi_close(devc);
145 if (ret != SR_OK)
146 return ret;
147 devc->ftdi.must_close = FALSE;
148 }
149
150 return SR_OK;
151}
152
153SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
154{
155 struct dev_context *devc;
156 int ret;
157
158 if (!sdi)
159 return SR_ERR_ARG;
160 devc = sdi->priv;
161 if (!devc)
162 return SR_ERR_ARG;
163
164 ret = sigma_ftdi_open(sdi);
165 if (ret != SR_OK)
166 return ret;
167 devc->ftdi.must_close = FALSE;
168
169 return SR_OK;
170}
171
/* Unconditionally close the connection, e.g. at the end of acquisition. */
SR_PRIV int sigma_force_close(struct dev_context *devc)
{
	return sigma_ftdi_close(devc);
}
176
177/*
178 * BEWARE! Error propagation is important, as are kinds of return values.
179 *
 * - Raw USB transport communicates the number of sent or received bytes,
181 * or negative error codes in the external library's(!) range of codes.
182 * - Internal routines at the "sigrok driver level" communicate success
183 * or failure in terms of SR_OK et al error codes.
184 * - Main loop style receive callbacks communicate booleans which arrange
185 * for repeated calls to drive progress during acquisition.
186 *
187 * Careful consideration by maintainers is essential, because all of the
 * above kinds of values are assignment compatible from the compiler's
189 * point of view. Implementation errors will go unnoticed at build time.
190 */
191
192static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
193{
194 int ret;
195
196 ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
197 if (ret < 0) {
198 sr_err("USB data read failed: %s",
199 ftdi_get_error_string(&devc->ftdi.ctx));
200 }
201
202 return ret;
203}
204
205static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
206{
207 int ret;
208
209 ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
210 if (ret < 0) {
211 sr_err("USB data write failed: %s",
212 ftdi_get_error_string(&devc->ftdi.ctx));
213 } else if ((size_t)ret != size) {
214 sr_err("USB data write length mismatch.");
215 }
216
217 return ret;
218}
219
220static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
221{
222 int ret;
223
224 ret = sigma_read_raw(devc, buf, size);
225 if (ret < 0 || (size_t)ret != size)
226 return SR_ERR_IO;
227
228 return SR_OK;
229}
230
231static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
232{
233 int ret;
234
235 ret = sigma_write_raw(devc, buf, size);
236 if (ret < 0 || (size_t)ret != size)
237 return SR_ERR_IO;
238
239 return SR_OK;
240}
241
242/*
243 * Implementor's note: The local write buffer's size shall suffice for
 * any known FPGA register transaction that is involved in the supported
245 * feature set of this sigrok device driver. If the length check trips,
246 * that's a programmer's error and needs adjustment in the complete call
247 * stack of the respective code path.
248 */
249#define SIGMA_MAX_REG_DEPTH 32
250
251/*
252 * Implementor's note: The FPGA command set supports register access
253 * with automatic address adjustment. This operation is documented to
254 * wrap within a 16-address range, it cannot cross boundaries where the
255 * register address' nibble overflows. An internal helper assumes that
256 * callers remain within this auto-adjustment range, and thus multi
257 * register access requests can never exceed that count.
258 */
259#define SIGMA_MAX_REG_COUNT 16
260
261SR_PRIV int sigma_write_register(struct dev_context *devc,
262 uint8_t reg, uint8_t *data, size_t len)
263{
264 uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr;
265 size_t idx;
266
267 if (len > SIGMA_MAX_REG_DEPTH) {
268 sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
269 return SR_ERR_BUG;
270 }
271
272 wrptr = buf;
273 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
274 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
275 for (idx = 0; idx < len; idx++) {
276 write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx]));
277 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx]));
278 }
279
280 return sigma_write_sr(devc, buf, wrptr - buf);
281}
282
/* Convenience wrapper, write a single byte to an FPGA register. */
SR_PRIV int sigma_set_register(struct dev_context *devc,
	uint8_t reg, uint8_t value)
{
	return sigma_write_register(devc, reg, &value, sizeof(value));
}
288
289static int sigma_read_register(struct dev_context *devc,
290 uint8_t reg, uint8_t *data, size_t len)
291{
292 uint8_t buf[3], *wrptr;
293 int ret;
294
295 wrptr = buf;
296 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
297 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
298 write_u8_inc(&wrptr, REG_READ_ADDR);
299 ret = sigma_write_sr(devc, buf, wrptr - buf);
300 if (ret != SR_OK)
301 return ret;
302
303 return sigma_read_sr(devc, data, len);
304}
305
/* Convenience wrapper, read a single byte from an FPGA register. */
static int sigma_get_register(struct dev_context *devc,
	uint8_t reg, uint8_t *data)
{
	return sigma_read_register(devc, reg, data, sizeof(*data));
}
311
312static int sigma_get_registers(struct dev_context *devc,
313 uint8_t reg, uint8_t *data, size_t count)
314{
315 uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr;
316 size_t idx;
317 int ret;
318
319 if (count > SIGMA_MAX_REG_COUNT) {
320 sr_err("Short command buffer for %zu reg reads at %u.", count, reg);
321 return SR_ERR_BUG;
322 }
323
324 wrptr = buf;
325 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
326 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
327 for (idx = 0; idx < count; idx++)
328 write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC);
329 ret = sigma_write_sr(devc, buf, wrptr - buf);
330 if (ret != SR_OK)
331 return ret;
332
333 return sigma_read_sr(devc, data, count);
334}
335
/*
 * Get the hardware's stop position, trigger position, and mode flags.
 * All output pointers are optional, callers pass NULL to skip a value.
 * Positions get adjusted from the hardware's "points past the capture"
 * register values to the last captured position.
 */
static int sigma_read_pos(struct dev_context *devc,
	uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
{
	uint8_t result[7];
	const uint8_t *rdptr;
	uint32_t v32;
	uint8_t v8;
	int ret;

	/*
	 * Read 7 registers starting at trigger position LSB.
	 * Which yields two 24bit counter values, and mode flags.
	 */
	ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW,
		result, sizeof(result));
	if (ret != SR_OK)
		return ret;

	/* Layout: u24le trigger pos, u24le stop pos, u8 mode. */
	rdptr = &result[0];
	v32 = read_u24le_inc(&rdptr);
	if (triggerpos)
		*triggerpos = v32;
	v32 = read_u24le_inc(&rdptr);
	if (stoppos)
		*stoppos = v32;
	v8 = read_u8_inc(&rdptr);
	if (mode)
		*mode = v8;

	/*
	 * These positions consist of "the memory row" in the MSB fields,
	 * and "an event index" within the row in the LSB fields. Part
	 * of the memory row's content is sample data, another part is
	 * timestamps.
	 *
	 * The retrieved register values point to after the captured
	 * position. So they need to get decremented, and adjusted to
	 * cater for the timestamps when the decrement carries over to
	 * a different memory row.
	 */
	if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
		*stoppos -= CLUSTERS_PER_ROW;
	if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
		*triggerpos -= CLUSTERS_PER_ROW;

	return SR_OK;
}
383
/*
 * Read 'numchunks' DRAM rows starting at row 'startchunk' into the
 * caller's 'data' buffer (ROW_LENGTH_BYTES per row).
 */
static int sigma_read_dram(struct dev_context *devc,
	size_t startchunk, size_t numchunks, uint8_t *data)
{
	uint8_t buf[128], *wrptr, regval;
	size_t chunk;
	int sel, ret;
	gboolean is_last;

	/* The command buffer needs 2 address bytes plus up to 3 per row. */
	if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
		sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
		return SR_ERR_BUG;
	}

	/* Communicate DRAM start address (memory row, aka samples line). */
	wrptr = buf;
	write_u16be_inc(&wrptr, startchunk);
	ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	/*
	 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
	 * then transfer via USB. Interleave the FPGA's DRAM access and
	 * USB transfer, use alternating buffers (0/1) in the process.
	 */
	wrptr = buf;
	write_u8_inc(&wrptr, REG_DRAM_BLOCK);
	write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	for (chunk = 0; chunk < numchunks; chunk++) {
		sel = chunk % 2;
		is_last = chunk == numchunks - 1;
		/* Prefetch the next row into the "other" internal buffer. */
		if (!is_last) {
			regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
			write_u8_inc(&wrptr, regval);
		}
		/* Transfer the current row's content via USB. */
		regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
		write_u8_inc(&wrptr, regval);
		if (!is_last)
			write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	}
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
}
430
/*
 * Upload trigger look-up tables to Sigma. First sends the 16 LUT entry
 * rows (each strobed into the hardware via TriggerSelect2), then sends
 * the trigger parameters (counters and durations).
 */
SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
	struct triggerlut *lut)
{
	size_t lut_addr;
	uint16_t bit;
	uint8_t m3d, m2d, m1d, m0d;
	uint8_t buf[6], *wrptr, v8;
	uint16_t selreg;
	int ret;

	/*
	 * Translate the LUT part of the trigger configuration from the
	 * application's perspective to the hardware register's bitfield
	 * layout. Send the LUT to the device. This configures the logic
	 * which combines pin levels or edges.
	 */
	for (lut_addr = 0; lut_addr < 16; lut_addr++) {
		/* Select this LUT address' bit in each 16bit mask. */
		bit = 1 << lut_addr;

		/* - M4 M3S M3Q */
		m3d = 0;
		if (lut->m4 & bit)
			m3d |= 1 << 2;
		if (lut->m3s & bit)
			m3d |= 1 << 1;
		if (lut->m3q & bit)
			m3d |= 1 << 0;

		/* M2D3 M2D2 M2D1 M2D0 */
		m2d = 0;
		if (lut->m2d[3] & bit)
			m2d |= 1 << 3;
		if (lut->m2d[2] & bit)
			m2d |= 1 << 2;
		if (lut->m2d[1] & bit)
			m2d |= 1 << 1;
		if (lut->m2d[0] & bit)
			m2d |= 1 << 0;

		/* M1D3 M1D2 M1D1 M1D0 */
		m1d = 0;
		if (lut->m1d[3] & bit)
			m1d |= 1 << 3;
		if (lut->m1d[2] & bit)
			m1d |= 1 << 2;
		if (lut->m1d[1] & bit)
			m1d |= 1 << 1;
		if (lut->m1d[0] & bit)
			m1d |= 1 << 0;

		/* M0D3 M0D2 M0D1 M0D0 */
		m0d = 0;
		if (lut->m0d[3] & bit)
			m0d |= 1 << 3;
		if (lut->m0d[2] & bit)
			m0d |= 1 << 2;
		if (lut->m0d[1] & bit)
			m0d |= 1 << 1;
		if (lut->m0d[0] & bit)
			m0d |= 1 << 0;

		/*
		 * Send 16bits with M3D/M2D and M1D/M0D bit masks to the
		 * TriggerSelect register, then strobe the LUT write by
		 * passing A3-A0 to TriggerSelect2. Hold RESET during LUT
		 * programming.
		 */
		wrptr = buf;
		write_u8_inc(&wrptr, (m3d << 4) | (m2d << 0));
		write_u8_inc(&wrptr, (m1d << 4) | (m0d << 0));
		ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT,
			buf, wrptr - buf);
		if (ret != SR_OK)
			return ret;
		v8 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE |
			(lut_addr & TRGSEL2_LUT_ADDR_MASK);
		ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, v8);
		if (ret != SR_OK)
			return ret;
	}

	/*
	 * Send the parameters. This covers counters and durations.
	 * Pack the individual fields into the 16bit select register,
	 * then append the two 16bit comparator values.
	 */
	wrptr = buf;
	selreg = 0;
	selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT;
	selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT;
	selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT;
	selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT;
	selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT;
	selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT;
	write_u16be_inc(&wrptr, selreg);
	write_u16be_inc(&wrptr, lut->params.cmpb);
	write_u16be_inc(&wrptr, lut->params.cmpa);
	ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	return SR_OK;
}
533
534/*
535 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
536 * uses FTDI bitbang mode for netlist download in slave serial mode.
537 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
538 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
539 * compatible bitbang mode? For maximum code re-use and reduced libftdi
540 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
541 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
542 *
543 * 750kbps rate (four times the speed of sigmalogan) works well for
544 * netlist download. All pins except INIT_B are output pins during
545 * configuration download.
546 *
547 * Some pins are inverted as a byproduct of level shifting circuitry.
548 * That's why high CCLK level (from the cable's point of view) is idle
549 * from the FPGA's perspective.
550 *
551 * The vendor's literature discusses a "suicide sequence" which ends
552 * regular FPGA execution and should be sent before entering bitbang
553 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
554 * a few times.
555 */
556#define BB_PIN_CCLK (1 << 0) /* D0, CCLK */
557#define BB_PIN_PROG (1 << 1) /* D1, PROG */
558#define BB_PIN_D2 (1 << 2) /* D2, (part of) SUICIDE */
559#define BB_PIN_D3 (1 << 3) /* D3, (part of) SUICIDE */
560#define BB_PIN_D4 (1 << 4) /* D4, (part of) SUICIDE (unused?) */
561#define BB_PIN_INIT (1 << 5) /* D5, INIT, input pin */
562#define BB_PIN_DIN (1 << 6) /* D6, DIN */
563#define BB_PIN_D7 (1 << 7) /* D7, (part of) SUICIDE */
564
565#define BB_BITRATE (750 * 1000)
566#define BB_PINMASK (0xff & ~BB_PIN_INIT)
567
568/*
569 * Initiate slave serial mode for configuration download. Which is done
570 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
571 * initiating the configuration download.
572 *
573 * Run a "suicide sequence" first to terminate the regular FPGA operation
574 * before reconfiguration. The FTDI cable is single channel, and shares
575 * pins which are used for data communication in FIFO mode with pins that
576 * are used for FPGA configuration in bitbang mode. Hardware defaults for
577 * unconfigured hardware, and runtime conditions after FPGA configuration
578 * need to cooperate such that re-configuration of the FPGA can start.
579 */
/*
 * One attempt to initiate FPGA configuration mode: run the suicide
 * sequence, pulse PROG, then wait for the FPGA to assert INIT_B.
 */
static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
{
	/* "Suicide": set D7, toggle D2/D3 to end regular FPGA operation. */
	const uint8_t suicide[] = {
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
	};
	/* Keep CCLK idle (high, it's inverted) while pulsing PROG. */
	const uint8_t init_array[] = {
		BB_PIN_CCLK,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
	};
	size_t retries;
	int ret;
	uint8_t data;

	/* Section 2. part 1), do the FPGA suicide. */
	/* NOTE: SR_OK is 0, so OR-accumulating return codes detects failure. */
	ret = SR_OK;
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	if (ret != SR_OK)
		return SR_ERR_IO;
	g_usleep(10 * 1000);

	/* Section 2. part 2), pulse PROG. */
	ret = sigma_write_sr(devc, init_array, sizeof(init_array));
	if (ret != SR_OK)
		return ret;
	g_usleep(10 * 1000);
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);

	/*
	 * Wait until the FPGA asserts INIT_B. Check in a maximum number
	 * of bursts with a given delay between them. Read as many pin
	 * capture results as the combination of FTDI chip and FTDI lib
	 * may provide. Cope with absence of pin capture data in a cycle.
	 * This approach shall result in fast response in case of success,
	 * low cost of execution during wait, reliable error handling in
	 * the transport layer, and robust response to failure or absence
	 * of result data (hardware inactivity after stimulus).
	 */
	retries = 10;
	while (retries--) {
		do {
			ret = sigma_read_raw(devc, &data, sizeof(data));
			if (ret < 0)
				return SR_ERR_IO;
			if (ret == sizeof(data) && (data & BB_PIN_INIT))
				return SR_OK;
		} while (ret == sizeof(data));
		if (retries)
			g_usleep(10 * 1000);
	}

	return SR_ERR_TIMEOUT;
}
650
651/*
652 * This is belt and braces. Re-run the bitbang initiation sequence a few
653 * times should first attempts fail. Failure is rare but can happen (was
654 * observed during driver development).
655 */
656static int sigma_fpga_init_bitbang(struct dev_context *devc)
657{
658 size_t retries;
659 int ret;
660
661 retries = 10;
662 while (retries--) {
663 ret = sigma_fpga_init_bitbang_once(devc);
664 if (ret == SR_OK)
665 return ret;
666 if (ret != SR_ERR_TIMEOUT)
667 return ret;
668 }
669 return ret;
670}
671
/*
 * Configure the FPGA for logic-analyzer mode. Issues a batched command
 * sequence (ID read, two scratch register write/read-back pairs, SDRAM
 * init), then verifies the three expected response bytes.
 */
static int sigma_fpga_init_la(struct dev_context *devc)
{
	uint8_t buf[20], *wrptr;
	uint8_t data_55, data_aa, mode;
	uint8_t result[3];
	const uint8_t *rdptr;
	int ret;

	wrptr = buf;

	/* Read ID register. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0x55 to scratch register, read back. */
	data_55 = 0x55;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0xaa to scratch register, read back. */
	data_aa = 0xaa;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Initiate SDRAM initialization in mode register. */
	mode = WMR_SDRAMINIT;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode));

	/*
	 * Send the command sequence which contains 3 READ requests.
	 * Expect to see the corresponding 3 response bytes.
	 */
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK) {
		sr_err("Could not request LA start response.");
		return ret;
	}
	ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
	if (ret != SR_OK) {
		sr_err("Could not receive LA start response.");
		return SR_ERR_IO;
	}
	/* Check the response: ID byte, then the two scratch read-backs. */
	rdptr = result;
	if (read_u8_inc(&rdptr) != 0xa6) {
		sr_err("Unexpected ID response.");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_55) {
		sr_err("Unexpected scratch read-back (55).");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_aa) {
		sr_err("Unexpected scratch read-back (aa).");
		return SR_ERR_DATA;
	}

	return SR_OK;
}
743
/*
 * Read the firmware from a file and transform it into a series of bitbang
 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
 * by the caller of this function.
 *
 * Returns SR_OK and passes ownership of *bb_cmd (*bb_cmd_size bytes) to
 * the caller, or SR_ERR_IO / SR_ERR_MALLOC on failure.
 */
static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
	uint8_t **bb_cmd, size_t *bb_cmd_size)
{
	uint8_t *firmware;
	size_t file_size;
	uint8_t *p;
	size_t l;
	uint32_t imm;
	size_t bb_size;
	uint8_t *bb_stream, *bbs, byte, mask, v;

	/* Retrieve the on-disk firmware file content. */
	firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
		&file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
	if (!firmware)
		return SR_ERR_IO;

	/* Unscramble the file content (XOR with "random" sequence). */
	/* The constants implement the vendor's fixed PRNG; don't touch. */
	p = firmware;
	l = file_size;
	imm = 0x3f6df2ab;
	while (l--) {
		imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
		*p++ ^= imm & 0xff;
	}

	/*
	 * Generate a sequence of bitbang samples. With two samples per
	 * FPGA configuration bit, providing the level for the DIN signal
	 * as well as two edges for CCLK. See Xilinx UG332 for details
	 * ("slave serial" mode).
	 *
	 * Note that CCLK is inverted in hardware. That's why the
	 * respective bit is first set and then cleared in the bitbang
	 * sample sets. So that the DIN level will be stable when the
	 * data gets sampled at the rising CCLK edge, and the signals'
	 * setup time constraint will be met.
	 *
	 * The caller will put the FPGA into download mode, will send
	 * the bitbang samples, and release the allocated memory.
	 */
	bb_size = file_size * 8 * 2;
	bb_stream = g_try_malloc(bb_size);
	if (!bb_stream) {
		sr_err("Memory allocation failed during firmware upload.");
		g_free(firmware);
		return SR_ERR_MALLOC;
	}
	bbs = bb_stream;
	p = firmware;
	l = file_size;
	while (l--) {
		byte = *p++;
		mask = 0x80; /* Emit bits MSB first. */
		while (mask) {
			v = (byte & mask) ? BB_PIN_DIN : 0;
			mask >>= 1;
			*bbs++ = v | BB_PIN_CCLK;
			*bbs++ = v;
		}
	}
	g_free(firmware);

	/* The transformation completed successfully, return the result. */
	*bb_cmd = bb_stream;
	*bb_cmd_size = bb_size;

	return SR_OK;
}
818
/*
 * Download the netlist for the given firmware selection to the FPGA.
 * Skips the download when the requested firmware is already active.
 * Leaves the hardware in logic-analyzer mode on success.
 */
static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
	enum sigma_firmware_idx firmware_idx)
{
	int ret;
	uint8_t *buf;
	uint8_t pins;
	size_t buf_size;
	const char *firmware;

	/* Check for valid firmware file selection. */
	if (firmware_idx >= ARRAY_SIZE(firmware_files))
		return SR_ERR_ARG;
	firmware = firmware_files[firmware_idx];
	if (!firmware || !*firmware)
		return SR_ERR_ARG;

	/* Avoid downloading the same firmware multiple times. */
	if (devc->firmware_idx == firmware_idx) {
		sr_info("Not uploading firmware file '%s' again.", firmware);
		return SR_OK;
	}

	devc->state.state = SIGMA_CONFIG;

	/* Set the cable to bitbang mode. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
	if (ret < 0) {
		sr_err("Could not setup cable mode for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
	if (ret < 0) {
		sr_err("Could not setup bitrate for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}

	/* Initiate FPGA configuration mode. */
	ret = sigma_fpga_init_bitbang(devc);
	if (ret) {
		sr_err("Could not initiate firmware upload to hardware");
		return ret;
	}

	/* Prepare wire format of the firmware image. */
	ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
	if (ret != SR_OK) {
		sr_err("Could not prepare file %s for upload.", firmware);
		return ret;
	}

	/* Write the FPGA netlist to the cable. */
	sr_info("Uploading firmware file '%s'.", firmware);
	ret = sigma_write_sr(devc, buf, buf_size);
	g_free(buf); /* Takes over the buffer ownership from fw_2_bitbang(). */
	if (ret != SR_OK) {
		sr_err("Could not upload firmware file '%s'.", firmware);
		return ret;
	}

	/* Leave bitbang mode and discard pending input data. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
	if (ret < 0) {
		sr_err("Could not setup cable mode after upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	ftdi_usb_purge_buffers(&devc->ftdi.ctx);
	while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
		;

	/* Initialize the FPGA for logic-analyzer mode. */
	ret = sigma_fpga_init_la(devc);
	if (ret != SR_OK) {
		sr_err("Hardware response after firmware upload failed.");
		return ret;
	}

	/* Keep track of successful firmware download completion. */
	devc->state.state = SIGMA_IDLE;
	devc->firmware_idx = firmware_idx;
	sr_info("Firmware uploaded.");

	return SR_OK;
}
905
906/*
907 * The driver supports user specified time or sample count limits. The
908 * device's hardware supports neither, and hardware compression prevents
909 * reliable detection of "fill levels" (currently reached sample counts)
910 * from register values during acquisition. That's why the driver needs
911 * to apply some heuristics:
912 *
913 * - The (optional) sample count limit and the (normalized) samplerate
914 * get mapped to an estimated duration for these samples' acquisition.
915 * - The (optional) time limit gets checked as well. The lesser of the
916 * two limits will terminate the data acquisition phase. The exact
917 * sample count limit gets enforced in session feed submission paths.
918 * - Some slack needs to be given to account for hardware pipelines as
919 * well as late storage of last chunks after compression thresholds
920 * are tripped. The resulting data set will span at least the caller
921 * specified period of time, which shall be perfectly acceptable.
922 *
923 * With RLE compression active, up to 64K sample periods can pass before
924 * a cluster accumulates. Which translates to 327ms at 200kHz. Add two
925 * times that period for good measure, one is not enough to flush the
926 * hardware pipeline (observation from an earlier experiment).
927 */
/*
 * Derive the acquisition phase's timeout from the user specified sample
 * count and/or time limits (see the heuristics discussion above). Arms
 * devc->acq_limits; leaves it unbounded when neither limit was given.
 */
SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
{
	int ret;
	GVariant *data;
	uint64_t user_count, user_msecs;
	uint64_t worst_cluster_time_ms;
	uint64_t count_msecs, acquire_msecs;

	sr_sw_limits_init(&devc->acq_limits);

	/* Get sample count limit, convert to msecs. */
	ret = sr_sw_limits_config_get(&devc->cfg_limits,
		SR_CONF_LIMIT_SAMPLES, &data);
	if (ret != SR_OK)
		return ret;
	user_count = g_variant_get_uint64(data);
	g_variant_unref(data);
	count_msecs = 0;
	if (user_count)
		count_msecs = 1000 * user_count / devc->clock.samplerate + 1;

	/* Get time limit, which is in msecs. */
	ret = sr_sw_limits_config_get(&devc->cfg_limits,
		SR_CONF_LIMIT_MSEC, &data);
	if (ret != SR_OK)
		return ret;
	user_msecs = g_variant_get_uint64(data);
	g_variant_unref(data);

	/* Get the lesser of them, with both being optional. */
	acquire_msecs = ~0ull;
	if (user_count && count_msecs < acquire_msecs)
		acquire_msecs = count_msecs;
	if (user_msecs && user_msecs < acquire_msecs)
		acquire_msecs = user_msecs;
	/* Neither limit was specified, run without a timeout. */
	if (acquire_msecs == ~0ull)
		return SR_OK;

	/* Add some slack, and use that timeout for acquisition. */
	/* 64K sample periods is the worst-case RLE cluster accumulation. */
	worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate;
	acquire_msecs += 2 * worst_cluster_time_ms;
	data = g_variant_new_uint64(acquire_msecs);
	ret = sr_sw_limits_config_set(&devc->acq_limits,
		SR_CONF_LIMIT_MSEC, data);
	g_variant_unref(data);
	if (ret != SR_OK)
		return ret;

	sr_sw_limits_acquisition_start(&devc->acq_limits);
	return SR_OK;
}
979
980/*
981 * Check whether a caller specified samplerate matches the device's
982 * hardware constraints (can be used for acquisition). Optionally yield
983 * a value that approximates the original spec.
984 *
985 * This routine assumes that input specs are in the 200kHz to 200MHz
986 * range of supported rates, and callers typically want to normalize a
987 * given value to the hardware capabilities. Values in the 50MHz range
988 * get rounded up by default, to avoid a more expensive check for the
989 * closest match, while higher sampling rate is always desirable during
990 * measurement. Input specs which exactly match hardware capabilities
991 * remain unaffected. Because 100/200MHz rates also limit the number of
992 * available channels, they are not suggested by this routine, instead
993 * callers need to pick them consciously.
994 */
995SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
996{
997 uint64_t div, rate;
998
999 /* Accept exact matches for 100/200MHz. */
1000 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
1001 if (have_rate)
1002 *have_rate = want_rate;
1003 return SR_OK;
1004 }
1005
1006 /* Accept 200kHz to 50MHz range, and map to near value. */
1007 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
1008 div = SR_MHZ(50) / want_rate;
1009 rate = SR_MHZ(50) / div;
1010 if (have_rate)
1011 *have_rate = rate;
1012 return SR_OK;
1013 }
1014
1015 return SR_ERR_ARG;
1016}
1017
/* Gets called at probe time. Can seed software settings from hardware state. */
SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	int ret;
	uint8_t regaddr, regval;

	devc = sdi->priv;
	if (!devc)
		return SR_ERR_ARG;

	/* Seed configuration values from defaults. */
	devc->firmware_idx = SIGMA_FW_NONE;
	devc->clock.samplerate = samplerates[0];

	/* TODO
	 * Ideally the device driver could retrieve recently stored
	 * details from hardware registers, thus re-use user specified
	 * configuration values across sigrok sessions. Which could
	 * avoid repeated expensive though unnecessary firmware uploads,
	 * improve performance and usability. Unfortunately it appears
	 * that the registers range which is documented as available for
	 * application use keeps providing 0xff data content. At least
	 * with the netlist version which ships with sigrok. The same
	 * was observed with unused registers in the first page.
	 */
	return SR_ERR_NA;

	/*
	 * This is for research, currently does not work yet. The code
	 * below is intentionally unreachable (see the return statement
	 * above); it probes a register for data retention.
	 */
	ret = sigma_check_open(sdi);
	regaddr = 16;
	regaddr = 14;	/* Overrides the 16 above; register 14 gets probed. */
	ret = sigma_set_register(devc, regaddr, 'F');
	ret = sigma_get_register(devc, regaddr, &regval);
	sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret);
	ret = sigma_check_close(devc);
	return ret;
}
1056
1057/* Gets called after successful (volatile) hardware configuration. */
1058SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi)
1059{
1060 /* TODO See above, registers seem to not hold written data. */
1061 (void)sdi;
1062 return SR_ERR_NA;
1063}
1064
1065SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
1066{
1067 struct dev_context *devc;
1068 struct drv_context *drvc;
1069 uint64_t samplerate;
1070 int ret;
1071 size_t num_channels;
1072
1073 devc = sdi->priv;
1074 drvc = sdi->driver->context;
1075
1076 /* Accept any caller specified rate which the hardware supports. */
1077 ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate);
1078 if (ret != SR_OK)
1079 return ret;
1080
1081 /*
1082 * Depending on the samplerates of 200/100/50- MHz, specific
1083 * firmware is required and higher rates might limit the set
1084 * of available channels.
1085 */
1086 num_channels = devc->num_channels;
1087 if (samplerate <= SR_MHZ(50)) {
1088 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
1089 num_channels = 16;
1090 } else if (samplerate == SR_MHZ(100)) {
1091 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
1092 num_channels = 8;
1093 } else if (samplerate == SR_MHZ(200)) {
1094 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
1095 num_channels = 4;
1096 }
1097
1098 /*
1099 * The samplerate affects the number of available logic channels
1100 * as well as a sample memory layout detail (the number of samples
1101 * which the device will communicate within an "event").
1102 */
1103 if (ret == SR_OK) {
1104 devc->num_channels = num_channels;
1105 devc->samples_per_event = 16 / devc->num_channels;
1106 }
1107
1108 /*
1109 * Store the firmware type and most recently configured samplerate
1110 * in hardware, such that subsequent sessions can start from there.
1111 * This is a "best effort" approach. Failure is non-fatal.
1112 */
1113 if (ret == SR_OK)
1114 (void)sigma_store_hw_config(sdi);
1115
1116 return ret;
1117}
1118
1119/*
1120 * Arrange for a session feed submit buffer. A queue where a number of
1121 * samples gets accumulated to reduce the number of send calls. Which
1122 * also enforces an optional sample count limit for data acquisition.
1123 *
1124 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
1125 * driver provides a fixed channel layout regardless of samplerate).
1126 */
1127
#define CHUNK_SIZE (4 * 1024 * 1024)

struct submit_buffer {
	size_t unit_size;	/* Bytes per sample item (fixed uint16_t layout). */
	size_t max_samples, curr_samples;	/* Capacity and fill level, in samples. */
	uint8_t *sample_data;	/* Backing storage, CHUNK_SIZE bytes. */
	uint8_t *write_pointer;	/* Next write position within sample_data. */
	struct sr_dev_inst *sdi;	/* Device instance for session feed calls. */
	struct sr_datafeed_packet packet;	/* Pre-assembled logic packet, */
	struct sr_datafeed_logic logic;		/* and its payload descriptor. */
};
1139
1140static int alloc_submit_buffer(struct sr_dev_inst *sdi)
1141{
1142 struct dev_context *devc;
1143 struct submit_buffer *buffer;
1144 size_t size;
1145
1146 devc = sdi->priv;
1147
1148 buffer = g_malloc0(sizeof(*buffer));
1149 devc->buffer = buffer;
1150
1151 buffer->unit_size = sizeof(uint16_t);
1152 size = CHUNK_SIZE;
1153 size /= buffer->unit_size;
1154 buffer->max_samples = size;
1155 size *= buffer->unit_size;
1156 buffer->sample_data = g_try_malloc0(size);
1157 if (!buffer->sample_data)
1158 return SR_ERR_MALLOC;
1159 buffer->write_pointer = buffer->sample_data;
1160 sr_sw_limits_init(&devc->feed_limits);
1161
1162 buffer->sdi = sdi;
1163 memset(&buffer->logic, 0, sizeof(buffer->logic));
1164 buffer->logic.unitsize = buffer->unit_size;
1165 buffer->logic.data = buffer->sample_data;
1166 memset(&buffer->packet, 0, sizeof(buffer->packet));
1167 buffer->packet.type = SR_DF_LOGIC;
1168 buffer->packet.payload = &buffer->logic;
1169
1170 return SR_OK;
1171}
1172
1173static int setup_submit_limit(struct dev_context *devc)
1174{
1175 struct sr_sw_limits *limits;
1176 int ret;
1177 GVariant *data;
1178 uint64_t total;
1179
1180 limits = &devc->feed_limits;
1181
1182 ret = sr_sw_limits_config_get(&devc->cfg_limits,
1183 SR_CONF_LIMIT_SAMPLES, &data);
1184 if (ret != SR_OK)
1185 return ret;
1186 total = g_variant_get_uint64(data);
1187 g_variant_unref(data);
1188
1189 sr_sw_limits_init(limits);
1190 if (total) {
1191 data = g_variant_new_uint64(total);
1192 ret = sr_sw_limits_config_set(limits,
1193 SR_CONF_LIMIT_SAMPLES, data);
1194 g_variant_unref(data);
1195 if (ret != SR_OK)
1196 return ret;
1197 }
1198
1199 sr_sw_limits_acquisition_start(limits);
1200
1201 return SR_OK;
1202}
1203
1204static void free_submit_buffer(struct dev_context *devc)
1205{
1206 struct submit_buffer *buffer;
1207
1208 if (!devc)
1209 return;
1210
1211 buffer = devc->buffer;
1212 if (!buffer)
1213 return;
1214 devc->buffer = NULL;
1215
1216 g_free(buffer->sample_data);
1217 g_free(buffer);
1218}
1219
1220static int flush_submit_buffer(struct dev_context *devc)
1221{
1222 struct submit_buffer *buffer;
1223 int ret;
1224
1225 buffer = devc->buffer;
1226
1227 /* Is queued sample data available? */
1228 if (!buffer->curr_samples)
1229 return SR_OK;
1230
1231 /* Submit to the session feed. */
1232 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
1233 ret = sr_session_send(buffer->sdi, &buffer->packet);
1234 if (ret != SR_OK)
1235 return ret;
1236
1237 /* Rewind queue position. */
1238 buffer->curr_samples = 0;
1239 buffer->write_pointer = buffer->sample_data;
1240
1241 return SR_OK;
1242}
1243
1244static int addto_submit_buffer(struct dev_context *devc,
1245 uint16_t sample, size_t count)
1246{
1247 struct submit_buffer *buffer;
1248 struct sr_sw_limits *limits;
1249 int ret;
1250
1251 buffer = devc->buffer;
1252 limits = &devc->feed_limits;
1253 if (sr_sw_limits_check(limits))
1254 count = 0;
1255
1256 /*
1257 * Individually accumulate and check each sample, such that
1258 * accumulation between flushes won't exceed local storage, and
1259 * enforcement of user specified limits is exact.
1260 */
1261 while (count--) {
1262 write_u16le_inc(&buffer->write_pointer, sample);
1263 buffer->curr_samples++;
1264 if (buffer->curr_samples == buffer->max_samples) {
1265 ret = flush_submit_buffer(devc);
1266 if (ret != SR_OK)
1267 return ret;
1268 }
1269 sr_sw_limits_update_samples_read(limits, 1);
1270 if (sr_sw_limits_check(limits))
1271 break;
1272 }
1273
1274 return SR_OK;
1275}
1276
1277/*
1278 * In 100 and 200 MHz mode, only a single pin rising/falling can be
1279 * set as trigger. In other modes, two rising/falling triggers can be set,
1280 * in addition to value/mask trigger for any number of channels.
1281 *
1282 * The Sigma supports complex triggers using boolean expressions, but this
1283 * has not been implemented yet.
1284 */
SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	struct sr_trigger *trigger;
	struct sr_trigger_stage *stage;
	struct sr_trigger_match *match;
	const GSList *l, *m;
	uint16_t channelbit;
	size_t trigger_set;

	devc = sdi->priv;
	/* Start from a clean slate. Absent spec means "no triggers". */
	memset(&devc->trigger, 0, sizeof(devc->trigger));
	devc->use_triggers = FALSE;
	trigger = sr_session_trigger_get(sdi->session);
	if (!trigger)
		return SR_OK;

	/* Compile time option to not support triggers at all. */
	if (!ASIX_SIGMA_WITH_TRIGGER) {
		sr_warn("Trigger support is not implemented. Ignoring the spec.");
		return SR_OK;
	}

	/*
	 * Translate the session's trigger spec into the device specific
	 * value/mask and edge mask representation, while enforcing the
	 * hardware's constraints (see the comment above the function).
	 */
	trigger_set = 0;
	for (l = trigger->stages; l; l = l->next) {
		stage = l->data;
		for (m = stage->matches; m; m = m->next) {
			match = m->data;
			/* Ignore disabled channels with a trigger. */
			if (!match->channel->enabled)
				continue;
			channelbit = 1 << match->channel->index;
			if (devc->clock.samplerate >= SR_MHZ(100)) {
				/* Fast trigger support. */
				if (trigger_set) {
					sr_err("100/200MHz modes limited to single trigger pin.");
					return SR_ERR;
				}
				if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
				} else {
					sr_err("100/200MHz modes limited to edge trigger.");
					return SR_ERR;
				}

				trigger_set++;
			} else {
				/* Simple trigger support (event). */
				if (match->match == SR_TRIGGER_ONE) {
					devc->trigger.simplevalue |= channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_ZERO) {
					devc->trigger.simplevalue &= ~channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
					trigger_set++;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
					trigger_set++;
				}

				/*
				 * Actually, Sigma supports 2 rising/falling triggers,
				 * but they are ORed and the current trigger syntax
				 * does not permit ORed triggers.
				 */
				if (trigger_set > 1) {
					sr_err("Limited to 1 edge trigger.");
					return SR_ERR;
				}
			}
		}
	}

	/*
	 * Keep track whether triggers are involved during acquisition.
	 * NOTE(review): this becomes TRUE even when every match in the
	 * spec referenced a disabled channel -- confirm that is wanted.
	 */
	devc->use_triggers = TRUE;

	return SR_OK;
}
1366
1367/* Software trigger to determine exact trigger position. */
1368static int get_trigger_offset(uint8_t *samples, uint16_t last_sample,
1369 struct sigma_trigger *t)
1370{
1371 const uint8_t *rdptr;
1372 size_t i;
1373 uint16_t sample;
1374
1375 rdptr = samples;
1376 sample = 0;
1377 for (i = 0; i < 8; i++) {
1378 if (i > 0)
1379 last_sample = sample;
1380 sample = read_u16le_inc(&rdptr);
1381
1382 /* Simple triggers. */
1383 if ((sample & t->simplemask) != t->simplevalue)
1384 continue;
1385
1386 /* Rising edge. */
1387 if (((last_sample & t->risingmask) != 0) ||
1388 ((sample & t->risingmask) != t->risingmask))
1389 continue;
1390
1391 /* Falling edge. */
1392 if ((last_sample & t->fallingmask) != t->fallingmask ||
1393 (sample & t->fallingmask) != 0)
1394 continue;
1395
1396 break;
1397 }
1398
1399 /* If we did not match, return original trigger pos. */
1400 return i & 0x7;
1401}
1402
/* Check whether a sample's value matches the configured trigger condition. */
static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
{
	/* TODO
	 * Check whether the combination of this very sample and the
	 * previous state match the configured trigger condition. This
	 * improves the resolution of the trigger marker's position.
	 * The hardware provided position is coarse, and may point to
	 * a position before the actual match.
	 *
	 * See the previous get_trigger_offset() implementation. This
	 * code needs to get re-used here.
	 */
	/* Without triggers in use, no sample can possibly match. */
	if (!devc->use_triggers)
		return FALSE;

	/* Stub. Reference the argument and helper to keep them around. */
	(void)sample;
	(void)get_trigger_offset;

	return FALSE;
}
1423
1424static int check_and_submit_sample(struct dev_context *devc,
1425 uint16_t sample, size_t count, gboolean check_trigger)
1426{
1427 gboolean triggered;
1428 int ret;
1429
1430 triggered = check_trigger && sample_matches_trigger(devc, sample);
1431 if (triggered) {
1432 ret = flush_submit_buffer(devc);
1433 if (ret != SR_OK)
1434 return ret;
1435 ret = std_session_send_df_trigger(devc->buffer->sdi);
1436 if (ret != SR_OK)
1437 return ret;
1438 }
1439
1440 ret = addto_submit_buffer(devc, sample, count);
1441 if (ret != SR_OK)
1442 return ret;
1443
1444 return SR_OK;
1445}
1446
1447/*
1448 * Return the timestamp of "DRAM cluster".
1449 */
1450static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1451{
1452 return read_u16le((const uint8_t *)&cluster->timestamp);
1453}
1454
1455/*
1456 * Return one 16bit data entity of a DRAM cluster at the specified index.
1457 */
1458static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1459{
1460 return read_u16le((const uint8_t *)&cl->samples[idx]);
1461}
1462
1463/*
1464 * Deinterlace sample data that was retrieved at 100MHz samplerate.
1465 * One 16bit item contains two samples of 8bits each. The bits of
1466 * multiple samples are interleaved.
1467 */
1468static uint16_t sigma_deinterlace_100mhz_data(uint16_t indata, int idx)
1469{
1470 uint16_t outdata;
1471
1472 indata >>= idx;
1473 outdata = 0;
1474 outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
1475 outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
1476 outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
1477 outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
1478 outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
1479 outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
1480 outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
1481 outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
1482 return outdata;
1483}
1484
1485/*
1486 * Deinterlace sample data that was retrieved at 200MHz samplerate.
1487 * One 16bit item contains four samples of 4bits each. The bits of
1488 * multiple samples are interleaved.
1489 */
1490static uint16_t sigma_deinterlace_200mhz_data(uint16_t indata, int idx)
1491{
1492 uint16_t outdata;
1493
1494 indata >>= idx;
1495 outdata = 0;
1496 outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
1497 outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
1498 outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
1499 outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
1500 return outdata;
1501}
1502
/*
 * Forward one DRAM cluster's worth of sample data to the session feed.
 * First "decodes RLE" (repeats the last submitted value to cover the
 * gap to non-adjacent clusters), then unpacks the cluster's events
 * according to the samplerate specific memory layout.
 */
static void sigma_decode_dram_cluster(struct dev_context *devc,
	struct sigma_dram_cluster *dram_cluster,
	size_t events_in_cluster, gboolean triggered)
{
	struct sigma_state *ss;
	uint16_t tsdiff, ts, sample, item16;
	size_t count;
	size_t evt;

	/* Only mark the trigger when trigger use was requested at all. */
	if (!devc->use_triggers || !ASIX_SIGMA_WITH_TRIGGER)
		triggered = FALSE;

	/*
	 * If this cluster is not adjacent to the previously received
	 * cluster, then send the appropriate number of samples with the
	 * previous values to the sigrok session. This "decodes RLE".
	 *
	 * These samples cannot match the trigger since they just repeat
	 * the previously submitted data pattern. (This assumption holds
	 * for simple level and edge triggers. It would not for timed or
	 * counted conditions, which currently are not supported.)
	 */
	ss = &devc->state;
	ts = sigma_dram_cluster_ts(dram_cluster);
	tsdiff = ts - ss->lastts;	/* 16bit arithmetic, wraps as intended. */
	if (tsdiff > 0) {
		sample = ss->lastsample;
		count = tsdiff * devc->samples_per_event;
		/* Best effort submission, the return code is ignored. */
		(void)check_and_submit_sample(devc, sample, count, FALSE);
	}
	ss->lastts = ts + EVENTS_PER_CLUSTER;

	/*
	 * Grab sample data from the current cluster and prepare their
	 * submission to the session feed. Handle samplerate dependent
	 * memory layout of sample data. Accumulation of data chunks
	 * before submission is transparent to this code path, specific
	 * buffer depth is neither assumed nor required here.
	 */
	sample = 0;
	for (evt = 0; evt < events_in_cluster; evt++) {
		item16 = sigma_dram_cluster_data(dram_cluster, evt);
		if (devc->clock.samplerate == SR_MHZ(200)) {
			/* One event holds four 4bit samples. */
			sample = sigma_deinterlace_200mhz_data(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 2);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_200mhz_data(item16, 3);
			check_and_submit_sample(devc, sample, 1, triggered);
		} else if (devc->clock.samplerate == SR_MHZ(100)) {
			/* One event holds two 8bit samples. */
			sample = sigma_deinterlace_100mhz_data(item16, 0);
			check_and_submit_sample(devc, sample, 1, triggered);
			sample = sigma_deinterlace_100mhz_data(item16, 1);
			check_and_submit_sample(devc, sample, 1, triggered);
		} else {
			/* One event holds one 16bit sample. */
			sample = item16;
			check_and_submit_sample(devc, sample, 1, triggered);
		}
	}
	/* Remember the last value for subsequent RLE decoding. */
	ss->lastsample = sample;
}
1566
1567/*
1568 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
1569 * Each event is 20ns apart, and can contain multiple samples.
1570 *
1571 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
1572 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
1573 * For 50 MHz and below, events contain one sample for each channel,
1574 * spread 20 ns apart.
1575 */
1576static int decode_chunk_ts(struct dev_context *devc,
1577 struct sigma_dram_line *dram_line,
1578 size_t events_in_line, size_t trigger_event)
1579{
1580 struct sigma_dram_cluster *dram_cluster;
1581 size_t clusters_in_line;
1582 size_t events_in_cluster;
1583 size_t cluster;
1584 size_t trigger_cluster;
1585
1586 clusters_in_line = events_in_line;
1587 clusters_in_line += EVENTS_PER_CLUSTER - 1;
1588 clusters_in_line /= EVENTS_PER_CLUSTER;
1589
1590 /* Check if trigger is in this chunk. */
1591 trigger_cluster = ~0UL;
1592 if (trigger_event < EVENTS_PER_ROW) {
1593 if (devc->clock.samplerate <= SR_MHZ(50)) {
1594 trigger_event -= MIN(EVENTS_PER_CLUSTER - 1,
1595 trigger_event);
1596 }
1597
1598 /* Find in which cluster the trigger occurred. */
1599 trigger_cluster = trigger_event / EVENTS_PER_CLUSTER;
1600 }
1601
1602 /* For each full DRAM cluster. */
1603 for (cluster = 0; cluster < clusters_in_line; cluster++) {
1604 dram_cluster = &dram_line->cluster[cluster];
1605
1606 /* The last cluster might not be full. */
1607 if ((cluster == clusters_in_line - 1) &&
1608 (events_in_line % EVENTS_PER_CLUSTER)) {
1609 events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
1610 } else {
1611 events_in_cluster = EVENTS_PER_CLUSTER;
1612 }
1613
1614 sigma_decode_dram_cluster(devc, dram_cluster,
1615 events_in_cluster, cluster == trigger_cluster);
1616 }
1617
1618 return SR_OK;
1619}
1620
1621static int download_capture(struct sr_dev_inst *sdi)
1622{
1623 const uint32_t chunks_per_read = 32;
1624
1625 struct dev_context *devc;
1626 struct sigma_dram_line *dram_line;
1627 uint32_t stoppos, triggerpos;
1628 uint8_t modestatus;
1629 size_t line_idx;
1630 size_t dl_lines_total, dl_lines_curr, dl_lines_done;
1631 size_t dl_first_line, dl_line;
1632 size_t dl_events_in_line, trigger_event;
1633 size_t trg_line, trg_event;
1634 int ret;
1635
1636 devc = sdi->priv;
1637
1638 sr_info("Downloading sample data.");
1639 devc->state.state = SIGMA_DOWNLOAD;
1640
1641 /*
1642 * Ask the hardware to stop data acquisition. Reception of the
1643 * FORCESTOP request makes the hardware "disable RLE" (store
1644 * clusters to DRAM regardless of whether pin state changes) and
1645 * raise the POSTTRIGGERED flag.
1646 */
1647 modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
1648 ret = sigma_set_register(devc, WRITE_MODE, modestatus);
1649 if (ret != SR_OK)
1650 return ret;
1651 do {
1652 ret = sigma_get_register(devc, READ_MODE, &modestatus);
1653 if (ret != SR_OK) {
1654 sr_err("Could not poll for post-trigger state.");
1655 return FALSE;
1656 }
1657 } while (!(modestatus & RMR_POSTTRIGGERED));
1658
1659 /* Set SDRAM Read Enable. */
1660 ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
1661 if (ret != SR_OK)
1662 return ret;
1663
1664 /* Get the current position. Check if trigger has fired. */
1665 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
1666 if (ret != SR_OK) {
1667 sr_err("Could not query capture positions/state.");
1668 return FALSE;
1669 }
1670 if (!devc->use_triggers)
1671 triggerpos = ~0;
1672 trg_line = ~0UL;
1673 trg_event = ~0UL;
1674 if (modestatus & RMR_TRIGGERED) {
1675 trg_line = triggerpos >> ROW_SHIFT;
1676 trg_event = triggerpos & ROW_MASK;
1677 }
1678
1679 /*
1680 * Determine how many "DRAM lines" of 1024 bytes each we need to
1681 * retrieve from the Sigma hardware, so that we have a complete
1682 * set of samples. Note that the last line need not contain 64
1683 * clusters, it might be partially filled only.
1684 *
1685 * When RMR_ROUND is set, the circular buffer in DRAM has wrapped
1686 * around. Since the status of the very next line is uncertain in
1687 * that case, we skip it and start reading from the next line.
1688 */
1689 dl_first_line = 0;
1690 dl_lines_total = (stoppos >> ROW_SHIFT) + 1;
1691 if (modestatus & RMR_ROUND) {
1692 dl_first_line = dl_lines_total + 1;
1693 dl_lines_total = ROW_COUNT - 2;
1694 }
1695 dram_line = g_try_malloc0(chunks_per_read * sizeof(*dram_line));
1696 if (!dram_line)
1697 return FALSE;
1698 ret = alloc_submit_buffer(sdi);
1699 if (ret != SR_OK)
1700 return FALSE;
1701 ret = setup_submit_limit(devc);
1702 if (ret != SR_OK)
1703 return FALSE;
1704 dl_lines_done = 0;
1705 while (dl_lines_total > dl_lines_done) {
1706 /* We can download only up-to 32 DRAM lines in one go! */
1707 dl_lines_curr = MIN(chunks_per_read, dl_lines_total - dl_lines_done);
1708
1709 dl_line = dl_first_line + dl_lines_done;
1710 dl_line %= ROW_COUNT;
1711 ret = sigma_read_dram(devc, dl_line, dl_lines_curr,
1712 (uint8_t *)dram_line);
1713 if (ret != SR_OK)
1714 return FALSE;
1715
1716 /* This is the first DRAM line, so find the initial timestamp. */
1717 if (dl_lines_done == 0) {
1718 devc->state.lastts =
1719 sigma_dram_cluster_ts(&dram_line[0].cluster[0]);
1720 devc->state.lastsample = 0;
1721 }
1722
1723 for (line_idx = 0; line_idx < dl_lines_curr; line_idx++) {
1724 /* The last "DRAM line" need not span its full length. */
1725 dl_events_in_line = EVENTS_PER_ROW;
1726 if (dl_lines_done + line_idx == dl_lines_total - 1)
1727 dl_events_in_line = stoppos & ROW_MASK;
1728
1729 /* Test if the trigger happened on this line. */
1730 trigger_event = ~0UL;
1731 if (dl_lines_done + line_idx == trg_line)
1732 trigger_event = trg_event;
1733
1734 decode_chunk_ts(devc, dram_line + line_idx,
1735 dl_events_in_line, trigger_event);
1736 }
1737
1738 dl_lines_done += dl_lines_curr;
1739 }
1740 flush_submit_buffer(devc);
1741 free_submit_buffer(devc);
1742 g_free(dram_line);
1743
1744 std_session_send_df_end(sdi);
1745
1746 devc->state.state = SIGMA_IDLE;
1747 sr_dev_acquisition_stop(sdi);
1748
1749 return TRUE;
1750}
1751
1752/*
1753 * Periodically check the Sigma status when in CAPTURE mode. This routine
1754 * checks whether the configured sample count or sample time have passed,
1755 * and will stop acquisition and download the acquired samples.
1756 */
1757static int sigma_capture_mode(struct sr_dev_inst *sdi)
1758{
1759 struct dev_context *devc;
1760
1761 devc = sdi->priv;
1762 if (sr_sw_limits_check(&devc->acq_limits))
1763 return download_capture(sdi);
1764
1765 return TRUE;
1766}
1767
1768SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
1769{
1770 struct sr_dev_inst *sdi;
1771 struct dev_context *devc;
1772
1773 (void)fd;
1774 (void)revents;
1775
1776 sdi = cb_data;
1777 devc = sdi->priv;
1778
1779 if (devc->state.state == SIGMA_IDLE)
1780 return TRUE;
1781
1782 /*
1783 * When the application has requested to stop the acquisition,
1784 * then immediately start downloading sample data. Otherwise
1785 * keep checking configured limits which will terminate the
1786 * acquisition and initiate download.
1787 */
1788 if (devc->state.state == SIGMA_STOPPING)
1789 return download_capture(sdi);
1790 if (devc->state.state == SIGMA_CAPTURE)
1791 return sigma_capture_mode(sdi);
1792
1793 return TRUE;
1794}
1795
/*
 * Build a LUT entry used by the trigger functions. Four 16bit entries,
 * one per group of four channels. Each entry bit corresponds to one
 * possible input pattern (LUT address) of the quad's channel signals.
 */
static void build_lut_entry(uint16_t *lut_entry,
	uint16_t spec_value, uint16_t spec_mask)
{
	size_t quad, addr, ch;
	uint16_t ch_bit, spec_bit;

	/*
	 * Start out with all-ones (always matches), then clear those
	 * addresses whose pattern conflicts with a specified condition.
	 */
	for (quad = 0; quad < 4; quad++) {
		lut_entry[quad] = ~0;
		for (addr = 0; addr < 16; addr++) {
			for (ch = 0; ch < 4; ch++) {
				ch_bit = 1 << ch;
				spec_bit = ch_bit << (quad * 4);
				/* Channels outside the spec never veto. */
				if (!(spec_mask & spec_bit))
					continue;
				/*
				 * Keep the entry bit when the channel's
				 * level in the address pattern agrees
				 * with the specified value. (Condition
				 * inherited from prior implementations,
				 * see the original's caveat about the
				 * address bit check being uncertain.)
				 */
				if (!(spec_value & spec_bit) == !(addr & ch_bit))
					continue;
				lut_entry[quad] &= ~(1 << addr);
			}
		}
	}
}
1837
1838/* Add a logical function to LUT mask. */
1839static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
1840 size_t index, gboolean neg, uint16_t *mask)
1841{
1842 size_t i, j;
1843 int x[2][2], tmp, a, b, aset, bset, rset;
1844
1845 memset(x, 0, sizeof(x));
1846
1847 /* Trigger detect condition. */
1848 switch (oper) {
1849 case OP_LEVEL:
1850 x[0][1] = 1;
1851 x[1][1] = 1;
1852 break;
1853 case OP_NOT:
1854 x[0][0] = 1;
1855 x[1][0] = 1;
1856 break;
1857 case OP_RISE:
1858 x[0][1] = 1;
1859 break;
1860 case OP_FALL:
1861 x[1][0] = 1;
1862 break;
1863 case OP_RISEFALL:
1864 x[0][1] = 1;
1865 x[1][0] = 1;
1866 break;
1867 case OP_NOTRISE:
1868 x[1][1] = 1;
1869 x[0][0] = 1;
1870 x[1][0] = 1;
1871 break;
1872 case OP_NOTFALL:
1873 x[1][1] = 1;
1874 x[0][0] = 1;
1875 x[0][1] = 1;
1876 break;
1877 case OP_NOTRISEFALL:
1878 x[1][1] = 1;
1879 x[0][0] = 1;
1880 break;
1881 }
1882
1883 /* Transpose if neg is set. */
1884 if (neg) {
1885 for (i = 0; i < 2; i++) {
1886 for (j = 0; j < 2; j++) {
1887 tmp = x[i][j];
1888 x[i][j] = x[1 - i][1 - j];
1889 x[1 - i][1 - j] = tmp;
1890 }
1891 }
1892 }
1893
1894 /* Update mask with function. */
1895 for (i = 0; i < 16; i++) {
1896 a = (i >> (2 * index + 0)) & 1;
1897 b = (i >> (2 * index + 1)) & 1;
1898
1899 aset = (*mask >> i) & 1;
1900 bset = x[b][a];
1901
1902 rset = 0;
1903 if (func == FUNC_AND || func == FUNC_NAND)
1904 rset = aset & bset;
1905 else if (func == FUNC_OR || func == FUNC_NOR)
1906 rset = aset | bset;
1907 else if (func == FUNC_XOR || func == FUNC_NXOR)
1908 rset = aset ^ bset;
1909
1910 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
1911 rset = !rset;
1912
1913 *mask &= ~(1 << i);
1914
1915 if (rset)
1916 *mask |= 1 << i;
1917 }
1918}
1919
1920/*
1921 * Build trigger LUTs used by 50 MHz and lower sample rates for supporting
1922 * simple pin change and state triggers. Only two transitions (rise/fall) can be
1923 * set at any time, but a full mask and value can be set (0/1).
1924 */
1925SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
1926 struct triggerlut *lut)
1927{
1928 uint16_t masks[2];
1929 size_t bitidx, condidx;
1930 uint16_t value, mask;
1931
1932 /* Setup something that "won't match" in the absence of a spec. */
1933 memset(lut, 0, sizeof(*lut));
1934 if (!devc->use_triggers)
1935 return SR_OK;
1936
1937 /* Start assuming simple triggers. Edges are handled below. */
1938 lut->m4 = 0xa000;
1939 lut->m3q = 0xffff;
1940
1941 /* Process value/mask triggers. */
1942 value = devc->trigger.simplevalue;
1943 mask = devc->trigger.simplemask;
1944 build_lut_entry(lut->m2d, value, mask);
1945
1946 /* Scan for and process rise/fall triggers. */
1947 memset(&masks, 0, sizeof(masks));
1948 condidx = 0;
1949 for (bitidx = 0; bitidx < 16; bitidx++) {
1950 mask = 1 << bitidx;
1951 value = devc->trigger.risingmask | devc->trigger.fallingmask;
1952 if (!(value & mask))
1953 continue;
1954 if (condidx == 0)
1955 build_lut_entry(lut->m0d, mask, mask);
1956 if (condidx == 1)
1957 build_lut_entry(lut->m1d, mask, mask);
1958 masks[condidx++] = mask;
1959 if (condidx == ARRAY_SIZE(masks))
1960 break;
1961 }
1962
1963 /* Add glue logic for rise/fall triggers. */
1964 if (masks[0] || masks[1]) {
1965 lut->m3q = 0;
1966 if (masks[0] & devc->trigger.risingmask)
1967 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q);
1968 if (masks[0] & devc->trigger.fallingmask)
1969 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q);
1970 if (masks[1] & devc->trigger.risingmask)
1971 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q);
1972 if (masks[1] & devc->trigger.fallingmask)
1973 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q);
1974 }
1975
1976 /* Triggertype: event. */
1977 lut->params.selres = TRGSEL_SELCODE_NEVER;
1978 lut->params.selinc = TRGSEL_SELCODE_LEVEL;
1979 lut->params.sela = 0; /* Counter >= CMPA && LEVEL */
1980 lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */
1981
1982 return SR_OK;
1983}