sigrok.org Git - libsigrok.git/blame - src/hardware/asix-sigma/protocol.c
asix-sigma: complete and extend capture mode supervision
28a35d8a 1/*
50985c20 2 * This file is part of the libsigrok project.
28a35d8a 3 *
868501fa 4 * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>,
911f1834
UH
5 * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no>
6 * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no>
9334ed6c 7 * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net>
28a35d8a
HE
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation, either version 3 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
911f1834 23/*
6352d030 24 * ASIX SIGMA/SIGMA2 logic analyzer driver
911f1834
UH
25 */
26
6ec6c43b 27#include <config.h>
3ba56876 28#include "protocol.h"
28a35d8a 29
b1648dea 30/*
b65649f6
GS
31 * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates
 32 * (by means of separate firmware images), as well as 50MHz divided by
 33 * an integer divider in the 1..256 range (by the "typical" firmware),
 34 * which translates to a strict lower boundary of around 195kHz.
35 *
36 * This driver "suggests" a subset of the available rates by listing a
37 * few discrete values, while setter routines accept any user specified
38 * rate that is supported by the hardware.
b1648dea 39 */
abcd4771 40static const uint64_t samplerates[] = {
b65649f6
GS
41 /* 50MHz and integer divider. 1/2/5 steps (where possible). */
42 SR_KHZ(200), SR_KHZ(500),
43 SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
44 SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
45 /* 100MHz/200MHz, fixed rates in special firmware. */
46 SR_MHZ(100), SR_MHZ(200),
28a35d8a
HE
47};
48
abcd4771
GS
49SR_PRIV GVariant *sigma_get_samplerates_list(void)
50{
51 return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates));
52}
39c64c6a 53
742368a2 54static const char *firmware_files[] = {
80e717b3
GS
55 [SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
56 [SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
57 [SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
58 [SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
59 [SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
f6564c8d
HE
60};
61
742368a2
GS
62#define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024)
63
7fe1f91f
GS
64static int sigma_ftdi_open(const struct sr_dev_inst *sdi)
65{
66 struct dev_context *devc;
67 int vid, pid;
68 const char *serno;
69 int ret;
70
71 devc = sdi->priv;
72 if (!devc)
73 return SR_ERR_ARG;
74
75 if (devc->ftdi.is_open)
76 return SR_OK;
77
78 vid = devc->id.vid;
79 pid = devc->id.pid;
80 serno = sdi->serial_num;
81 if (!vid || !pid || !serno || !*serno)
82 return SR_ERR_ARG;
83
84 ret = ftdi_init(&devc->ftdi.ctx);
85 if (ret < 0) {
86 sr_err("Cannot initialize FTDI context (%d): %s.",
87 ret, ftdi_get_error_string(&devc->ftdi.ctx));
88 return SR_ERR_IO;
89 }
90 ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx,
91 vid, pid, NULL, serno, 0);
92 if (ret < 0) {
93 sr_err("Cannot open device (%d): %s.",
94 ret, ftdi_get_error_string(&devc->ftdi.ctx));
95 return SR_ERR_IO;
96 }
97 devc->ftdi.is_open = TRUE;
98
99 return SR_OK;
100}
101
102static int sigma_ftdi_close(struct dev_context *devc)
103{
104 int ret;
105
106 ret = ftdi_usb_close(&devc->ftdi.ctx);
107 devc->ftdi.is_open = FALSE;
108 devc->ftdi.must_close = FALSE;
109 ftdi_deinit(&devc->ftdi.ctx);
110
111 return ret == 0 ? SR_OK : SR_ERR_IO;
112}
113
114SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi)
115{
116 struct dev_context *devc;
117 int ret;
118
119 if (!sdi)
120 return SR_ERR_ARG;
121 devc = sdi->priv;
122 if (!devc)
123 return SR_ERR_ARG;
124
125 if (devc->ftdi.is_open)
126 return SR_OK;
127
128 ret = sigma_ftdi_open(sdi);
129 if (ret != SR_OK)
130 return ret;
131 devc->ftdi.must_close = TRUE;
132
133 return ret;
134}
135
136SR_PRIV int sigma_check_close(struct dev_context *devc)
137{
138 int ret;
139
140 if (!devc)
141 return SR_ERR_ARG;
142
143 if (devc->ftdi.must_close) {
144 ret = sigma_ftdi_close(devc);
145 if (ret != SR_OK)
146 return ret;
147 devc->ftdi.must_close = FALSE;
148 }
149
150 return SR_OK;
151}
152
153SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi)
154{
155 struct dev_context *devc;
156 int ret;
157
158 if (!sdi)
159 return SR_ERR_ARG;
160 devc = sdi->priv;
161 if (!devc)
162 return SR_ERR_ARG;
163
164 ret = sigma_ftdi_open(sdi);
165 if (ret != SR_OK)
166 return ret;
167 devc->ftdi.must_close = FALSE;
168
169 return SR_OK;
170}
171
172SR_PRIV int sigma_force_close(struct dev_context *devc)
173{
174 return sigma_ftdi_close(devc);
175}
176
88a5f9ea
GS
177/*
178 * BEWARE! Error propagation is important, as are kinds of return values.
179 *
 180 * - Raw USB transport communicates the number of sent or received bytes,
181 * or negative error codes in the external library's(!) range of codes.
182 * - Internal routines at the "sigrok driver level" communicate success
183 * or failure in terms of SR_OK et al error codes.
184 * - Main loop style receive callbacks communicate booleans which arrange
185 * for repeated calls to drive progress during acquisition.
186 *
187 * Careful consideration by maintainers is essential, because all of the
 188 * above kinds of values are assignment compatible from the compiler's
189 * point of view. Implementation errors will go unnoticed at build time.
190 */
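/*
 * Illustration (hypothetical helper, not part of this driver): the
 * compiler would happily accept
 *
 *	static int broken_read(struct dev_context *devc, uint8_t *buf, size_t len)
 *	{
 *		return ftdi_read_data(&devc->ftdi.ctx, buf, len);
 *	}
 *
 * although it leaks a byte count or a negative libftdi error code to
 * callers which expect SR_OK et al. The sigma_read_sr()/sigma_write_sr()
 * wrappers below keep that translation in one place.
 */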
191
192static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size)
28a35d8a
HE
193{
194 int ret;
fefa1800 195
7fe1f91f 196 ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size);
28a35d8a 197 if (ret < 0) {
88a5f9ea 198 sr_err("USB data read failed: %s",
7fe1f91f 199 ftdi_get_error_string(&devc->ftdi.ctx));
28a35d8a
HE
200 }
201
202 return ret;
203}
204
88a5f9ea 205static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size)
28a35d8a
HE
206{
207 int ret;
fefa1800 208
7fe1f91f 209 ret = ftdi_write_data(&devc->ftdi.ctx, buf, size);
88a5f9ea
GS
210 if (ret < 0) {
211 sr_err("USB data write failed: %s",
7fe1f91f 212 ftdi_get_error_string(&devc->ftdi.ctx));
88a5f9ea
GS
213 } else if ((size_t)ret != size) {
214 sr_err("USB data write length mismatch.");
215 }
28a35d8a
HE
216
217 return ret;
218}
219
88a5f9ea
GS
220static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size)
221{
222 int ret;
223
224 ret = sigma_read_raw(devc, buf, size);
225 if (ret < 0 || (size_t)ret != size)
226 return SR_ERR_IO;
227
228 return SR_OK;
229}
230
231static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size)
232{
233 int ret;
234
235 ret = sigma_write_raw(devc, buf, size);
236 if (ret < 0 || (size_t)ret != size)
237 return SR_ERR_IO;
238
239 return SR_OK;
240}
241
e8686e3a 242/*
88a5f9ea
GS
243 * Implementor's note: The local write buffer's size shall suffice for
 244 * any known FPGA register transaction that is involved in the supported
245 * feature set of this sigrok device driver. If the length check trips,
246 * that's a programmer's error and needs adjustment in the complete call
247 * stack of the respective code path.
e8686e3a 248 */
0f017b7d
GS
249#define SIGMA_MAX_REG_DEPTH 32
250
251/*
252 * Implementor's note: The FPGA command set supports register access
253 * with automatic address adjustment. This operation is documented to
254 * wrap within a 16-address range, it cannot cross boundaries where the
255 * register address' nibble overflows. An internal helper assumes that
256 * callers remain within this auto-adjustment range, and thus multi
257 * register access requests can never exceed that count.
258 */
259#define SIGMA_MAX_REG_COUNT 16
260
9b4d261f
GS
261SR_PRIV int sigma_write_register(struct dev_context *devc,
262 uint8_t reg, uint8_t *data, size_t len)
28a35d8a 263{
0f017b7d 264 uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr;
88a5f9ea 265 size_t idx;
28a35d8a 266
0f017b7d 267 if (len > SIGMA_MAX_REG_DEPTH) {
88a5f9ea 268 sr_err("Short write buffer for %zu bytes to reg %u.", len, reg);
e8686e3a
AG
269 return SR_ERR_BUG;
270 }
271
a53b8e4d 272 wrptr = buf;
0f017b7d
GS
273 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
274 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
a53b8e4d 275 for (idx = 0; idx < len; idx++) {
0f017b7d
GS
276 write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx]));
277 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx]));
28a35d8a
HE
278 }
279
88a5f9ea 280 return sigma_write_sr(devc, buf, wrptr - buf);
28a35d8a
HE
281}
282
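/*
 * Example of the wire format which sigma_write_register() produces
 * (register and data values chosen arbitrarily for illustration):
 * writing the single byte 0xa5 to register 0x3c results in the four
 * command bytes
 *   REG_ADDR_LOW | 0xc, REG_ADDR_HIGH | 0x3,
 *   REG_DATA_LOW | 0x5, REG_DATA_HIGH_WRITE | 0xa
 * since LO4()/HI4() extract the low/high nibble of their argument.
 */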
9b4d261f
GS
283SR_PRIV int sigma_set_register(struct dev_context *devc,
284 uint8_t reg, uint8_t value)
28a35d8a 285{
9b4d261f 286 return sigma_write_register(devc, reg, &value, sizeof(value));
28a35d8a
HE
287}
288
9b4d261f
GS
289static int sigma_read_register(struct dev_context *devc,
290 uint8_t reg, uint8_t *data, size_t len)
28a35d8a 291{
a53b8e4d 292 uint8_t buf[3], *wrptr;
88a5f9ea 293 int ret;
28a35d8a 294
a53b8e4d 295 wrptr = buf;
0f017b7d
GS
296 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
297 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
a53b8e4d 298 write_u8_inc(&wrptr, REG_READ_ADDR);
88a5f9ea
GS
299 ret = sigma_write_sr(devc, buf, wrptr - buf);
300 if (ret != SR_OK)
301 return ret;
28a35d8a 302
88a5f9ea 303 return sigma_read_sr(devc, data, len);
28a35d8a
HE
304}
305
0f017b7d
GS
306static int sigma_get_register(struct dev_context *devc,
307 uint8_t reg, uint8_t *data)
308{
309 return sigma_read_register(devc, reg, data, sizeof(*data));
310}
311
312static int sigma_get_registers(struct dev_context *devc,
313 uint8_t reg, uint8_t *data, size_t count)
314{
315 uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr;
316 size_t idx;
317 int ret;
318
319 if (count > SIGMA_MAX_REG_COUNT) {
320 sr_err("Short command buffer for %zu reg reads at %u.", count, reg);
321 return SR_ERR_BUG;
322 }
323
324 wrptr = buf;
325 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg));
326 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg));
327 for (idx = 0; idx < count; idx++)
328 write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC);
329 ret = sigma_write_sr(devc, buf, wrptr - buf);
330 if (ret != SR_OK)
331 return ret;
332
333 return sigma_read_sr(devc, data, count);
334}
335
9b4d261f 336static int sigma_read_pos(struct dev_context *devc,
88a5f9ea 337 uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
28a35d8a 338{
88a5f9ea 339 uint8_t result[7];
0f017b7d 340 const uint8_t *rdptr;
88a5f9ea
GS
341 uint32_t v32;
342 uint8_t v8;
343 int ret;
28a35d8a 344
0f017b7d
GS
345 /*
346 * Read 7 registers starting at trigger position LSB.
347 * Which yields two 24bit counter values, and mode flags.
348 */
349 ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW,
350 result, sizeof(result));
88a5f9ea
GS
351 if (ret != SR_OK)
352 return ret;
28a35d8a 353
a53b8e4d 354 rdptr = &result[0];
88a5f9ea
GS
355 v32 = read_u24le_inc(&rdptr);
356 if (triggerpos)
357 *triggerpos = v32;
358 v32 = read_u24le_inc(&rdptr);
359 if (stoppos)
360 *stoppos = v32;
361 v8 = read_u8_inc(&rdptr);
362 if (mode)
363 *mode = v8;
28a35d8a 364
dc400817 365 /*
a53b8e4d
GS
366 * These positions consist of "the memory row" in the MSB fields,
367 * and "an event index" within the row in the LSB fields. Part
368 * of the memory row's content is sample data, another part is
369 * timestamps.
2c33b092 370 *
a53b8e4d
GS
371 * The retrieved register values point to after the captured
372 * position. So they need to get decremented, and adjusted to
373 * cater for the timestamps when the decrement carries over to
374 * a different memory row.
dc400817 375 */
88a5f9ea 376 if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
a53b8e4d 377 *stoppos -= CLUSTERS_PER_ROW;
88a5f9ea 378 if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
a53b8e4d 379 *triggerpos -= CLUSTERS_PER_ROW;
57bbf56b 380
a53b8e4d 381 return SR_OK;
28a35d8a
HE
382}
383
9b4d261f 384static int sigma_read_dram(struct dev_context *devc,
3d9373af 385 size_t startchunk, size_t numchunks, uint8_t *data)
28a35d8a 386{
0f017b7d 387 uint8_t buf[128], *wrptr, regval;
07411a60 388 size_t chunk;
88a5f9ea 389 int sel, ret;
07411a60 390 gboolean is_last;
28a35d8a 391
a53b8e4d 392 if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
88a5f9ea 393 sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
a53b8e4d
GS
394 return SR_ERR_BUG;
395 }
396
07411a60 397 /* Communicate DRAM start address (memory row, aka samples line). */
a53b8e4d 398 wrptr = buf;
0f017b7d 399 write_u16be_inc(&wrptr, startchunk);
88a5f9ea
GS
400 ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
401 if (ret != SR_OK)
402 return ret;
28a35d8a 403
07411a60
GS
404 /*
405 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
406 * then transfer via USB. Interleave the FPGA's DRAM access and
407 * USB transfer, use alternating buffers (0/1) in the process.
408 */
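	/*
	 * Sketch of the resulting command stream for a two-row read
	 * (interpretation of the register names is assumed here, sel()
	 * stands for REG_DRAM_SEL_BOOL()):
	 *   REG_DRAM_BLOCK, REG_DRAM_WAIT_ACK,   -- fetch row 0, wait
	 *   REG_DRAM_BLOCK | sel(1),             -- start fetch of row 1
	 *   REG_DRAM_BLOCK_DATA | sel(0),        -- USB readout of row 0
	 *   REG_DRAM_WAIT_ACK,                   -- wait for row 1
	 *   REG_DRAM_BLOCK_DATA | sel(1)         -- USB readout of row 1
	 */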
a53b8e4d
GS
409 wrptr = buf;
410 write_u8_inc(&wrptr, REG_DRAM_BLOCK);
411 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
07411a60
GS
412 for (chunk = 0; chunk < numchunks; chunk++) {
413 sel = chunk % 2;
414 is_last = chunk == numchunks - 1;
0f017b7d
GS
415 if (!is_last) {
416 regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
417 write_u8_inc(&wrptr, regval);
418 }
419 regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
420 write_u8_inc(&wrptr, regval);
07411a60 421 if (!is_last)
a53b8e4d 422 write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
28a35d8a 423 }
88a5f9ea
GS
424 ret = sigma_write_sr(devc, buf, wrptr - buf);
425 if (ret != SR_OK)
426 return ret;
28a35d8a 427
88a5f9ea 428 return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
28a35d8a
HE
429}
430
4ae1f451 431/* Upload trigger look-up tables to Sigma. */
9b4d261f
GS
432SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc,
433 struct triggerlut *lut)
ee492173 434{
3d9373af 435 size_t lut_addr;
ee492173 436 uint16_t bit;
72ea3b84 437 uint8_t m3d, m2d, m1d, m0d;
1385f791
GS
438 uint8_t buf[6], *wrptr;
439 uint8_t trgsel2;
440 uint16_t lutreg, selreg;
88a5f9ea 441 int ret;
ee492173 442
72ea3b84
GS
443 /*
444 * Translate the LUT part of the trigger configuration from the
445 * application's perspective to the hardware register's bitfield
446 * layout. Send the LUT to the device. This configures the logic
447 * which combines pin levels or edges.
448 */
0f017b7d 449 for (lut_addr = 0; lut_addr < 16; lut_addr++) {
3f5f5484 450 bit = BIT(lut_addr);
ee492173 451
72ea3b84
GS
452 /* - M4 M3S M3Q */
453 m3d = 0;
454 if (lut->m4 & bit)
3f5f5484 455 m3d |= BIT(2);
72ea3b84 456 if (lut->m3s & bit)
3f5f5484 457 m3d |= BIT(1);
16791da9 458 if (lut->m3q & bit)
3f5f5484 459 m3d |= BIT(0);
ee492173 460
72ea3b84
GS
461 /* M2D3 M2D2 M2D1 M2D0 */
462 m2d = 0;
ee492173 463 if (lut->m2d[3] & bit)
3f5f5484 464 m2d |= BIT(3);
72ea3b84 465 if (lut->m2d[2] & bit)
3f5f5484 466 m2d |= BIT(2);
72ea3b84 467 if (lut->m2d[1] & bit)
3f5f5484 468 m2d |= BIT(1);
72ea3b84 469 if (lut->m2d[0] & bit)
3f5f5484 470 m2d |= BIT(0);
ee492173 471
72ea3b84
GS
472 /* M1D3 M1D2 M1D1 M1D0 */
473 m1d = 0;
474 if (lut->m1d[3] & bit)
3f5f5484 475 m1d |= BIT(3);
72ea3b84 476 if (lut->m1d[2] & bit)
3f5f5484 477 m1d |= BIT(2);
72ea3b84 478 if (lut->m1d[1] & bit)
3f5f5484 479 m1d |= BIT(1);
72ea3b84 480 if (lut->m1d[0] & bit)
3f5f5484 481 m1d |= BIT(0);
ee492173 482
72ea3b84
GS
483 /* M0D3 M0D2 M0D1 M0D0 */
484 m0d = 0;
ee492173 485 if (lut->m0d[3] & bit)
3f5f5484 486 m0d |= BIT(3);
72ea3b84 487 if (lut->m0d[2] & bit)
3f5f5484 488 m0d |= BIT(2);
72ea3b84 489 if (lut->m0d[1] & bit)
3f5f5484 490 m0d |= BIT(1);
72ea3b84 491 if (lut->m0d[0] & bit)
3f5f5484 492 m0d |= BIT(0);
ee492173 493
a53b8e4d 494 /*
72ea3b84
GS
495 * Send 16bits with M3D/M2D and M1D/M0D bit masks to the
496 * TriggerSelect register, then strobe the LUT write by
497 * passing A3-A0 to TriggerSelect2. Hold RESET during LUT
498 * programming.
a53b8e4d
GS
499 */
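		/*
		 * Equivalently (restating the code below): lutreg ends up
		 * as (m3d << 12) | (m2d << 8) | (m1d << 4) | m0d and gets
		 * sent in big endian byte order.
		 */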
500 wrptr = buf;
1385f791
GS
501 lutreg = 0;
502 lutreg <<= 4; lutreg |= m3d;
503 lutreg <<= 4; lutreg |= m2d;
504 lutreg <<= 4; lutreg |= m1d;
505 lutreg <<= 4; lutreg |= m0d;
506 write_u16be_inc(&wrptr, lutreg);
419f1095
GS
507 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT,
508 buf, wrptr - buf);
88a5f9ea
GS
509 if (ret != SR_OK)
510 return ret;
1385f791 511 trgsel2 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE |
3d9373af 512 (lut_addr & TRGSEL2_LUT_ADDR_MASK);
1385f791 513 ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, trgsel2);
88a5f9ea
GS
514 if (ret != SR_OK)
515 return ret;
ee492173
HE
516 }
517
72ea3b84
GS
518 /*
519 * Send the parameters. This covers counters and durations.
520 */
a53b8e4d 521 wrptr = buf;
16791da9
GS
522 selreg = 0;
523 selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT;
524 selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT;
525 selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT;
526 selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT;
527 selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT;
528 selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT;
529 write_u16be_inc(&wrptr, selreg);
0f017b7d
GS
530 write_u16be_inc(&wrptr, lut->params.cmpb);
531 write_u16be_inc(&wrptr, lut->params.cmpa);
88a5f9ea
GS
532 ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf);
533 if (ret != SR_OK)
534 return ret;
ee492173 535
e46b8fb1 536 return SR_OK;
ee492173
HE
537}
538
d5fa188a 539/*
dc0906e2
GS
540 * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device
541 * uses FTDI bitbang mode for netlist download in slave serial mode.
542 * (LATER: The OMEGA device's cable contains a more capable FTDI chip
543 * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245
544 * compatible bitbang mode? For maximum code re-use and reduced libftdi
545 * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2
546 * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.)
547 *
548 * 750kbps rate (four times the speed of sigmalogan) works well for
549 * netlist download. All pins except INIT_B are output pins during
550 * configuration download.
551 *
552 * Some pins are inverted as a byproduct of level shifting circuitry.
553 * That's why high CCLK level (from the cable's point of view) is idle
554 * from the FPGA's perspective.
555 *
556 * The vendor's literature discusses a "suicide sequence" which ends
557 * regular FPGA execution and should be sent before entering bitbang
558 * mode and sending configuration data. Set D7 and toggle D2, D3, D4
559 * a few times.
560 */
3f5f5484
GS
561#define BB_PIN_CCLK BIT(0) /* D0, CCLK */
562#define BB_PIN_PROG BIT(1) /* D1, PROG */
563#define BB_PIN_D2 BIT(2) /* D2, (part of) SUICIDE */
564#define BB_PIN_D3 BIT(3) /* D3, (part of) SUICIDE */
565#define BB_PIN_D4 BIT(4) /* D4, (part of) SUICIDE (unused?) */
566#define BB_PIN_INIT BIT(5) /* D5, INIT, input pin */
567#define BB_PIN_DIN BIT(6) /* D6, DIN */
568#define BB_PIN_D7 BIT(7) /* D7, (part of) SUICIDE */
dc0906e2
GS
569
570#define BB_BITRATE (750 * 1000)
571#define BB_PINMASK (0xff & ~BB_PIN_INIT)
572
573/*
 574 * Initiate slave serial mode for configuration download, which is done
575 * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before
c749d1ca
GS
576 * initiating the configuration download.
577 *
578 * Run a "suicide sequence" first to terminate the regular FPGA operation
579 * before reconfiguration. The FTDI cable is single channel, and shares
580 * pins which are used for data communication in FIFO mode with pins that
581 * are used for FPGA configuration in bitbang mode. Hardware defaults for
582 * unconfigured hardware, and runtime conditions after FPGA configuration
583 * need to cooperate such that re-configuration of the FPGA can start.
d5fa188a 584 */
c749d1ca 585static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
d5fa188a 586{
a53b8e4d 587 const uint8_t suicide[] = {
dc0906e2
GS
588 BB_PIN_D7 | BB_PIN_D2,
589 BB_PIN_D7 | BB_PIN_D2,
590 BB_PIN_D7 | BB_PIN_D3,
591 BB_PIN_D7 | BB_PIN_D2,
592 BB_PIN_D7 | BB_PIN_D3,
593 BB_PIN_D7 | BB_PIN_D2,
594 BB_PIN_D7 | BB_PIN_D3,
595 BB_PIN_D7 | BB_PIN_D2,
d5fa188a 596 };
a53b8e4d 597 const uint8_t init_array[] = {
dc0906e2
GS
598 BB_PIN_CCLK,
599 BB_PIN_CCLK | BB_PIN_PROG,
600 BB_PIN_CCLK | BB_PIN_PROG,
601 BB_PIN_CCLK,
602 BB_PIN_CCLK,
603 BB_PIN_CCLK,
604 BB_PIN_CCLK,
605 BB_PIN_CCLK,
606 BB_PIN_CCLK,
607 BB_PIN_CCLK,
d5fa188a 608 };
3d9373af
GS
609 size_t retries;
610 int ret;
d5fa188a
MV
611 uint8_t data;
612
613 /* Section 2. part 1), do the FPGA suicide. */
88a5f9ea
GS
614 ret = SR_OK;
615 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
616 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
617 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
618 ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
619 if (ret != SR_OK)
620 return SR_ERR_IO;
c749d1ca 621 g_usleep(10 * 1000);
d5fa188a 622
dc0906e2 623 /* Section 2. part 2), pulse PROG. */
88a5f9ea
GS
624 ret = sigma_write_sr(devc, init_array, sizeof(init_array));
625 if (ret != SR_OK)
626 return ret;
c749d1ca 627 g_usleep(10 * 1000);
7fe1f91f 628 ftdi_usb_purge_buffers(&devc->ftdi.ctx);
d5fa188a 629
88a5f9ea
GS
630 /*
631 * Wait until the FPGA asserts INIT_B. Check in a maximum number
632 * of bursts with a given delay between them. Read as many pin
 633 * capture results as the combination of FTDI chip and FTDI lib
634 * may provide. Cope with absence of pin capture data in a cycle.
 635 * This approach shall result in fast response in case of success,
636 * low cost of execution during wait, reliable error handling in
637 * the transport layer, and robust response to failure or absence
638 * of result data (hardware inactivity after stimulus).
639 */
dc0906e2
GS
640 retries = 10;
641 while (retries--) {
88a5f9ea
GS
642 do {
643 ret = sigma_read_raw(devc, &data, sizeof(data));
644 if (ret < 0)
645 return SR_ERR_IO;
646 if (ret == sizeof(data) && (data & BB_PIN_INIT))
647 return SR_OK;
648 } while (ret == sizeof(data));
649 if (retries)
650 g_usleep(10 * 1000);
d5fa188a
MV
651 }
652
653 return SR_ERR_TIMEOUT;
654}
655
c749d1ca
GS
656/*
657 * This is belt and braces. Re-run the bitbang initiation sequence a few
 658 * times should the first attempts fail. Failure is rare but can happen (was
659 * observed during driver development).
660 */
661static int sigma_fpga_init_bitbang(struct dev_context *devc)
662{
663 size_t retries;
664 int ret;
665
666 retries = 10;
667 while (retries--) {
668 ret = sigma_fpga_init_bitbang_once(devc);
669 if (ret == SR_OK)
670 return ret;
671 if (ret != SR_ERR_TIMEOUT)
672 return ret;
673 }
674 return ret;
675}
676
64fe661b
MV
677/*
678 * Configure the FPGA for logic-analyzer mode.
679 */
680static int sigma_fpga_init_la(struct dev_context *devc)
681{
0f017b7d 682 uint8_t buf[20], *wrptr;
a53b8e4d 683 uint8_t data_55, data_aa, mode;
64fe661b 684 uint8_t result[3];
a53b8e4d 685 const uint8_t *rdptr;
64fe661b
MV
686 int ret;
687
a53b8e4d
GS
688 wrptr = buf;
689
690 /* Read ID register. */
0f017b7d
GS
691 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID));
692 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID));
a53b8e4d
GS
693 write_u8_inc(&wrptr, REG_READ_ADDR);
694
695 /* Write 0x55 to scratch register, read back. */
696 data_55 = 0x55;
0f017b7d
GS
697 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
698 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
699 write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55));
700 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55));
a53b8e4d
GS
701 write_u8_inc(&wrptr, REG_READ_ADDR);
702
703 /* Write 0xaa to scratch register, read back. */
704 data_aa = 0xaa;
0f017b7d
GS
705 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
706 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
707 write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa));
708 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa));
a53b8e4d
GS
709 write_u8_inc(&wrptr, REG_READ_ADDR);
710
711 /* Initiate SDRAM initialization in mode register. */
712 mode = WMR_SDRAMINIT;
0f017b7d
GS
713 write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE));
714 write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE));
715 write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode));
716 write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode));
a53b8e4d 717
dc0906e2
GS
718 /*
719 * Send the command sequence which contains 3 READ requests.
720 * Expect to see the corresponding 3 response bytes.
721 */
88a5f9ea
GS
722 ret = sigma_write_sr(devc, buf, wrptr - buf);
723 if (ret != SR_OK) {
724 sr_err("Could not request LA start response.");
725 return ret;
726 }
727 ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
728 if (ret != SR_OK) {
729 sr_err("Could not receive LA start response.");
a53b8e4d
GS
730 return SR_ERR_IO;
731 }
732 rdptr = result;
733 if (read_u8_inc(&rdptr) != 0xa6) {
734 sr_err("Unexpected ID response.");
735 return SR_ERR_DATA;
736 }
737 if (read_u8_inc(&rdptr) != data_55) {
738 sr_err("Unexpected scratch read-back (55).");
739 return SR_ERR_DATA;
740 }
741 if (read_u8_inc(&rdptr) != data_aa) {
742 sr_err("Unexpected scratch read-back (aa).");
743 return SR_ERR_DATA;
744 }
64fe661b
MV
745
746 return SR_OK;
64fe661b
MV
747}
748
a80226bb
MV
749/*
750 * Read the firmware from a file and transform it into a series of bitbang
751 * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d
752 * by the caller of this function.
753 */
8e2d6c9d 754static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name,
3d9373af 755 uint8_t **bb_cmd, size_t *bb_cmd_size)
a80226bb 756{
dc0906e2
GS
757 uint8_t *firmware;
758 size_t file_size;
759 uint8_t *p;
760 size_t l;
a80226bb 761 uint32_t imm;
dc0906e2
GS
762 size_t bb_size;
763 uint8_t *bb_stream, *bbs, byte, mask, v;
a80226bb 764
387825dc 765 /* Retrieve the on-disk firmware file content. */
742368a2
GS
766 firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name,
767 &file_size, SIGMA_FIRMWARE_SIZE_LIMIT);
8e2d6c9d 768 if (!firmware)
dc0906e2 769 return SR_ERR_IO;
a80226bb 770
387825dc 771 /* Unscramble the file content (XOR with "random" sequence). */
dc0906e2
GS
772 p = firmware;
773 l = file_size;
a80226bb 774 imm = 0x3f6df2ab;
dc0906e2 775 while (l--) {
a80226bb 776 imm = (imm + 0xa853753) % 177 + (imm * 0x8034052);
dc0906e2 777 *p++ ^= imm & 0xff;
a80226bb
MV
778 }
779
780 /*
387825dc
GS
781 * Generate a sequence of bitbang samples. With two samples per
782 * FPGA configuration bit, providing the level for the DIN signal
783 * as well as two edges for CCLK. See Xilinx UG332 for details
784 * ("slave serial" mode).
785 *
786 * Note that CCLK is inverted in hardware. That's why the
787 * respective bit is first set and then cleared in the bitbang
788 * sample sets. So that the DIN level will be stable when the
789 * data gets sampled at the rising CCLK edge, and the signals'
790 * setup time constraint will be met.
791 *
792 * The caller will put the FPGA into download mode, will send
793 * the bitbang samples, and release the allocated memory.
a80226bb 794 */
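	/*
	 * Worked example (illustration only): the firmware byte 0xa5,
	 * shifted out MSB first (1 0 1 0 0 1 0 1), becomes the pairs
	 *   BB_PIN_DIN | BB_PIN_CCLK, BB_PIN_DIN,   -- bit value 1
	 *   BB_PIN_CCLK, 0,                         -- bit value 0
	 *   ...
	 * for a total of 16 bitbang bytes per firmware byte.
	 */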
a80226bb 795 bb_size = file_size * 8 * 2;
dc0906e2 796 bb_stream = g_try_malloc(bb_size);
a80226bb 797 if (!bb_stream) {
88a5f9ea 798 sr_err("Memory allocation failed during firmware upload.");
dc0906e2
GS
799 g_free(firmware);
800 return SR_ERR_MALLOC;
a80226bb 801 }
a80226bb 802 bbs = bb_stream;
dc0906e2
GS
803 p = firmware;
804 l = file_size;
805 while (l--) {
806 byte = *p++;
807 mask = 0x80;
808 while (mask) {
809 v = (byte & mask) ? BB_PIN_DIN : 0;
810 mask >>= 1;
811 *bbs++ = v | BB_PIN_CCLK;
a80226bb
MV
812 *bbs++ = v;
813 }
814 }
dc0906e2 815 g_free(firmware);
a80226bb
MV
816
817 /* The transformation completed successfully, return the result. */
818 *bb_cmd = bb_stream;
819 *bb_cmd_size = bb_size;
820
dc0906e2 821 return SR_OK;
a80226bb
MV
822}
823
9b4d261f
GS
824static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
825 enum sigma_firmware_idx firmware_idx)
28a35d8a
HE
826{
827 int ret;
a53b8e4d
GS
828 uint8_t *buf;
829 uint8_t pins;
28a35d8a 830 size_t buf_size;
a9016883 831 const char *firmware;
a9016883 832
80e717b3
GS
833 /* Check for valid firmware file selection. */
834 if (firmware_idx >= ARRAY_SIZE(firmware_files))
835 return SR_ERR_ARG;
4b25cbff 836 firmware = firmware_files[firmware_idx];
80e717b3
GS
837 if (!firmware || !*firmware)
838 return SR_ERR_ARG;
839
840 /* Avoid downloading the same firmware multiple times. */
841 if (devc->firmware_idx == firmware_idx) {
a9016883
GS
842 sr_info("Not uploading firmware file '%s' again.", firmware);
843 return SR_OK;
844 }
28a35d8a 845
de4c29fa 846 devc->state = SIGMA_CONFIG;
1bb9dc82 847
dc0906e2 848 /* Set the cable to bitbang mode. */
7fe1f91f 849 ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
8bbf7627 850 if (ret < 0) {
88a5f9ea 851 sr_err("Could not setup cable mode for upload: %s",
7fe1f91f 852 ftdi_get_error_string(&devc->ftdi.ctx));
7bcf2168 853 return SR_ERR;
28a35d8a 854 }
7fe1f91f 855 ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
8bbf7627 856 if (ret < 0) {
88a5f9ea 857 sr_err("Could not setup bitrate for upload: %s",
7fe1f91f 858 ftdi_get_error_string(&devc->ftdi.ctx));
7bcf2168 859 return SR_ERR;
28a35d8a
HE
860 }
861
dc0906e2 862 /* Initiate FPGA configuration mode. */
d5fa188a 863 ret = sigma_fpga_init_bitbang(devc);
88a5f9ea
GS
864 if (ret) {
865 sr_err("Could not initiate firmware upload to hardware");
d5fa188a 866 return ret;
88a5f9ea 867 }
28a35d8a 868
dc0906e2 869 /* Prepare wire format of the firmware image. */
8e2d6c9d 870 ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
8bbf7627 871 if (ret != SR_OK) {
88a5f9ea 872 sr_err("Could not prepare file %s for upload.", firmware);
b53738ba 873 return ret;
28a35d8a
HE
874 }
875
dc0906e2 876 /* Write the FPGA netlist to the cable. */
499b17e9 877 sr_info("Uploading firmware file '%s'.", firmware);
88a5f9ea 878 ret = sigma_write_sr(devc, buf, buf_size);
28a35d8a 879 g_free(buf);
88a5f9ea
GS
880 if (ret != SR_OK) {
881 sr_err("Could not upload firmware file '%s'.", firmware);
882 return ret;
883 }
28a35d8a 884
dc0906e2 885 /* Leave bitbang mode and discard pending input data. */
7fe1f91f 886 ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
8bbf7627 887 if (ret < 0) {
88a5f9ea 888 sr_err("Could not setup cable mode after upload: %s",
7fe1f91f 889 ftdi_get_error_string(&devc->ftdi.ctx));
e46b8fb1 890 return SR_ERR;
28a35d8a 891 }
7fe1f91f 892 ftdi_usb_purge_buffers(&devc->ftdi.ctx);
88a5f9ea 893 while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
28a35d8a
HE
894 ;
895
64fe661b
MV
896 /* Initialize the FPGA for logic-analyzer mode. */
897 ret = sigma_fpga_init_la(devc);
88a5f9ea
GS
898 if (ret != SR_OK) {
899 sr_err("Hardware response after firmware upload failed.");
64fe661b 900 return ret;
88a5f9ea 901 }
28a35d8a 902
dc0906e2 903 /* Keep track of successful firmware download completion. */
de4c29fa 904 devc->state = SIGMA_IDLE;
80e717b3 905 devc->firmware_idx = firmware_idx;
47f4f073 906 sr_info("Firmware uploaded.");
e3fff420 907
e46b8fb1 908 return SR_OK;
f6564c8d
HE
909}
910
9a0a606a 911/*
5e78a564
GS
912 * The driver supports user specified time or sample count limits. The
913 * device's hardware supports neither, and hardware compression prevents
914 * reliable detection of "fill levels" (currently reached sample counts)
915 * from register values during acquisition. That's why the driver needs
916 * to apply some heuristics:
9a0a606a 917 *
5e78a564
GS
918 * - The (optional) sample count limit and the (normalized) samplerate
919 * get mapped to an estimated duration for these samples' acquisition.
920 * - The (optional) time limit gets checked as well. The lesser of the
921 * two limits will terminate the data acquisition phase. The exact
922 * sample count limit gets enforced in session feed submission paths.
923 * - Some slack needs to be given to account for hardware pipelines as
924 * well as late storage of last chunks after compression thresholds
925 * are tripped. The resulting data set will span at least the caller
926 * specified period of time, which shall be perfectly acceptable.
927 *
928 * With RLE compression active, up to 64K sample periods can pass before
 929 * a cluster accumulates, which translates to 327ms at 200kHz. Add two
 930 * times that period for good measure; one is not enough to flush the
931 * hardware pipeline (observation from an earlier experiment).
9a0a606a 932 */
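/*
 * Worked example (illustrative numbers, trigger-less configuration):
 * a limit of 1M samples at 1MHz maps to roughly 1000ms. The slack of
 * two worst-case cluster periods at that rate adds about 130ms, so
 * the resulting acquisition timeout is approximately 1.13 seconds.
 */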
5e78a564 933SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
9a0a606a 934{
5e78a564
GS
935 int ret;
936 GVariant *data;
937 uint64_t user_count, user_msecs;
9a0a606a 938 uint64_t worst_cluster_time_ms;
5e78a564 939 uint64_t count_msecs, acquire_msecs;
9a0a606a 940
156b6879 941 sr_sw_limits_init(&devc->limit.acquire);
f14e6f7e 942 devc->late_trigger_timeout = FALSE;
5e78a564
GS
943
944 /* Get sample count limit, convert to msecs. */
156b6879 945 ret = sr_sw_limits_config_get(&devc->limit.config,
5e78a564
GS
946 SR_CONF_LIMIT_SAMPLES, &data);
947 if (ret != SR_OK)
948 return ret;
949 user_count = g_variant_get_uint64(data);
950 g_variant_unref(data);
951 count_msecs = 0;
f14e6f7e
GS
952 if (devc->use_triggers) {
953 user_count *= 100 - devc->capture_ratio;
954 user_count /= 100;
955 }
5e78a564 956 if (user_count)
2d8a5089 957 count_msecs = 1000 * user_count / devc->clock.samplerate + 1;
5e78a564
GS
958
959 /* Get time limit, which is in msecs. */
156b6879 960 ret = sr_sw_limits_config_get(&devc->limit.config,
5e78a564
GS
961 SR_CONF_LIMIT_MSEC, &data);
962 if (ret != SR_OK)
963 return ret;
964 user_msecs = g_variant_get_uint64(data);
965 g_variant_unref(data);
f14e6f7e
GS
966 if (devc->use_triggers) {
967 user_msecs *= 100 - devc->capture_ratio;
968 user_msecs /= 100;
969 }
5e78a564
GS
970
971 /* Get the lesser of them, with both being optional. */
f14e6f7e 972 acquire_msecs = ~UINT64_C(0);
5e78a564
GS
973 if (user_count && count_msecs < acquire_msecs)
974 acquire_msecs = count_msecs;
975 if (user_msecs && user_msecs < acquire_msecs)
976 acquire_msecs = user_msecs;
f14e6f7e 977 if (acquire_msecs == ~UINT64_C(0))
5e78a564
GS
978 return SR_OK;
979
980 /* Add some slack, and use that timeout for acquisition. */
2d8a5089 981 worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate;
5e78a564
GS
982 acquire_msecs += 2 * worst_cluster_time_ms;
983 data = g_variant_new_uint64(acquire_msecs);
156b6879 984 ret = sr_sw_limits_config_set(&devc->limit.acquire,
5e78a564
GS
985 SR_CONF_LIMIT_MSEC, data);
986 g_variant_unref(data);
987 if (ret != SR_OK)
988 return ret;
989
f14e6f7e
GS
990 /* Deferred or immediate (trigger-less) timeout period start. */
991 if (devc->use_triggers)
992 devc->late_trigger_timeout = TRUE;
993 else
994 sr_sw_limits_acquisition_start(&devc->limit.acquire);
995
5e78a564 996 return SR_OK;
9a0a606a
GS
997}
998
5e78a564
GS
999/*
1000 * Check whether a caller specified samplerate matches the device's
1001 * hardware constraints (can be used for acquisition). Optionally yield
1002 * a value that approximates the original spec.
1003 *
1004 * This routine assumes that input specs are in the 200kHz to 200MHz
1005 * range of supported rates, and callers typically want to normalize a
1006 * given value to the hardware capabilities. Values in the 50MHz range
1007 * get rounded up by default, to avoid a more expensive check for the
1008 * closest match, while higher sampling rate is always desirable during
1009 * measurement. Input specs which exactly match hardware capabilities
1010 * remain unaffected. Because 100/200MHz rates also limit the number of
1011 * available channels, they are not suggested by this routine, instead
1012 * callers need to pick them consciously.
1013 */
1014SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate)
1015{
1016 uint64_t div, rate;
1017
1018 /* Accept exact matches for 100/200MHz. */
1019 if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) {
1020 if (have_rate)
1021 *have_rate = want_rate;
1022 return SR_OK;
1023 }
1024
1025 /* Accept 200kHz to 50MHz range, and map to near value. */
1026 if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) {
1027 div = SR_MHZ(50) / want_rate;
1028 rate = SR_MHZ(50) / div;
1029 if (have_rate)
1030 *have_rate = rate;
1031 return SR_OK;
1032 }
1033
1034 return SR_ERR_ARG;
1035}
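/*
 * Usage sketch (illustrative values): a requested rate of 1MHz divides
 * 50MHz evenly (divider 50) and is returned unchanged. A request for
 * 3MHz yields divider 50 / 3 = 16 and thus normalizes to 50MHz / 16 =
 * 3.125MHz, i.e. the specified value gets rounded up.
 */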
1036
53c8a99c
GS
1037/* Gets called at probe time. Can seed software settings from hardware state. */
1038SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi)
abcd4771 1039{
53c8a99c
GS
1040 struct dev_context *devc;
1041 int ret;
1042 uint8_t regaddr, regval;
1043
1044 devc = sdi->priv;
1045 if (!devc)
1046 return SR_ERR_ARG;
1047
1048 /* Seed configuration values from defaults. */
1049 devc->firmware_idx = SIGMA_FW_NONE;
1050 devc->clock.samplerate = samplerates[0];
1051
1052 /* TODO
1053 * Ideally the device driver could retrieve recently stored
1054 * details from hardware registers, thus re-use user specified
 1055 * configuration values across sigrok sessions, which could
1056 * avoid repeated expensive though unnecessary firmware uploads,
1057 * improve performance and usability. Unfortunately it appears
 1058 * that the register range which is documented as available for
1059 * application use keeps providing 0xff data content. At least
1060 * with the netlist version which ships with sigrok. The same
1061 * was observed with unused registers in the first page.
1062 */
1063 return SR_ERR_NA;
1064
1065 /* This is for research, currently does not work yet. */
1066 ret = sigma_check_open(sdi);
1067 regaddr = 16;
1068 regaddr = 14;
1069 ret = sigma_set_register(devc, regaddr, 'F');
1070 ret = sigma_get_register(devc, regaddr, &regval);
1071 sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret);
1072 ret = sigma_check_close(devc);
1073 return ret;
1074}
1075
1076/* Gets called after successful (volatile) hardware configuration. */
1077SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi)
1078{
1079 /* TODO See above, registers seem to not hold written data. */
abcd4771 1080 (void)sdi;
53c8a99c 1081 return SR_ERR_NA;
abcd4771
GS
1082}
1083
5e78a564 1084SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi)
f6564c8d 1085{
2c9c0df8 1086 struct dev_context *devc;
8e2d6c9d 1087 struct drv_context *drvc;
5e78a564 1088 uint64_t samplerate;
2c9c0df8 1089 int ret;
3d9373af 1090 size_t num_channels;
f6564c8d 1091
2c9c0df8 1092 devc = sdi->priv;
8e2d6c9d 1093 drvc = sdi->driver->context;
f4abaa9f 1094
5e78a564 1095 /* Accept any caller specified rate which the hardware supports. */
2d8a5089 1096 ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate);
5e78a564
GS
1097 if (ret != SR_OK)
1098 return ret;
f6564c8d 1099
2f7e529c
GS
1100 /*
 1101 * Depending on the samplerates of 200/100/50 MHz, specific
1102 * firmware is required and higher rates might limit the set
1103 * of available channels.
1104 */
de4c29fa 1105 num_channels = devc->interp.num_channels;
59df0c77 1106 if (samplerate <= SR_MHZ(50)) {
80e717b3 1107 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ);
ac9534f4 1108 num_channels = 16;
6b2d3385 1109 } else if (samplerate == SR_MHZ(100)) {
80e717b3 1110 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ);
ac9534f4 1111 num_channels = 8;
6b2d3385 1112 } else if (samplerate == SR_MHZ(200)) {
80e717b3 1113 ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ);
ac9534f4 1114 num_channels = 4;
f78898e9 1115 }
f6564c8d 1116
2f7e529c 1117 /*
5e78a564
GS
1118 * The samplerate affects the number of available logic channels
1119 * as well as a sample memory layout detail (the number of samples
1120 * which the device will communicate within an "event").
2f7e529c 1121 */
6b2d3385 1122 if (ret == SR_OK) {
de4c29fa
GS
1123 devc->interp.num_channels = num_channels;
1124 devc->interp.samples_per_event = 16 / devc->interp.num_channels;
6b2d3385 1125 }
f6564c8d 1126
53c8a99c
GS
1127 /*
1128 * Store the firmware type and most recently configured samplerate
1129 * in hardware, such that subsequent sessions can start from there.
1130 * This is a "best effort" approach. Failure is non-fatal.
1131 */
1132 if (ret == SR_OK)
1133 (void)sigma_store_hw_config(sdi);
1134
e8397563 1135 return ret;
28a35d8a
HE
1136}
1137
98b43eb3
GS
1138/*
 1139 * Arrange for a session feed submit buffer: a queue where a number of
 1140 * samples gets accumulated to reduce the number of send calls, and
 1141 * which also enforces an optional sample count limit for data acquisition.
1142 *
1143 * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the
1144 * driver provides a fixed channel layout regardless of samplerate).
1145 */
1146
1147#define CHUNK_SIZE (4 * 1024 * 1024)
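/*
 * With the fixed 16bit unit size this corresponds to up to 2M queued
 * samples (4MiB / 2 bytes) before a flush becomes necessary. (Derived
 * from the values above, for illustration.)
 */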
1148
1149struct submit_buffer {
1150 size_t unit_size;
1151 size_t max_samples, curr_samples;
1152 uint8_t *sample_data;
1153 uint8_t *write_pointer;
1154 struct sr_dev_inst *sdi;
1155 struct sr_datafeed_packet packet;
1156 struct sr_datafeed_logic logic;
98b43eb3
GS
1157};
1158
1159static int alloc_submit_buffer(struct sr_dev_inst *sdi)
1160{
1161 struct dev_context *devc;
1162 struct submit_buffer *buffer;
1163 size_t size;
1164
1165 devc = sdi->priv;
1166
1167 buffer = g_malloc0(sizeof(*buffer));
1168 devc->buffer = buffer;
1169
1170 buffer->unit_size = sizeof(uint16_t);
1171 size = CHUNK_SIZE;
1172 size /= buffer->unit_size;
1173 buffer->max_samples = size;
1174 size *= buffer->unit_size;
1175 buffer->sample_data = g_try_malloc0(size);
1176 if (!buffer->sample_data)
1177 return SR_ERR_MALLOC;
1178 buffer->write_pointer = buffer->sample_data;
156b6879 1179 sr_sw_limits_init(&devc->limit.submit);
98b43eb3
GS
1180
1181 buffer->sdi = sdi;
1182 memset(&buffer->logic, 0, sizeof(buffer->logic));
1183 buffer->logic.unitsize = buffer->unit_size;
1184 buffer->logic.data = buffer->sample_data;
1185 memset(&buffer->packet, 0, sizeof(buffer->packet));
1186 buffer->packet.type = SR_DF_LOGIC;
1187 buffer->packet.payload = &buffer->logic;
1188
1189 return SR_OK;
1190}
1191
5e78a564 1192static int setup_submit_limit(struct dev_context *devc)
98b43eb3 1193{
5e78a564 1194 struct sr_sw_limits *limits;
98b43eb3
GS
1195 int ret;
1196 GVariant *data;
1197 uint64_t total;
1198
156b6879 1199 limits = &devc->limit.submit;
98b43eb3 1200
156b6879 1201 ret = sr_sw_limits_config_get(&devc->limit.config,
5e78a564
GS
1202 SR_CONF_LIMIT_SAMPLES, &data);
1203 if (ret != SR_OK)
1204 return ret;
1205 total = g_variant_get_uint64(data);
1206 g_variant_unref(data);
1207
1208 sr_sw_limits_init(limits);
98b43eb3
GS
1209 if (total) {
1210 data = g_variant_new_uint64(total);
5e78a564 1211 ret = sr_sw_limits_config_set(limits,
98b43eb3
GS
1212 SR_CONF_LIMIT_SAMPLES, data);
1213 g_variant_unref(data);
1214 if (ret != SR_OK)
1215 return ret;
1216 }
1217
5e78a564 1218 sr_sw_limits_acquisition_start(limits);
98b43eb3
GS
1219
1220 return SR_OK;
1221}
1222
1223static void free_submit_buffer(struct dev_context *devc)
1224{
1225 struct submit_buffer *buffer;
1226
1227 if (!devc)
1228 return;
1229
1230 buffer = devc->buffer;
1231 if (!buffer)
1232 return;
1233 devc->buffer = NULL;
1234
1235 g_free(buffer->sample_data);
1236 g_free(buffer);
1237}
1238
1239static int flush_submit_buffer(struct dev_context *devc)
1240{
1241 struct submit_buffer *buffer;
1242 int ret;
1243
1244 buffer = devc->buffer;
1245
1246 /* Is queued sample data available? */
1247 if (!buffer->curr_samples)
1248 return SR_OK;
1249
1250 /* Submit to the session feed. */
1251 buffer->logic.length = buffer->curr_samples * buffer->unit_size;
1252 ret = sr_session_send(buffer->sdi, &buffer->packet);
1253 if (ret != SR_OK)
1254 return ret;
1255
1256 /* Rewind queue position. */
1257 buffer->curr_samples = 0;
1258 buffer->write_pointer = buffer->sample_data;
1259
1260 return SR_OK;
1261}
1262
1263static int addto_submit_buffer(struct dev_context *devc,
1264 uint16_t sample, size_t count)
1265{
1266 struct submit_buffer *buffer;
5e78a564 1267 struct sr_sw_limits *limits;
98b43eb3
GS
1268 int ret;
1269
1270 buffer = devc->buffer;
156b6879 1271 limits = &devc->limit.submit;
8a723625 1272 if (!devc->use_triggers && sr_sw_limits_check(limits))
98b43eb3
GS
1273 count = 0;
1274
1275 /*
1276 * Individually accumulate and check each sample, such that
1277 * accumulation between flushes won't exceed local storage, and
1278 * enforcement of user specified limits is exact.
1279 */
1280 while (count--) {
a53b8e4d 1281 write_u16le_inc(&buffer->write_pointer, sample);
98b43eb3
GS
1282 buffer->curr_samples++;
1283 if (buffer->curr_samples == buffer->max_samples) {
1284 ret = flush_submit_buffer(devc);
1285 if (ret != SR_OK)
1286 return ret;
1287 }
5e78a564 1288 sr_sw_limits_update_samples_read(limits, 1);
8a723625 1289 if (!devc->use_triggers && sr_sw_limits_check(limits))
98b43eb3
GS
1290 break;
1291 }
1292
1293 return SR_OK;
1294}
1295
16a5d5ac 1296static void sigma_location_break_down(struct sigma_location *loc)
ee5cef71 1297{
16a5d5ac
GS
1298
1299 loc->line = loc->raw / ROW_LENGTH_U16;
1300 loc->line += ROW_COUNT;
1301 loc->line %= ROW_COUNT;
1302 loc->cluster = loc->raw % ROW_LENGTH_U16;
1303 loc->event = loc->cluster % EVENTS_PER_CLUSTER;
1304 loc->cluster = loc->cluster / EVENTS_PER_CLUSTER;
1305}
1306
66d1790c
GS
1307static gboolean sigma_location_is_eq(struct sigma_location *loc1,
1308 struct sigma_location *loc2, gboolean with_event)
1309{
1310
1311 if (!loc1 || !loc2)
1312 return FALSE;
1313
1314 if (loc1->line != loc2->line)
1315 return FALSE;
1316 if (loc1->cluster != loc2->cluster)
1317 return FALSE;
1318
1319 if (with_event && loc1->event != loc2->event)
1320 return FALSE;
1321
1322 return TRUE;
1323}
1324
1325/* Decrement the broken-down location fields (leave 'raw' as is). */
1326static void sigma_location_decrement(struct sigma_location *loc,
1327 gboolean with_event)
1328{
1329
1330 if (!loc)
1331 return;
1332
1333 if (with_event) {
1334 if (loc->event--)
1335 return;
1336 loc->event = EVENTS_PER_CLUSTER - 1;
1337 }
1338
1339 if (loc->cluster--)
1340 return;
1341 loc->cluster = CLUSTERS_PER_ROW - 1;
1342
1343 if (loc->line--)
1344 return;
1345 loc->line = ROW_COUNT - 1;
1346}
1347
1348static void sigma_location_increment(struct sigma_location *loc)
1349{
1350
1351 if (!loc)
1352 return;
1353
1354 if (++loc->event < EVENTS_PER_CLUSTER)
1355 return;
1356 loc->event = 0;
1357 if (++loc->cluster < CLUSTERS_PER_ROW)
1358 return;
1359 loc->cluster = 0;
1360 if (++loc->line < ROW_COUNT)
1361 return;
1362 loc->line = 0;
1363}
1364
1365/*
1366 * Determine the position where to open the period of trigger match
1367 * checks. Setup an "impossible" location when triggers are not used.
1368 * Start from the hardware provided 'trig' position otherwise, and
1369 * go back a few clusters, but don't go before the 'start' position.
1370 */
1371static void rewind_trig_arm_pos(struct dev_context *devc, size_t count)
1372{
1373 struct sigma_sample_interp *interp;
1374
1375 if (!devc)
1376 return;
1377 interp = &devc->interp;
1378
1379 if (!devc->use_triggers) {
1380 interp->trig_arm.raw = ~0;
1381 sigma_location_break_down(&interp->trig_arm);
1382 return;
1383 }
1384
1385 interp->trig_arm = interp->trig;
1386 while (count--) {
1387 if (sigma_location_is_eq(&interp->trig_arm, &interp->start, TRUE))
1388 break;
1389 sigma_location_decrement(&interp->trig_arm, TRUE);
1390 }
1391}
1392
16a5d5ac
GS
1393static int alloc_sample_buffer(struct dev_context *devc,
1394 size_t stop_pos, size_t trig_pos, uint8_t mode)
1395{
1396 struct sigma_sample_interp *interp;
1397 gboolean wrapped;
ee5cef71
GS
1398 size_t alloc_size;
1399
16a5d5ac
GS
1400 interp = &devc->interp;
1401
1402 /*
 1403 * Either fetch sample memory from the absolute start of DRAM to the
 1404 * current write position, or from after the current write position
1405 * to before the current write position, if the write pointer has
1406 * wrapped around at the upper DRAM boundary. Assume that the line
1407 * which most recently got written to is of unknown state, ignore
1408 * its content in the "wrapped" case.
1409 */
1410 wrapped = mode & RMR_ROUND;
1411 interp->start.raw = 0;
1412 interp->stop.raw = stop_pos;
1413 if (wrapped) {
1414 interp->start.raw = stop_pos;
1415 interp->start.raw >>= ROW_SHIFT;
1416 interp->start.raw++;
1417 interp->start.raw <<= ROW_SHIFT;
1418 interp->stop.raw = stop_pos;
1419 interp->stop.raw >>= ROW_SHIFT;
1420 interp->stop.raw--;
1421 interp->stop.raw <<= ROW_SHIFT;
1422 }
1423 interp->trig.raw = trig_pos;
1424 interp->iter.raw = 0;
1425
1426 /* Break down raw values to line, cluster, event fields. */
1427 sigma_location_break_down(&interp->start);
1428 sigma_location_break_down(&interp->stop);
1429 sigma_location_break_down(&interp->trig);
1430 sigma_location_break_down(&interp->iter);
1431
66d1790c
GS
1432 /*
1433 * The hardware provided trigger location "is late" because of
1434 * latency in hardware pipelines. It points to after the trigger
1435 * condition match. Arrange for a software check of sample data
1436 * matches starting just a little before the hardware provided
1437 * location. The "4 clusters" distance is an arbitrary choice.
1438 */
1439 rewind_trig_arm_pos(devc, 4 * EVENTS_PER_CLUSTER);
8a723625 1440 memset(&interp->trig_chk, 0, sizeof(interp->trig_chk));
66d1790c 1441
16a5d5ac
GS
1442 /* Determine which DRAM lines to fetch from the device. */
1443 memset(&interp->fetch, 0, sizeof(interp->fetch));
1444 interp->fetch.lines_total = interp->stop.line + 1;
1445 interp->fetch.lines_total -= interp->start.line;
1446 interp->fetch.lines_total += ROW_COUNT;
1447 interp->fetch.lines_total %= ROW_COUNT;
1448 interp->fetch.lines_done = 0;
1449
1450 /* Arrange for chunked download, N lines per USB request. */
1451 interp->fetch.lines_per_read = 32;
ee5cef71
GS
1452 alloc_size = sizeof(devc->interp.fetch.rcvd_lines[0]);
1453 alloc_size *= devc->interp.fetch.lines_per_read;
1454 devc->interp.fetch.rcvd_lines = g_try_malloc0(alloc_size);
1455 if (!devc->interp.fetch.rcvd_lines)
1456 return SR_ERR_MALLOC;
1457
1458 return SR_OK;
1459}
1460
16a5d5ac
GS
1461static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx);
1462static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx);
1463
1464static int fetch_sample_buffer(struct dev_context *devc)
1465{
1466 struct sigma_sample_interp *interp;
1467 size_t count;
1468 int ret;
1469 const uint8_t *rdptr;
1470 uint16_t ts, data;
1471
1472 interp = &devc->interp;
1473
1474 /* First invocation? Seed the iteration position. */
1475 if (!interp->fetch.lines_done) {
1476 interp->iter = interp->start;
1477 }
1478
1479 /* Get another set of DRAM lines in one read call. */
1480 count = interp->fetch.lines_total - interp->fetch.lines_done;
1481 if (count > interp->fetch.lines_per_read)
1482 count = interp->fetch.lines_per_read;
1483 ret = sigma_read_dram(devc, interp->iter.line, count,
1484 (uint8_t *)interp->fetch.rcvd_lines);
1485 if (ret != SR_OK)
1486 return ret;
1487 interp->fetch.lines_rcvd = count;
1488 interp->fetch.curr_line = &interp->fetch.rcvd_lines[0];
1489
1490 /* First invocation? Get initial timestamp and sample data. */
1491 if (!interp->fetch.lines_done) {
1492 rdptr = (void *)interp->fetch.curr_line;
1493 ts = read_u16le_inc(&rdptr);
1494 data = read_u16le_inc(&rdptr);
1495 if (interp->samples_per_event == 4) {
1496 data = sigma_deinterlace_data_4x4(data, 0);
1497 } else if (interp->samples_per_event == 2) {
1498 data = sigma_deinterlace_data_2x8(data, 0);
1499 }
1500 interp->last.ts = ts;
1501 interp->last.sample = data;
1502 }
1503
1504 return SR_OK;
1505}
1506
ee5cef71
GS
1507static void free_sample_buffer(struct dev_context *devc)
1508{
1509 g_free(devc->interp.fetch.rcvd_lines);
1510 devc->interp.fetch.rcvd_lines = NULL;
debe1ff6 1511 devc->interp.fetch.lines_per_read = 0;
ee5cef71
GS
1512}
1513
c53d793f
HE
1514/*
1515 * In 100 and 200 MHz mode, only a single pin rising/falling can be
1516 * set as trigger. In other modes, two rising/falling triggers can be set,
ba7dd8bb 1517 * in addition to value/mask trigger for any number of channels.
c53d793f
HE
1518 *
1519 * The Sigma supports complex triggers using boolean expressions, but this
1520 * has not been implemented yet.
1521 */
3ba56876 1522SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
57bbf56b 1523{
39c64c6a
BV
1524 struct dev_context *devc;
1525 struct sr_trigger *trigger;
1526 struct sr_trigger_stage *stage;
1527 struct sr_trigger_match *match;
1528 const GSList *l, *m;
3d9373af
GS
1529 uint16_t channelbit;
1530 size_t trigger_set;
57bbf56b 1531
39c64c6a 1532 devc = sdi->priv;
5c231fc4 1533 memset(&devc->trigger, 0, sizeof(devc->trigger));
fb65ca09 1534 devc->use_triggers = FALSE;
5c231fc4
GS
1535 trigger = sr_session_trigger_get(sdi->session);
1536 if (!trigger)
39c64c6a
BV
1537 return SR_OK;
1538
fb65ca09
GS
1539 if (!ASIX_SIGMA_WITH_TRIGGER) {
1540 sr_warn("Trigger support is not implemented. Ignoring the spec.");
1541 return SR_OK;
1542 }
1543
39c64c6a
BV
1544 trigger_set = 0;
1545 for (l = trigger->stages; l; l = l->next) {
1546 stage = l->data;
1547 for (m = stage->matches; m; m = m->next) {
1548 match = m->data;
9b4d261f 1549 /* Ignore disabled channels with a trigger. */
39c64c6a 1550 if (!match->channel->enabled)
39c64c6a 1551 continue;
3f5f5484 1552 channelbit = BIT(match->channel->index);
2d8a5089 1553 if (devc->clock.samplerate >= SR_MHZ(100)) {
39c64c6a
BV
1554 /* Fast trigger support. */
1555 if (trigger_set) {
88a5f9ea 1556 sr_err("100/200MHz modes limited to single trigger pin.");
39c64c6a
BV
1557 return SR_ERR;
1558 }
a53b8e4d 1559 if (match->match == SR_TRIGGER_FALLING) {
39c64c6a 1560 devc->trigger.fallingmask |= channelbit;
a53b8e4d 1561 } else if (match->match == SR_TRIGGER_RISING) {
39c64c6a 1562 devc->trigger.risingmask |= channelbit;
a53b8e4d 1563 } else {
88a5f9ea 1564 sr_err("100/200MHz modes limited to edge trigger.");
39c64c6a
BV
1565 return SR_ERR;
1566 }
eec5275e 1567
0a1f7b09 1568 trigger_set++;
39c64c6a
BV
1569 } else {
1570 /* Simple trigger support (event). */
1571 if (match->match == SR_TRIGGER_ONE) {
1572 devc->trigger.simplevalue |= channelbit;
1573 devc->trigger.simplemask |= channelbit;
8ebad343 1574 } else if (match->match == SR_TRIGGER_ZERO) {
39c64c6a
BV
1575 devc->trigger.simplevalue &= ~channelbit;
1576 devc->trigger.simplemask |= channelbit;
8ebad343 1577 } else if (match->match == SR_TRIGGER_FALLING) {
39c64c6a 1578 devc->trigger.fallingmask |= channelbit;
0a1f7b09 1579 trigger_set++;
8ebad343 1580 } else if (match->match == SR_TRIGGER_RISING) {
39c64c6a 1581 devc->trigger.risingmask |= channelbit;
0a1f7b09 1582 trigger_set++;
39c64c6a
BV
1583 }
1584
1585 /*
1586 * Actually, Sigma supports 2 rising/falling triggers,
1587 * but they are ORed and the current trigger syntax
1588 * does not permit ORed triggers.
1589 */
1590 if (trigger_set > 1) {
88a5f9ea 1591 sr_err("Limited to 1 edge trigger.");
39c64c6a
BV
1592 return SR_ERR;
1593 }
ee492173 1594 }
ee492173 1595 }
57bbf56b
HE
1596 }
1597
fb65ca09
GS
1598 /* Keep track whether triggers are involved during acquisition. */
1599 devc->use_triggers = TRUE;
1600
e46b8fb1 1601 return SR_OK;
57bbf56b
HE
1602}
1603
98b43eb3
GS
1604static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample)
1605{
8a723625
GS
1606 struct sigma_sample_interp *interp;
1607 uint16_t last_sample;
1608 struct sigma_trigger *t;
1609 gboolean simple_match, rising_match, falling_match;
1610 gboolean matched;
1611
1612 /*
1613 * This logic is about improving the precision of the hardware
1614 * provided trigger match position. Software checks are only
1615 * required for a short range of samples, and only when a user
1616 * specified trigger condition was involved during acquisition.
98b43eb3 1617 */
8a723625
GS
1618 if (!devc)
1619 return FALSE;
fb65ca09
GS
1620 if (!devc->use_triggers)
1621 return FALSE;
8a723625
GS
1622 interp = &devc->interp;
1623 if (!interp->trig_chk.armed)
1624 return FALSE;
fb65ca09 1625
8a723625
GS
1626 /*
1627 * Check if the current sample and its most recent transition
1628 * match the initially provided trigger condition. The data
1629 * must not fail either of the individual checks. Unused
1630 * trigger features remain neutral in the summary expression.
1631 */
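	/*
	 * Example (illustration): a rising edge trigger on channel 3
	 * sets risingmask to BIT(3), so the check below requires bit 3
	 * to be low in the previous sample and high in the current one.
	 * With no value/mask condition, simplemask is 0 and the simple
	 * match is trivially satisfied.
	 */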
1632 last_sample = interp->last.sample;
1633 t = &devc->trigger;
1634 simple_match = (sample & t->simplemask) == t->simplevalue;
1635 rising_match = ((last_sample & t->risingmask) == 0) &&
1636 ((sample & t->risingmask) == t->risingmask);
1637 falling_match = ((last_sample & t->fallingmask) == t->fallingmask) &&
1638 ((sample & t->fallingmask) == 0);
1639 matched = simple_match && rising_match && falling_match;
1640
1641 return matched;
98b43eb3
GS
1642}
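/*
 * Worked example with made-up mask values: for simplemask 0x0001,
 * simplevalue 0x0001, risingmask 0x0002, fallingmask 0x0000, a
 * transition from last_sample 0x0001 to sample 0x0003 matches --
 * bit 0 is high (level check), bit 1 changed from low to high
 * (rising check), and the falling check stays neutral because its
 * mask is all-zero.
 */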
1643
66d1790c
GS
1644static int send_trigger_marker(struct dev_context *devc)
1645{
1646 int ret;
1647
1648 ret = flush_submit_buffer(devc);
1649 if (ret != SR_OK)
1650 return ret;
1651 ret = std_session_send_df_trigger(devc->buffer->sdi);
1652 if (ret != SR_OK)
1653 return ret;
1654
1655 return SR_OK;
1656}
1657
98b43eb3 1658static int check_and_submit_sample(struct dev_context *devc,
8a723625 1659 uint16_t sample, size_t count)
98b43eb3
GS
1660{
1661 gboolean triggered;
1662 int ret;
1663
8a723625
GS
1664 triggered = sample_matches_trigger(devc, sample);
1665 if (triggered) {
66d1790c 1666 send_trigger_marker(devc);
8a723625
GS
1667 devc->interp.trig_chk.matched = TRUE;
1668 }
98b43eb3
GS
1669
1670 ret = addto_submit_buffer(devc, sample, count);
1671 if (ret != SR_OK)
1672 return ret;
1673
1674 return SR_OK;
1675}
1676
66d1790c
GS
1677static void sigma_location_check(struct dev_context *devc)
1678{
1679 struct sigma_sample_interp *interp;
1680
1681 if (!devc)
1682 return;
1683 interp = &devc->interp;
1684
1685 /*
1686 * Manage the period of trigger match checks in software.
 1687 * Start supervision somewhere before the hardware-provided
 1688 * location. Stop supervision after an arbitrary number of
 1689 * event slots, or when a match was found.
1690 */
1691 if (interp->trig_chk.armed) {
1692 interp->trig_chk.evt_remain--;
1693 if (!interp->trig_chk.evt_remain || interp->trig_chk.matched)
1694 interp->trig_chk.armed = FALSE;
1695 }
1696 if (!interp->trig_chk.armed && !interp->trig_chk.matched) {
1697 if (sigma_location_is_eq(&interp->iter, &interp->trig_arm, TRUE)) {
1698 interp->trig_chk.armed = TRUE;
1699 interp->trig_chk.matched = FALSE;
1700 interp->trig_chk.evt_remain = 8 * EVENTS_PER_CLUSTER;
1701 }
1702 }
1703
1704 /*
 1705 * Force a trigger marker when the software check has not found
 1706 * a match by the time the hardware-provided position is reached.
 1707 * This most probably corresponds to a user-initiated button press.
1708 */
1709 if (interp->trig_chk.armed) {
1710 if (sigma_location_is_eq(&interp->iter, &interp->trig, TRUE)) {
1711 (void)send_trigger_marker(devc);
1712 interp->trig_chk.matched = TRUE;
1713 }
1714 }
1715}
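/*
 * Size of the supervision window above, assuming EVENTS_PER_CLUSTER
 * is 7 as described by the chunk decoder's comment below: 8 * 7 = 56
 * event slots, i.e. roughly 1.1us at the 20ns event pitch.
 */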
1716
3513d965
MV
1717/*
 1718 * Return the timestamp of a DRAM cluster.
1719 */
1720static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster)
1721{
2a62a9c4 1722 return read_u16le((const uint8_t *)&cluster->timestamp);
3513d965
MV
1723}
1724
0498f743
GS
1725/*
1726 * Return one 16bit data entity of a DRAM cluster at the specified index.
1727 */
1728static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx)
1729{
2a62a9c4 1730 return read_u16le((const uint8_t *)&cl->samples[idx]);
0498f743
GS
1731}
1732
85c032e4
GS
1733/*
1734 * Deinterlace sample data that was retrieved at 100MHz samplerate.
1735 * One 16bit item contains two samples of 8bits each. The bits of
1736 * multiple samples are interleaved.
1737 */
de4c29fa 1738static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx)
85c032e4
GS
1739{
1740 uint16_t outdata;
1741
1742 indata >>= idx;
1743 outdata = 0;
1744 outdata |= (indata >> (0 * 2 - 0)) & (1 << 0);
1745 outdata |= (indata >> (1 * 2 - 1)) & (1 << 1);
1746 outdata |= (indata >> (2 * 2 - 2)) & (1 << 2);
1747 outdata |= (indata >> (3 * 2 - 3)) & (1 << 3);
1748 outdata |= (indata >> (4 * 2 - 4)) & (1 << 4);
1749 outdata |= (indata >> (5 * 2 - 5)) & (1 << 5);
1750 outdata |= (indata >> (6 * 2 - 6)) & (1 << 6);
1751 outdata |= (indata >> (7 * 2 - 7)) & (1 << 7);
1752 return outdata;
1753}
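/*
 * Worked example for the 2x8 layout, where bit n of sample 'idx' sits
 * at position 2 * n + idx of the 16bit item: an input of 0x0002 has
 * only bit 1 set, which belongs to sample 1, bit 0. Consequently
 * sigma_deinterlace_data_2x8(0x0002, 0) yields 0x00, while
 * sigma_deinterlace_data_2x8(0x0002, 1) yields 0x01.
 */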
1754
1755/*
1756 * Deinterlace sample data that was retrieved at 200MHz samplerate.
1757 * One 16bit item contains four samples of 4bits each. The bits of
1758 * multiple samples are interleaved.
1759 */
de4c29fa 1760static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx)
85c032e4
GS
1761{
1762 uint16_t outdata;
1763
1764 indata >>= idx;
1765 outdata = 0;
1766 outdata |= (indata >> (0 * 4 - 0)) & (1 << 0);
1767 outdata |= (indata >> (1 * 4 - 1)) & (1 << 1);
1768 outdata |= (indata >> (2 * 4 - 2)) & (1 << 2);
1769 outdata |= (indata >> (3 * 4 - 3)) & (1 << 3);
1770 return outdata;
1771}
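/*
 * Correspondingly for the 4x4 layout, where bit n of sample 'idx' sits
 * at position 4 * n + idx: an input of 0x0010 has only bit 4 set,
 * which is sample 0, bit 1, so sigma_deinterlace_data_4x4(0x0010, 0)
 * yields 0x02 and the other three sample indices yield 0x00.
 */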
1772
98b43eb3
GS
1773static void sigma_decode_dram_cluster(struct dev_context *devc,
1774 struct sigma_dram_cluster *dram_cluster,
914f8160 1775 size_t events_in_cluster)
23239b5c 1776{
85c032e4 1777 uint16_t tsdiff, ts, sample, item16;
3d9373af
GS
1778 size_t count;
1779 size_t evt;
23239b5c 1780
23239b5c 1781 /*
468f17f2
GS
1782 * If this cluster is not adjacent to the previously received
1783 * cluster, then send the appropriate number of samples with the
1784 * previous values to the sigrok session. This "decodes RLE".
2c33b092 1785 *
98b43eb3
GS
1786 * These samples cannot match the trigger since they just repeat
1787 * the previously submitted data pattern. (This assumption holds
1788 * for simple level and edge triggers. It would not for timed or
1789 * counted conditions, which currently are not supported.)
23239b5c 1790 */
98b43eb3 1791 ts = sigma_dram_cluster_ts(dram_cluster);
ee5cef71 1792 tsdiff = ts - devc->interp.last.ts;
98b43eb3 1793 if (tsdiff > 0) {
ee5cef71 1794 sample = devc->interp.last.sample;
de4c29fa 1795 count = tsdiff * devc->interp.samples_per_event;
8a723625 1796 (void)check_and_submit_sample(devc, sample, count);
23239b5c 1797 }
ee5cef71 1798 devc->interp.last.ts = ts + EVENTS_PER_CLUSTER;
23239b5c
MV
1799
1800 /*
98b43eb3
GS
 1801 * Grab sample data from the current cluster and prepare its
 1802 * submission to the session feed. Handle the samplerate-dependent
 1803 * memory layout of the sample data. Accumulation of data chunks
 1804 * before submission is transparent to this code path; a specific
 1805 * buffer depth is neither assumed nor required here.
23239b5c 1806 */
0498f743 1807 sample = 0;
3d9373af
GS
1808 for (evt = 0; evt < events_in_cluster; evt++) {
1809 item16 = sigma_dram_cluster_data(dram_cluster, evt);
de4c29fa
GS
1810 if (devc->interp.samples_per_event == 4) {
1811 sample = sigma_deinterlace_data_4x4(item16, 0);
8a723625 1812 check_and_submit_sample(devc, sample, 1);
66d1790c 1813 devc->interp.last.sample = sample;
de4c29fa 1814 sample = sigma_deinterlace_data_4x4(item16, 1);
8a723625 1815 check_and_submit_sample(devc, sample, 1);
66d1790c 1816 devc->interp.last.sample = sample;
de4c29fa 1817 sample = sigma_deinterlace_data_4x4(item16, 2);
8a723625 1818 check_and_submit_sample(devc, sample, 1);
66d1790c 1819 devc->interp.last.sample = sample;
de4c29fa 1820 sample = sigma_deinterlace_data_4x4(item16, 3);
8a723625 1821 check_and_submit_sample(devc, sample, 1);
66d1790c 1822 devc->interp.last.sample = sample;
de4c29fa
GS
1823 } else if (devc->interp.samples_per_event == 2) {
1824 sample = sigma_deinterlace_data_2x8(item16, 0);
8a723625 1825 check_and_submit_sample(devc, sample, 1);
66d1790c 1826 devc->interp.last.sample = sample;
de4c29fa 1827 sample = sigma_deinterlace_data_2x8(item16, 1);
8a723625 1828 check_and_submit_sample(devc, sample, 1);
66d1790c 1829 devc->interp.last.sample = sample;
85c032e4
GS
1830 } else {
1831 sample = item16;
8a723625 1832 check_and_submit_sample(devc, sample, 1);
66d1790c 1833 devc->interp.last.sample = sample;
23239b5c 1834 }
66d1790c
GS
1835 sigma_location_increment(&devc->interp.iter);
1836 sigma_location_check(devc);
23239b5c 1837 }
23239b5c
MV
1838}
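/*
 * Worked example for the RLE expansion above (made-up numbers): when a
 * cluster's timestamp exceeds the previously tracked position by 10
 * event slots while the firmware runs at 200MHz (4 samples per event),
 * then 10 * 4 = 40 copies of the last seen sample get submitted before
 * the cluster's own events are decoded.
 */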
1839
28a35d8a 1840/*
fefa1800
UH
1841 * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster.
1842 * Each event is 20ns apart, and can contain multiple samples.
f78898e9
HE
1843 *
1844 * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart.
1845 * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart.
1846 * For 50 MHz and below, events contain one sample for each channel,
1847 * spread 20 ns apart.
28a35d8a 1848 */
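/*
 * These numbers are consistent with the line size, assuming 16bit
 * entities throughout: 64 clusters * (1 timestamp + 7 events) * 2
 * bytes equals 1024 bytes per DRAM line.
 */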
98b43eb3
GS
1849static int decode_chunk_ts(struct dev_context *devc,
1850 struct sigma_dram_line *dram_line,
914f8160 1851 size_t events_in_line)
28a35d8a 1852{
3628074d 1853 struct sigma_dram_cluster *dram_cluster;
3d9373af
GS
1854 size_t clusters_in_line;
1855 size_t events_in_cluster;
1856 size_t cluster;
f06fb3e9 1857
f06fb3e9
GS
1858 clusters_in_line = events_in_line;
1859 clusters_in_line += EVENTS_PER_CLUSTER - 1;
1860 clusters_in_line /= EVENTS_PER_CLUSTER;
ee492173 1861
5fc01191 1862 /* For each full DRAM cluster. */
3d9373af
GS
1863 for (cluster = 0; cluster < clusters_in_line; cluster++) {
1864 dram_cluster = &dram_line->cluster[cluster];
5fc01191 1865
5fc01191 1866 /* The last cluster might not be full. */
3d9373af 1867 if ((cluster == clusters_in_line - 1) &&
23239b5c 1868 (events_in_line % EVENTS_PER_CLUSTER)) {
5fc01191 1869 events_in_cluster = events_in_line % EVENTS_PER_CLUSTER;
23239b5c 1870 } else {
5fc01191 1871 events_in_cluster = EVENTS_PER_CLUSTER;
abda62ce 1872 }
ee492173 1873
98b43eb3 1874 sigma_decode_dram_cluster(devc, dram_cluster,
914f8160 1875 events_in_cluster);
28a35d8a
HE
1876 }
1877
e46b8fb1 1878 return SR_OK;
28a35d8a
HE
1879}
1880
6057d9fa 1881static int download_capture(struct sr_dev_inst *sdi)
28a35d8a 1882{
f06fb3e9 1883 struct dev_context *devc;
ee5cef71 1884 struct sigma_sample_interp *interp;
462fe786 1885 uint32_t stoppos, triggerpos;
6057d9fa 1886 uint8_t modestatus;
98b43eb3 1887 int ret;
debe1ff6 1888 size_t chunks_per_receive_call;
f06fb3e9
GS
1889
1890 devc = sdi->priv;
ee5cef71 1891 interp = &devc->interp;
c6648b66 1892
22f64ed8 1893 /*
debe1ff6
GS
 1894 * Check the mode register. Force-stop the current acquisition
 1895 * if it has not terminated yet. Will block until the acquisition
 1896 * stops, assuming that this won't take long. Should execute
 1897 * exactly once, then keep finding its condition met.
1898 *
22f64ed8
GS
1899 * Ask the hardware to stop data acquisition. Reception of the
1900 * FORCESTOP request makes the hardware "disable RLE" (store
1901 * clusters to DRAM regardless of whether pin state changes) and
1902 * raise the POSTTRIGGERED flag.
1903 */
debe1ff6
GS
1904 ret = sigma_get_register(devc, READ_MODE, &modestatus);
1905 if (ret != SR_OK) {
1906 sr_err("Could not determine current device state.");
1907 return FALSE;
1908 }
1909 if (!(modestatus & RMR_POSTTRIGGERED)) {
1910 sr_info("Downloading sample data.");
1911 devc->state = SIGMA_DOWNLOAD;
1912
1913 modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
1914 ret = sigma_set_register(devc, WRITE_MODE, modestatus);
1915 if (ret != SR_OK)
f73b00b6 1916 return FALSE;
debe1ff6
GS
1917 do {
1918 ret = sigma_get_register(devc, READ_MODE, &modestatus);
1919 if (ret != SR_OK) {
1920 sr_err("Could not poll for post-trigger state.");
1921 return FALSE;
1922 }
1923 } while (!(modestatus & RMR_POSTTRIGGERED));
1924 }
6057d9fa 1925
16a5d5ac 1926 /*
debe1ff6
GS
1927 * Switch the hardware from DRAM write (data acquisition) to
1928 * DRAM read (sample memory download). Prepare resources for
1929 * sample memory content retrieval. Should execute exactly once,
1930 * then keep finding its condition met.
1931 *
16a5d5ac
GS
1932 * Get the current positions (acquisition write pointer, and
1933 * trigger match location). With disabled triggers, use a value
1934 * for the location that will never match during interpretation.
debe1ff6
GS
1935 * Determine which area of the sample memory to retrieve,
1936 * allocate a receive buffer, and setup counters/pointers.
16a5d5ac 1937 */
debe1ff6
GS
1938 if (!interp->fetch.lines_per_read) {
1939 ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
1940 if (ret != SR_OK)
1941 return FALSE;
1942
1943 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
1944 if (ret != SR_OK) {
1945 sr_err("Could not query capture positions/state.");
1946 return FALSE;
1947 }
1948 if (!devc->use_triggers)
1949 triggerpos = ~0;
1950 if (!(modestatus & RMR_TRIGGERED))
1951 triggerpos = ~0;
1952
1953 ret = alloc_sample_buffer(devc, stoppos, triggerpos, modestatus);
1954 if (ret != SR_OK)
1955 return FALSE;
1956
1957 ret = alloc_submit_buffer(sdi);
1958 if (ret != SR_OK)
1959 return FALSE;
1960 ret = setup_submit_limit(devc);
1961 if (ret != SR_OK)
1962 return FALSE;
f73b00b6 1963 }
6057d9fa 1964
c6648b66 1965 /*
debe1ff6
GS
1966 * Get another set of sample memory rows, and interpret its
1967 * content. Will execute as many times as it takes to complete
1968 * the memory region that the recent acquisition spans.
1969 *
1970 * The size of a receive call's workload and the main loop's
1971 * receive call poll period determine the UI responsiveness and
1972 * the overall transfer time for the sample memory content.
c6648b66 1973 */
debe1ff6 1974 chunks_per_receive_call = 50;
16a5d5ac 1975 while (interp->fetch.lines_done < interp->fetch.lines_total) {
914f8160 1976 size_t dl_events_in_line;
16a5d5ac
GS
1977
1978 /* Read another chunk of sample memory (several lines). */
1979 ret = fetch_sample_buffer(devc);
88a5f9ea
GS
1980 if (ret != SR_OK)
1981 return FALSE;
6868626b 1982
16a5d5ac
GS
1983 /* Process lines of sample data. Last line may be short. */
1984 while (interp->fetch.lines_rcvd--) {
5c231fc4 1985 dl_events_in_line = EVENTS_PER_ROW;
16a5d5ac
GS
1986 if (interp->iter.line == interp->stop.line) {
1987 dl_events_in_line = interp->stop.raw & ROW_MASK;
1988 }
ee5cef71 1989 decode_chunk_ts(devc, interp->fetch.curr_line,
914f8160 1990 dl_events_in_line);
ee5cef71 1991 interp->fetch.curr_line++;
16a5d5ac 1992 interp->fetch.lines_done++;
c6648b66 1993 }
debe1ff6
GS
1994
1995 /* Keep returning to application code for large data sets. */
1996 if (!--chunks_per_receive_call) {
1997 ret = flush_submit_buffer(devc);
1998 if (ret != SR_OK)
1999 return FALSE;
2000 break;
2001 }
6868626b
BV
2002 }
2003
debe1ff6
GS
2004 /*
 2005 * Release previously allocated resources and adjust state when
 2006 * all of the sample memory has been retrieved and interpretation
 2007 * has completed. Should execute exactly once.
2008 */
2009 if (interp->fetch.lines_done >= interp->fetch.lines_total) {
2010 ret = flush_submit_buffer(devc);
2011 if (ret != SR_OK)
2012 return FALSE;
2013 free_submit_buffer(devc);
2014 free_sample_buffer(devc);
6057d9fa 2015
debe1ff6
GS
2016 ret = std_session_send_df_end(sdi);
2017 if (ret != SR_OK)
2018 return FALSE;
2019
2020 devc->state = SIGMA_IDLE;
2021 sr_dev_acquisition_stop(sdi);
2022 }
6057d9fa
MV
2023
2024 return TRUE;
6868626b
BV
2025}
2026
d4051930 2027/*
74d453ab
GS
 2028 * Periodically check the Sigma status while in CAPTURE mode. This routine
 2029 * checks whether the configured sample count or capture time limit has been
 2030 * reached, and will stop the acquisition and download the acquired samples.
d4051930
MV
2031 */
2032static int sigma_capture_mode(struct sr_dev_inst *sdi)
6868626b 2033{
f06fb3e9 2034 struct dev_context *devc;
f14e6f7e
GS
2035 int ret;
2036 uint32_t stoppos, triggerpos;
2037 uint8_t mode;
2038 gboolean full, wrapped, triggered, complete;
28a35d8a 2039
f06fb3e9 2040 devc = sdi->priv;
f14e6f7e
GS
2041
2042 /*
2043 * Get and interpret current acquisition status. Some of these
2044 * thresholds are rather arbitrary.
2045 */
2046 ret = sigma_read_pos(devc, &stoppos, &triggerpos, &mode);
2047 if (ret != SR_OK)
2048 return FALSE;
2049 stoppos >>= ROW_SHIFT;
2050 full = stoppos >= ROW_COUNT - 2;
2051 wrapped = mode & RMR_ROUND;
2052 triggered = mode & RMR_TRIGGERED;
2053 complete = mode & RMR_POSTTRIGGERED;
2054
2055 /*
2056 * Acquisition completed in the hardware? Start or continue
2057 * sample memory content download.
 2058 * (Can user-initiated button presses result in an auto stop?
 2059 * Will they "trigger", and later result in an expired time
 2060 * limit for post-trigger conditions?)
2061 */
2062 if (complete)
2063 return download_capture(sdi);
2064
2065 /*
2066 * Previously configured acquisition period exceeded? Start
2067 * sample download. Start the timeout period late when triggers
2068 * are used (unknown period from acquisition start to trigger
2069 * match).
2070 */
156b6879 2071 if (sr_sw_limits_check(&devc->limit.acquire))
6057d9fa 2072 return download_capture(sdi);
f14e6f7e
GS
2073 if (devc->late_trigger_timeout && triggered) {
2074 sr_sw_limits_acquisition_start(&devc->limit.acquire);
2075 devc->late_trigger_timeout = FALSE;
2076 }
2077
2078 /*
2079 * No trigger specified, and sample memory exhausted? Start
2080 * download (may otherwise keep acquiring, even for infinite
 2081 * amounts of time without a user-specified time/count limit).
 2082 * This handles situations where users specify limits which
2083 * exceed the device's capabilities.
2084 */
2085 (void)full;
2086 if (!devc->use_triggers && wrapped)
2087 return download_capture(sdi);
00c86508 2088
d4051930
MV
2089 return TRUE;
2090}
28a35d8a 2091
3ba56876 2092SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data)
d4051930
MV
2093{
2094 struct sr_dev_inst *sdi;
2095 struct dev_context *devc;
88c51afe 2096
d4051930
MV
2097 (void)fd;
2098 (void)revents;
88c51afe 2099
d4051930
MV
2100 sdi = cb_data;
2101 devc = sdi->priv;
2102
de4c29fa 2103 if (devc->state == SIGMA_IDLE)
d4051930
MV
2104 return TRUE;
2105
dde0175d
GS
2106 /*
2107 * When the application has requested to stop the acquisition,
debe1ff6
GS
2108 * then immediately start downloading sample data. Continue a
2109 * previously initiated download until completion. Otherwise
dde0175d
GS
2110 * keep checking configured limits which will terminate the
2111 * acquisition and initiate download.
2112 */
de4c29fa 2113 if (devc->state == SIGMA_STOPPING)
dde0175d 2114 return download_capture(sdi);
debe1ff6
GS
2115 if (devc->state == SIGMA_DOWNLOAD)
2116 return download_capture(sdi);
de4c29fa 2117 if (devc->state == SIGMA_CAPTURE)
d4051930 2118 return sigma_capture_mode(sdi);
28a35d8a 2119
28a35d8a
HE
2120 return TRUE;
2121}
2122
c53d793f 2123/* Build a LUT entry used by the trigger functions. */
7dd766e0
GS
2124static void build_lut_entry(uint16_t *lut_entry,
2125 uint16_t spec_value, uint16_t spec_mask)
ee492173 2126{
7dd766e0
GS
2127 size_t quad, bitidx, ch;
2128 uint16_t quadmask, bitmask;
2129 gboolean spec_value_low, bit_idx_low;
ee492173 2130
7dd766e0
GS
2131 /*
2132 * For each quad-channel-group, for each bit in the LUT (each
2133 * bit pattern of the channel signals, aka LUT address), for
 2134 * each channel in the quad, set up the bit in the LUT entry.
2135 *
2136 * Start from all-ones in the LUT (true, always matches), then
2137 * "pessimize the truthness" for specified conditions.
2138 */
2139 for (quad = 0; quad < 4; quad++) {
2140 lut_entry[quad] = ~0;
2141 for (bitidx = 0; bitidx < 16; bitidx++) {
2142 for (ch = 0; ch < 4; ch++) {
3f5f5484 2143 quadmask = BIT(ch);
7dd766e0
GS
2144 bitmask = quadmask << (quad * 4);
2145 if (!(spec_mask & bitmask))
2146 continue;
2147 /*
 2148 * This bit is part of the spec. The
 2149 * condition which gets checked here
 2150 * (and got checked in all implementations
 2151 * so far) is uncertain. A bit position
 2152 * in the current index's number(!) is
 2153 * checked?
2154 */
2155 spec_value_low = !(spec_value & bitmask);
2156 bit_idx_low = !(bitidx & quadmask);
2157 if (spec_value_low == bit_idx_low)
2158 continue;
3f5f5484 2159 lut_entry[quad] &= ~BIT(bitidx);
ee492173 2160 }
a53b8e4d 2161 }
ee492173 2162 }
c53d793f 2163}
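/*
 * Worked example with an illustrative spec: for spec_value 0x0001 and
 * spec_mask 0x0001 only channel 0 of quad 0 participates. The loop
 * keeps those LUT addresses whose bit 0 is set and clears the rest,
 * so lut_entry[0] ends up as 0xaaaa while the entries for the other
 * quads remain at 0xffff ("always matches").
 */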
ee492173 2164
c53d793f
HE
2165/* Add a logical function to LUT mask. */
2166static void add_trigger_function(enum triggerop oper, enum triggerfunc func,
3d9373af 2167 size_t index, gboolean neg, uint16_t *mask)
c53d793f 2168{
ea57157d
GS
2169 int x[2][2], a, b, aset, bset, rset;
2170 size_t bitidx;
c53d793f 2171
ea57157d
GS
2172 /*
 2173 * Beware! The x, a, b, aset, bset, rset variables strictly
 2174 * require the limited 0..1 range. They are not interpreted
 2175 * as boolean truth values; bit arithmetic is done on them instead.
2176 */
c53d793f 2177
ea57157d
GS
2178 /* Construct a pattern which detects the condition. */
2179 memset(x, 0, sizeof(x));
c53d793f
HE
2180 switch (oper) {
2181 case OP_LEVEL:
2182 x[0][1] = 1;
2183 x[1][1] = 1;
2184 break;
2185 case OP_NOT:
2186 x[0][0] = 1;
2187 x[1][0] = 1;
2188 break;
2189 case OP_RISE:
2190 x[0][1] = 1;
2191 break;
2192 case OP_FALL:
2193 x[1][0] = 1;
2194 break;
2195 case OP_RISEFALL:
2196 x[0][1] = 1;
2197 x[1][0] = 1;
2198 break;
2199 case OP_NOTRISE:
2200 x[1][1] = 1;
2201 x[0][0] = 1;
2202 x[1][0] = 1;
2203 break;
2204 case OP_NOTFALL:
2205 x[1][1] = 1;
2206 x[0][0] = 1;
2207 x[0][1] = 1;
2208 break;
2209 case OP_NOTRISEFALL:
2210 x[1][1] = 1;
2211 x[0][0] = 1;
2212 break;
2213 }
2214
ea57157d 2215 /* Transpose the pattern if the condition is negated. */
c53d793f 2216 if (neg) {
ea57157d
GS
2217 size_t i, j;
2218 int tmp;
2219
0a1f7b09
UH
2220 for (i = 0; i < 2; i++) {
2221 for (j = 0; j < 2; j++) {
c53d793f 2222 tmp = x[i][j];
0a1f7b09
UH
2223 x[i][j] = x[1 - i][1 - j];
2224 x[1 - i][1 - j] = tmp;
c53d793f 2225 }
ea9cfed7 2226 }
c53d793f
HE
2227 }
2228
ea57157d
GS
2229 /* Update the LUT mask with the function's condition. */
2230 for (bitidx = 0; bitidx < 16; bitidx++) {
2231 a = (bitidx & BIT(2 * index + 0)) ? 1 : 0;
2232 b = (bitidx & BIT(2 * index + 1)) ? 1 : 0;
c53d793f 2233
ea57157d 2234 aset = (*mask & BIT(bitidx)) ? 1 : 0;
c53d793f
HE
2235 bset = x[b][a];
2236
2237 if (func == FUNC_AND || func == FUNC_NAND)
2238 rset = aset & bset;
2239 else if (func == FUNC_OR || func == FUNC_NOR)
2240 rset = aset | bset;
2241 else if (func == FUNC_XOR || func == FUNC_NXOR)
2242 rset = aset ^ bset;
ea57157d
GS
2243 else
2244 rset = 0;
c53d793f
HE
2245
2246 if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR)
ea57157d 2247 rset = 1 - rset;
c53d793f
HE
2248
2249 if (rset)
ea57157d
GS
2250 *mask |= BIT(bitidx);
2251 else
2252 *mask &= ~BIT(bitidx);
c53d793f
HE
2253 }
2254}
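/*
 * Reading aid, as far as can be inferred from the OP_* cases above:
 * x[previous][current] describes the transition of the observed pair,
 * so OP_RISE marking x[0][1] means "previously low, currently high".
 * The a/b bits extract exactly that pair from the LUT address of the
 * given slot 'index' before the AND/OR/XOR variants combine it with
 * the mask that was accumulated so far.
 */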
2255
2256/*
 2257 * Build the trigger LUTs used at 50 MHz and lower sample rates to support
 2258 * simple pin change and state triggers. Only two transitions (rise/fall) can
 2259 * be set at any time, but a full mask and value can be set (0/1).
2260 */
9b4d261f
GS
2261SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc,
2262 struct triggerlut *lut)
c53d793f 2263{
5c231fc4 2264 uint16_t masks[2];
3d9373af 2265 size_t bitidx, condidx;
7dd766e0 2266 uint16_t value, mask;
c53d793f 2267
fb65ca09 2268 /* Set up something that "won't match" in the absence of a spec. */
5c231fc4 2269 memset(lut, 0, sizeof(*lut));
fb65ca09
GS
2270 if (!devc->use_triggers)
2271 return SR_OK;
2272
2273 /* Start assuming simple triggers. Edges are handled below. */
c53d793f 2274 lut->m4 = 0xa000;
16791da9 2275 lut->m3q = 0xffff;
c53d793f 2276
7dd766e0
GS
2277 /* Process value/mask triggers. */
2278 value = devc->trigger.simplevalue;
2279 mask = devc->trigger.simplemask;
2280 build_lut_entry(lut->m2d, value, mask);
c53d793f 2281
7dd766e0
GS
2282 /* Scan for and process rise/fall triggers. */
2283 memset(&masks, 0, sizeof(masks));
2284 condidx = 0;
2285 for (bitidx = 0; bitidx < 16; bitidx++) {
3f5f5484 2286 mask = BIT(bitidx);
7dd766e0
GS
2287 value = devc->trigger.risingmask | devc->trigger.fallingmask;
2288 if (!(value & mask))
2289 continue;
2290 if (condidx == 0)
2291 build_lut_entry(lut->m0d, mask, mask);
2292 if (condidx == 1)
2293 build_lut_entry(lut->m1d, mask, mask);
2294 masks[condidx++] = mask;
2295 if (condidx == ARRAY_SIZE(masks))
2296 break;
c53d793f
HE
2297 }
2298
7dd766e0 2299 /* Add glue logic for rise/fall triggers. */
c53d793f 2300 if (masks[0] || masks[1]) {
16791da9 2301 lut->m3q = 0;
0e1357e8 2302 if (masks[0] & devc->trigger.risingmask)
16791da9 2303 add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q);
0e1357e8 2304 if (masks[0] & devc->trigger.fallingmask)
16791da9 2305 add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q);
0e1357e8 2306 if (masks[1] & devc->trigger.risingmask)
16791da9 2307 add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q);
0e1357e8 2308 if (masks[1] & devc->trigger.fallingmask)
16791da9 2309 add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q);
c53d793f 2310 }
ee492173 2311
c53d793f 2312 /* Triggertype: event. */
16791da9
GS
2313 lut->params.selres = TRGSEL_SELCODE_NEVER;
2314 lut->params.selinc = TRGSEL_SELCODE_LEVEL;
2315 lut->params.sela = 0; /* Counter >= CMPA && LEVEL */
2316 lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */
ee492173 2317
e46b8fb1 2318 return SR_OK;
ee492173 2319}
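/*
 * Worked example, illustrative only: for a single rising edge trigger
 * on channel 0 and no level conditions, build_lut_entry() turns
 * lut->m0d[0] into 0xaaaa (the other quads stay 0xffff), and the
 * OP_RISE glue logic sets lut->m3q to 0x2222, i.e. the LUT addresses
 * whose slot 0 pair reads "previously low, currently high".
 */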