]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * This file is part of the libsigrok project. | |
3 | * | |
4 | * Copyright (C) 2010-2012 Håvard Espeland <gus@ping.uio.no>, | |
5 | * Copyright (C) 2010 Martin Stensgård <mastensg@ping.uio.no> | |
6 | * Copyright (C) 2010 Carl Henrik Lunde <chlunde@ping.uio.no> | |
7 | * Copyright (C) 2020 Gerhard Sittig <gerhard.sittig@gmx.net> | |
8 | * | |
9 | * This program is free software: you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License as published by | |
11 | * the Free Software Foundation, either version 3 of the License, or | |
12 | * (at your option) any later version. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, | |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | * GNU General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License | |
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
21 | */ | |
22 | ||
23 | /* | |
24 | * ASIX SIGMA/SIGMA2 logic analyzer driver | |
25 | */ | |
26 | ||
27 | #include <config.h> | |
28 | #include "protocol.h" | |
29 | ||
30 | /* | |
31 | * The ASIX SIGMA hardware supports fixed 200MHz and 100MHz sample rates | |
32 | * (by means of separate firmware images). As well as 50MHz divided by | |
33 | * an integer divider in the 1..256 range (by the "typical" firmware). | |
34 | * Which translates to a strict lower boundary of around 195kHz. | |
35 | * | |
36 | * This driver "suggests" a subset of the available rates by listing a | |
37 | * few discrete values, while setter routines accept any user specified | |
38 | * rate that is supported by the hardware. | |
39 | */ | |
/* Rates in Hz. See the comment block above for the rationale. */
static const uint64_t samplerates[] = {
	/* 50MHz and integer divider. 1/2/5 steps (where possible). */
	SR_KHZ(200), SR_KHZ(500),
	SR_MHZ(1), SR_MHZ(2), SR_MHZ(5),
	SR_MHZ(10), SR_MHZ(25), SR_MHZ(50),
	/* 100MHz/200MHz, fixed rates in special firmware. */
	SR_MHZ(100), SR_MHZ(200),
};
48 | ||
49 | SR_PRIV GVariant *sigma_get_samplerates_list(void) | |
50 | { | |
51 | return std_gvar_samplerates(samplerates, ARRAY_SIZE(samplerates)); | |
52 | } | |
53 | ||
/* Firmware image file names, indexed by enum sigma_firmware_idx. */
static const char *firmware_files[] = {
	[SIGMA_FW_50MHZ] = "asix-sigma-50.fw", /* 50MHz, 8bit divider. */
	[SIGMA_FW_100MHZ] = "asix-sigma-100.fw", /* 100MHz, fixed. */
	[SIGMA_FW_200MHZ] = "asix-sigma-200.fw", /* 200MHz, fixed. */
	[SIGMA_FW_SYNC] = "asix-sigma-50sync.fw", /* Sync from external pin. */
	[SIGMA_FW_FREQ] = "asix-sigma-phasor.fw", /* Frequency counter. */
};
61 | ||
62 | #define SIGMA_FIRMWARE_SIZE_LIMIT (256 * 1024) | |
63 | ||
64 | static int sigma_ftdi_open(const struct sr_dev_inst *sdi) | |
65 | { | |
66 | struct dev_context *devc; | |
67 | int vid, pid; | |
68 | const char *serno; | |
69 | int ret; | |
70 | ||
71 | devc = sdi->priv; | |
72 | if (!devc) | |
73 | return SR_ERR_ARG; | |
74 | ||
75 | if (devc->ftdi.is_open) | |
76 | return SR_OK; | |
77 | ||
78 | vid = devc->id.vid; | |
79 | pid = devc->id.pid; | |
80 | serno = sdi->serial_num; | |
81 | if (!vid || !pid || !serno || !*serno) | |
82 | return SR_ERR_ARG; | |
83 | ||
84 | ret = ftdi_init(&devc->ftdi.ctx); | |
85 | if (ret < 0) { | |
86 | sr_err("Cannot initialize FTDI context (%d): %s.", | |
87 | ret, ftdi_get_error_string(&devc->ftdi.ctx)); | |
88 | return SR_ERR_IO; | |
89 | } | |
90 | ret = ftdi_usb_open_desc_index(&devc->ftdi.ctx, | |
91 | vid, pid, NULL, serno, 0); | |
92 | if (ret < 0) { | |
93 | sr_err("Cannot open device (%d): %s.", | |
94 | ret, ftdi_get_error_string(&devc->ftdi.ctx)); | |
95 | return SR_ERR_IO; | |
96 | } | |
97 | devc->ftdi.is_open = TRUE; | |
98 | ||
99 | return SR_OK; | |
100 | } | |
101 | ||
102 | static int sigma_ftdi_close(struct dev_context *devc) | |
103 | { | |
104 | int ret; | |
105 | ||
106 | ret = ftdi_usb_close(&devc->ftdi.ctx); | |
107 | devc->ftdi.is_open = FALSE; | |
108 | devc->ftdi.must_close = FALSE; | |
109 | ftdi_deinit(&devc->ftdi.ctx); | |
110 | ||
111 | return ret == 0 ? SR_OK : SR_ERR_IO; | |
112 | } | |
113 | ||
114 | SR_PRIV int sigma_check_open(const struct sr_dev_inst *sdi) | |
115 | { | |
116 | struct dev_context *devc; | |
117 | int ret; | |
118 | ||
119 | if (!sdi) | |
120 | return SR_ERR_ARG; | |
121 | devc = sdi->priv; | |
122 | if (!devc) | |
123 | return SR_ERR_ARG; | |
124 | ||
125 | if (devc->ftdi.is_open) | |
126 | return SR_OK; | |
127 | ||
128 | ret = sigma_ftdi_open(sdi); | |
129 | if (ret != SR_OK) | |
130 | return ret; | |
131 | devc->ftdi.must_close = TRUE; | |
132 | ||
133 | return ret; | |
134 | } | |
135 | ||
136 | SR_PRIV int sigma_check_close(struct dev_context *devc) | |
137 | { | |
138 | int ret; | |
139 | ||
140 | if (!devc) | |
141 | return SR_ERR_ARG; | |
142 | ||
143 | if (devc->ftdi.must_close) { | |
144 | ret = sigma_ftdi_close(devc); | |
145 | if (ret != SR_OK) | |
146 | return ret; | |
147 | devc->ftdi.must_close = FALSE; | |
148 | } | |
149 | ||
150 | return SR_OK; | |
151 | } | |
152 | ||
153 | SR_PRIV int sigma_force_open(const struct sr_dev_inst *sdi) | |
154 | { | |
155 | struct dev_context *devc; | |
156 | int ret; | |
157 | ||
158 | if (!sdi) | |
159 | return SR_ERR_ARG; | |
160 | devc = sdi->priv; | |
161 | if (!devc) | |
162 | return SR_ERR_ARG; | |
163 | ||
164 | ret = sigma_ftdi_open(sdi); | |
165 | if (ret != SR_OK) | |
166 | return ret; | |
167 | devc->ftdi.must_close = FALSE; | |
168 | ||
169 | return SR_OK; | |
170 | } | |
171 | ||
/* Unconditionally close the device, regardless of the .must_close flag. */
SR_PRIV int sigma_force_close(struct dev_context *devc)
{
	return sigma_ftdi_close(devc);
}
176 | ||
177 | /* | |
178 | * BEWARE! Error propagation is important, as are kinds of return values. | |
179 | * | |
180 | * - Raw USB transport communicates the number of sent or received bytes, | |
181 | * or negative error codes in the external library's(!) range of codes. | |
182 | * - Internal routines at the "sigrok driver level" communicate success | |
183 | * or failure in terms of SR_OK et al error codes. | |
184 | * - Main loop style receive callbacks communicate booleans which arrange | |
185 | * for repeated calls to drive progress during acquisition. | |
186 | * | |
187 | * Careful consideration by maintainers is essential, because all of the | |
188 | * above kinds of values are assignment compatible from the compiler's | |
189 | * point of view. Implementation errors will go unnoticed at build time. | |
190 | */ | |
191 | ||
192 | static int sigma_read_raw(struct dev_context *devc, void *buf, size_t size) | |
193 | { | |
194 | int ret; | |
195 | ||
196 | ret = ftdi_read_data(&devc->ftdi.ctx, (unsigned char *)buf, size); | |
197 | if (ret < 0) { | |
198 | sr_err("USB data read failed: %s", | |
199 | ftdi_get_error_string(&devc->ftdi.ctx)); | |
200 | } | |
201 | ||
202 | return ret; | |
203 | } | |
204 | ||
205 | static int sigma_write_raw(struct dev_context *devc, const void *buf, size_t size) | |
206 | { | |
207 | int ret; | |
208 | ||
209 | ret = ftdi_write_data(&devc->ftdi.ctx, buf, size); | |
210 | if (ret < 0) { | |
211 | sr_err("USB data write failed: %s", | |
212 | ftdi_get_error_string(&devc->ftdi.ctx)); | |
213 | } else if ((size_t)ret != size) { | |
214 | sr_err("USB data write length mismatch."); | |
215 | } | |
216 | ||
217 | return ret; | |
218 | } | |
219 | ||
220 | static int sigma_read_sr(struct dev_context *devc, void *buf, size_t size) | |
221 | { | |
222 | int ret; | |
223 | ||
224 | ret = sigma_read_raw(devc, buf, size); | |
225 | if (ret < 0 || (size_t)ret != size) | |
226 | return SR_ERR_IO; | |
227 | ||
228 | return SR_OK; | |
229 | } | |
230 | ||
231 | static int sigma_write_sr(struct dev_context *devc, const void *buf, size_t size) | |
232 | { | |
233 | int ret; | |
234 | ||
235 | ret = sigma_write_raw(devc, buf, size); | |
236 | if (ret < 0 || (size_t)ret != size) | |
237 | return SR_ERR_IO; | |
238 | ||
239 | return SR_OK; | |
240 | } | |
241 | ||
242 | /* | |
243 | * Implementor's note: The local write buffer's size shall suffice for | |
244 | * any known FPGA register transaction that is involved in the supported | |
245 | * feature set of this sigrok device driver. If the length check trips, | |
246 | * that's a programmer's error and needs adjustment in the complete call | |
247 | * stack of the respective code path. | |
248 | */ | |
249 | #define SIGMA_MAX_REG_DEPTH 32 | |
250 | ||
251 | /* | |
252 | * Implementor's note: The FPGA command set supports register access | |
253 | * with automatic address adjustment. This operation is documented to | |
254 | * wrap within a 16-address range, it cannot cross boundaries where the | |
255 | * register address' nibble overflows. An internal helper assumes that | |
256 | * callers remain within this auto-adjustment range, and thus multi | |
257 | * register access requests can never exceed that count. | |
258 | */ | |
259 | #define SIGMA_MAX_REG_COUNT 16 | |
260 | ||
261 | SR_PRIV int sigma_write_register(struct dev_context *devc, | |
262 | uint8_t reg, uint8_t *data, size_t len) | |
263 | { | |
264 | uint8_t buf[2 + SIGMA_MAX_REG_DEPTH * 2], *wrptr; | |
265 | size_t idx; | |
266 | ||
267 | if (len > SIGMA_MAX_REG_DEPTH) { | |
268 | sr_err("Short write buffer for %zu bytes to reg %u.", len, reg); | |
269 | return SR_ERR_BUG; | |
270 | } | |
271 | ||
272 | wrptr = buf; | |
273 | write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); | |
274 | write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); | |
275 | for (idx = 0; idx < len; idx++) { | |
276 | write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data[idx])); | |
277 | write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data[idx])); | |
278 | } | |
279 | ||
280 | return sigma_write_sr(devc, buf, wrptr - buf); | |
281 | } | |
282 | ||
/* Write a single byte 'value' to FPGA register 'reg'. */
SR_PRIV int sigma_set_register(struct dev_context *devc,
	uint8_t reg, uint8_t value)
{
	return sigma_write_register(devc, reg, &value, sizeof(value));
}
288 | ||
289 | static int sigma_read_register(struct dev_context *devc, | |
290 | uint8_t reg, uint8_t *data, size_t len) | |
291 | { | |
292 | uint8_t buf[3], *wrptr; | |
293 | int ret; | |
294 | ||
295 | wrptr = buf; | |
296 | write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); | |
297 | write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); | |
298 | write_u8_inc(&wrptr, REG_READ_ADDR); | |
299 | ret = sigma_write_sr(devc, buf, wrptr - buf); | |
300 | if (ret != SR_OK) | |
301 | return ret; | |
302 | ||
303 | return sigma_read_sr(devc, data, len); | |
304 | } | |
305 | ||
/* Read a single byte from FPGA register 'reg' into 'data'. */
static int sigma_get_register(struct dev_context *devc,
	uint8_t reg, uint8_t *data)
{
	return sigma_read_register(devc, reg, data, sizeof(*data));
}
311 | ||
312 | static int sigma_get_registers(struct dev_context *devc, | |
313 | uint8_t reg, uint8_t *data, size_t count) | |
314 | { | |
315 | uint8_t buf[2 + SIGMA_MAX_REG_COUNT], *wrptr; | |
316 | size_t idx; | |
317 | int ret; | |
318 | ||
319 | if (count > SIGMA_MAX_REG_COUNT) { | |
320 | sr_err("Short command buffer for %zu reg reads at %u.", count, reg); | |
321 | return SR_ERR_BUG; | |
322 | } | |
323 | ||
324 | wrptr = buf; | |
325 | write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(reg)); | |
326 | write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(reg)); | |
327 | for (idx = 0; idx < count; idx++) | |
328 | write_u8_inc(&wrptr, REG_READ_ADDR | REG_ADDR_INC); | |
329 | ret = sigma_write_sr(devc, buf, wrptr - buf); | |
330 | if (ret != SR_OK) | |
331 | return ret; | |
332 | ||
333 | return sigma_read_sr(devc, data, count); | |
334 | } | |
335 | ||
/*
 * Retrieve the hardware's capture positions. Reads the 24bit trigger
 * position counter, the 24bit stop position counter, and the mode
 * flags in one register access. Each output pointer is optional,
 * callers pass NULL for values they are not interested in.
 */
static int sigma_read_pos(struct dev_context *devc,
	uint32_t *stoppos, uint32_t *triggerpos, uint8_t *mode)
{
	uint8_t result[7];
	const uint8_t *rdptr;
	uint32_t v32;
	uint8_t v8;
	int ret;

	/*
	 * Read 7 registers starting at trigger position LSB.
	 * Which yields two 24bit counter values, and mode flags.
	 */
	ret = sigma_get_registers(devc, READ_TRIGGER_POS_LOW,
		result, sizeof(result));
	if (ret != SR_OK)
		return ret;

	/* Registers arrive LSB first: trigger pos, stop pos, mode. */
	rdptr = &result[0];
	v32 = read_u24le_inc(&rdptr);
	if (triggerpos)
		*triggerpos = v32;
	v32 = read_u24le_inc(&rdptr);
	if (stoppos)
		*stoppos = v32;
	v8 = read_u8_inc(&rdptr);
	if (mode)
		*mode = v8;

	/*
	 * These positions consist of "the memory row" in the MSB fields,
	 * and "an event index" within the row in the LSB fields. Part
	 * of the memory row's content is sample data, another part is
	 * timestamps.
	 *
	 * The retrieved register values point to after the captured
	 * position. So they need to get decremented, and adjusted to
	 * cater for the timestamps when the decrement carries over to
	 * a different memory row. (The all-ones LSB pattern after the
	 * decrement indicates that a row boundary was crossed.)
	 */
	if (stoppos && (--*stoppos & ROW_MASK) == ROW_MASK)
		*stoppos -= CLUSTERS_PER_ROW;
	if (triggerpos && (--*triggerpos & ROW_MASK) == ROW_MASK)
		*triggerpos -= CLUSTERS_PER_ROW;

	return SR_OK;
}
383 | ||
/*
 * Read 'numchunks' DRAM rows starting at row 'startchunk' into the
 * caller's 'data' buffer, which must provide space for numchunks
 * times ROW_LENGTH_BYTES bytes.
 */
static int sigma_read_dram(struct dev_context *devc,
	size_t startchunk, size_t numchunks, uint8_t *data)
{
	uint8_t buf[128], *wrptr, regval;
	size_t chunk;
	int sel, ret;
	gboolean is_last;

	/* Worst case: 2 address bytes plus 3 command bytes per chunk. */
	if (2 + 3 * numchunks > ARRAY_SIZE(buf)) {
		sr_err("Short write buffer for %zu DRAM row reads.", numchunks);
		return SR_ERR_BUG;
	}

	/* Communicate DRAM start address (memory row, aka samples line). */
	wrptr = buf;
	write_u16be_inc(&wrptr, startchunk);
	ret = sigma_write_register(devc, WRITE_MEMROW, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	/*
	 * Access DRAM content. Fetch from DRAM to FPGA's internal RAM,
	 * then transfer via USB. Interleave the FPGA's DRAM access and
	 * USB transfer, use alternating buffers (0/1) in the process.
	 */
	wrptr = buf;
	write_u8_inc(&wrptr, REG_DRAM_BLOCK);
	write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	for (chunk = 0; chunk < numchunks; chunk++) {
		sel = chunk % 2;
		is_last = chunk == numchunks - 1;
		/* Start the fetch of the next chunk into the other buffer. */
		if (!is_last) {
			regval = REG_DRAM_BLOCK | REG_DRAM_SEL_BOOL(!sel);
			write_u8_inc(&wrptr, regval);
		}
		/* Read out the current chunk's buffer. */
		regval = REG_DRAM_BLOCK_DATA | REG_DRAM_SEL_BOOL(sel);
		write_u8_inc(&wrptr, regval);
		if (!is_last)
			write_u8_inc(&wrptr, REG_DRAM_WAIT_ACK);
	}
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK)
		return ret;

	return sigma_read_sr(devc, data, numchunks * ROW_LENGTH_BYTES);
}
430 | ||
431 | /* Upload trigger look-up tables to Sigma. */ | |
432 | SR_PRIV int sigma_write_trigger_lut(struct dev_context *devc, | |
433 | struct triggerlut *lut) | |
434 | { | |
435 | size_t lut_addr; | |
436 | uint16_t bit; | |
437 | uint8_t m3d, m2d, m1d, m0d; | |
438 | uint8_t buf[6], *wrptr; | |
439 | uint8_t trgsel2; | |
440 | uint16_t lutreg, selreg; | |
441 | int ret; | |
442 | ||
443 | /* | |
444 | * Translate the LUT part of the trigger configuration from the | |
445 | * application's perspective to the hardware register's bitfield | |
446 | * layout. Send the LUT to the device. This configures the logic | |
447 | * which combines pin levels or edges. | |
448 | */ | |
449 | for (lut_addr = 0; lut_addr < 16; lut_addr++) { | |
450 | bit = BIT(lut_addr); | |
451 | ||
452 | /* - M4 M3S M3Q */ | |
453 | m3d = 0; | |
454 | if (lut->m4 & bit) | |
455 | m3d |= BIT(2); | |
456 | if (lut->m3s & bit) | |
457 | m3d |= BIT(1); | |
458 | if (lut->m3q & bit) | |
459 | m3d |= BIT(0); | |
460 | ||
461 | /* M2D3 M2D2 M2D1 M2D0 */ | |
462 | m2d = 0; | |
463 | if (lut->m2d[3] & bit) | |
464 | m2d |= BIT(3); | |
465 | if (lut->m2d[2] & bit) | |
466 | m2d |= BIT(2); | |
467 | if (lut->m2d[1] & bit) | |
468 | m2d |= BIT(1); | |
469 | if (lut->m2d[0] & bit) | |
470 | m2d |= BIT(0); | |
471 | ||
472 | /* M1D3 M1D2 M1D1 M1D0 */ | |
473 | m1d = 0; | |
474 | if (lut->m1d[3] & bit) | |
475 | m1d |= BIT(3); | |
476 | if (lut->m1d[2] & bit) | |
477 | m1d |= BIT(2); | |
478 | if (lut->m1d[1] & bit) | |
479 | m1d |= BIT(1); | |
480 | if (lut->m1d[0] & bit) | |
481 | m1d |= BIT(0); | |
482 | ||
483 | /* M0D3 M0D2 M0D1 M0D0 */ | |
484 | m0d = 0; | |
485 | if (lut->m0d[3] & bit) | |
486 | m0d |= BIT(3); | |
487 | if (lut->m0d[2] & bit) | |
488 | m0d |= BIT(2); | |
489 | if (lut->m0d[1] & bit) | |
490 | m0d |= BIT(1); | |
491 | if (lut->m0d[0] & bit) | |
492 | m0d |= BIT(0); | |
493 | ||
494 | /* | |
495 | * Send 16bits with M3D/M2D and M1D/M0D bit masks to the | |
496 | * TriggerSelect register, then strobe the LUT write by | |
497 | * passing A3-A0 to TriggerSelect2. Hold RESET during LUT | |
498 | * programming. | |
499 | */ | |
500 | wrptr = buf; | |
501 | lutreg = 0; | |
502 | lutreg <<= 4; lutreg |= m3d; | |
503 | lutreg <<= 4; lutreg |= m2d; | |
504 | lutreg <<= 4; lutreg |= m1d; | |
505 | lutreg <<= 4; lutreg |= m0d; | |
506 | write_u16be_inc(&wrptr, lutreg); | |
507 | ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, | |
508 | buf, wrptr - buf); | |
509 | if (ret != SR_OK) | |
510 | return ret; | |
511 | trgsel2 = TRGSEL2_RESET | TRGSEL2_LUT_WRITE | | |
512 | (lut_addr & TRGSEL2_LUT_ADDR_MASK); | |
513 | ret = sigma_set_register(devc, WRITE_TRIGGER_SELECT2, trgsel2); | |
514 | if (ret != SR_OK) | |
515 | return ret; | |
516 | } | |
517 | ||
518 | /* | |
519 | * Send the parameters. This covers counters and durations. | |
520 | */ | |
521 | wrptr = buf; | |
522 | selreg = 0; | |
523 | selreg |= (lut->params.selinc & TRGSEL_SELINC_MASK) << TRGSEL_SELINC_SHIFT; | |
524 | selreg |= (lut->params.selres & TRGSEL_SELRES_MASK) << TRGSEL_SELRES_SHIFT; | |
525 | selreg |= (lut->params.sela & TRGSEL_SELA_MASK) << TRGSEL_SELA_SHIFT; | |
526 | selreg |= (lut->params.selb & TRGSEL_SELB_MASK) << TRGSEL_SELB_SHIFT; | |
527 | selreg |= (lut->params.selc & TRGSEL_SELC_MASK) << TRGSEL_SELC_SHIFT; | |
528 | selreg |= (lut->params.selpresc & TRGSEL_SELPRESC_MASK) << TRGSEL_SELPRESC_SHIFT; | |
529 | write_u16be_inc(&wrptr, selreg); | |
530 | write_u16be_inc(&wrptr, lut->params.cmpb); | |
531 | write_u16be_inc(&wrptr, lut->params.cmpa); | |
532 | ret = sigma_write_register(devc, WRITE_TRIGGER_SELECT, buf, wrptr - buf); | |
533 | if (ret != SR_OK) | |
534 | return ret; | |
535 | ||
536 | return SR_OK; | |
537 | } | |
538 | ||
539 | /* | |
540 | * See Xilinx UG332 for Spartan-3 FPGA configuration. The SIGMA device | |
541 | * uses FTDI bitbang mode for netlist download in slave serial mode. | |
542 | * (LATER: The OMEGA device's cable contains a more capable FTDI chip | |
543 | * and uses MPSSE mode for bitbang. -- Can we also use FT232H in FT245 | |
544 | * compatible bitbang mode? For maximum code re-use and reduced libftdi | |
545 | * dependency? See section 3.5.5 of FT232H: D0 clk, D1 data (out), D2 | |
546 | * data (in), D3 select, D4-7 GPIOL. See section 3.5.7 for MCU FIFO.) | |
547 | * | |
548 | * 750kbps rate (four times the speed of sigmalogan) works well for | |
549 | * netlist download. All pins except INIT_B are output pins during | |
550 | * configuration download. | |
551 | * | |
552 | * Some pins are inverted as a byproduct of level shifting circuitry. | |
553 | * That's why high CCLK level (from the cable's point of view) is idle | |
554 | * from the FPGA's perspective. | |
555 | * | |
556 | * The vendor's literature discusses a "suicide sequence" which ends | |
557 | * regular FPGA execution and should be sent before entering bitbang | |
558 | * mode and sending configuration data. Set D7 and toggle D2, D3, D4 | |
559 | * a few times. | |
560 | */ | |
561 | #define BB_PIN_CCLK BIT(0) /* D0, CCLK */ | |
562 | #define BB_PIN_PROG BIT(1) /* D1, PROG */ | |
563 | #define BB_PIN_D2 BIT(2) /* D2, (part of) SUICIDE */ | |
564 | #define BB_PIN_D3 BIT(3) /* D3, (part of) SUICIDE */ | |
565 | #define BB_PIN_D4 BIT(4) /* D4, (part of) SUICIDE (unused?) */ | |
566 | #define BB_PIN_INIT BIT(5) /* D5, INIT, input pin */ | |
567 | #define BB_PIN_DIN BIT(6) /* D6, DIN */ | |
568 | #define BB_PIN_D7 BIT(7) /* D7, (part of) SUICIDE */ | |
569 | ||
570 | #define BB_BITRATE (750 * 1000) | |
571 | #define BB_PINMASK (0xff & ~BB_PIN_INIT) | |
572 | ||
573 | /* | |
574 | * Initiate slave serial mode for configuration download. Which is done | |
575 | * by pulsing PROG_B and sensing INIT_B. Make sure CCLK is idle before | |
576 | * initiating the configuration download. | |
577 | * | |
578 | * Run a "suicide sequence" first to terminate the regular FPGA operation | |
579 | * before reconfiguration. The FTDI cable is single channel, and shares | |
580 | * pins which are used for data communication in FIFO mode with pins that | |
581 | * are used for FPGA configuration in bitbang mode. Hardware defaults for | |
582 | * unconfigured hardware, and runtime conditions after FPGA configuration | |
583 | * need to cooperate such that re-configuration of the FPGA can start. | |
584 | */ | |
static int sigma_fpga_init_bitbang_once(struct dev_context *devc)
{
	/* One iteration of the "suicide sequence": D7 set, D2/D3 toggling. */
	const uint8_t suicide[] = {
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
		BB_PIN_D7 | BB_PIN_D3,
		BB_PIN_D7 | BB_PIN_D2,
	};
	/* CCLK idle, a PROG pulse, then CCLK idle again while waiting. */
	const uint8_t init_array[] = {
		BB_PIN_CCLK,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK | BB_PIN_PROG,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
		BB_PIN_CCLK,
	};
	size_t retries;
	int ret;
	uint8_t data;

	/* Section 2. part 1), do the FPGA suicide. */
	/* NOTE: the |= accumulation assumes SR_OK == 0 (holds for sigrok codes). */
	ret = SR_OK;
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	ret |= sigma_write_sr(devc, suicide, sizeof(suicide));
	if (ret != SR_OK)
		return SR_ERR_IO;
	g_usleep(10 * 1000);

	/* Section 2. part 2), pulse PROG. */
	ret = sigma_write_sr(devc, init_array, sizeof(init_array));
	if (ret != SR_OK)
		return ret;
	g_usleep(10 * 1000);
	PURGE_FTDI_BOTH(&devc->ftdi.ctx);

	/*
	 * Wait until the FPGA asserts INIT_B. Check in a maximum number
	 * of bursts with a given delay between them. Read as many pin
	 * capture results as the combination of FTDI chip and FTDI lib
	 * may provide. Cope with absence of pin capture data in a cycle.
	 * This approach shall result in fast response in case of success,
	 * low cost of execution during wait, reliable error handling in
	 * the transport layer, and robust response to failure or absence
	 * of result data (hardware inactivity after stimulus).
	 */
	retries = 10;
	while (retries--) {
		/* Drain all pin captures that are currently available. */
		do {
			ret = sigma_read_raw(devc, &data, sizeof(data));
			if (ret < 0)
				return SR_ERR_IO;
			if (ret == sizeof(data) && (data & BB_PIN_INIT))
				return SR_OK;
		} while (ret == sizeof(data));
		if (retries)
			g_usleep(10 * 1000);
	}

	return SR_ERR_TIMEOUT;
}
655 | ||
656 | /* | |
657 | * This is belt and braces. Re-run the bitbang initiation sequence a few | |
658 | * times should first attempts fail. Failure is rare but can happen (was | |
659 | * observed during driver development). | |
660 | */ | |
661 | static int sigma_fpga_init_bitbang(struct dev_context *devc) | |
662 | { | |
663 | size_t retries; | |
664 | int ret; | |
665 | ||
666 | retries = 10; | |
667 | while (retries--) { | |
668 | ret = sigma_fpga_init_bitbang_once(devc); | |
669 | if (ret == SR_OK) | |
670 | return ret; | |
671 | if (ret != SR_ERR_TIMEOUT) | |
672 | return ret; | |
673 | } | |
674 | return ret; | |
675 | } | |
676 | ||
/*
 * Configure the FPGA for logic-analyzer mode. Combines an ID register
 * read, a scratch register write/read-back test (0x55 then 0xaa), and
 * the SDRAM initialization request into a single command sequence,
 * then verifies the three expected response bytes.
 */
static int sigma_fpga_init_la(struct dev_context *devc)
{
	uint8_t buf[20], *wrptr;
	uint8_t data_55, data_aa, mode;
	uint8_t result[3];
	const uint8_t *rdptr;
	int ret;

	wrptr = buf;

	/* Read ID register. */
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(READ_ID));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(READ_ID));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0x55 to scratch register, read back. */
	data_55 = 0x55;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_55));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_55));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Write 0xaa to scratch register, read back. */
	data_aa = 0xaa;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_TEST));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(data_aa));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(data_aa));
	write_u8_inc(&wrptr, REG_READ_ADDR);

	/* Initiate SDRAM initialization in mode register. */
	mode = WMR_SDRAMINIT;
	write_u8_inc(&wrptr, REG_ADDR_LOW | LO4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_ADDR_HIGH | HI4(WRITE_MODE));
	write_u8_inc(&wrptr, REG_DATA_LOW | LO4(mode));
	write_u8_inc(&wrptr, REG_DATA_HIGH_WRITE | HI4(mode));

	/*
	 * Send the command sequence which contains 3 READ requests.
	 * Expect to see the corresponding 3 response bytes.
	 */
	ret = sigma_write_sr(devc, buf, wrptr - buf);
	if (ret != SR_OK) {
		sr_err("Could not request LA start response.");
		return ret;
	}
	ret = sigma_read_sr(devc, result, ARRAY_SIZE(result));
	if (ret != SR_OK) {
		sr_err("Could not receive LA start response.");
		return SR_ERR_IO;
	}
	rdptr = result;
	/* 0xa6 is the expected ID byte -- presumably the firmware's magic. */
	if (read_u8_inc(&rdptr) != 0xa6) {
		sr_err("Unexpected ID response.");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_55) {
		sr_err("Unexpected scratch read-back (55).");
		return SR_ERR_DATA;
	}
	if (read_u8_inc(&rdptr) != data_aa) {
		sr_err("Unexpected scratch read-back (aa).");
		return SR_ERR_DATA;
	}

	return SR_OK;
}
748 | ||
749 | /* | |
750 | * Read the firmware from a file and transform it into a series of bitbang | |
751 | * pulses used to program the FPGA. Note that the *bb_cmd must be free()'d | |
752 | * by the caller of this function. | |
753 | */ | |
754 | static int sigma_fw_2_bitbang(struct sr_context *ctx, const char *name, | |
755 | uint8_t **bb_cmd, size_t *bb_cmd_size) | |
756 | { | |
757 | uint8_t *firmware; | |
758 | size_t file_size; | |
759 | uint8_t *p; | |
760 | size_t l; | |
761 | uint32_t imm; | |
762 | size_t bb_size; | |
763 | uint8_t *bb_stream, *bbs, byte, mask, v; | |
764 | ||
765 | /* Retrieve the on-disk firmware file content. */ | |
766 | firmware = sr_resource_load(ctx, SR_RESOURCE_FIRMWARE, name, | |
767 | &file_size, SIGMA_FIRMWARE_SIZE_LIMIT); | |
768 | if (!firmware) | |
769 | return SR_ERR_IO; | |
770 | ||
771 | /* Unscramble the file content (XOR with "random" sequence). */ | |
772 | p = firmware; | |
773 | l = file_size; | |
774 | imm = 0x3f6df2ab; | |
775 | while (l--) { | |
776 | imm = (imm + 0xa853753) % 177 + (imm * 0x8034052); | |
777 | *p++ ^= imm & 0xff; | |
778 | } | |
779 | ||
780 | /* | |
781 | * Generate a sequence of bitbang samples. With two samples per | |
782 | * FPGA configuration bit, providing the level for the DIN signal | |
783 | * as well as two edges for CCLK. See Xilinx UG332 for details | |
784 | * ("slave serial" mode). | |
785 | * | |
786 | * Note that CCLK is inverted in hardware. That's why the | |
787 | * respective bit is first set and then cleared in the bitbang | |
788 | * sample sets. So that the DIN level will be stable when the | |
789 | * data gets sampled at the rising CCLK edge, and the signals' | |
790 | * setup time constraint will be met. | |
791 | * | |
792 | * The caller will put the FPGA into download mode, will send | |
793 | * the bitbang samples, and release the allocated memory. | |
794 | */ | |
795 | bb_size = file_size * 8 * 2; | |
796 | bb_stream = g_try_malloc(bb_size); | |
797 | if (!bb_stream) { | |
798 | sr_err("Memory allocation failed during firmware upload."); | |
799 | g_free(firmware); | |
800 | return SR_ERR_MALLOC; | |
801 | } | |
802 | bbs = bb_stream; | |
803 | p = firmware; | |
804 | l = file_size; | |
805 | while (l--) { | |
806 | byte = *p++; | |
807 | mask = 0x80; | |
808 | while (mask) { | |
809 | v = (byte & mask) ? BB_PIN_DIN : 0; | |
810 | mask >>= 1; | |
811 | *bbs++ = v | BB_PIN_CCLK; | |
812 | *bbs++ = v; | |
813 | } | |
814 | } | |
815 | g_free(firmware); | |
816 | ||
817 | /* The transformation completed successfully, return the result. */ | |
818 | *bb_cmd = bb_stream; | |
819 | *bb_cmd_size = bb_size; | |
820 | ||
821 | return SR_OK; | |
822 | } | |
823 | ||
/*
 * Download an FPGA netlist image to the device. Skips the download
 * when the requested image already is active. Switches the FTDI cable
 * to bitbang mode for the configuration download, streams the image,
 * then returns to the default (FIFO) mode and verifies register access
 * in logic-analyzer mode.
 */
static int upload_firmware(struct sr_context *ctx, struct dev_context *devc,
	enum sigma_firmware_idx firmware_idx)
{
	int ret;
	uint8_t *buf;
	uint8_t pins;
	size_t buf_size;
	const char *firmware;

	/* Check for valid firmware file selection. */
	if (firmware_idx >= ARRAY_SIZE(firmware_files))
		return SR_ERR_ARG;
	firmware = firmware_files[firmware_idx];
	if (!firmware || !*firmware)
		return SR_ERR_ARG;

	/* Avoid downloading the same firmware multiple times. */
	if (devc->firmware_idx == firmware_idx) {
		sr_info("Not uploading firmware file '%s' again.", firmware);
		return SR_OK;
	}

	devc->state = SIGMA_CONFIG;

	/* Set the cable to bitbang mode. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, BB_PINMASK, BITMODE_BITBANG);
	if (ret < 0) {
		sr_err("Could not setup cable mode for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	ret = ftdi_set_baudrate(&devc->ftdi.ctx, BB_BITRATE);
	if (ret < 0) {
		sr_err("Could not setup bitrate for upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}

	/* Initiate FPGA configuration mode. */
	ret = sigma_fpga_init_bitbang(devc);
	if (ret) {
		sr_err("Could not initiate firmware upload to hardware");
		return ret;
	}

	/* Prepare wire format of the firmware image. */
	ret = sigma_fw_2_bitbang(ctx, firmware, &buf, &buf_size);
	if (ret != SR_OK) {
		sr_err("Could not prepare file %s for upload.", firmware);
		return ret;
	}

	/* Write the FPGA netlist to the cable. */
	sr_info("Uploading firmware file '%s'.", firmware);
	ret = sigma_write_sr(devc, buf, buf_size);
	g_free(buf);
	if (ret != SR_OK) {
		sr_err("Could not upload firmware file '%s'.", firmware);
		return ret;
	}

	/* Leave bitbang mode and discard pending input data. */
	ret = ftdi_set_bitmode(&devc->ftdi.ctx, 0, BITMODE_RESET);
	if (ret < 0) {
		sr_err("Could not setup cable mode after upload: %s",
			ftdi_get_error_string(&devc->ftdi.ctx));
		return SR_ERR;
	}
	PURGE_FTDI_BOTH(&devc->ftdi.ctx);
	/* Drain any pin capture data which is left over from bitbang mode. */
	while (sigma_read_raw(devc, &pins, sizeof(pins)) > 0)
		;

	/* Initialize the FPGA for logic-analyzer mode. */
	ret = sigma_fpga_init_la(devc);
	if (ret != SR_OK) {
		sr_err("Hardware response after firmware upload failed.");
		return ret;
	}

	/* Keep track of successful firmware download completion. */
	devc->state = SIGMA_IDLE;
	devc->firmware_idx = firmware_idx;
	sr_info("Firmware uploaded.");

	return SR_OK;
}
910 | ||
911 | /* | |
912 | * The driver supports user specified time or sample count limits. The | |
913 | * device's hardware supports neither, and hardware compression prevents | |
914 | * reliable detection of "fill levels" (currently reached sample counts) | |
915 | * from register values during acquisition. That's why the driver needs | |
916 | * to apply some heuristics: | |
917 | * | |
918 | * - The (optional) sample count limit and the (normalized) samplerate | |
919 | * get mapped to an estimated duration for these samples' acquisition. | |
920 | * - The (optional) time limit gets checked as well. The lesser of the | |
921 | * two limits will terminate the data acquisition phase. The exact | |
922 | * sample count limit gets enforced in session feed submission paths. | |
923 | * - Some slack needs to be given to account for hardware pipelines as | |
924 | * well as late storage of last chunks after compression thresholds | |
925 | * are tripped. The resulting data set will span at least the caller | |
926 | * specified period of time, which shall be perfectly acceptable. | |
927 | * | |
928 | * With RLE compression active, up to 64K sample periods can pass before | |
929 | * a cluster accumulates. Which translates to 327ms at 200kHz. Add two | |
930 | * times that period for good measure, one is not enough to flush the | |
931 | * hardware pipeline (observation from an earlier experiment). | |
932 | */ | |
SR_PRIV int sigma_set_acquire_timeout(struct dev_context *devc)
{
	int ret;
	GVariant *data;
	uint64_t user_count, user_msecs;
	uint64_t worst_cluster_time_ms;
	uint64_t count_msecs, acquire_msecs;

	sr_sw_limits_init(&devc->limit.acquire);
	devc->late_trigger_timeout = FALSE;

	/* Get sample count limit, convert to msecs. */
	ret = sr_sw_limits_config_get(&devc->limit.config,
		SR_CONF_LIMIT_SAMPLES, &data);
	if (ret != SR_OK)
		return ret;
	user_count = g_variant_get_uint64(data);
	g_variant_unref(data);
	count_msecs = 0;
	/*
	 * With triggers involved, only the post-trigger share of the
	 * limit feeds the timeout (assumes capture_ratio is the
	 * pre-trigger percentage -- TODO confirm against setter).
	 */
	if (devc->use_triggers) {
		user_count *= 100 - devc->capture_ratio;
		user_count /= 100;
	}
	if (user_count)
		count_msecs = 1000 * user_count / devc->clock.samplerate + 1;

	/* Get time limit, which is in msecs. */
	ret = sr_sw_limits_config_get(&devc->limit.config,
		SR_CONF_LIMIT_MSEC, &data);
	if (ret != SR_OK)
		return ret;
	user_msecs = g_variant_get_uint64(data);
	g_variant_unref(data);
	if (devc->use_triggers) {
		user_msecs *= 100 - devc->capture_ratio;
		user_msecs /= 100;
	}

	/* Get the lesser of them, with both being optional. */
	acquire_msecs = ~UINT64_C(0);
	if (user_count && count_msecs < acquire_msecs)
		acquire_msecs = count_msecs;
	if (user_msecs && user_msecs < acquire_msecs)
		acquire_msecs = user_msecs;
	/* Neither limit was specified? Then no timeout applies at all. */
	if (acquire_msecs == ~UINT64_C(0))
		return SR_OK;

	/* Add some slack, and use that timeout for acquisition. */
	worst_cluster_time_ms = 1000 * 65536 / devc->clock.samplerate;
	acquire_msecs += 2 * worst_cluster_time_ms;
	data = g_variant_new_uint64(acquire_msecs);
	ret = sr_sw_limits_config_set(&devc->limit.acquire,
		SR_CONF_LIMIT_MSEC, data);
	g_variant_unref(data);
	if (ret != SR_OK)
		return ret;

	/* Deferred or immediate (trigger-less) timeout period start. */
	if (devc->use_triggers)
		devc->late_trigger_timeout = TRUE;
	else
		sr_sw_limits_acquisition_start(&devc->limit.acquire);

	return SR_OK;
}
998 | ||
999 | /* | |
1000 | * Check whether a caller specified samplerate matches the device's | |
1001 | * hardware constraints (can be used for acquisition). Optionally yield | |
1002 | * a value that approximates the original spec. | |
1003 | * | |
1004 | * This routine assumes that input specs are in the 200kHz to 200MHz | |
1005 | * range of supported rates, and callers typically want to normalize a | |
1006 | * given value to the hardware capabilities. Values in the 50MHz range | |
1007 | * get rounded up by default, to avoid a more expensive check for the | |
1008 | * closest match, while higher sampling rate is always desirable during | |
1009 | * measurement. Input specs which exactly match hardware capabilities | |
1010 | * remain unaffected. Because 100/200MHz rates also limit the number of | |
1011 | * available channels, they are not suggested by this routine, instead | |
1012 | * callers need to pick them consciously. | |
1013 | */ | |
1014 | SR_PRIV int sigma_normalize_samplerate(uint64_t want_rate, uint64_t *have_rate) | |
1015 | { | |
1016 | uint64_t div, rate; | |
1017 | ||
1018 | /* Accept exact matches for 100/200MHz. */ | |
1019 | if (want_rate == SR_MHZ(200) || want_rate == SR_MHZ(100)) { | |
1020 | if (have_rate) | |
1021 | *have_rate = want_rate; | |
1022 | return SR_OK; | |
1023 | } | |
1024 | ||
1025 | /* Accept 200kHz to 50MHz range, and map to near value. */ | |
1026 | if (want_rate >= SR_KHZ(200) && want_rate <= SR_MHZ(50)) { | |
1027 | div = SR_MHZ(50) / want_rate; | |
1028 | rate = SR_MHZ(50) / div; | |
1029 | if (have_rate) | |
1030 | *have_rate = rate; | |
1031 | return SR_OK; | |
1032 | } | |
1033 | ||
1034 | return SR_ERR_ARG; | |
1035 | } | |
1036 | ||
1037 | /* Gets called at probe time. Can seed software settings from hardware state. */ | |
SR_PRIV int sigma_fetch_hw_config(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	int ret;
	uint8_t regaddr, regval;

	devc = sdi->priv;
	if (!devc)
		return SR_ERR_ARG;

	/* Seed configuration values from defaults. */
	devc->firmware_idx = SIGMA_FW_NONE;
	devc->clock.samplerate = samplerates[0];

	/* TODO
	 * Ideally the device driver could retrieve recently stored
	 * details from hardware registers, thus re-use user specified
	 * configuration values across sigrok sessions. Which could
	 * avoid repeated expensive though unnecessary firmware uploads,
	 * improve performance and usability. Unfortunately it appears
	 * that the registers range which is documented as available for
	 * application use keeps providing 0xff data content. At least
	 * with the netlist version which ships with sigrok. The same
	 * was observed with unused registers in the first page.
	 */
	return SR_ERR_NA;

	/*
	 * This is for research, currently does not work yet. Everything
	 * below the unconditional return above is intentionally dead
	 * code, kept around for future register probing experiments.
	 * The consecutive 'regaddr' assignments merely select which
	 * register address to probe (the last assignment wins).
	 */
	ret = sigma_check_open(sdi);
	regaddr = 16;
	regaddr = 14;
	ret = sigma_set_register(devc, regaddr, 'F');
	ret = sigma_get_register(devc, regaddr, &regval);
	sr_warn("%s() reg[%u] val[%u] rc[%d]", __func__, regaddr, regval, ret);
	ret = sigma_check_close(devc);
	return ret;
}
1075 | ||
1076 | /* Gets called after successful (volatile) hardware configuration. */ | |
SR_PRIV int sigma_store_hw_config(const struct sr_dev_inst *sdi)
{
	/*
	 * TODO See above, registers seem to not hold written data.
	 * Intentional stub: always signals "not available" until the
	 * hardware register persistence issue gets resolved.
	 */
	(void)sdi;
	return SR_ERR_NA;
}
1083 | ||
1084 | SR_PRIV int sigma_set_samplerate(const struct sr_dev_inst *sdi) | |
1085 | { | |
1086 | struct dev_context *devc; | |
1087 | struct drv_context *drvc; | |
1088 | uint64_t samplerate; | |
1089 | int ret; | |
1090 | size_t num_channels; | |
1091 | ||
1092 | devc = sdi->priv; | |
1093 | drvc = sdi->driver->context; | |
1094 | ||
1095 | /* Accept any caller specified rate which the hardware supports. */ | |
1096 | ret = sigma_normalize_samplerate(devc->clock.samplerate, &samplerate); | |
1097 | if (ret != SR_OK) | |
1098 | return ret; | |
1099 | ||
1100 | /* | |
1101 | * Depending on the samplerates of 200/100/50- MHz, specific | |
1102 | * firmware is required and higher rates might limit the set | |
1103 | * of available channels. | |
1104 | */ | |
1105 | num_channels = devc->interp.num_channels; | |
1106 | if (samplerate <= SR_MHZ(50)) { | |
1107 | ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_50MHZ); | |
1108 | num_channels = 16; | |
1109 | } else if (samplerate == SR_MHZ(100)) { | |
1110 | ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_100MHZ); | |
1111 | num_channels = 8; | |
1112 | } else if (samplerate == SR_MHZ(200)) { | |
1113 | ret = upload_firmware(drvc->sr_ctx, devc, SIGMA_FW_200MHZ); | |
1114 | num_channels = 4; | |
1115 | } | |
1116 | ||
1117 | /* | |
1118 | * The samplerate affects the number of available logic channels | |
1119 | * as well as a sample memory layout detail (the number of samples | |
1120 | * which the device will communicate within an "event"). | |
1121 | */ | |
1122 | if (ret == SR_OK) { | |
1123 | devc->interp.num_channels = num_channels; | |
1124 | devc->interp.samples_per_event = 16 / devc->interp.num_channels; | |
1125 | } | |
1126 | ||
1127 | /* | |
1128 | * Store the firmware type and most recently configured samplerate | |
1129 | * in hardware, such that subsequent sessions can start from there. | |
1130 | * This is a "best effort" approach. Failure is non-fatal. | |
1131 | */ | |
1132 | if (ret == SR_OK) | |
1133 | (void)sigma_store_hw_config(sdi); | |
1134 | ||
1135 | return ret; | |
1136 | } | |
1137 | ||
1138 | /* | |
1139 | * Arrange for a session feed submit buffer. A queue where a number of | |
1140 | * samples gets accumulated to reduce the number of send calls. Which | |
1141 | * also enforces an optional sample count limit for data acquisition. | |
1142 | * | |
1143 | * The buffer holds up to CHUNK_SIZE bytes. The unit size is fixed (the | |
1144 | * driver provides a fixed channel layout regardless of samplerate). | |
1145 | */ | |
1146 | ||
/* Submit queue capacity in bytes. */
#define CHUNK_SIZE (4 * 1024 * 1024)

/* State of the session feed submission queue. */
struct submit_buffer {
	size_t unit_size; /* Bytes per sample in the session feed. */
	size_t max_samples, curr_samples; /* Queue capacity, fill level. */
	uint8_t *sample_data; /* Accumulated sample values. */
	uint8_t *write_pointer; /* Append position within 'sample_data'. */
	struct sr_dev_inst *sdi; /* Device instance for feed submission. */
	struct sr_datafeed_packet packet; /* Pre-assembled logic packet. */
	struct sr_datafeed_logic logic; /* Payload which 'packet' references. */
};
1158 | ||
1159 | static int alloc_submit_buffer(struct sr_dev_inst *sdi) | |
1160 | { | |
1161 | struct dev_context *devc; | |
1162 | struct submit_buffer *buffer; | |
1163 | size_t size; | |
1164 | ||
1165 | devc = sdi->priv; | |
1166 | ||
1167 | buffer = g_malloc0(sizeof(*buffer)); | |
1168 | devc->buffer = buffer; | |
1169 | ||
1170 | buffer->unit_size = sizeof(uint16_t); | |
1171 | size = CHUNK_SIZE; | |
1172 | size /= buffer->unit_size; | |
1173 | buffer->max_samples = size; | |
1174 | size *= buffer->unit_size; | |
1175 | buffer->sample_data = g_try_malloc0(size); | |
1176 | if (!buffer->sample_data) | |
1177 | return SR_ERR_MALLOC; | |
1178 | buffer->write_pointer = buffer->sample_data; | |
1179 | sr_sw_limits_init(&devc->limit.submit); | |
1180 | ||
1181 | buffer->sdi = sdi; | |
1182 | memset(&buffer->logic, 0, sizeof(buffer->logic)); | |
1183 | buffer->logic.unitsize = buffer->unit_size; | |
1184 | buffer->logic.data = buffer->sample_data; | |
1185 | memset(&buffer->packet, 0, sizeof(buffer->packet)); | |
1186 | buffer->packet.type = SR_DF_LOGIC; | |
1187 | buffer->packet.payload = &buffer->logic; | |
1188 | ||
1189 | return SR_OK; | |
1190 | } | |
1191 | ||
1192 | static int setup_submit_limit(struct dev_context *devc) | |
1193 | { | |
1194 | struct sr_sw_limits *limits; | |
1195 | int ret; | |
1196 | GVariant *data; | |
1197 | uint64_t total; | |
1198 | ||
1199 | limits = &devc->limit.submit; | |
1200 | ||
1201 | ret = sr_sw_limits_config_get(&devc->limit.config, | |
1202 | SR_CONF_LIMIT_SAMPLES, &data); | |
1203 | if (ret != SR_OK) | |
1204 | return ret; | |
1205 | total = g_variant_get_uint64(data); | |
1206 | g_variant_unref(data); | |
1207 | ||
1208 | sr_sw_limits_init(limits); | |
1209 | if (total) { | |
1210 | data = g_variant_new_uint64(total); | |
1211 | ret = sr_sw_limits_config_set(limits, | |
1212 | SR_CONF_LIMIT_SAMPLES, data); | |
1213 | g_variant_unref(data); | |
1214 | if (ret != SR_OK) | |
1215 | return ret; | |
1216 | } | |
1217 | ||
1218 | sr_sw_limits_acquisition_start(limits); | |
1219 | ||
1220 | return SR_OK; | |
1221 | } | |
1222 | ||
1223 | static void free_submit_buffer(struct dev_context *devc) | |
1224 | { | |
1225 | struct submit_buffer *buffer; | |
1226 | ||
1227 | if (!devc) | |
1228 | return; | |
1229 | ||
1230 | buffer = devc->buffer; | |
1231 | if (!buffer) | |
1232 | return; | |
1233 | devc->buffer = NULL; | |
1234 | ||
1235 | g_free(buffer->sample_data); | |
1236 | g_free(buffer); | |
1237 | } | |
1238 | ||
1239 | static int flush_submit_buffer(struct dev_context *devc) | |
1240 | { | |
1241 | struct submit_buffer *buffer; | |
1242 | int ret; | |
1243 | ||
1244 | buffer = devc->buffer; | |
1245 | ||
1246 | /* Is queued sample data available? */ | |
1247 | if (!buffer->curr_samples) | |
1248 | return SR_OK; | |
1249 | ||
1250 | /* Submit to the session feed. */ | |
1251 | buffer->logic.length = buffer->curr_samples * buffer->unit_size; | |
1252 | ret = sr_session_send(buffer->sdi, &buffer->packet); | |
1253 | if (ret != SR_OK) | |
1254 | return ret; | |
1255 | ||
1256 | /* Rewind queue position. */ | |
1257 | buffer->curr_samples = 0; | |
1258 | buffer->write_pointer = buffer->sample_data; | |
1259 | ||
1260 | return SR_OK; | |
1261 | } | |
1262 | ||
1263 | static int addto_submit_buffer(struct dev_context *devc, | |
1264 | uint16_t sample, size_t count) | |
1265 | { | |
1266 | struct submit_buffer *buffer; | |
1267 | struct sr_sw_limits *limits; | |
1268 | int ret; | |
1269 | ||
1270 | buffer = devc->buffer; | |
1271 | limits = &devc->limit.submit; | |
1272 | if (!devc->use_triggers && sr_sw_limits_check(limits)) | |
1273 | count = 0; | |
1274 | ||
1275 | /* | |
1276 | * Individually accumulate and check each sample, such that | |
1277 | * accumulation between flushes won't exceed local storage, and | |
1278 | * enforcement of user specified limits is exact. | |
1279 | */ | |
1280 | while (count--) { | |
1281 | write_u16le_inc(&buffer->write_pointer, sample); | |
1282 | buffer->curr_samples++; | |
1283 | if (buffer->curr_samples == buffer->max_samples) { | |
1284 | ret = flush_submit_buffer(devc); | |
1285 | if (ret != SR_OK) | |
1286 | return ret; | |
1287 | } | |
1288 | sr_sw_limits_update_samples_read(limits, 1); | |
1289 | if (!devc->use_triggers && sr_sw_limits_check(limits)) | |
1290 | break; | |
1291 | } | |
1292 | ||
1293 | return SR_OK; | |
1294 | } | |
1295 | ||
1296 | static void sigma_location_break_down(struct sigma_location *loc) | |
1297 | { | |
1298 | ||
1299 | loc->line = loc->raw / ROW_LENGTH_U16; | |
1300 | loc->line += ROW_COUNT; | |
1301 | loc->line %= ROW_COUNT; | |
1302 | loc->cluster = loc->raw % ROW_LENGTH_U16; | |
1303 | loc->event = loc->cluster % EVENTS_PER_CLUSTER; | |
1304 | loc->cluster = loc->cluster / EVENTS_PER_CLUSTER; | |
1305 | } | |
1306 | ||
1307 | static gboolean sigma_location_is_eq(struct sigma_location *loc1, | |
1308 | struct sigma_location *loc2, gboolean with_event) | |
1309 | { | |
1310 | ||
1311 | if (!loc1 || !loc2) | |
1312 | return FALSE; | |
1313 | ||
1314 | if (loc1->line != loc2->line) | |
1315 | return FALSE; | |
1316 | if (loc1->cluster != loc2->cluster) | |
1317 | return FALSE; | |
1318 | ||
1319 | if (with_event && loc1->event != loc2->event) | |
1320 | return FALSE; | |
1321 | ||
1322 | return TRUE; | |
1323 | } | |
1324 | ||
1325 | /* Decrement the broken-down location fields (leave 'raw' as is). */ | |
1326 | static void sigma_location_decrement(struct sigma_location *loc, | |
1327 | gboolean with_event) | |
1328 | { | |
1329 | ||
1330 | if (!loc) | |
1331 | return; | |
1332 | ||
1333 | if (with_event) { | |
1334 | if (loc->event--) | |
1335 | return; | |
1336 | loc->event = EVENTS_PER_CLUSTER - 1; | |
1337 | } | |
1338 | ||
1339 | if (loc->cluster--) | |
1340 | return; | |
1341 | loc->cluster = CLUSTERS_PER_ROW - 1; | |
1342 | ||
1343 | if (loc->line--) | |
1344 | return; | |
1345 | loc->line = ROW_COUNT - 1; | |
1346 | } | |
1347 | ||
1348 | static void sigma_location_increment(struct sigma_location *loc) | |
1349 | { | |
1350 | ||
1351 | if (!loc) | |
1352 | return; | |
1353 | ||
1354 | if (++loc->event < EVENTS_PER_CLUSTER) | |
1355 | return; | |
1356 | loc->event = 0; | |
1357 | if (++loc->cluster < CLUSTERS_PER_ROW) | |
1358 | return; | |
1359 | loc->cluster = 0; | |
1360 | if (++loc->line < ROW_COUNT) | |
1361 | return; | |
1362 | loc->line = 0; | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | * Determine the position where to open the period of trigger match | |
1367 | * checks. Setup an "impossible" location when triggers are not used. | |
1368 | * Start from the hardware provided 'trig' position otherwise, and | |
1369 | * go back a few clusters, but don't go before the 'start' position. | |
1370 | */ | |
1371 | static void rewind_trig_arm_pos(struct dev_context *devc, size_t count) | |
1372 | { | |
1373 | struct sigma_sample_interp *interp; | |
1374 | ||
1375 | if (!devc) | |
1376 | return; | |
1377 | interp = &devc->interp; | |
1378 | ||
1379 | if (!devc->use_triggers) { | |
1380 | interp->trig_arm.raw = ~0; | |
1381 | sigma_location_break_down(&interp->trig_arm); | |
1382 | return; | |
1383 | } | |
1384 | ||
1385 | interp->trig_arm = interp->trig; | |
1386 | while (count--) { | |
1387 | if (sigma_location_is_eq(&interp->trig_arm, &interp->start, TRUE)) | |
1388 | break; | |
1389 | sigma_location_decrement(&interp->trig_arm, TRUE); | |
1390 | } | |
1391 | } | |
1392 | ||
/*
 * Prepare for the download of sample memory content: determine which
 * range of DRAM lines to fetch (linear, or wrapped around the upper
 * DRAM boundary), where software trigger match checks shall start, and
 * allocate a receive buffer for chunked retrieval of DRAM lines.
 *
 * Returns SR_OK, or SR_ERR_MALLOC when buffer allocation failed.
 */
static int alloc_sample_buffer(struct dev_context *devc,
	size_t stop_pos, size_t trig_pos, uint8_t mode)
{
	struct sigma_sample_interp *interp;
	gboolean wrapped;
	size_t alloc_size;

	interp = &devc->interp;

	/*
	 * Either fetch sample memory from absolute start of DRAM to the
	 * current write position. Or from after the current write position
	 * to before the current write position, if the write pointer has
	 * wrapped around at the upper DRAM boundary. Assume that the line
	 * which most recently got written to is of unknown state, ignore
	 * its content in the "wrapped" case.
	 */
	wrapped = mode & RMR_ROUND;
	interp->start.raw = 0;
	interp->stop.raw = stop_pos;
	if (wrapped) {
		/*
		 * The shift/adjust/shift sequences round the start
		 * position up and the stop position down to whole DRAM
		 * line boundaries, which excludes the unreliable line.
		 */
		interp->start.raw = stop_pos;
		interp->start.raw >>= ROW_SHIFT;
		interp->start.raw++;
		interp->start.raw <<= ROW_SHIFT;
		interp->stop.raw = stop_pos;
		interp->stop.raw >>= ROW_SHIFT;
		interp->stop.raw--;
		interp->stop.raw <<= ROW_SHIFT;
	}
	interp->trig.raw = trig_pos;
	interp->iter.raw = 0;

	/* Break down raw values to line, cluster, event fields. */
	sigma_location_break_down(&interp->start);
	sigma_location_break_down(&interp->stop);
	sigma_location_break_down(&interp->trig);
	sigma_location_break_down(&interp->iter);

	/*
	 * The hardware provided trigger location "is late" because of
	 * latency in hardware pipelines. It points to after the trigger
	 * condition match. Arrange for a software check of sample data
	 * matches starting just a little before the hardware provided
	 * location. The "4 clusters" distance is an arbitrary choice.
	 */
	rewind_trig_arm_pos(devc, 4 * EVENTS_PER_CLUSTER);
	memset(&interp->trig_chk, 0, sizeof(interp->trig_chk));

	/*
	 * Determine which DRAM lines to fetch from the device. The
	 * add/modulo sequence handles the wrap at the row count.
	 */
	memset(&interp->fetch, 0, sizeof(interp->fetch));
	interp->fetch.lines_total = interp->stop.line + 1;
	interp->fetch.lines_total -= interp->start.line;
	interp->fetch.lines_total += ROW_COUNT;
	interp->fetch.lines_total %= ROW_COUNT;
	interp->fetch.lines_done = 0;

	/* Arrange for chunked download, N lines per USB request. */
	interp->fetch.lines_per_read = 32;
	alloc_size = sizeof(devc->interp.fetch.rcvd_lines[0]);
	alloc_size *= devc->interp.fetch.lines_per_read;
	devc->interp.fetch.rcvd_lines = g_try_malloc0(alloc_size);
	if (!devc->interp.fetch.rcvd_lines)
		return SR_ERR_MALLOC;

	return SR_OK;
}
1460 | ||
1461 | static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx); | |
1462 | static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx); | |
1463 | ||
/*
 * Fetch the next chunk of DRAM lines from the device into the receive
 * buffer. On the very first invocation this also seeds the iteration
 * position, and captures the initial timestamp and sample values which
 * subsequent interpretation builds upon.
 */
static int fetch_sample_buffer(struct dev_context *devc)
{
	struct sigma_sample_interp *interp;
	size_t count;
	int ret;
	const uint8_t *rdptr;
	uint16_t ts, data;

	interp = &devc->interp;

	/* First invocation? Seed the iteration position. */
	if (!interp->fetch.lines_done) {
		interp->iter = interp->start;
	}

	/* Get another set of DRAM lines in one read call. */
	count = interp->fetch.lines_total - interp->fetch.lines_done;
	if (count > interp->fetch.lines_per_read)
		count = interp->fetch.lines_per_read;
	ret = sigma_read_dram(devc, interp->iter.line, count,
		(uint8_t *)interp->fetch.rcvd_lines);
	if (ret != SR_OK)
		return ret;
	interp->fetch.lines_rcvd = count;
	interp->fetch.curr_line = &interp->fetch.rcvd_lines[0];

	/* First invocation? Get initial timestamp and sample data. */
	if (!interp->fetch.lines_done) {
		rdptr = (void *)interp->fetch.curr_line;
		ts = read_u16le_inc(&rdptr);
		data = read_u16le_inc(&rdptr);
		/*
		 * Undo the channel interleaving within the first event
		 * when one 16bit event carries several samples (the
		 * reduced channel count modes).
		 */
		if (interp->samples_per_event == 4) {
			data = sigma_deinterlace_data_4x4(data, 0);
		} else if (interp->samples_per_event == 2) {
			data = sigma_deinterlace_data_2x8(data, 0);
		}
		interp->last.ts = ts;
		interp->last.sample = data;
	}

	return SR_OK;
}
1506 | ||
1507 | static void free_sample_buffer(struct dev_context *devc) | |
1508 | { | |
1509 | g_free(devc->interp.fetch.rcvd_lines); | |
1510 | devc->interp.fetch.rcvd_lines = NULL; | |
1511 | devc->interp.fetch.lines_per_read = 0; | |
1512 | } | |
1513 | ||
1514 | /* | |
1515 | * Parse application provided trigger conditions to the driver's internal | |
1516 | * presentation. Yields a mask of pins of interest, and their expected | |
1517 | * pin levels or edges. | |
1518 | * | |
1519 | * In 100 and 200 MHz mode, only a single pin's rising/falling edge can be | |
1520 | * set as trigger. In 50- MHz modes, two rising/falling edges can be set, | |
1521 | * in addition to value/mask specs for any number of channels. | |
1522 | * | |
1523 | * Hardware implementation detail: When more than one edge is specified, | |
1524 | * then the condition is only considered a match when _all_ transitions | |
1525 | * are seen in the same 20ns check interval, regardless of the user's | |
1526 | * perceived samplerate which can be a fraction of 50MHz. Which reduces | |
1527 | * practical use to edges on a single pin in addition to data patterns. | |
1528 | * Which still covers a lot of users' typical scenarios. Not an issue, | |
1529 | * just something to remain aware of. | |
1530 | * | |
1531 | * The Sigma hardware also supports complex triggers which involve the | |
1532 | * logical combination of several patterns, pulse durations, counts of | |
1533 | * condition matches, A-then-B sequences, etc. But this has not been | |
1534 | * implemented yet here, and applications may lack means to express | |
1535 | * these conditions (present the complex conditions to users for entry | |
1536 | * and review, pass application specs to drivers covering the versatile | |
1537 | * combinations). | |
1538 | * | |
1539 | * Implementor's note: This routine currently exclusively accepts input | |
1540 | * in the form of sr_trigger stages, which results from "01rf-" choices | |
1541 | * on a multitude of individual GUI traces, or the CLI's --trigger spec | |
1542 | * which takes one list of <pin>=<value/edge> details. | |
1543 | * | |
1544 | * TODO Consider the addition of SR_CONF_TRIGGER_PATTERN support, which | |
1545 | * accepts a single free form string argument, and could describe a | |
1546 | * multi-bit pattern without the tedious trace name/index selection. | |
1547 | * Fortunately the number of channels is fixed for this device, we need | |
1548 | * not come up with variable length support and counts beyond 64. _When_ | |
1549 | * --trigger as well as SR_CONF_TRIGGER_PATTERN are supported, then the | |
1550 | * implementation needs to come up with priorities for these sources of | |
1551 | * input specs, or enforce exclusive use of either form (at one time, | |
1552 | * per acquisition / invocation). | |
1553 | * | |
1554 | * Text forms that may be worth supporting: | |
1555 | * - Simple forms, mere numbers, optional base specs. These are easiest | |
1556 | * to implement with existing common conversion helpers. | |
1557 | * triggerpattern=<value>[/<mask>] | |
1558 | * triggerpattern=255 | |
1559 | * triggerpattern=45054 | |
1560 | * triggerpattern=0xaffe | |
1561 | * triggerpattern=0xa0f0/0xf0f0 | |
1562 | * triggerpattern=0b1010111111111110/0x7ffe | |
1563 | * - Alternative bit pattern form, including wildcards in a single value. | |
1564 | * This cannot use common conversion support, needs special handling. | |
1565 | * triggerpattern=0b1010xxxx1111xxx0 | |
1566 | * This is most similar to SR_CONF_TRIGGER_PATTERN as hameg-hmo uses | |
1567 | * it. Passes the app's spec via SCPI to the device. See section 2.3.5 | |
1568 | * "Pattern trigger" and :TRIG:A:PATT:SOUR in the Hameg document. | |
1569 | * - Prefixed form to tell the above variants apart, and support both of | |
1570 | * them at the same time. Additional optional separator for long digit | |
1571 | * runs, and edge support in the form which lists individual bits (not | |
1572 | * useful for dec/hex formats). | |
1573 | * triggerpattern=value=45054 | |
1574 | * triggerpattern=value=0b1010111111111110 | |
1575 | * triggerpattern=value=0xa0f0,mask=0xf0f0 | |
1576 | * triggerpattern=bits=1010-xxxx-1111-xxxx | |
1577 | * triggerpattern=bits=0010-r100 | |
1578 | * | |
1579 | * TODO Check this set of processing rules for completeness/correctness. | |
1580 | * - Do implement the prefixed format which covers most use cases, _and_ | |
1581 | * should be usable from CLI and GUI environments. | |
1582 | * - Default to 'bits=' prefix if none was found (and only accept one | |
1583 | * single key/value pair in that case with the default key). | |
1584 | * - Accept dash and space separators in the 'bits=' value. Stick with | |
1585 | * mere unseparated values for value and mask, use common conversion. | |
1586 | * This results in transparent dec/bin/oct/hex support. Underscores? | |
1587 | * - Accept 0/1 binary digits in 'bits=', as well as r/f/e edge specs. | |
1588 | * - Only use --trigger (struct sr_trigger) when SR_CONF_TRIGGER_PATTERN | |
1589 | * is absent? Or always accept --trigger in addition to the data pattern | |
1590 | * spec? Then only accept edge specs from --trigger, since data pattern | |
1591 | * was most importantly motivated by address/data bus inspection? | |
1592 | * - TODO Consider edge=<pin><slope> as an optional additional spec in | |
1593 | * the value= and mask= group? Does that help make exclusive support | |
1594 | * for either --trigger or -c triggerpattern acceptable? | |
1595 | * triggerpattern=value=0xa0f0,mask=0xb0f0,edge=15r | |
1596 | * triggerpattern=bits=1r10-xxxx-1111-xxxx | |
1597 | * triggerpattern=1r10-xxxx-1111-xxxx | |
1598 | * - *Any* input spec regardless of format and origin must end up in the | |
1599 | * 'struct sigma_trigger' internal presentation used by this driver. | |
1600 | * It's desirable to have sigma_convert_trigger() do all the parsing, | |
1601 | * and constraint checking in a central location. | |
1602 | */ | |
SR_PRIV int sigma_convert_trigger(const struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	struct sr_trigger *trigger;
	struct sr_trigger_stage *stage;
	struct sr_trigger_match *match;
	const GSList *l, *m;
	uint16_t channelbit;
	size_t edge_count;

	devc = sdi->priv;
	memset(&devc->trigger, 0, sizeof(devc->trigger));
	devc->use_triggers = FALSE;

	/* TODO Consider additional SR_CONF_TRIGGER_PATTERN support. */
	trigger = sr_session_trigger_get(sdi->session);
	if (!trigger)
		return SR_OK;

	edge_count = 0;
	for (l = trigger->stages; l; l = l->next) {
		stage = l->data;
		for (m = stage->matches; m; m = m->next) {
			match = m->data;
			/* Ignore disabled channels with a trigger. */
			if (!match->channel->enabled)
				continue;
			channelbit = BIT(match->channel->index);
			if (devc->clock.samplerate >= SR_MHZ(100)) {
				/* Fast trigger support. */
				if (edge_count > 0) {
					sr_err("100/200MHz modes limited to single trigger pin.");
					return SR_ERR;
				}
				if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
				} else {
					/* Level triggers are unsupported at fast rates. */
					sr_err("100/200MHz modes limited to edge trigger.");
					return SR_ERR;
				}

				edge_count++;
			} else {
				/* Simple trigger support (event). */
				if (match->match == SR_TRIGGER_ONE) {
					devc->trigger.simplevalue |= channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_ZERO) {
					devc->trigger.simplevalue &= ~channelbit;
					devc->trigger.simplemask |= channelbit;
				} else if (match->match == SR_TRIGGER_FALLING) {
					devc->trigger.fallingmask |= channelbit;
					edge_count++;
				} else if (match->match == SR_TRIGGER_RISING) {
					devc->trigger.risingmask |= channelbit;
					edge_count++;
				}

				/*
				 * Actually, Sigma supports 2 rising/falling triggers,
				 * but they are ORed and the current trigger syntax
				 * does not permit ORed triggers.
				 */
				if (edge_count > 1) {
					sr_err("Limited to 1 edge trigger.");
					return SR_ERR;
				}
			}
		}
	}

	/*
	 * Keep track whether triggers are involved during acquisition.
	 * NOTE(review): the flag gets raised whenever a trigger spec
	 * exists, even when all of its matches referenced disabled
	 * channels and no mask bits were set above -- confirm whether
	 * that is intended.
	 */
	devc->use_triggers = TRUE;

	return SR_OK;
}
1681 | ||
1682 | static gboolean sample_matches_trigger(struct dev_context *devc, uint16_t sample) | |
1683 | { | |
1684 | struct sigma_sample_interp *interp; | |
1685 | uint16_t last_sample; | |
1686 | struct sigma_trigger *t; | |
1687 | gboolean simple_match, rising_match, falling_match; | |
1688 | gboolean matched; | |
1689 | ||
1690 | /* | |
1691 | * This logic is about improving the precision of the hardware | |
1692 | * provided trigger match position. Software checks are only | |
1693 | * required for a short range of samples, and only when a user | |
1694 | * specified trigger condition was involved during acquisition. | |
1695 | */ | |
1696 | if (!devc) | |
1697 | return FALSE; | |
1698 | if (!devc->use_triggers) | |
1699 | return FALSE; | |
1700 | interp = &devc->interp; | |
1701 | if (!interp->trig_chk.armed) | |
1702 | return FALSE; | |
1703 | ||
1704 | /* | |
1705 | * Check if the current sample and its most recent transition | |
1706 | * match the initially provided trigger condition. The data | |
1707 | * must not fail either of the individual checks. Unused | |
1708 | * trigger features remain neutral in the summary expression. | |
1709 | */ | |
1710 | last_sample = interp->last.sample; | |
1711 | t = &devc->trigger; | |
1712 | simple_match = (sample & t->simplemask) == t->simplevalue; | |
1713 | rising_match = ((last_sample & t->risingmask) == 0) && | |
1714 | ((sample & t->risingmask) == t->risingmask); | |
1715 | falling_match = ((last_sample & t->fallingmask) == t->fallingmask) && | |
1716 | ((sample & t->fallingmask) == 0); | |
1717 | matched = simple_match && rising_match && falling_match; | |
1718 | ||
1719 | return matched; | |
1720 | } | |
1721 | ||
1722 | static int send_trigger_marker(struct dev_context *devc) | |
1723 | { | |
1724 | int ret; | |
1725 | ||
1726 | ret = flush_submit_buffer(devc); | |
1727 | if (ret != SR_OK) | |
1728 | return ret; | |
1729 | ret = std_session_send_df_trigger(devc->buffer->sdi); | |
1730 | if (ret != SR_OK) | |
1731 | return ret; | |
1732 | ||
1733 | return SR_OK; | |
1734 | } | |
1735 | ||
1736 | static int check_and_submit_sample(struct dev_context *devc, | |
1737 | uint16_t sample, size_t count) | |
1738 | { | |
1739 | gboolean triggered; | |
1740 | int ret; | |
1741 | ||
1742 | triggered = sample_matches_trigger(devc, sample); | |
1743 | if (triggered) { | |
1744 | send_trigger_marker(devc); | |
1745 | devc->interp.trig_chk.matched = TRUE; | |
1746 | } | |
1747 | ||
1748 | ret = addto_submit_buffer(devc, sample, count); | |
1749 | if (ret != SR_OK) | |
1750 | return ret; | |
1751 | ||
1752 | return SR_OK; | |
1753 | } | |
1754 | ||
/*
 * Advance the software trigger check's supervision state for the
 * current event position. Arms the check when the iterator reaches
 * the pre-trigger "arm" location, disarms it after a fixed number of
 * event slots or after a match, and forces a trigger marker when the
 * hardware reported position is reached without a software match.
 */
static void sigma_location_check(struct dev_context *devc)
{
	struct sigma_sample_interp *interp;

	if (!devc)
		return;
	interp = &devc->interp;

	/*
	 * Manage the period of trigger match checks in software.
	 * Start supervision somewhere before the hardware provided
	 * location. Stop supervision after an arbitrary amount of
	 * event slots, or when a match was found.
	 */
	if (interp->trig_chk.armed) {
		interp->trig_chk.evt_remain--;
		if (!interp->trig_chk.evt_remain || interp->trig_chk.matched)
			interp->trig_chk.armed = FALSE;
	}
	if (!interp->trig_chk.armed && !interp->trig_chk.matched) {
		if (sigma_location_is_eq(&interp->iter, &interp->trig_arm, TRUE)) {
			interp->trig_chk.armed = TRUE;
			interp->trig_chk.matched = FALSE;
			/* Arbitrary supervision window (in event slots). */
			interp->trig_chk.evt_remain = 8 * EVENTS_PER_CLUSTER;
		}
	}

	/*
	 * Force a trigger marker when the software check found no match
	 * yet while the hardware provided position was reached. This
	 * very probably is a user initiated button press.
	 */
	if (interp->trig_chk.armed) {
		if (sigma_location_is_eq(&interp->iter, &interp->trig, TRUE)) {
			(void)send_trigger_marker(devc);
			interp->trig_chk.matched = TRUE;
		}
	}
}
1794 | ||
1795 | /* | |
1796 | * Return the timestamp of "DRAM cluster". | |
1797 | */ | |
1798 | static uint16_t sigma_dram_cluster_ts(struct sigma_dram_cluster *cluster) | |
1799 | { | |
1800 | return read_u16le((const uint8_t *)&cluster->timestamp); | |
1801 | } | |
1802 | ||
1803 | /* | |
1804 | * Return one 16bit data entity of a DRAM cluster at the specified index. | |
1805 | */ | |
1806 | static uint16_t sigma_dram_cluster_data(struct sigma_dram_cluster *cl, int idx) | |
1807 | { | |
1808 | return read_u16le((const uint8_t *)&cl->samples[idx]); | |
1809 | } | |
1810 | ||
/*
 * Deinterlace sample data that was retrieved at 100MHz samplerate.
 * One 16bit item contains two samples of 8bits each. The bits of
 * multiple samples are interleaved: sample 'idx' (0 or 1) occupies
 * every second bit position, starting at bit 'idx'.
 */
static uint16_t sigma_deinterlace_data_2x8(uint16_t indata, int idx)
{
	uint16_t outdata;
	int bit;

	indata >>= idx;
	outdata = 0;
	/* Collect every second input bit into a compact 8bit sample. */
	for (bit = 0; bit < 8; bit++)
		outdata |= ((indata >> (2 * bit)) & 1) << bit;
	return outdata;
}
1832 | ||
/*
 * Deinterlace sample data that was retrieved at 200MHz samplerate.
 * One 16bit item contains four samples of 4bits each. The bits of
 * multiple samples are interleaved: sample 'idx' (0..3) occupies
 * every fourth bit position, starting at bit 'idx'.
 */
static uint16_t sigma_deinterlace_data_4x4(uint16_t indata, int idx)
{
	uint16_t outdata;
	int bit;

	indata >>= idx;
	outdata = 0;
	/* Collect every fourth input bit into a compact 4bit sample. */
	for (bit = 0; bit < 4; bit++)
		outdata |= ((indata >> (4 * bit)) & 1) << bit;
	return outdata;
}
1850 | ||
1851 | static void sigma_decode_dram_cluster(struct dev_context *devc, | |
1852 | struct sigma_dram_cluster *dram_cluster, | |
1853 | size_t events_in_cluster) | |
1854 | { | |
1855 | uint16_t tsdiff, ts, sample, item16; | |
1856 | size_t count; | |
1857 | size_t evt; | |
1858 | ||
1859 | /* | |
1860 | * If this cluster is not adjacent to the previously received | |
1861 | * cluster, then send the appropriate number of samples with the | |
1862 | * previous values to the sigrok session. This "decodes RLE". | |
1863 | * | |
1864 | * These samples cannot match the trigger since they just repeat | |
1865 | * the previously submitted data pattern. (This assumption holds | |
1866 | * for simple level and edge triggers. It would not for timed or | |
1867 | * counted conditions, which currently are not supported.) | |
1868 | */ | |
1869 | ts = sigma_dram_cluster_ts(dram_cluster); | |
1870 | tsdiff = ts - devc->interp.last.ts; | |
1871 | if (tsdiff > 0) { | |
1872 | sample = devc->interp.last.sample; | |
1873 | count = tsdiff * devc->interp.samples_per_event; | |
1874 | (void)check_and_submit_sample(devc, sample, count); | |
1875 | } | |
1876 | devc->interp.last.ts = ts + EVENTS_PER_CLUSTER; | |
1877 | ||
1878 | /* | |
1879 | * Grab sample data from the current cluster and prepare their | |
1880 | * submission to the session feed. Handle samplerate dependent | |
1881 | * memory layout of sample data. Accumulation of data chunks | |
1882 | * before submission is transparent to this code path, specific | |
1883 | * buffer depth is neither assumed nor required here. | |
1884 | */ | |
1885 | sample = 0; | |
1886 | for (evt = 0; evt < events_in_cluster; evt++) { | |
1887 | item16 = sigma_dram_cluster_data(dram_cluster, evt); | |
1888 | if (devc->interp.samples_per_event == 4) { | |
1889 | sample = sigma_deinterlace_data_4x4(item16, 0); | |
1890 | check_and_submit_sample(devc, sample, 1); | |
1891 | devc->interp.last.sample = sample; | |
1892 | sample = sigma_deinterlace_data_4x4(item16, 1); | |
1893 | check_and_submit_sample(devc, sample, 1); | |
1894 | devc->interp.last.sample = sample; | |
1895 | sample = sigma_deinterlace_data_4x4(item16, 2); | |
1896 | check_and_submit_sample(devc, sample, 1); | |
1897 | devc->interp.last.sample = sample; | |
1898 | sample = sigma_deinterlace_data_4x4(item16, 3); | |
1899 | check_and_submit_sample(devc, sample, 1); | |
1900 | devc->interp.last.sample = sample; | |
1901 | } else if (devc->interp.samples_per_event == 2) { | |
1902 | sample = sigma_deinterlace_data_2x8(item16, 0); | |
1903 | check_and_submit_sample(devc, sample, 1); | |
1904 | devc->interp.last.sample = sample; | |
1905 | sample = sigma_deinterlace_data_2x8(item16, 1); | |
1906 | check_and_submit_sample(devc, sample, 1); | |
1907 | devc->interp.last.sample = sample; | |
1908 | } else { | |
1909 | sample = item16; | |
1910 | check_and_submit_sample(devc, sample, 1); | |
1911 | devc->interp.last.sample = sample; | |
1912 | } | |
1913 | sigma_location_increment(&devc->interp.iter); | |
1914 | sigma_location_check(devc); | |
1915 | } | |
1916 | } | |
1917 | ||
1918 | /* | |
1919 | * Decode chunk of 1024 bytes, 64 clusters, 7 events per cluster. | |
1920 | * Each event is 20ns apart, and can contain multiple samples. | |
1921 | * | |
1922 | * For 200 MHz, events contain 4 samples for each channel, spread 5 ns apart. | |
1923 | * For 100 MHz, events contain 2 samples for each channel, spread 10 ns apart. | |
1924 | * For 50 MHz and below, events contain one sample for each channel, | |
1925 | * spread 20 ns apart. | |
1926 | */ | |
1927 | static int decode_chunk_ts(struct dev_context *devc, | |
1928 | struct sigma_dram_line *dram_line, | |
1929 | size_t events_in_line) | |
1930 | { | |
1931 | struct sigma_dram_cluster *dram_cluster; | |
1932 | size_t clusters_in_line; | |
1933 | size_t events_in_cluster; | |
1934 | size_t cluster; | |
1935 | ||
1936 | clusters_in_line = events_in_line; | |
1937 | clusters_in_line += EVENTS_PER_CLUSTER - 1; | |
1938 | clusters_in_line /= EVENTS_PER_CLUSTER; | |
1939 | ||
1940 | /* For each full DRAM cluster. */ | |
1941 | for (cluster = 0; cluster < clusters_in_line; cluster++) { | |
1942 | dram_cluster = &dram_line->cluster[cluster]; | |
1943 | ||
1944 | /* The last cluster might not be full. */ | |
1945 | if ((cluster == clusters_in_line - 1) && | |
1946 | (events_in_line % EVENTS_PER_CLUSTER)) { | |
1947 | events_in_cluster = events_in_line % EVENTS_PER_CLUSTER; | |
1948 | } else { | |
1949 | events_in_cluster = EVENTS_PER_CLUSTER; | |
1950 | } | |
1951 | ||
1952 | sigma_decode_dram_cluster(devc, dram_cluster, | |
1953 | events_in_cluster); | |
1954 | } | |
1955 | ||
1956 | return SR_OK; | |
1957 | } | |
1958 | ||
/*
 * Retrieve and interpret the device's sample memory after acquisition.
 * May execute across several receive callback invocations for large
 * captures: force-stops the hardware if still running, switches DRAM
 * to read mode and allocates buffers (once), then fetches and decodes
 * lines of sample data until the acquired region is exhausted.
 * Returns TRUE to keep the receive callback installed (also after
 * completion), FALSE on communication errors.
 */
static int download_capture(struct sr_dev_inst *sdi)
{
	struct dev_context *devc;
	struct sigma_sample_interp *interp;
	uint32_t stoppos, triggerpos;
	uint8_t modestatus;
	int ret;
	size_t chunks_per_receive_call;

	devc = sdi->priv;
	interp = &devc->interp;

	/*
	 * Check the mode register. Force stop the current acquisition
	 * if it has not yet terminated before. Will block until the
	 * acquisition stops, assuming that this won't take long. Should
	 * execute exactly once, then keep finding its condition met.
	 *
	 * Ask the hardware to stop data acquisition. Reception of the
	 * FORCESTOP request makes the hardware "disable RLE" (store
	 * clusters to DRAM regardless of whether pin state changes) and
	 * raise the POSTTRIGGERED flag.
	 */
	ret = sigma_get_register(devc, READ_MODE, &modestatus);
	if (ret != SR_OK) {
		sr_err("Could not determine current device state.");
		return FALSE;
	}
	if (!(modestatus & RMR_POSTTRIGGERED)) {
		sr_info("Downloading sample data.");
		devc->state = SIGMA_DOWNLOAD;

		modestatus = WMR_FORCESTOP | WMR_SDRAMWRITEEN;
		ret = sigma_set_register(devc, WRITE_MODE, modestatus);
		if (ret != SR_OK)
			return FALSE;
		/* Busy-poll until the hardware confirms the stop. */
		do {
			ret = sigma_get_register(devc, READ_MODE, &modestatus);
			if (ret != SR_OK) {
				sr_err("Could not poll for post-trigger state.");
				return FALSE;
			}
		} while (!(modestatus & RMR_POSTTRIGGERED));
	}

	/*
	 * Switch the hardware from DRAM write (data acquisition) to
	 * DRAM read (sample memory download). Prepare resources for
	 * sample memory content retrieval. Should execute exactly once,
	 * then keep finding its condition met.
	 *
	 * Get the current positions (acquisition write pointer, and
	 * trigger match location). With disabled triggers, use a value
	 * for the location that will never match during interpretation.
	 * Determine which area of the sample memory to retrieve,
	 * allocate a receive buffer, and setup counters/pointers.
	 */
	if (!interp->fetch.lines_per_read) {
		ret = sigma_set_register(devc, WRITE_MODE, WMR_SDRAMREADEN);
		if (ret != SR_OK)
			return FALSE;

		ret = sigma_read_pos(devc, &stoppos, &triggerpos, &modestatus);
		if (ret != SR_OK) {
			sr_err("Could not query capture positions/state.");
			return FALSE;
		}
		/* ~0 is a trigger position which never matches. */
		if (!devc->use_triggers)
			triggerpos = ~0;
		if (!(modestatus & RMR_TRIGGERED))
			triggerpos = ~0;

		ret = alloc_sample_buffer(devc, stoppos, triggerpos, modestatus);
		if (ret != SR_OK)
			return FALSE;

		ret = alloc_submit_buffer(sdi);
		if (ret != SR_OK)
			return FALSE;
		ret = setup_submit_limit(devc);
		if (ret != SR_OK)
			return FALSE;
	}

	/*
	 * Get another set of sample memory rows, and interpret its
	 * content. Will execute as many times as it takes to complete
	 * the memory region that the recent acquisition spans.
	 *
	 * The size of a receive call's workload and the main loop's
	 * receive call poll period determine the UI responsiveness and
	 * the overall transfer time for the sample memory content.
	 */
	/* Arbitrary per-invocation budget, see responsiveness note above. */
	chunks_per_receive_call = 50;
	while (interp->fetch.lines_done < interp->fetch.lines_total) {
		size_t dl_events_in_line;

		/* Read another chunk of sample memory (several lines). */
		ret = fetch_sample_buffer(devc);
		if (ret != SR_OK)
			return FALSE;

		/* Process lines of sample data. Last line may be short. */
		while (interp->fetch.lines_rcvd--) {
			dl_events_in_line = EVENTS_PER_ROW;
			if (interp->iter.line == interp->stop.line) {
				dl_events_in_line = interp->stop.raw & ROW_MASK;
			}
			decode_chunk_ts(devc, interp->fetch.curr_line,
				dl_events_in_line);
			interp->fetch.curr_line++;
			interp->fetch.lines_done++;
		}

		/* Keep returning to application code for large data sets. */
		if (!--chunks_per_receive_call) {
			ret = flush_submit_buffer(devc);
			if (ret != SR_OK)
				return FALSE;
			break;
		}
	}

	/*
	 * Release previously allocated resources, and adjust state when
	 * all of the sample memory was retrieved, and interpretation has
	 * completed. Should execute exactly once.
	 */
	if (interp->fetch.lines_done >= interp->fetch.lines_total) {
		ret = flush_submit_buffer(devc);
		if (ret != SR_OK)
			return FALSE;
		free_submit_buffer(devc);
		free_sample_buffer(devc);

		ret = std_session_send_df_end(sdi);
		if (ret != SR_OK)
			return FALSE;

		devc->state = SIGMA_IDLE;
		sr_dev_acquisition_stop(sdi);
	}

	return TRUE;
}
2104 | ||
2105 | /* | |
2106 | * Periodically check the Sigma status when in CAPTURE mode. This routine | |
2107 | * checks whether the configured sample count or sample time have passed, | |
2108 | * and will stop acquisition and download the acquired samples. | |
2109 | */ | |
2110 | static int sigma_capture_mode(struct sr_dev_inst *sdi) | |
2111 | { | |
2112 | struct dev_context *devc; | |
2113 | int ret; | |
2114 | uint32_t stoppos, triggerpos; | |
2115 | uint8_t mode; | |
2116 | gboolean full, wrapped, triggered, complete; | |
2117 | ||
2118 | devc = sdi->priv; | |
2119 | ||
2120 | /* | |
2121 | * Get and interpret current acquisition status. Some of these | |
2122 | * thresholds are rather arbitrary. | |
2123 | */ | |
2124 | ret = sigma_read_pos(devc, &stoppos, &triggerpos, &mode); | |
2125 | if (ret != SR_OK) | |
2126 | return FALSE; | |
2127 | stoppos >>= ROW_SHIFT; | |
2128 | full = stoppos >= ROW_COUNT - 2; | |
2129 | wrapped = mode & RMR_ROUND; | |
2130 | triggered = mode & RMR_TRIGGERED; | |
2131 | complete = mode & RMR_POSTTRIGGERED; | |
2132 | ||
2133 | /* | |
2134 | * Acquisition completed in the hardware? Start or continue | |
2135 | * sample memory content download. | |
2136 | * (Can user initiated button presses result in auto stop? | |
2137 | * Will they "trigger", and later result in expired time limit | |
2138 | * of post trigger conditions?) | |
2139 | */ | |
2140 | if (complete) | |
2141 | return download_capture(sdi); | |
2142 | ||
2143 | /* | |
2144 | * Previously configured acquisition period exceeded? Start | |
2145 | * sample download. Start the timeout period late when triggers | |
2146 | * are used (unknown period from acquisition start to trigger | |
2147 | * match). | |
2148 | */ | |
2149 | if (sr_sw_limits_check(&devc->limit.acquire)) | |
2150 | return download_capture(sdi); | |
2151 | if (devc->late_trigger_timeout && triggered) { | |
2152 | sr_sw_limits_acquisition_start(&devc->limit.acquire); | |
2153 | devc->late_trigger_timeout = FALSE; | |
2154 | } | |
2155 | ||
2156 | /* | |
2157 | * No trigger specified, and sample memory exhausted? Start | |
2158 | * download (may otherwise keep acquiring, even for infinite | |
2159 | * amounts of time without a user specified time/count limit). | |
2160 | * This handles situations when users specify limits which | |
2161 | * exceed the device's capabilities. | |
2162 | */ | |
2163 | (void)full; | |
2164 | if (!devc->use_triggers && wrapped) | |
2165 | return download_capture(sdi); | |
2166 | ||
2167 | return TRUE; | |
2168 | } | |
2169 | ||
2170 | SR_PRIV int sigma_receive_data(int fd, int revents, void *cb_data) | |
2171 | { | |
2172 | struct sr_dev_inst *sdi; | |
2173 | struct dev_context *devc; | |
2174 | ||
2175 | (void)fd; | |
2176 | (void)revents; | |
2177 | ||
2178 | sdi = cb_data; | |
2179 | devc = sdi->priv; | |
2180 | ||
2181 | if (devc->state == SIGMA_IDLE) | |
2182 | return TRUE; | |
2183 | ||
2184 | /* | |
2185 | * When the application has requested to stop the acquisition, | |
2186 | * then immediately start downloading sample data. Continue a | |
2187 | * previously initiated download until completion. Otherwise | |
2188 | * keep checking configured limits which will terminate the | |
2189 | * acquisition and initiate download. | |
2190 | */ | |
2191 | if (devc->state == SIGMA_STOPPING) | |
2192 | return download_capture(sdi); | |
2193 | if (devc->state == SIGMA_DOWNLOAD) | |
2194 | return download_capture(sdi); | |
2195 | if (devc->state == SIGMA_CAPTURE) | |
2196 | return sigma_capture_mode(sdi); | |
2197 | ||
2198 | return TRUE; | |
2199 | } | |
2200 | ||
2201 | /* Build a LUT entry used by the trigger functions. */ | |
2202 | static void build_lut_entry(uint16_t *lut_entry, | |
2203 | uint16_t spec_value, uint16_t spec_mask) | |
2204 | { | |
2205 | size_t quad, bitidx, ch; | |
2206 | uint16_t quadmask, bitmask; | |
2207 | gboolean spec_value_low, bit_idx_low; | |
2208 | ||
2209 | /* | |
2210 | * For each quad-channel-group, for each bit in the LUT (each | |
2211 | * bit pattern of the channel signals, aka LUT address), for | |
2212 | * each channel in the quad, setup the bit in the LUT entry. | |
2213 | * | |
2214 | * Start from all-ones in the LUT (true, always matches), then | |
2215 | * "pessimize the truthness" for specified conditions. | |
2216 | */ | |
2217 | for (quad = 0; quad < 4; quad++) { | |
2218 | lut_entry[quad] = ~0; | |
2219 | for (bitidx = 0; bitidx < 16; bitidx++) { | |
2220 | for (ch = 0; ch < 4; ch++) { | |
2221 | quadmask = BIT(ch); | |
2222 | bitmask = quadmask << (quad * 4); | |
2223 | if (!(spec_mask & bitmask)) | |
2224 | continue; | |
2225 | /* | |
2226 | * This bit is part of the spec. The | |
2227 | * condition which gets checked here | |
2228 | * (got checked in all implementations | |
2229 | * so far) is uncertain. A bit position | |
2230 | * in the current index' number(!) is | |
2231 | * checked? | |
2232 | */ | |
2233 | spec_value_low = !(spec_value & bitmask); | |
2234 | bit_idx_low = !(bitidx & quadmask); | |
2235 | if (spec_value_low == bit_idx_low) | |
2236 | continue; | |
2237 | lut_entry[quad] &= ~BIT(bitidx); | |
2238 | } | |
2239 | } | |
2240 | } | |
2241 | } | |
2242 | ||
2243 | /* Add a logical function to LUT mask. */ | |
2244 | static void add_trigger_function(enum triggerop oper, enum triggerfunc func, | |
2245 | size_t index, gboolean neg, uint16_t *mask) | |
2246 | { | |
2247 | int x[2][2], a, b, aset, bset, rset; | |
2248 | size_t bitidx; | |
2249 | ||
2250 | /* | |
2251 | * Beware! The x, a, b, aset, bset, rset variables strictly | |
2252 | * require the limited 0..1 range. They are not interpreted | |
2253 | * as logically true, instead bit arith is done on them. | |
2254 | */ | |
2255 | ||
2256 | /* Construct a pattern which detects the condition. */ | |
2257 | memset(x, 0, sizeof(x)); | |
2258 | switch (oper) { | |
2259 | case OP_LEVEL: | |
2260 | x[0][1] = 1; | |
2261 | x[1][1] = 1; | |
2262 | break; | |
2263 | case OP_NOT: | |
2264 | x[0][0] = 1; | |
2265 | x[1][0] = 1; | |
2266 | break; | |
2267 | case OP_RISE: | |
2268 | x[0][1] = 1; | |
2269 | break; | |
2270 | case OP_FALL: | |
2271 | x[1][0] = 1; | |
2272 | break; | |
2273 | case OP_RISEFALL: | |
2274 | x[0][1] = 1; | |
2275 | x[1][0] = 1; | |
2276 | break; | |
2277 | case OP_NOTRISE: | |
2278 | x[1][1] = 1; | |
2279 | x[0][0] = 1; | |
2280 | x[1][0] = 1; | |
2281 | break; | |
2282 | case OP_NOTFALL: | |
2283 | x[1][1] = 1; | |
2284 | x[0][0] = 1; | |
2285 | x[0][1] = 1; | |
2286 | break; | |
2287 | case OP_NOTRISEFALL: | |
2288 | x[1][1] = 1; | |
2289 | x[0][0] = 1; | |
2290 | break; | |
2291 | } | |
2292 | ||
2293 | /* Transpose the pattern if the condition is negated. */ | |
2294 | if (neg) { | |
2295 | size_t i, j; | |
2296 | int tmp; | |
2297 | ||
2298 | for (i = 0; i < 2; i++) { | |
2299 | for (j = 0; j < 2; j++) { | |
2300 | tmp = x[i][j]; | |
2301 | x[i][j] = x[1 - i][1 - j]; | |
2302 | x[1 - i][1 - j] = tmp; | |
2303 | } | |
2304 | } | |
2305 | } | |
2306 | ||
2307 | /* Update the LUT mask with the function's condition. */ | |
2308 | for (bitidx = 0; bitidx < 16; bitidx++) { | |
2309 | a = (bitidx & BIT(2 * index + 0)) ? 1 : 0; | |
2310 | b = (bitidx & BIT(2 * index + 1)) ? 1 : 0; | |
2311 | ||
2312 | aset = (*mask & BIT(bitidx)) ? 1 : 0; | |
2313 | bset = x[b][a]; | |
2314 | ||
2315 | if (func == FUNC_AND || func == FUNC_NAND) | |
2316 | rset = aset & bset; | |
2317 | else if (func == FUNC_OR || func == FUNC_NOR) | |
2318 | rset = aset | bset; | |
2319 | else if (func == FUNC_XOR || func == FUNC_NXOR) | |
2320 | rset = aset ^ bset; | |
2321 | else | |
2322 | rset = 0; | |
2323 | ||
2324 | if (func == FUNC_NAND || func == FUNC_NOR || func == FUNC_NXOR) | |
2325 | rset = 1 - rset; | |
2326 | ||
2327 | if (rset) | |
2328 | *mask |= BIT(bitidx); | |
2329 | else | |
2330 | *mask &= ~BIT(bitidx); | |
2331 | } | |
2332 | } | |
2333 | ||
2334 | /* | |
2335 | * Build trigger LUTs used by 50 MHz and lower sample rates for supporting | |
2336 | * simple pin change and state triggers. Only two transitions (rise/fall) can be | |
2337 | * set at any time, but a full mask and value can be set (0/1). | |
2338 | */ | |
2339 | SR_PRIV int sigma_build_basic_trigger(struct dev_context *devc, | |
2340 | struct triggerlut *lut) | |
2341 | { | |
2342 | uint16_t masks[2]; | |
2343 | size_t bitidx, condidx; | |
2344 | uint16_t value, mask; | |
2345 | ||
2346 | /* Setup something that "won't match" in the absence of a spec. */ | |
2347 | memset(lut, 0, sizeof(*lut)); | |
2348 | if (!devc->use_triggers) | |
2349 | return SR_OK; | |
2350 | ||
2351 | /* Start assuming simple triggers. Edges are handled below. */ | |
2352 | lut->m4 = 0xa000; | |
2353 | lut->m3q = 0xffff; | |
2354 | ||
2355 | /* Process value/mask triggers. */ | |
2356 | value = devc->trigger.simplevalue; | |
2357 | mask = devc->trigger.simplemask; | |
2358 | build_lut_entry(lut->m2d, value, mask); | |
2359 | ||
2360 | /* Scan for and process rise/fall triggers. */ | |
2361 | memset(&masks, 0, sizeof(masks)); | |
2362 | condidx = 0; | |
2363 | for (bitidx = 0; bitidx < 16; bitidx++) { | |
2364 | mask = BIT(bitidx); | |
2365 | value = devc->trigger.risingmask | devc->trigger.fallingmask; | |
2366 | if (!(value & mask)) | |
2367 | continue; | |
2368 | if (condidx == 0) | |
2369 | build_lut_entry(lut->m0d, mask, mask); | |
2370 | if (condidx == 1) | |
2371 | build_lut_entry(lut->m1d, mask, mask); | |
2372 | masks[condidx++] = mask; | |
2373 | if (condidx == ARRAY_SIZE(masks)) | |
2374 | break; | |
2375 | } | |
2376 | ||
2377 | /* Add glue logic for rise/fall triggers. */ | |
2378 | if (masks[0] || masks[1]) { | |
2379 | lut->m3q = 0; | |
2380 | if (masks[0] & devc->trigger.risingmask) | |
2381 | add_trigger_function(OP_RISE, FUNC_OR, 0, 0, &lut->m3q); | |
2382 | if (masks[0] & devc->trigger.fallingmask) | |
2383 | add_trigger_function(OP_FALL, FUNC_OR, 0, 0, &lut->m3q); | |
2384 | if (masks[1] & devc->trigger.risingmask) | |
2385 | add_trigger_function(OP_RISE, FUNC_OR, 1, 0, &lut->m3q); | |
2386 | if (masks[1] & devc->trigger.fallingmask) | |
2387 | add_trigger_function(OP_FALL, FUNC_OR, 1, 0, &lut->m3q); | |
2388 | } | |
2389 | ||
2390 | /* Triggertype: event. */ | |
2391 | lut->params.selres = TRGSEL_SELCODE_NEVER; | |
2392 | lut->params.selinc = TRGSEL_SELCODE_LEVEL; | |
2393 | lut->params.sela = 0; /* Counter >= CMPA && LEVEL */ | |
2394 | lut->params.cmpa = 0; /* Count 0 -> 1 already triggers. */ | |
2395 | ||
2396 | return SR_OK; | |
2397 | } |