]> sigrok.org Git - sigrok-test.git/blame - decoder/pdtest
parallel: add missing data file with expected output for SQI traffic
[sigrok-test.git] / decoder / pdtest
CommitLineData
dd37a782
UH
1#!/usr/bin/env python3
2##
3## This file is part of the sigrok-test project.
4##
5## Copyright (C) 2013 Bert Vermeulen <bert@biot.com>
6##
7## This program is free software: you can redistribute it and/or modify
8## it under the terms of the GNU General Public License as published by
9## the Free Software Foundation, either version 3 of the License, or
10## (at your option) any later version.
11##
12## This program is distributed in the hope that it will be useful,
13## but WITHOUT ANY WARRANTY; without even the implied warranty of
14## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15## GNU General Public License for more details.
16##
17## You should have received a copy of the GNU General Public License
18## along with this program. If not, see <http://www.gnu.org/licenses/>.
19##
20
21import os
22import sys
23import re
24from getopt import getopt
25from tempfile import mkstemp
26from subprocess import Popen, PIPE
6a789f0a 27from difflib import unified_diff
dd37a782
UH
28from hashlib import md5
29from shutil import copy
30
DEBUG = 0        # debug level; incremented once per -d command line flag
VERBOSE = False  # progress output; enabled by the -v command line flag
33
34
class E_syntax(Exception):
    """A test.conf line had a recognized key but invalid syntax."""

class E_badline(Exception):
    """A test.conf line was not recognized at all."""
39
def INFO(msg, end='\n'):
    """Print a progress message and flush stdout, but only in verbose mode."""
    if not VERBOSE:
        return
    print(msg, end=end)
    sys.stdout.flush()
44
45
def DBG(msg):
    """Print a debug message when the global DEBUG level is nonzero."""
    if not DEBUG:
        return
    print(msg)
49
50
def ERR(msg):
    """Print an error message to standard error."""
    print(msg, file=sys.stderr)
53
54
55def usage(msg=None):
56 if msg:
57 print(msg.strip() + '\n')
121614a0 58 print("""Usage: testpd [-dvalsrfcR] [<test1> <test2> ...]
dd37a782
UH
59 -d Turn on debugging
60 -v Verbose
61 -a All tests
121614a0 62 -l List test(s)
dd37a782
UH
63 -s Show test(s)
64 -r Run test(s)
810494f4 65 -f Fix failed test(s) / create initial output for new test(s)
dd37a782
UH
66 -c Report decoder code coverage
67 -R <directory> Save test reports to <directory>
121614a0 68 <test> Protocol decoder name ("i2c") and optionally test name ("i2c/rtc")""")
dd37a782
UH
69 sys.exit()
70
71
def check_tclist(tc):
    """Validate a parsed testcase dict.

    Returns an error message string describing the first missing piece,
    or None when the testcase is complete.
    """
    if not tc.get('pdlist'):
        return "No protocol decoders"
    if not tc.get('input'):
        return "No input"
    if not tc.get('output'):
        return "No output"
    for op in tc['output']:
        if 'match' not in op:
            return "No match in output"
    return None
84
85
def parse_testfile(path, pd, tc, op_type, op_class):
    """Parse a protocol decoder's test.conf file into testcase dicts.

    path: path of the test.conf file to read.
    pd: name of the protocol decoder the file belongs to.
    tc: optional testcase name; when given, only that testcase is kept.
    op_type: optional output type filter (only meaningful with tc).
    op_class: optional output class filter (only meaningful with op_type).

    Returns a list of testcase dicts, or [] when parsing or validation
    fails (an error is reported via ERR()).
    """
    DBG("Opening '%s'" % path)
    tclist = []
    for line in open(path).read().split('\n'):
        try:
            line = line.strip()
            if len(line) == 0 or line[0] == "#":
                continue
            f = line.split()
            if not tclist and f[0] != "test":
                # A test.conf must begin with a 'test' line.
                raise E_badline
            key = f.pop(0)
            if key == 'test':
                if len(f) != 1:
                    raise E_syntax
                # Start a new testcase record.
                tclist.append({
                    'pd': pd,
                    'name': f[0],
                    'pdlist': [],
                    'output': [],
                })
            elif key == 'protocol-decoder':
                if len(f) < 1:
                    raise E_syntax
                pd_spec = {
                    'name': f.pop(0),
                    'channels': [],
                    'options': [],
                    'initial_pins': [],
                }
                while len(f):
                    if len(f) == 1:
                        # Always needs <key> <value>.
                        raise E_syntax
                    a, b = f[:2]
                    f = f[2:]
                    if '=' not in b:
                        raise E_syntax
                    opt, val = b.split('=')
                    if a == 'channel':
                        try:
                            val = int(val)
                        except ValueError:
                            raise E_syntax
                        pd_spec['channels'].append([opt, val])
                    elif a == 'option':
                        pd_spec['options'].append([opt, val])
                    elif a == 'initial_pin':
                        try:
                            val = int(val)
                        except ValueError:
                            raise E_syntax
                        pd_spec['initial_pins'].append([opt, val])
                    else:
                        raise E_syntax
                tclist[-1]['pdlist'].append(pd_spec)
            elif key == 'stack':
                if len(f) < 2:
                    raise E_syntax
                tclist[-1]['stack'] = f
            elif key == 'input':
                if len(f) != 1:
                    raise E_syntax
                tclist[-1]['input'] = f[0]
            elif key == 'output':
                op_spec = {
                    'pd': f.pop(0),
                    'type': f.pop(0),
                }
                while len(f):
                    if len(f) == 1:
                        # Always needs <key> <value>.
                        raise E_syntax
                    a, b = f[:2]
                    f = f[2:]
                    if a == 'class':
                        op_spec['class'] = b
                    elif a == 'match':
                        op_spec['match'] = b
                    else:
                        raise E_syntax
                tclist[-1]['output'].append(op_spec)
            else:
                raise E_badline
        except E_badline as e:
            ERR("Invalid syntax in %s: line '%s'" % (path, line))
            return []
        except E_syntax as e:
            ERR("Unable to parse %s: unknown line '%s'" % (path, line))
            return []

    # If a specific testcase was requested, keep only that one.
    if tc is not None:
        target_tc = None
        for t in tclist:
            if t['name'] == tc:
                target_tc = t
                break
        # ...and optionally only outputs of a specific type/class.
        # Bugfix: guard on target_tc, since a nonexistent testcase name
        # previously crashed here on target_tc['output'] (None subscript).
        if target_tc is not None and op_type is not None:
            target_oplist = []
            for op in target_tc['output']:
                if op['type'] == op_type:
                    # ...and a specific output class.
                    if op_class is None or ('class' in op and op['class'] == op_class):
                        target_oplist.append(op)
                        DBG("match on [%s]" % str(op))
            target_tc['output'] = target_oplist
        if target_tc is None:
            tclist = []
        else:
            tclist = [target_tc]
    for t in tclist:
        error = check_tclist(t)
        if error:
            ERR("Error in %s: %s" % (path, error))
            return []

    return tclist
207
208
def get_tests(testnames):
    """Collect parsed testcases for each requested testspec.

    Testspecs have the form pd[/testcase[/type[/class]]].  Returns a dict
    mapping PD name to a list of parse_testfile() results.  Raises when a
    named PD directory does not exist; PDs without a test.conf are kept
    with an empty list.
    """
    tests = {}
    for testspec in testnames:
        parts = testspec.strip("/").split("/")
        pd = parts.pop(0)
        tests[pd] = []
        # Peel off the optional testcase / output type / output class.
        tc = parts.pop(0) if parts else None
        op_type = parts.pop(0) if parts else None
        op_class = parts.pop(0) if parts else None
        pd_dir = os.path.join(tests_dir, pd)
        if not os.path.isdir(pd_dir):
            # User specified a non-existent PD.
            raise Exception("%s not found." % pd_dir)
        conf_path = os.path.join(tests_dir, pd, "test.conf")
        if not os.path.exists(conf_path):
            # This PD doesn't have any tests yet.
            continue
        tests[pd].append(parse_testfile(conf_path, pd, tc, op_type, op_class))

    return tests
234
235
def diff_text(f1, f2):
    """Return the added/removed lines between two text files.

    Output is a list of stripped unified-diff lines starting with '+' or
    '-'; an empty list means the files match.
    """
    lines_a = open(f1).readlines()
    lines_b = open(f2).readlines()
    delta = list(unified_diff(lines_a, lines_b))[2:]  # drop ---/+++ header
    return [entry.strip() for entry in delta if entry[0] in ('+', '-')]
243
244
def compare_binary(f1, f2):
    """Compare two files by MD5 digest.

    Returns None when the contents are identical, otherwise a one-element
    message list (matching the diff_text() result convention).
    """
    digest_a = md5(open(f1, 'rb').read()).digest()
    digest_b = md5(open(f2, 'rb').read()).digest()
    if digest_a == digest_b:
        return None
    return ["Binary output does not match."]
256
257
# runtc's stdout can have lines like:
# coverage: lines=161 missed=2 coverage=99%
def parse_stats(text):
    """Parse runtc stdout into {key: [record, ...]} with string values.

    Each input line is '<key>: k=v k=v ...'; repeated keys accumulate one
    record dict per line.
    """
    stats = {}
    for line in text.strip().split('\n'):
        tokens = line.split()
        name = tokens.pop(0).strip(':')
        record = {}
        for token in tokens:
            k, v = token.split('=')
            record[k] = v
        stats.setdefault(name, []).append(record)

    return stats
273
274
# take result set of all tests in a PD, and summarize which lines
# were not covered by any of the tests.
def coverage_sum(cvglist):
    """Summarize per-test coverage records for one PD.

    cvglist: list of dicts with string 'lines' and optional comma-separated
    'missed_lines' ("file:lineno") entries.

    Returns (total_lines, missed) where missed contains only the linespecs
    missed by *every* record.  (The previous version also accumulated a
    'missed' counter that was never used; that dead local is removed.)
    """
    lines = 0
    missed_lines = {}
    for record in cvglist:
        # Every record reports the same per-PD total, so plain assignment
        # (not accumulation) is intentional here.
        lines = int(record['lines'])
        if 'missed_lines' not in record:
            continue
        for linespec in record['missed_lines'].split(','):
            missed_lines[linespec] = missed_lines.get(linespec, 0) + 1

    # Keep only those lines that showed up as missed in every record.
    final_missed = [linespec for linespec in missed_lines
                    if missed_lines[linespec] == len(cvglist)]

    return lines, final_missed
300
301
def run_tests(tests, fix=False):
    """Run every testcase in 'tests' through the external 'runtc' helper.

    tests: dict as produced by get_tests() ({pd: [[testcase, ...], ...]}).
    fix: when True, overwrite the expected-output files with the actual
    output instead of recording mismatches.

    Returns (results, errors): one result dict per output spec (keys may
    include 'error', 'diff', 'coverage', 'coverage_report'), plus a count
    of hard errors.

    Relies on module globals: runtc_dir, dumps_dir, tests_dir, report_dir,
    opt_coverage, DEBUG, VERBOSE.
    """
    errors = 0
    results = []
    cmd = [os.path.join(runtc_dir, 'runtc')]
    if opt_coverage:
        # Temp file that runtc fills with a coverage report per run.
        fd, coverage = mkstemp()
        os.close(fd)
        cmd.extend(['-c', coverage])
    else:
        coverage = None
    for pd in sorted(tests.keys()):
        pd_cvg = []
        for tclist in tests[pd]:
            for tc in tclist:
                args = cmd[:]
                if DEBUG > 1:
                    args.append('-d')
                # Set up PD stack for this test.
                for spd in tc['pdlist']:
                    args.extend(['-P', spd['name']])
                    for label, channel in spd['channels']:
                        args.extend(['-p', "%s=%d" % (label, channel)])
                    for option, value in spd['options']:
                        args.extend(['-o', "%s=%s" % (option, value)])
                    for label, initial_pin in spd['initial_pins']:
                        args.extend(['-N', "%s=%d" % (label, initial_pin)])
                args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
                # One runtc invocation per requested output spec.
                for op in tc['output']:
                    name = "%s/%s/%s" % (pd, tc['name'], op['type'])
                    opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
                    if 'class' in op:
                        opargs[-1] += ":%s" % op['class']
                        name += "/%s" % op['class']
                    if VERBOSE:
                        # Pad the testcase name with dots up to column 77.
                        dots = '.' * (77 - len(name) - 2)
                        INFO("%s %s " % (name, dots), end='')
                    results.append({
                        'testcase': name,
                    })
                    try:
                        # runtc writes the decoder output into a temp file.
                        fd, outfile = mkstemp()
                        os.close(fd)
                        opargs.extend(['-f', outfile])
                        DBG("Running %s" % (' '.join(args + opargs)))
                        p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
                        stdout, stderr = p.communicate()
                        if stdout:
                            # statistics and coverage data on stdout
                            results[-1].update(parse_stats(stdout.decode('utf-8')))
                        if stderr:
                            results[-1]['error'] = stderr.decode('utf-8').strip()
                            errors += 1
                        elif p.returncode != 0:
                            # runtc indicated an error, but didn't output a
                            # message on stderr about it
                            results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
                        if 'error' not in results[-1]:
                            # Compare actual output against the stored match file.
                            matchfile = os.path.join(tests_dir, op['pd'], op['match'])
                            DBG("Comparing with %s" % matchfile)
                            try:
                                diff = diff_error = None
                                if op['type'] in ('annotation', 'python'):
                                    diff = diff_text(matchfile, outfile)
                                elif op['type'] == 'binary':
                                    diff = compare_binary(matchfile, outfile)
                                else:
                                    diff = ["Unsupported output type '%s'." % op['type']]
                            except Exception as e:
                                # Comparison itself failed (e.g. missing match
                                # file); remembered and re-raised below unless
                                # we're in fix mode.
                                diff_error = e
                            if fix:
                                if diff or diff_error:
                                    # Overwrite the expected output with what
                                    # the decoder actually produced.
                                    copy(outfile, matchfile)
                                    DBG("Wrote %s" % matchfile)
                            else:
                                if diff:
                                    results[-1]['diff'] = diff
                                elif diff_error is not None:
                                    raise diff_error
                    except Exception as e:
                        results[-1]['error'] = str(e)
                    finally:
                        if coverage:
                            results[-1]['coverage_report'] = coverage
                        os.unlink(outfile)
                    if op['type'] == 'exception' and 'error' in results[-1]:
                        # filter out the exception we were looking for
                        reg = "^Error: srd: %s:" % op['match']
                        if re.match(reg, results[-1]['error']):
                            # found it, not an error
                            results[-1].pop('error')
                            errors -= 1
                    if VERBOSE:
                        if 'diff' in results[-1]:
                            INFO("Output mismatch")
                        elif 'error' in results[-1]:
                            # Truncate long error messages to one line.
                            error = results[-1]['error']
                            if len(error) > 20:
                                error = error[:17] + '...'
                            INFO(error)
                        elif 'coverage' in results[-1]:
                            # report coverage of this PD
                            for record in results[-1]['coverage']:
                                # but not others used in the stack
                                # as part of the test.
                                if record['scope'] == pd:
                                    INFO(record['coverage'])
                                    break
                        else:
                            INFO("OK")
                    gen_report(results[-1])
                    if coverage:
                        os.unlink(coverage)
                        # only keep track of coverage records for this PD,
                        # not others in the stack just used for testing.
                        for cvg in results[-1]['coverage']:
                            if cvg['scope'] == pd:
                                pd_cvg.append(cvg)
        if opt_coverage and len(pd_cvg) > 1:
            # report total coverage of this PD, across all the tests
            # that were done on it.
            total_lines, missed_lines = coverage_sum(pd_cvg)
            pd_coverage = 100 - (float(len(missed_lines)) / total_lines * 100)
            if VERBOSE:
                dots = '.' * (54 - len(pd) - 2)
                INFO("%s total %s %d%%" % (pd, dots, pd_coverage))
            if report_dir:
                # generate a missing lines list across all the files in
                # the PD
                files = {}
                for entry in missed_lines:
                    filename, line = entry.split(':')
                    if filename not in files:
                        files[filename] = []
                    files[filename].append(line)
                text = ''
                for filename in sorted(files.keys()):
                    line_list = ','.join(sorted(files[filename], key=int))
                    text += "%s: %s\n" % (filename, line_list)
                open(os.path.join(report_dir, pd + "_total"), 'w').write(text)


    return results, errors
444
a1c10c43
GS
def get_run_tests_error_diff_counts(results):
    """Get error and diff counters from run_tests() results."""
    errs = sum(1 for result in results if 'error' in result)
    diffs = sum(1 for result in results if 'diff' in result)
    return errs, diffs
455
dd37a782
UH
456
def gen_report(result):
    """Emit a report for one test result.

    Writes to a per-testcase file under the global report_dir when set,
    otherwise prints to stdout.  Results with nothing to report (no error,
    diff, or coverage report) produce no output at all.
    """
    sections = []
    if 'error' in result:
        sections += ["Error:", result['error'], '']
    if 'diff' in result:
        sections.append("Test output mismatch:")
        sections.extend(result['diff'])
        sections.append('')
    if 'coverage_report' in result:
        sections.append(open(result['coverage_report'], 'r').read())
        sections.append('')

    if not sections:
        return
    text = "Testcase: %s\n" % result['testcase']
    text += '\n'.join(sections)

    if report_dir:
        filename = result['testcase'].replace('/', '_')
        open(os.path.join(report_dir, filename), 'w').write(text)
    else:
        print(text)
482
483
def show_tests(tests):
    """Print a human-readable description of every testcase in 'tests'."""
    for pd in sorted(tests.keys()):
        for tclist in tests[pd]:
            for tc in tclist:
                print("Testcase: %s/%s" % (tc['pd'], tc['name']))
                # Renamed the inner loop variable (was 'pd', shadowing the
                # outer PD-name loop variable).
                for spd in tc['pdlist']:
                    print("  Protocol decoder: %s" % spd['name'])
                    for label, channel in spd['channels']:
                        print("    Channel %s=%d" % (label, channel))
                    for option, value in spd['options']:
                        print("    Option %s=%s" % (option, value))
                    for label, initial_pin in spd['initial_pins']:
                        print("    Initial pin %s=%d" % (label, initial_pin))
                if 'stack' in tc:
                    print("  Stack: %s" % ' '.join(tc['stack']))
                print("  Input: %s" % tc['input'])
                for op in tc['output']:
                    print("  Output:\n    Protocol decoder: %s" % op['pd'])
                    print("    Type: %s" % op['type'])
                    if 'class' in op:
                        print("    Class: %s" % op['class'])
                    print("    Match: %s" % op['match'])
                print()
507
508
def list_tests(tests):
    """Print one identifier per output spec: pd/testcase/type[/class]."""
    for pd in sorted(tests.keys()):
        for tclist in tests[pd]:
            for tc in tclist:
                for op in tc['output']:
                    entry = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
                    if 'class' in op:
                        entry += "/%s" % op['class']
                    print(entry)
518
519
#
# main
#

# project root
# Directories are derived from this script's own location: the tests live
# in ./test, and the sigrok-dumps repo is expected beside sigrok-test.
runtc_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
base_dir = os.path.abspath(os.path.join(os.curdir, runtc_dir, os.path.pardir))
dumps_dir = os.path.abspath(os.path.join(base_dir, os.path.pardir, 'sigrok-dumps'))
tests_dir = os.path.abspath(os.path.join(runtc_dir, 'test'))

if len(sys.argv) == 1:
    usage()

opt_all = opt_run = opt_show = opt_list = opt_fix = opt_coverage = False
report_dir = None
try:
    opts, args = getopt(sys.argv[1:], "dvarslfcR:S:")
except Exception as e:
    usage('error while parsing command line arguments: {}'.format(e))
for opt, arg in opts:
    if opt == '-d':
        DEBUG += 1
    # NOTE(review): '-v' uses 'if' where the rest of the chain uses 'elif';
    # behavior is unaffected, but confirm before "fixing" the style.
    if opt == '-v':
        VERBOSE = True
    elif opt == '-a':
        opt_all = True
    elif opt == '-r':
        opt_run = True
    elif opt == '-s':
        opt_show = True
    elif opt == '-l':
        opt_list = True
    elif opt == '-f':
        opt_fix = True
    elif opt == '-c':
        opt_coverage = True
    elif opt == '-R':
        report_dir = arg
    elif opt == '-S':
        # Override the default sigrok-dumps location.
        dumps_dir = arg

if opt_run and opt_show:
    usage("Use either -s or -r, not both.")
if args and opt_all:
    usage("Specify either -a or tests, not both.")
if report_dir is not None and not os.path.isdir(report_dir):
    usage("%s is not a directory" % report_dir)

ret = 0
try:
    if args:
        testlist = get_tests(args)
    elif opt_all or opt_list:
        testlist = get_tests(os.listdir(tests_dir))
    else:
        usage("Specify either -a or tests.")

    if opt_run:
        if not os.path.isdir(dumps_dir):
            ERR("Could not find sigrok-dumps repository at %s" % dumps_dir)
            sys.exit(1)
        results, errors = run_tests(testlist, fix=opt_fix)
        ret = 0
        errs, diffs = get_run_tests_error_diff_counts(results)
        # Exit status: 1 for hard errors, 2 when only output mismatches.
        if errs:
            ret = 1
        elif diffs:
            ret = 2
    elif opt_show:
        show_tests(testlist)
    elif opt_list:
        list_tests(testlist)
    elif opt_fix:
        run_tests(testlist, fix=True)
    else:
        usage()
except Exception as e:
    # Without -d only the message is shown; with -d the traceback re-raises.
    print("Error: %s" % str(e))
    if DEBUG:
        raise

sys.exit(ret)