3 ## This file is part of the sigrok-test project.
5 ## Copyright (C) 2013 Bert Vermeulen <bert@biot.com>
7 ## This program is free software: you can redistribute it and/or modify
8 ## it under the terms of the GNU General Public License as published by
9 ## the Free Software Foundation, either version 3 of the License, or
10 ## (at your option) any later version.
12 ## This program is distributed in the hope that it will be useful,
13 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ## GNU General Public License for more details.
17 ## You should have received a copy of the GNU General Public License
18 ## along with this program. If not, see <http://www.gnu.org/licenses/>.
24 from getopt import getopt
25 from tempfile import mkstemp
26 from subprocess import Popen, PIPE
27 from difflib import unified_diff
28 from hashlib import md5
29 from shutil import copy
class E_syntax(Exception):
    """Parser control-flow exception: a test.conf line was not recognized
    at all (reported as "unknown line" by parse_testfile's handler)."""
class E_badline(Exception):
    """Parser control-flow exception: a recognized test.conf keyword line
    is malformed (reported as "Invalid syntax" by parse_testfile's handler)."""
# Console/logging helpers and the usage/help text.
# NOTE(review): this excerpt is fragmentary -- the bodies of INFO and of
# the sibling helpers (presumably ERR/DBG, and usage) between these lines
# are not fully visible here; confirm against the complete file.

# Print a status message; `end` allows progress-style output that stays
# on the same line (see run_tests' "name ....... " lines).
def INFO(msg, end='\n'):
# Error/debug output goes to stderr (body of a sibling helper).
print(msg, file=sys.stderr)
# Usage: optional error message first, then the option summary.
print(msg.strip() + '\n')
print("""Usage: testpd [-dvalsrfcR] [<test1> <test2> ...]
-f Fix failed test(s) / create initial output for new test(s)
-c Report decoder code coverage
-R <directory> Save test reports to <directory>
<test> Protocol decoder name ("i2c") and optionally test name ("i2c/rtc")""")
# Sanity-check one parsed testcase dict: returns a human-readable error
# string when a required section ('pdlist', 'input', 'output' or an
# output's match file) is missing.
# NOTE(review): excerpt is fragmentary -- the enclosing
# `def check_tclist(tc):` header and some return branches are not
# visible here.
if 'pdlist' not in tc or not tc['pdlist']:
return("No protocol decoders")
if 'input' not in tc or not tc['input']:
if 'output' not in tc or not tc['output']:
for op in tc['output']:
return("No match in output")
# Parse a decoder test configuration file (test.conf) into a list of
# testcase dicts. `pd` is the protocol decoder the file belongs to; the
# optional `tc`/`op_type`/`op_class` arguments narrow the result down to
# one testcase / output type / output class.
# NOTE(review): this excerpt is fragmentary -- many parser lines are
# missing; the comments below describe only what is visible.
def parse_testfile(path, pd, tc, op_type, op_class):
DBG("Opening '%s'" % path)
# Line-oriented format: blank lines and '#' comments are skipped.
for line in open(path).read().split('\n'):
if len(line) == 0 or line[0] == "#":
# The first non-comment keyword must open a "test" section.
if not tclist and f[0] != "test":
elif key == 'protocol-decoder':
# Always needs <key> <value>
# channel/option/initial_pin entries are 'name=value' pairs.
opt, val = b.split('=')
pd_spec['channels'].append([opt, val])
pd_spec['options'].append([opt, val])
elif a == 'initial_pin':
pd_spec['initial_pins'].append([opt, val])
tclist[-1]['pdlist'].append(pd_spec)
tclist[-1]['stack'] = f
# Always needs <key> <value>
input_spec['format'] = b
input_spec['options'].append(b)
tclist[-1]['input'] = input_spec
elif key == 'output':
# Always needs <key> <value>
tclist[-1]['output'].append(op_spec)
# Malformed/unknown lines abort the parse with a diagnostic.
except E_badline as e:
ERR("Invalid syntax in %s: line '%s'" % (path, line))
except E_syntax as e:
ERR("Unable to parse %s: unknown line '%s'" % (path, line))
# If a specific testcase was requested, keep only that one.
# ...and a specific output type
if op_type is not None:
for op in target_tc['output']:
if op['type'] == op_type:
# ...and a specific output class
if op_class is None or ('class' in op and op['class'] == op_class):
target_oplist.append(op)
DBG("match on [%s]" % str(op))
target_tc['output'] = target_oplist
if target_tc is None:
# Validate every surviving testcase before returning.
error = check_tclist(t)
ERR("Error in %s: %s" % (path, error))
# Build a {pd_name: [parsed testcase lists]} mapping from user-supplied
# test specs of the form pd[/testcase[/type[/class]]].
# NOTE(review): excerpt is fragmentary -- dict setup and the testspec
# field unpacking are not visible here.
def get_tests(testnames):
for testspec in testnames:
# Optional testspec in the form pd/testcase/type/class
tc = op_type = op_class = None
ts = testspec.strip("/").split("/")
path = os.path.join(tests_dir, pd)
if not os.path.isdir(path):
# User specified non-existent PD
raise Exception("%s not found." % path)
path = os.path.join(tests_dir, pd, "test.conf")
if not os.path.exists(path):
# PD doesn't have any tests yet
tests[pd].append(parse_testfile(path, pd, tc, op_type, op_class))
def diff_text(f1, f2):
    """Return the textual differences between two files.

    Produces a list of stripped '+'/'-' lines from a unified diff of
    the files' contents; an empty list means the files match.  The
    '---'/'+++' filename header lines, '@@' hunk markers and context
    lines are dropped, since callers only report content changes.
    """
    # Use context managers so the handles are closed deterministically
    # (the original left both files open).
    with open(f1) as fh1:
        t1 = fh1.readlines()
    with open(f2) as fh2:
        t2 = fh2.readlines()
    diff = list(unified_diff(t1, t2))
    diff = diff[2:]  # Strip two from/to filename lines with "+++"/"---".
    # Keep only added/removed lines; '@@' hunk headers and context lines
    # (leading space) are irrelevant for the mismatch report.
    diff = [d.strip() for d in diff if d[0] in ('+', '-')]
    # BUG FIX: the computed diff was never returned to the caller.
    return diff
def compare_binary(f1, f2):
    """Compare two files byte-for-byte via their MD5 digests.

    Returns None when the contents match, otherwise a one-element list
    containing a mismatch message -- mirroring diff_text()'s
    list-of-lines result shape so callers can treat both uniformly.
    """
    # BUG FIX: the visible code referenced h1/h2 without ever
    # constructing the hash objects, and never returned the result.
    h1 = md5()
    with open(f1, 'rb') as fh:
        h1.update(fh.read())
    h2 = md5()
    with open(f2, 'rb') as fh:
        h2.update(fh.read())
    if h1.digest() == h2.digest():
        return None
    return ["Binary output does not match."]
# runtc's stdout can have lines like:
# coverage: lines=161 missed=2 coverage=99%
# Parse such "key: k=v k=v ..." lines into a dict mapping each leading
# key to a list of per-line {k: v} record dicts (values kept as strings).
# NOTE(review): excerpt is fragmentary -- the dict initialization, the
# k/v split and the return statement are not visible here.
def parse_stats(text):
for line in text.strip().split('\n'):
fields = line.split()
# First whitespace-separated field is the record type, sans ':'.
key = fields.pop(0).strip(':')
stats[key].append({})
stats[key][-1][k] = v
# take result set of all tests in a PD, and summarize which lines
# were not covered by any of the tests.
# Returns (lines, final_missed): the PD's line count and the list of
# "file:line" specs that remain uncovered across the whole result set.
# NOTE(review): excerpt is fragmentary -- accumulator initialization
# (missed, missed_lines, final_missed) and some branches are missing.
def coverage_sum(cvglist):
for record in cvglist:
lines = int(record['lines'])
missed += int(record['missed'])
# Records without a missed-lines list contribute nothing further.
if 'missed_lines' not in record:
# Count how many records missed each individual linespec.
for linespec in record['missed_lines'].split(','):
if linespec not in missed_lines:
missed_lines[linespec] = 1
missed_lines[linespec] += 1
# keep only those lines that didn't show up in every non-summary record
for linespec in missed_lines:
if missed_lines[linespec] != len(cvglist):
final_missed.append(linespec)
return lines, final_missed
# Execute every testcase in `tests` by invoking the external 'runtc'
# helper as a subprocess, comparing its output against the stored match
# files.  Returns (results, errors).  With fix=True, a mismatching or
# missing match file is overwritten with the fresh output instead of
# being reported as a failure.
# NOTE(review): this excerpt is fragmentary -- result-list bookkeeping,
# temp-file cleanup and several branches are not visible here; comments
# describe only what is shown.
def run_tests(tests, fix=False):
cmd = [os.path.join(runtc_dir, 'runtc')]
# Optional coverage report is written by runtc to a temp file (-c).
fd, coverage = mkstemp()
cmd.extend(['-c', coverage])
for pd in sorted(tests.keys()):
for tclist in tests[pd]:
# Set up PD stack for this test.
for spd in tc['pdlist']:
args.extend(['-P', spd['name']])
for label, channel in spd['channels']:
args.extend(['-p', "%s=%d" % (label, channel)])
for option, value in spd['options']:
args.extend(['-o', "%s=%s" % (option, value)])
for label, initial_pin in spd['initial_pins']:
args.extend(['-N', "%s=%d" % (label, initial_pin)])
# Setup input spec for this test (optional format spec).
in_spec = tc['input']
infile = os.path.join(dumps_dir, in_spec['name'])
args.extend(['-i', infile])
if in_spec['format']:
args.extend(['-I', in_spec['format']])
for opt in in_spec['options']:
args.extend(['-I', opt])
# Setup output spec for this test.
for op in tc['output']:
name = "%s/%s/%s" % (pd, tc['name'], op['type'])
opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
opargs[-1] += ":%s" % op['class']
name += "/%s" % op['class']
# Progress line: test name padded with dots to a fixed width.
dots = '.' * (77 - len(name) - 2)
INFO("%s %s " % (name, dots), end='')
# runtc writes its decode output to a fresh temp file (-f).
fd, outfile = mkstemp()
opargs.extend(['-f', outfile])
DBG("Running %s" % (' '.join(args + opargs)))
p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
# statistics and coverage data on stdout
results[-1].update(parse_stats(stdout.decode('utf-8')))
results[-1]['error'] = stderr.decode('utf-8').strip()
elif p.returncode != 0:
# runtc indicated an error, but didn't output a
# message on stderr about it
results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
if 'error' not in results[-1]:
# Compare runtc's output with the stored match file.
matchfile = os.path.join(tests_dir, op['pd'], op['match'])
DBG("Comparing with %s" % matchfile)
diff = diff_error = None
if op['type'] in ('annotation', 'python'):
diff = diff_text(matchfile, outfile)
elif op['type'] == 'binary':
diff = compare_binary(matchfile, outfile)
diff = ["Unsupported output type '%s'." % op['type']]
except Exception as e:
# fix mode: adopt the new output as the expected match file.
if diff or diff_error:
copy(outfile, matchfile)
DBG("Wrote %s" % matchfile)
results[-1]['diff'] = diff
elif diff_error is not None:
except Exception as e:
results[-1]['error'] = str(e)
results[-1]['coverage_report'] = coverage
if op['type'] == 'exception' and 'error' in results[-1]:
# filter out the exception we were looking for
reg = "^Error: srd: %s:" % op['match']
if re.match(reg, results[-1]['error']):
# found it, not an error
results[-1].pop('error')
# Per-test console status (mismatch / truncated error / coverage).
if 'diff' in results[-1]:
INFO("Output mismatch")
elif 'error' in results[-1]:
error = results[-1]['error']
error = error[:17] + '...'
elif 'coverage' in results[-1]:
# report coverage of this PD
for record in results[-1]['coverage']:
# but not others used in the stack
# as part of the test.
if record['scope'] == pd:
INFO(record['coverage'])
gen_report(results[-1])
# only keep track of coverage records for this PD,
# not others in the stack just used for testing.
for cvg in results[-1]['coverage']:
if cvg['scope'] == pd:
if opt_coverage and len(pd_cvg) > 1:
# report total coverage of this PD, across all the tests
# that were done on it.
total_lines, missed_lines = coverage_sum(pd_cvg)
pd_coverage = 100 - (float(len(missed_lines)) / total_lines * 100)
dots = '.' * (54 - len(pd) - 2)
INFO("%s total %s %d%%" % (pd, dots, pd_coverage))
# generate a missing lines list across all the files in
for entry in missed_lines:
filename, line = entry.split(':')
if filename not in files:
files[filename].append(line)
for filename in sorted(files.keys()):
line_list = ','.join(sorted(files[filename], key=int))
text += "%s: %s\n" % (filename, line_list)
open(os.path.join(report_dir, pd + "_total"), 'w').write(text)
return results, errors
def get_run_tests_error_diff_counts(results):
"""Get error and diff counters from run_tests() results."""
# NOTE(review): excerpt is fragmentary -- counter initialization, the
# diff-counting branch and the return statement are not visible here.
for result in results:
if 'error' in result:
# Emit one testcase's failure/coverage report.  The report collects the
# error text, output diff and coverage dump, and is written to a file in
# report_dir named after the testcase ('/' replaced by '_').
# NOTE(review): excerpt is fragmentary -- list setup and the
# console-output branch (when no report_dir is set) are not visible.
def gen_report(result):
if 'error' in result:
out.append(result['error'])
out.append("Test output mismatch:")
out.extend(result['diff'])
if 'coverage_report' in result:
out.append(open(result['coverage_report'], 'r').read())
text = "Testcase: %s\n" % result['testcase']
text += '\n'.join(out)
filename = result['testcase'].replace('/', '_')
open(os.path.join(report_dir, filename), 'w').write(text)
# Pretty-print the parsed testcases in indented human-readable form
# (the -s option).  NOTE(review): excerpt is fragmentary.
def show_tests(tests):
for pd in sorted(tests.keys()):
for tclist in tests[pd]:
print("Testcase: %s/%s" % (tc['pd'], tc['name']))
# NOTE(review): this inner loop variable `pd` shadows the outer
# decoder-name `pd` from the first loop -- harmless here since the
# outer value is not used afterwards, but worth confirming upstream.
for pd in tc['pdlist']:
print(" Protocol decoder: %s" % pd['name'])
for label, channel in pd['channels']:
print(" Channel %s=%d" % (label, channel))
for option, value in pd['options']:
print(" Option %s=%s" % (option, value))
for label, initial_pin in pd['initial_pins']:
print(" Initial pin %s=%d" % (label, initial_pin))
print(" Stack: %s" % ' '.join(tc['stack']))
print(" Input: %s" % tc['input'])
for op in tc['output']:
print(" Output:\n Protocol decoder: %s" % op['pd'])
print(" Type: %s" % op['type'])
print(" Class: %s" % op['class'])
print(" Match: %s" % op['match'])
# Print one 'pd/testcase/type[/class]' spec line per output (the -l
# option), in the same format accepted by get_tests().
# NOTE(review): excerpt is fragmentary -- the final print of `line` is
# not visible here.
def list_tests(tests):
for pd in sorted(tests.keys()):
for tclist in tests[pd]:
for op in tc['output']:
line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
line += "/%s" % op['class']
# Script entry point: resolve working directories relative to this
# script's location, parse command-line options, then dispatch to the
# run/show/list modes.
# NOTE(review): excerpt is fragmentary -- the option-handling loop body,
# several mode branches and exit paths are not visible here.
runtc_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
base_dir = os.path.abspath(os.path.join(os.curdir, runtc_dir, os.path.pardir))
# Sample capture files are expected in a 'sigrok-dumps' checkout next to
# the base directory.
dumps_dir = os.path.abspath(os.path.join(base_dir, os.path.pardir, 'sigrok-dumps'))
tests_dir = os.path.abspath(os.path.join(runtc_dir, 'test'))
# No arguments at all: show usage.
if len(sys.argv) == 1:
opt_all = opt_run = opt_show = opt_list = opt_fix = opt_coverage = False
opts, args = getopt(sys.argv[1:], "dvarslfcR:S:")
except Exception as e:
usage('error while parsing command line arguments: {}'.format(e))
for opt, arg in opts:
# Reject nonsensical mode combinations up front.
if opt_run and opt_show:
usage("Use either -s or -r, not both.")
usage("Specify either -a or tests, not both.")
if report_dir is not None and not os.path.isdir(report_dir):
usage("%s is not a directory" % report_dir)
testlist = get_tests(args)
elif opt_all or opt_list:
testlist = get_tests(os.listdir(tests_dir))
usage("Specify either -a or tests.")
# Running tests requires the sample dumps repository to be present.
if not os.path.isdir(dumps_dir):
ERR("Could not find sigrok-dumps repository at %s" % dumps_dir)
results, errors = run_tests(testlist, fix=opt_fix)
errs, diffs = get_run_tests_error_diff_counts(results)
run_tests(testlist, fix=True)
except Exception as e:
print("Error: %s" % str(e))