3 ## This file is part of the sigrok-test project.
5 ## Copyright (C) 2013 Bert Vermeulen <bert@biot.com>
7 ## This program is free software: you can redistribute it and/or modify
8 ## it under the terms of the GNU General Public License as published by
9 ## the Free Software Foundation, either version 3 of the License, or
10 ## (at your option) any later version.
12 ## This program is distributed in the hope that it will be useful,
13 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 ## GNU General Public License for more details.
17 ## You should have received a copy of the GNU General Public License
18 ## along with this program. If not, see <http://www.gnu.org/licenses/>.
24 from getopt import getopt
25 from tempfile import mkstemp
26 from subprocess import Popen, PIPE
27 from difflib import Differ
28 from hashlib import md5
29 from shutil import copy
# NOTE(review): this SOURCE is a sampled listing — each line carries its
# original file line number and interior lines (the exception bodies,
# presumably bare `pass` statements) are missing. Code kept byte-identical.
# Raised by the test.conf parser for a structurally unknown line
# (handled at original line 168/169 below).
35 class E_syntax(Exception):
# Raised by the test.conf parser for a line with invalid key/value syntax
# (handled at original line 165/166 below).
37 class E_badline(Exception):
# Fragments of the console-output helpers and usage(). Only scattered lines
# survive in this sampled listing; indentation has been collapsed.
# INFO(): prints progress text, with a caller-controlled line ending
# (used with end='' at original line 331 to print the dotted status line).
40 def INFO(msg, end='\n'):
# (original line 52 belongs to a different helper — presumably ERR(),
# since it writes to stderr. TODO confirm against the full file.)
52 print(msg, file=sys.stderr)
# (original lines 57-68 belong to usage(): an optional error message
# followed by the help text; presumably exits afterwards — TODO confirm.)
57 print(msg.strip() + '\n')
58 print("""Usage: testpd [-dvalsrfcR] [<test1> <test2> ...]
65 -f Fix failed test(s) / create initial output for new test(s)
66 -c Report decoder code coverage
67 -R <directory> Save test reports to <directory>
68 <test> Protocol decoder name ("i2c") and optionally test name ("i2c/rtc")""")
# Fragment of check_tclist(tc): the `def` line (original line 72) is not in
# this sampled listing. Validates one parsed testcase dict and returns a
# human-readable error string on failure (presumably None/falsy when valid —
# TODO confirm; the caller at original line 194-196 treats the return value
# as the error message).
73 if 'pdlist' not in tc or not tc['pdlist']:
74 return("No protocol decoders")
75 if 'input' not in tc or not tc['input']:
# (the corresponding "no input" return, original line 76, is not shown)
77 if 'output' not in tc or not tc['output']:
79 for op in tc['output']:
# Returned from inside the per-output loop; the guarding condition
# (original line 80) is missing here — presumably it checks op['match'].
81 return("No match in output")
# Fragment of parse_testfile(): parses a per-PD test.conf into a list of
# testcase dicts, then optionally narrows the result to one testcase
# (tc), one output type (op_type) and one output class (op_class).
# Many interior lines are missing from this sampled listing.
86 def parse_testfile(path, pd, tc, op_type, op_class):
87 DBG("Opening '%s'" % path)
# Whole-file read, split on newlines; blank lines and '#' comments skipped.
89 for line in open(path).read().split('\n'):
92 if len(line) == 0 or line[0] == "#":
# A file must open with a "test" record before anything else.
95 if not tclist and f[0] != "test":
109 elif key == 'protocol-decoder':
119 # Always needs <key> <value>
# Channel/option arguments arrive as name=value tokens; note the values
# stay strings here (relevant to the %d-formatting issue in show_tests).
125 opt, val = b.split('=')
131 pd_spec['channels'].append([opt, val])
133 pd_spec['options'].append([opt, val])
136 tclist[-1]['pdlist'].append(pd_spec)
140 tclist[-1]['stack'] = f
144 tclist[-1]['input'] = f[0]
145 elif key == 'output':
152 # Always needs <key> <value>
162 tclist[-1]['output'].append(op_spec)
# Parse errors are reported (via ERR) but parsing presumably continues /
# aborts per the missing surrounding code — TODO confirm.
165 except E_badline as e:
166 ERR("Invalid syntax in %s: line '%s'" % (path, line))
168 except E_syntax as e:
169 ERR("Unable to parse %s: unknown line '%s'" % (path, line))
172 # If a specific testcase was requested, keep only that one.
179 # ...and a specific output type
180 if op_type is not None:
182 for op in target_tc['output']:
183 if op['type'] == op_type:
184 # ...and a specific output class
# 'class' is optional on an output spec, hence the membership test.
185 if op_class is None or ('class' in op and op['class'] == op_class):
186 target_oplist.append(op)
187 DBG("match on [%s]" % str(op))
188 target_tc['output'] = target_oplist
189 if target_tc is None:
# Each surviving testcase is validated before being returned.
194 error = check_tclist(t)
196 ERR("Error in %s: %s" % (path, error))
# Fragment of get_tests(): resolves each user-supplied test spec
# ("pd[/testcase[/type[/class]]]") to a parsed test.conf. Interior lines
# (e.g. the unpacking of ts into pd/tc/op_type/op_class, lines 208-215)
# are missing from this sampled listing.
202 def get_tests(testnames):
204 for testspec in testnames:
205 # Optional testspec in the form pd/testcase/type/class
206 tc = op_type = op_class = None
207 ts = testspec.strip("/").split("/")
216 path = os.path.join(tests_dir, pd)
217 if not os.path.isdir(path):
218 # User specified non-existent PD
219 raise Exception("%s not found." % path)
220 path = os.path.join(tests_dir, pd, "test.conf")
221 if not os.path.exists(path):
222 # PD doesn't have any tests yet
# (presumably `continue` on the missing line 223 — TODO confirm)
224 tests[pd].append(parse_testfile(path, pd, tc, op_type, op_class))
# Fragment of diff_text(): compares two text files with difflib.Differ and
# collects only the added/removed lines. The Differ construction, the
# `diff` list initialization and the return are on lines missing from this
# sampled listing.
229 def diff_text(f1, f2):
230 t1 = open(f1).readlines()
231 t2 = open(f2).readlines()
234 for line in d.compare(t1, t2):
# Differ prefixes: '- ' only in t1, '+ ' only in t2; '  ' / '? ' ignored.
235 if line[:2] in ('- ', '+ '):
236 diff.append(line.strip())
# Fragment of compare_binary(): equality check of two files via md5 digests
# (from hashlib, imported at the top of the file). The md5() constructions
# (lines 242/244) and the success/return paths are missing from this
# sampled listing; on mismatch a one-element message list is produced,
# mirroring diff_text()'s list-of-lines result shape.
241 def compare_binary(f1, f2):
243 h1.update(open(f1, 'rb').read())
245 h2.update(open(f2, 'rb').read())
246 if h1.digest() == h2.digest():
249 result = ["Binary output does not match."]
254 # runtc's stdout can have lines like:
255 # coverage: lines=161 missed=2 coverage=99%
# Fragment of parse_stats(): parses such lines into a dict keyed by the
# first token (colon stripped), each key holding a list of k=v dicts.
# The stats initialization, the k/v split and the return are on lines
# missing from this sampled listing.
256 def parse_stats(text):
258 for line in text.strip().split('\n'):
259 fields = line.split()
# e.g. "coverage:" -> key "coverage"
260 key = fields.pop(0).strip(':')
263 stats[key].append({})
266 stats[key][-1][k] = v
271 # take result set of all tests in a PD, and summarize which lines
272 # were not covered by any of the tests.
# Fragment of coverage_sum(): returns (total line count, list of
# "file:line" specs missed by every record). Initializations of
# `missed`, `missed_lines` and `final_missed` are on missing lines.
273 def coverage_sum(cvglist):
277 for record in cvglist:
# `lines` is overwritten each iteration — presumably every record of one
# PD reports the same total; TODO confirm against the full file.
278 lines = int(record['lines'])
279 missed += int(record['missed'])
280 if 'missed_lines' not in record:
# Count how many records missed each individual line spec.
282 for linespec in record['missed_lines'].split(','):
283 if linespec not in missed_lines:
284 missed_lines[linespec] = 1
286 missed_lines[linespec] += 1
288 # keep only those lines that didn't show up in every non-summary record
290 for linespec in missed_lines:
# NOTE(review): the comparison is `!=` while the comment at line 288 says
# "didn't show up in every record"; the branch body on the missing line
# 292 may invert this — TODO confirm before relying on the semantics.
291 if missed_lines[linespec] != len(cvglist):
293 final_missed.append(linespec)
295 return lines, final_missed
# Fragment of run_tests(): runs every testcase through the external `runtc`
# binary, compares each output against its match file, optionally fixes
# match files (fix=True) and optionally collects per-PD coverage. Returns
# (results, errors). Many interior lines — including the results/errors
# initialization, the try: openers for the visible except: lines, and the
# per-testcase loop header — are missing from this sampled listing.
298 def run_tests(tests, fix=False):
301 cmd = [os.path.join(runtc_dir, 'runtc')]
# A temp file (mkstemp from tempfile) receives runtc's coverage report.
303 fd, coverage = mkstemp()
305 cmd.extend(['-c', coverage])
308 for pd in sorted(tests.keys()):
310 for tclist in tests[pd]:
315 # Set up PD stack for this test.
316 for spd in tc['pdlist']:
317 args.extend(['-P', spd['name']])
# Channels are formatted with %d, options with %s — channel values are
# expected to be numeric, option values arbitrary strings.
318 for label, channel in spd['channels']:
319 args.extend(['-p', "%s=%d" % (label, channel)])
320 for option, value in spd['options']:
321 args.extend(['-o', "%s=%s" % (option, value)])
322 args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
323 for op in tc['output']:
# Human-readable test name: pd/testcase/type[/class].
324 name = "%s/%s/%s" % (pd, tc['name'], op['type'])
325 opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
327 opargs[-1] += ":%s" % op['class']
328 name += "/%s" % op['class']
# Dotted status line padded to a fixed width.
330 dots = '.' * (77 - len(name) - 2)
331 INFO("%s %s " % (name, dots), end='')
# Fresh temp file for this output run; runtc writes into it via -f.
336 fd, outfile = mkstemp()
338 opargs.extend(['-f', outfile])
339 DBG("Running %s" % (' '.join(args + opargs)))
340 p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
341 stdout, stderr = p.communicate()
343 # statistics and coverage data on stdout
344 results[-1].update(parse_stats(stdout.decode('utf-8')))
# Any stderr output is treated as an error message...
346 results[-1]['error'] = stderr.decode('utf-8').strip()
348 elif p.returncode != 0:
349 # runtc indicated an error, but didn't output a
350 # message on stderr about it
351 results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
# Only compare output when runtc itself succeeded.
352 if 'error' not in results[-1]:
353 matchfile = os.path.join(tests_dir, op['pd'], op['match'])
354 DBG("Comparing with %s" % matchfile)
356 diff = diff_error = None
357 if op['type'] in ('annotation', 'python'):
358 diff = diff_text(matchfile, outfile)
359 elif op['type'] == 'binary':
360 diff = compare_binary(matchfile, outfile)
362 diff = ["Unsupported output type '%s'." % op['type']]
363 except Exception as e:
# fix mode: on any mismatch, overwrite the match file with the
# freshly produced output.
366 if diff or diff_error:
367 copy(outfile, matchfile)
368 DBG("Wrote %s" % matchfile)
371 results[-1]['diff'] = diff
372 elif diff_error is not None:
374 except Exception as e:
375 results[-1]['error'] = str(e)
378 results[-1]['coverage_report'] = coverage
# 'exception' type tests succeed when runtc reports the expected
# srd error; op['match'] is used as a regex fragment here.
380 if op['type'] == 'exception' and 'error' in results[-1]:
381 # filter out the exception we were looking for
382 reg = "^Error: srd: %s:" % op['match']
383 if re.match(reg, results[-1]['error']):
384 # found it, not an error
385 results[-1].pop('error')
388 if 'diff' in results[-1]:
389 INFO("Output mismatch")
390 elif 'error' in results[-1]:
391 error = results[-1]['error']
# Truncate long error messages for the one-line status display.
393 error = error[:17] + '...'
395 elif 'coverage' in results[-1]:
396 # report coverage of this PD
397 for record in results[-1]['coverage']:
398 # but not others used in the stack
399 # as part of the test.
400 if record['scope'] == pd:
401 INFO(record['coverage'])
405 gen_report(results[-1])
408 # only keep track of coverage records for this PD,
409 # not others in the stack just used for testing.
410 for cvg in results[-1]['coverage']:
411 if cvg['scope'] == pd:
413 if opt_coverage and len(pd_cvg) > 1:
414 # report total coverage of this PD, across all the tests
415 # that were done on it.
416 total_lines, missed_lines = coverage_sum(pd_cvg)
# NOTE(review): divides by total_lines — presumably never zero for a
# PD with coverage records; TODO confirm no guard is needed.
417 pd_coverage = 100 - (float(len(missed_lines)) / total_lines * 100)
419 dots = '.' * (54 - len(pd) - 2)
420 INFO("%s total %s %d%%" % (pd, dots, pd_coverage))
422 # generate a missing lines list across all the files in
425 for entry in missed_lines:
426 filename, line = entry.split(':')
427 if filename not in files:
429 files[filename].append(line)
# Numeric sort of line numbers within each file.
431 for filename in sorted(files.keys()):
432 line_list = ','.join(sorted(files[filename], key=int))
433 text += "%s: %s\n" % (filename, line_list)
434 open(os.path.join(report_dir, pd + "_total"), 'w').write(text)
437 return results, errors
# Fragment: the diff-counting branch and the return (presumably
# `return errs, diffs` — TODO confirm) are on lines missing from this
# sampled listing.
439 def get_run_tests_error_diff_counts(results):
440 """Get error and diff counters from run_tests() results."""
443 for result in results:
444 if 'error' in result:
# Fragment of gen_report(): formats one run_tests() result dict into a
# text report and writes it to report_dir (a module-level global, set from
# the -R option). The `out` list initialization and the 'diff' branch
# opener are on lines missing from this sampled listing.
451 def gen_report(result):
453 if 'error' in result:
455 out.append(result['error'])
458 out.append("Test output mismatch:")
459 out.extend(result['diff'])
461 if 'coverage_report' in result:
# Inlines the temp coverage file produced by run_tests().
462 out.append(open(result['coverage_report'], 'r').read())
466 text = "Testcase: %s\n" % result['testcase']
467 text += '\n'.join(out)
# Report filename derived from the testcase path, '/' flattened to '_'.
472 filename = result['testcase'].replace('/', '_')
473 open(os.path.join(report_dir, filename), 'w').write(text)
# Fragment of show_tests(): pretty-prints every parsed testcase.
# NOTE(review): the inner `for pd in tc['pdlist']` (line 483) shadows the
# outer `pd` loop variable from line 479.
478 def show_tests(tests):
479 for pd in sorted(tests.keys()):
480 for tclist in tests[pd]:
482 print("Testcase: %s/%s" % (tc['pd'], tc['name']))
483 for pd in tc['pdlist']:
484 print(" Protocol decoder: %s" % pd['name'])
485 for label, channel in pd['channels']:
486 print(" Channel %s=%d" % (label, channel))
487 for option, value in pd['options']:
# NOTE(review): likely bug — option values come from string splitting in
# parse_testfile (line 125) and run_tests formats them as "%s=%s"
# (line 321); "%d" here will raise TypeError for any non-integer option
# value. Should presumably be "%s"; not fixed here because interior
# lines of this function are missing from the sampled listing.
488 print(" Option %s=%d" % (option, value))
490 print(" Stack: %s" % ' '.join(tc['stack']))
491 print(" Input: %s" % tc['input'])
492 for op in tc['output']:
493 print(" Output:\n Protocol decoder: %s" % op['pd'])
494 print(" Type: %s" % op['type'])
496 print(" Class: %s" % op['class'])
497 print(" Match: %s" % op['match'])
# Fragment of list_tests(): prints one "pd/testcase/type[/class]" line per
# output spec — the same naming scheme run_tests uses for status lines.
# The class-presence check (line 507) and the final print are on lines
# missing from this sampled listing.
501 def list_tests(tests):
502 for pd in sorted(tests.keys()):
503 for tclist in tests[pd]:
505 for op in tc['output']:
506 line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
508 line += "/%s" % op['class']
# Fragment of the top-level driver (presumably guarded by
# `if __name__ == "__main__":` on a missing line — TODO confirm):
# resolves directories relative to this script, parses options with
# getopt, builds the test list and dispatches to run/show/list.
# Directory layout: runtc lives next to this script; sigrok-dumps is a
# sibling of base_dir; test configs live under runtc_dir/test.
517 runtc_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
518 base_dir = os.path.abspath(os.path.join(os.curdir, runtc_dir, os.path.pardir))
519 dumps_dir = os.path.abspath(os.path.join(base_dir, os.path.pardir, 'sigrok-dumps'))
520 tests_dir = os.path.abspath(os.path.join(runtc_dir, 'test'))
# No arguments at all: show usage (the usage() call on line 523 is missing
# from this sampled listing).
522 if len(sys.argv) == 1:
525 opt_all = opt_run = opt_show = opt_list = opt_fix = opt_coverage = False
528 opts, args = getopt(sys.argv[1:], "dvarslfcR:S:")
529 except Exception as e:
530 usage('error while parsing command line arguments: {}'.format(e))
# Per-option handling (lines 532-552) is missing from this listing.
531 for opt, arg in opts:
# Mutually exclusive mode checks.
553 if opt_run and opt_show:
554 usage("Use either -s or -r, not both.")
556 usage("Specify either -a or tests, not both.")
557 if report_dir is not None and not os.path.isdir(report_dir):
558 usage("%s is not a directory" % report_dir)
# Explicit test specs take precedence; -a/-l fall back to every PD dir.
563 testlist = get_tests(args)
564 elif opt_all or opt_list:
565 testlist = get_tests(os.listdir(tests_dir))
567 usage("Specify either -a or tests.")
570 if not os.path.isdir(dumps_dir):
571 ERR("Could not find sigrok-dumps repository at %s" % dumps_dir)
573 results, errors = run_tests(testlist, fix=opt_fix)
575 errs, diffs = get_run_tests_error_diff_counts(results)
# (presumably the -f re-run path — the surrounding condition is missing)
585 run_tests(testlist, fix=True)
588 except Exception as e:
589 print("Error: %s" % str(e))