-s Show test(s)
-r Run test(s)
-f Fix failed test(s)
+ -c Report decoder code coverage
-R <directory> Save test reports to <directory>
<test> Protocol decoder name ("i2c") and optionally test name ("i2c/rtc")""")
sys.exit()
def get_tests(testnames):
- tests = []
+ tests = {}
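+ # map each PD name to the list of its parsed testfiles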
for testspec in testnames:
- # Optional testspec in the form i2c/rtc
+ # Optional testspec in the form pd/testcase/type/class
tc = op_type = op_class = None
ts = testspec.strip("/").split("/")
pd = ts.pop(0)
+ tests[pd] = []
if ts:
tc = ts.pop(0)
if ts:
if not os.path.exists(path):
# PD doesn't have any tests yet
continue
- tests.append(parse_testfile(path, pd, tc, op_type, op_class))
+ tests[pd].append(parse_testfile(path, pd, tc, op_type, op_class))
return tests
return result
+# runtc's stdout can have lines like:
+# coverage: lines=161 missed=2 coverage=99%
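+# parse_stats() turns that into, e.g.:
+#   {'coverage': [{'lines': '161', 'missed': '2', 'coverage': '99%'}]}
+# Values stay strings; each input line adds one dict under its leading
+# key. The code below also expects a scope=<pd> field on coverage lines.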
+def parse_stats(text):
+ stats = {}
+ for line in text.strip().split('\n'):
+ fields = line.split()
+ key = fields.pop(0).strip(':')
+ if key not in stats:
+ stats[key] = []
+ stats[key].append({})
+ for f in fields:
+ k, v = f.split('=')
+ stats[key][-1][k] = v
+
+ return stats
+
+
+# take result set of all tests in a PD, and summarize which lines
+# were not covered by any of the tests.
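+# e.g. records whose missed_lines are "12" and "12,15" yield ['12']:
+# only line 12 was missed by every test.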
+def coverage_sum(cvglist):
+ lines = 0
+ missed_lines = {}
+ for record in cvglist:
+ # each record reports the same total line count for this PD
+ lines = int(record['lines'])
+ if 'missed_lines' not in record:
+ continue
+ for linespec in record['missed_lines'].split(','):
+ if linespec not in missed_lines:
+ missed_lines[linespec] = 1
+ else:
+ missed_lines[linespec] += 1
+
+ # keep only the lines that show up as missed in every record,
+ # i.e. lines that no test covered at all
+ final_missed = []
+ for linespec in missed_lines:
+ if missed_lines[linespec] != len(cvglist):
+ continue
+ final_missed.append(linespec)
+
+ return lines, final_missed
+
+
def run_tests(tests, fix=False):
errors = 0
results = []
- cmd = os.path.join(tests_dir, 'runtc')
- for tclist in tests:
- for tc in tclist:
- args = [cmd]
- if DEBUG > 1:
- args.append('-d')
- for pd in tc['pdlist']:
- args.extend(['-P', pd['name']])
- for label, probe in pd['probes']:
- args.extend(['-p', "%s=%d" % (label, probe)])
- for option, value in pd['options']:
- args.extend(['-o', "%s=%s" % (option, value)])
- args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
- for op in tc['output']:
- name = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
- opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
- if 'class' in op:
- opargs[-1] += ":%s" % op['class']
- name += "/%s" % op['class']
- if VERBOSE:
- dots = '.' * (60 - len(name) - 2)
- INFO("%s %s " % (name, dots), end='')
- results.append({
- 'testcase': name,
- })
- try:
- fd, outfile = mkstemp()
- os.close(fd)
- opargs.extend(['-f', outfile])
- DBG("Running %s" % (' '.join(args + opargs)))
- p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- if stdout:
- results[-1]['statistics'] = stdout.decode('utf-8').strip()
- if stderr:
- results[-1]['error'] = stderr.decode('utf-8').strip()
- errors += 1
- elif p.returncode != 0:
- # runtc indicated an error, but didn't output a
- # message on stderr about it
- results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
- if 'error' not in results[-1]:
- matchfile = os.path.join(decoders_dir, op['pd'], 'test', op['match'])
- DBG("Comparing with %s" % matchfile)
- try:
- diff = diff_error = None
- if op['type'] in ('annotation', 'python'):
- diff = diff_text(matchfile, outfile)
- elif op['type'] == 'binary':
- diff = compare_binary(matchfile, outfile)
+ cmd = [os.path.join(tests_dir, 'runtc')]
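+ # With -c, runtc writes a textual coverage report into a temp file;
+ # gen_report() later copies its contents into the test report.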
+ if opt_coverage:
+ fd, coverage = mkstemp()
+ os.close(fd)
+ cmd.extend(['-c', coverage])
+ else:
+ coverage = None
+ for pd in sorted(tests.keys()):
+ pd_cvg = []
+ for tclist in tests[pd]:
+ for tc in tclist:
+ args = cmd.copy()
+ if DEBUG > 1:
+ args.append('-d')
+ # Set up PD stack for this test.
+ for spd in tc['pdlist']:
+ args.extend(['-P', spd['name']])
+ for label, probe in spd['probes']:
+ args.extend(['-p', "%s=%d" % (label, probe)])
+ for option, value in spd['options']:
+ args.extend(['-o', "%s=%s" % (option, value)])
+ args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
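+ # base invocation so far, e.g. (names illustrative):
+ # runtc [-c <covfile>] -P i2c -p scl=0 -p sda=1 -i <dumpfile>
+ # output and match options are appended per output spec below.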
+ for op in tc['output']:
+ name = "%s/%s/%s" % (pd, tc['name'], op['type'])
+ opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
+ if 'class' in op:
+ opargs[-1] += ":%s" % op['class']
+ name += "/%s" % op['class']
+ if VERBOSE:
+ dots = '.' * (60 - len(name) - 2)
+ INFO("%s %s " % (name, dots), end='')
+ results.append({
+ 'testcase': name,
+ })
+ try:
+ fd, outfile = mkstemp()
+ os.close(fd)
+ opargs.extend(['-f', outfile])
+ DBG("Running %s" % (' '.join(args + opargs)))
+ p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = p.communicate()
+ if stdout:
+ # statistics and coverage data on stdout
+ results[-1].update(parse_stats(stdout.decode('utf-8')))
+ if stderr:
+ results[-1]['error'] = stderr.decode('utf-8').strip()
+ errors += 1
+ elif p.returncode != 0:
+ # runtc indicated an error, but didn't output a
+ # message on stderr about it
+ results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
+ if 'error' not in results[-1]:
+ matchfile = os.path.join(decoders_dir, op['pd'], 'test', op['match'])
+ DBG("Comparing with %s" % matchfile)
+ try:
+ diff = diff_error = None
+ if op['type'] in ('annotation', 'python'):
+ diff = diff_text(matchfile, outfile)
+ elif op['type'] == 'binary':
+ diff = compare_binary(matchfile, outfile)
+ else:
+ diff = ["Unsupported output type '%s'." % op['type']]
+ except Exception as e:
+ diff_error = e
+ if fix:
+ if diff or diff_error:
+ copy(outfile, matchfile)
+ DBG("Wrote %s" % matchfile)
else:
- diff = ["Unsupported output type '%s'." % op['type']]
- except Exception as e:
- diff_error = e
- if fix:
- if diff or diff_error:
- copy(outfile, matchfile)
- DBG("Wrote %s" % matchfile)
+ if diff:
+ results[-1]['diff'] = diff
+ elif diff_error is not None:
+ raise diff_error
+ except Exception as e:
+ results[-1]['error'] = str(e)
+ finally:
+ if coverage:
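+ # remember the report path; gen_report() reads it back
+ # before the file is unlinked further below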
+ results[-1]['coverage_report'] = coverage
+ os.unlink(outfile)
+ if VERBOSE:
+ if 'diff' in results[-1]:
+ INFO("Output mismatch")
+ elif 'error' in results[-1]:
+ error = results[-1]['error']
+ if len(error) > 20:
+ error = error[:17] + '...'
+ INFO(error)
+ elif 'coverage' in results[-1]:
+ # report the coverage of this PD only, not of other
+ # decoders the stack pulled in just for the test
+ for record in results[-1]['coverage']:
+ if record['scope'] == pd:
+ INFO(record['coverage'])
+ break
else:
- if diff:
- results[-1]['diff'] = diff
- elif diff_error is not None:
- raise diff_error
- except Exception as e:
- results[-1]['error'] = str(e)
- finally:
- os.unlink(outfile)
- if VERBOSE:
- if 'diff' in results[-1]:
- INFO("Output mismatch")
- elif 'error' in results[-1]:
- error = results[-1]['error']
- if len(error) > 20:
- error = error[:17] + '...'
- INFO(error)
- else:
- INFO("OK")
- gen_report(results[-1])
+ INFO("OK")
+ gen_report(results[-1])
+ if coverage:
+ os.unlink(coverage)
+ # only keep track of coverage records for this PD,
+ # not others in the stack just used for testing.
+ # no 'coverage' key if runtc failed before producing stats
+ for cvg in results[-1].get('coverage', []):
+ if cvg['scope'] == pd:
+ pd_cvg.append(cvg)
+ if VERBOSE and opt_coverage and len(pd_cvg) > 1:
+ # report total coverage of this PD, across all the tests
+ # that were done on it.
+ total_lines, missed_lines = coverage_sum(pd_cvg)
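+ # e.g. 2 never-covered lines out of 161 total -> 98% (%d truncates)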
+ pd_coverage = 100 - (float(len(missed_lines)) / total_lines * 100)
+ dots = '.' * (54 - len(pd) - 2)
+ INFO("%s total %s %d%%" % (pd, dots, pd_coverage))
return results, errors
out.append("Test output mismatch:")
out.extend(result['diff'])
out.append('')
- if 'statistics' in result:
- out.extend(["Statistics:", result['statistics']])
+ if 'coverage_report' in result:
+ with open(result['coverage_report']) as f:
+ out.append(f.read())
out.append('')
if out:
def show_tests(tests):
- for tclist in tests:
- for tc in tclist:
- print("Testcase: %s/%s" % (tc['pd'], tc['name']))
- for pd in tc['pdlist']:
- print(" Protocol decoder: %s" % pd['name'])
- for label, probe in pd['probes']:
- print(" Probe %s=%d" % (label, probe))
- for option, value in pd['options']:
- print(" Option %s=%d" % (option, value))
- if 'stack' in tc:
- print(" Stack: %s" % ' '.join(tc['stack']))
- print(" Input: %s" % tc['input'])
- for op in tc['output']:
- print(" Output:\n Protocol decoder: %s" % op['pd'])
- print(" Type: %s" % op['type'])
- if 'class' in op:
- print(" Class: %s" % op['class'])
- print(" Match: %s" % op['match'])
- print()
+ for pd in sorted(tests.keys()):
+ for tclist in tests[pd]:
+ for tc in tclist:
+ print("Testcase: %s/%s" % (tc['pd'], tc['name']))
+ # 'spd' avoids shadowing the outer per-PD loop variable
+ for spd in tc['pdlist']:
+ print(" Protocol decoder: %s" % spd['name'])
+ for label, probe in spd['probes']:
+ print(" Probe %s=%d" % (label, probe))
+ for option, value in spd['options']:
+ print(" Option %s=%s" % (option, value))
+ if 'stack' in tc:
+ print(" Stack: %s" % ' '.join(tc['stack']))
+ print(" Input: %s" % tc['input'])
+ for op in tc['output']:
+ print(" Output:\n Protocol decoder: %s" % op['pd'])
+ print(" Type: %s" % op['type'])
+ if 'class' in op:
+ print(" Class: %s" % op['class'])
+ print(" Match: %s" % op['match'])
+ print()
def list_tests(tests):
- for tclist in tests:
- for tc in tclist:
- for op in tc['output']:
- line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
- if 'class' in op:
- line += "/%s" % op['class']
- print(line)
+ for pd in sorted(tests.keys()):
+ for tclist in tests[pd]:
+ for tc in tclist:
+ for op in tc['output']:
+ line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
+ if 'class' in op:
+ line += "/%s" % op['class']
+ print(line)
#
if len(sys.argv) == 1:
usage()
-opt_all = opt_run = opt_show = opt_list = opt_fix = False
+opt_all = opt_run = opt_show = opt_list = opt_fix = opt_coverage = False
report_dir = None
-opts, args = getopt(sys.argv[1:], "dvarslfR:S:")
+opts, args = getopt(sys.argv[1:], "dvarslfcR:S:")
for opt, arg in opts:
if opt == '-d':
DEBUG += 1
opt_list = True
elif opt == '-f':
opt_fix = True
+ elif opt == '-c':
+ opt_coverage = True
elif opt == '-R':
report_dir = arg
elif opt == '-S':