X-Git-Url: https://sigrok.org/gitweb/?p=libsigrokdecode.git;a=blobdiff_plain;f=tests%2Fpdtest;h=55718be0a8e344713cd7bb3276d59b95e3f5b6bf;hp=158b3e03900bb895fd7fca21e360639f16a5559b;hb=83df730dd21ce52670a593455cfe5e732989a90d;hpb=ea1fc7b1a9d857c7ad147c1b81c6e4b3aa1cd3b6

diff --git a/tests/pdtest b/tests/pdtest
index 158b3e0..55718be 100755
--- a/tests/pdtest
+++ b/tests/pdtest
@@ -20,6 +20,7 @@
 
 import os
 import sys
+import re
 from getopt import getopt
 from tempfile import mkstemp
 from subprocess import Popen, PIPE
@@ -62,6 +63,7 @@ def usage(msg=None):
   -s  Show test(s)
   -r  Run test(s)
   -f  Fix failed test(s)
+  -c  Report decoder code coverage
   -R <directory>  Save test reports to <directory>
   <test>  Protocol decoder name ("i2c") and optionally test name ("i2c/icc")""")
     sys.exit()
@@ -109,7 +111,7 @@ def parse_testfile(path, pd, tc, op_type, op_class):
             raise E_syntax
         pd_spec = {
             'name': f.pop(0),
-            'probes': [],
+            'channels': [],
             'options': [],
         }
         while len(f):
@@ -121,12 +123,12 @@ def parse_testfile(path, pd, tc, op_type, op_class):
             if '=' not in b:
                 raise E_syntax
             opt, val = b.split('=')
-            if a == 'probe':
+            if a == 'channel':
                 try:
                     val = int(val)
                 except:
                     raise E_syntax
-                pd_spec['probes'].append([opt, val])
+                pd_spec['channels'].append([opt, val])
             elif a == 'option':
                 pd_spec['options'].append([opt, val])
             else:
@@ -198,12 +200,13 @@ def parse_testfile(path, pd, tc, op_type, op_class):
 
 
 def get_tests(testnames):
-    tests = []
+    tests = {}
     for testspec in testnames:
-        # Optional testspec in the form i2c/rtc
+        # Optional testspec in the form pd/testcase/type/class
         tc = op_type = op_class = None
         ts = testspec.strip("/").split("/")
         pd = ts.pop(0)
+        tests[pd] = []
         if ts:
             tc = ts.pop(0)
         if ts:
@@ -218,7 +221,7 @@ def get_tests(testnames):
         if not os.path.exists(path):
             # PD doesn't have any tests yet
             continue
-        tests.append(parse_testfile(path, pd, tc, op_type, op_class))
+        tests[pd].append(parse_testfile(path, pd, tc, op_type, op_class))
 
     return tests
 
@@ -248,87 +251,187 @@ def compare_binary(f1, f2):
     return result
 
 
+# runtc's stdout can have lines like:
+# coverage: lines=161 missed=2 coverage=99%
+def parse_stats(text):
+    stats = {}
+    for line in text.strip().split('\n'):
+        fields = line.split()
+        key = fields.pop(0).strip(':')
+        if key not in stats:
+            stats[key] = []
+        stats[key].append({})
+        for f in fields:
+            k, v = f.split('=')
+            stats[key][-1][k] = v
+
+    return stats
+
+
+# take result set of all tests in a PD, and summarize which lines
+# were not covered by any of the tests.
+
+def coverage_sum(cvglist):
+    lines = 0
+    missed = 0
+    missed_lines = {}
+    for record in cvglist:
+        lines = int(record['lines'])
+        missed += int(record['missed'])
+        if 'missed_lines' not in record:
+            continue
+        for linespec in record['missed_lines'].split(','):
+            if linespec not in missed_lines:
+                missed_lines[linespec] = 1
+            else:
+                missed_lines[linespec] += 1
+
+    # keep only those lines that didn't show up in every non-summary record
+    final_missed = []
+    for linespec in missed_lines:
+        if missed_lines[linespec] != len(cvglist):
+            continue
+        final_missed.append(linespec)
+
+    return lines, final_missed
+
+
 def run_tests(tests, fix=False):
     errors = 0
     results = []
-    cmd = os.path.join(tests_dir, 'runtc')
-    for tclist in tests:
-        for tc in tclist:
-            args = [cmd]
-            if DEBUG > 1:
-                args.append('-d')
-            for pd in tc['pdlist']:
-                args.extend(['-P', pd['name']])
-                for label, probe in pd['probes']:
-                    args.extend(['-p', "%s=%d" % (label, probe)])
-                for option, value in pd['options']:
-                    args.extend(['-o', "%s=%s" % (option, value)])
-            args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
-            for op in tc['output']:
-                name = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
-                opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
-                if 'class' in op:
-                    opargs[-1] += ":%s" % op['class']
-                    name += "/%s" % op['class']
-                if VERBOSE:
-                    dots = '.' * (60 - len(name) - 2)
-                    INFO("%s %s " % (name, dots), end='')
-                results.append({
-                    'testcase': name,
-                })
-                try:
-                    fd, outfile = mkstemp()
-                    os.close(fd)
-                    opargs.extend(['-f', outfile])
-                    DBG("Running %s" % (' '.join(args + opargs)))
-                    p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
-                    stdout, stderr = p.communicate()
-                    if stdout:
-                        results[-1]['statistics'] = stdout.decode('utf-8').strip()
-                    if stderr:
-                        results[-1]['error'] = stderr.decode('utf-8').strip()
-                        errors += 1
-                    elif p.returncode != 0:
-                        # runtc indicated an error, but didn't output a
-                        # message on stderr about it
-                        results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
-                    if 'error' not in results[-1]:
-                        matchfile = os.path.join(decoders_dir, op['pd'], 'test', op['match'])
-                        DBG("Comparing with %s" % matchfile)
-                        try:
-                            diff = diff_error = None
-                            if op['type'] in ('annotation', 'python'):
-                                diff = diff_text(matchfile, outfile)
-                            elif op['type'] == 'binary':
-                                diff = compare_binary(matchfile, outfile)
+    cmd = [os.path.join(tests_dir, 'runtc')]
+    if opt_coverage:
+        fd, coverage = mkstemp()
+        os.close(fd)
+        cmd.extend(['-c', coverage])
+    else:
+        coverage = None
+    for pd in sorted(tests.keys()):
+        pd_cvg = []
+        for tclist in tests[pd]:
+            for tc in tclist:
+                args = cmd.copy()
+                if DEBUG > 1:
+                    args.append('-d')
+                # Set up PD stack for this test.
+                for spd in tc['pdlist']:
+                    args.extend(['-P', spd['name']])
+                    for label, channel in spd['channels']:
+                        args.extend(['-p', "%s=%d" % (label, channel)])
+                    for option, value in spd['options']:
+                        args.extend(['-o', "%s=%s" % (option, value)])
+                args.extend(['-i', os.path.join(dumps_dir, tc['input'])])
+                for op in tc['output']:
+                    name = "%s/%s/%s" % (pd, tc['name'], op['type'])
+                    opargs = ['-O', "%s:%s" % (op['pd'], op['type'])]
+                    if 'class' in op:
+                        opargs[-1] += ":%s" % op['class']
+                        name += "/%s" % op['class']
+                    if VERBOSE:
+                        dots = '.' * (60 - len(name) - 2)
+                        INFO("%s %s " % (name, dots), end='')
+                    results.append({
+                        'testcase': name,
+                    })
+                    try:
+                        fd, outfile = mkstemp()
+                        os.close(fd)
+                        opargs.extend(['-f', outfile])
+                        DBG("Running %s" % (' '.join(args + opargs)))
+                        p = Popen(args + opargs, stdout=PIPE, stderr=PIPE)
+                        stdout, stderr = p.communicate()
+                        if stdout:
+                            # statistics and coverage data on stdout
+                            results[-1].update(parse_stats(stdout.decode('utf-8')))
+                        if stderr:
+                            results[-1]['error'] = stderr.decode('utf-8').strip()
+                            errors += 1
+                        elif p.returncode != 0:
+                            # runtc indicated an error, but didn't output a
+                            # message on stderr about it
+                            results[-1]['error'] = "Unknown error: runtc %d" % p.returncode
+                        if 'error' not in results[-1]:
+                            matchfile = os.path.join(decoders_dir, op['pd'], 'test', op['match'])
+                            DBG("Comparing with %s" % matchfile)
+                            try:
+                                diff = diff_error = None
+                                if op['type'] in ('annotation', 'python'):
+                                    diff = diff_text(matchfile, outfile)
+                                elif op['type'] == 'binary':
+                                    diff = compare_binary(matchfile, outfile)
+                                else:
+                                    diff = ["Unsupported output type '%s'." % op['type']]
+                            except Exception as e:
+                                diff_error = e
+                            if fix:
+                                if diff or diff_error:
+                                    copy(outfile, matchfile)
+                                    DBG("Wrote %s" % matchfile)
                             else:
-                                diff = ["Unsupported output type '%s'." % op['type']]
-                        except Exception as e:
-                            diff_error = e
-                        if fix:
-                            if diff or diff_error:
-                                copy(outfile, matchfile)
-                                DBG("Wrote %s" % matchfile)
+                                if diff:
+                                    results[-1]['diff'] = diff
+                                elif diff_error is not None:
+                                    raise diff_error
+                    except Exception as e:
+                        results[-1]['error'] = str(e)
+                    finally:
+                        if coverage:
+                            results[-1]['coverage_report'] = coverage
+                        os.unlink(outfile)
+                    if op['type'] == 'exception' and 'error' in results[-1]:
+                        # filter out the exception we were looking for
+                        reg = "^Error: srd: %s:" % op['match']
+                        if re.match(reg, results[-1]['error']):
+                            # found it, not an error
+                            results[-1].pop('error')
+                    if VERBOSE:
+                        if 'diff' in results[-1]:
+                            INFO("Output mismatch")
+                        elif 'error' in results[-1]:
+                            error = results[-1]['error']
+                            if len(error) > 20:
+                                error = error[:17] + '...'
+                            INFO(error)
+                        elif 'coverage' in results[-1]:
+                            # report coverage of this PD
+                            for record in results[-1]['coverage']:
+                                # but not others used in the stack
+                                # as part of the test.
+                                if record['scope'] == pd:
+                                    INFO(record['coverage'])
+                                    break
                         else:
-                            if diff:
-                                results[-1]['diff'] = diff
-                            elif diff_error is not None:
-                                raise diff_error
-                except Exception as e:
-                    results[-1]['error'] = str(e)
-                finally:
-                    os.unlink(outfile)
-                if VERBOSE:
-                    if 'diff' in results[-1]:
-                        INFO("Output mismatch")
-                    elif 'error' in results[-1]:
-                        error = results[-1]['error']
-                        if len(error) > 20:
-                            error = error[:17] + '...'
-                        INFO(error)
-                    else:
-                        INFO("OK")
-                gen_report(results[-1])
+                            INFO("OK")
+                    gen_report(results[-1])
+                    if coverage:
+                        os.unlink(coverage)
+                        # only keep track of coverage records for this PD,
+                        # not others in the stack just used for testing.
+                        for cvg in results[-1]['coverage']:
+                            if cvg['scope'] == pd:
+                                pd_cvg.append(cvg)
+        if opt_coverage and len(pd_cvg) > 1:
+            # report total coverage of this PD, across all the tests
+            # that were done on it.
+            total_lines, missed_lines = coverage_sum(pd_cvg)
+            pd_coverage = 100 - (float(len(missed_lines)) / total_lines * 100)
+            if VERBOSE:
+                dots = '.' * (54 - len(pd) - 2)
+                INFO("%s total %s %d%%" % (pd, dots, pd_coverage))
+            if report_dir:
+                # generate a missing lines list across all the files in
+                # the PD
+                files = {}
+                for entry in missed_lines:
+                    filename, line = entry.split(':')
+                    if filename not in files:
+                        files[filename] = []
+                    files[filename].append(line)
+                text = ''
+                for filename in sorted(files.keys()):
+                    line_list = ','.join(sorted(files[filename], key=int))
+                    text += "%s: %s\n" % (filename, line_list)
+                open(os.path.join(report_dir, pd + "_total"), 'w').write(text)
+
     return results, errors
@@ -343,8 +446,8 @@ def gen_report(result):
         out.append("Test output mismatch:")
         out.extend(result['diff'])
         out.append('')
-    if 'statistics' in result:
-        out.extend(["Statistics:", result['statistics']])
+    if 'coverage_report' in result:
+        out.append(open(result['coverage_report'], 'r').read())
         out.append('')
 
     if out:
@@ -361,35 +464,37 @@ def gen_report(result):
 
 
 def show_tests(tests):
-    for tclist in tests:
-        for tc in tclist:
-            print("Testcase: %s/%s" % (tc['pd'], tc['name']))
-            for pd in tc['pdlist']:
-                print("  Protocol decoder: %s" % pd['name'])
-                for label, probe in pd['probes']:
-                    print("    Probe %s=%d" % (label, probe))
-                for option, value in pd['options']:
-                    print("    Option %s=%d" % (option, value))
-            if 'stack' in tc:
-                print("  Stack: %s" % ' '.join(tc['stack']))
-            print("  Input: %s" % tc['input'])
-            for op in tc['output']:
-                print("  Output:\n    Protocol decoder: %s" % op['pd'])
-                print("    Type: %s" % op['type'])
-                if 'class' in op:
-                    print("    Class: %s" % op['class'])
-                print("    Match: %s" % op['match'])
-            print()
+    for pd in sorted(tests.keys()):
+        for tclist in tests[pd]:
+            for tc in tclist:
+                print("Testcase: %s/%s" % (tc['pd'], tc['name']))
+                for pd in tc['pdlist']:
+                    print("  Protocol decoder: %s" % pd['name'])
+                    for label, channel in pd['channels']:
+                        print("    Channel %s=%d" % (label, channel))
+                    for option, value in pd['options']:
+                        print("    Option %s=%d" % (option, value))
+                if 'stack' in tc:
+                    print("  Stack: %s" % ' '.join(tc['stack']))
+                print("  Input: %s" % tc['input'])
+                for op in tc['output']:
+                    print("  Output:\n    Protocol decoder: %s" % op['pd'])
+                    print("    Type: %s" % op['type'])
+                    if 'class' in op:
+                        print("    Class: %s" % op['class'])
+                    print("    Match: %s" % op['match'])
+                print()
 
 
 def list_tests(tests):
-    for tclist in tests:
-        for tc in tclist:
-            for op in tc['output']:
-                line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
-                if 'class' in op:
-                    line += "/%s" % op['class']
-                print(line)
+    for pd in sorted(tests.keys()):
+        for tclist in tests[pd]:
+            for tc in tclist:
+                for op in tc['output']:
+                    line = "%s/%s/%s" % (tc['pd'], tc['name'], op['type'])
+                    if 'class' in op:
+                        line += "/%s" % op['class']
+                    print(line)
 
 
 #
@@ -405,9 +510,9 @@ decoders_dir = os.path.abspath(os.path.join(base_dir, 'decoders'))
 if len(sys.argv) == 1:
     usage()
 
-opt_all = opt_run = opt_show = opt_list = opt_fix = False
+opt_all = opt_run = opt_show = opt_list = opt_fix = opt_coverage = False
 report_dir = None
-opts, args = getopt(sys.argv[1:], "dvarslfR:S:")
+opts, args = getopt(sys.argv[1:], "dvarslfcR:S:")
 for opt, arg in opts:
     if opt == '-d':
         DEBUG += 1
@@ -423,6 +528,8 @@ for opt, arg in opts:
         opt_list = True
     elif opt == '-f':
         opt_fix = True
+    elif opt == '-c':
+        opt_coverage = True
     elif opt == '-R':
        report_dir = arg
     elif opt == '-S':
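
Note on the coverage figures introduced by this patch (the sketch below is not part of the diff): runtc appears to emit one "coverage: ..." record per decoder in the stack, parse_stats() turns each such stdout line into a dict of key=value fields, and coverage_sum() counts a decoder source line as uncovered only when every test case of that decoder missed it. The following is a minimal, self-contained model of that aggregation; the summarize() helper, the file:line form of missed_lines, and the sample numbers are assumptions for illustration, not documented runtc output.

# Illustration only -- mirrors the aggregation coverage_sum() performs above.
def summarize(records):
    total_lines = 0
    counts = {}
    for rec in records:
        total_lines = int(rec['lines'])
        for spec in rec.get('missed_lines', '').split(','):
            if spec:
                counts[spec] = counts.get(spec, 0) + 1
    # Keep only the lines that no test case covered.
    missed = [spec for spec, n in counts.items() if n == len(records)]
    return total_lines, missed

records = [  # hypothetical per-test coverage records for one decoder
    {'lines': '161', 'missed': '3', 'missed_lines': 'pd.py:12,pd.py:13,pd.py:80'},
    {'lines': '161', 'missed': '2', 'missed_lines': 'pd.py:13,pd.py:80'},
]
total, missed = summarize(records)
print("%d%%" % (100 - float(len(missed)) / total * 100))  # 98%: only pd.py:13 and pd.py:80 stay uncovered

With the new -c flag this kind of report is requested alongside a normal run, for example an invocation along the lines of "./tests/pdtest -c -r -v i2c" (shown only as an example of combining the options added above).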