unittest.py

changeset 147:bec55b021ae7
parent    145:fde18c4d6784
child     150:fcc07f6907a8
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/unittest.py	Thu Aug 26 19:36:44 2021 +0300
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+from ldcheck import appname, version, version_string
+from ldcheck import script_directory
+from pathlib import Path
+from parse import read_ldraw
+from testsuite import load_tests, check_model, problem_text, all_problem_type_names
+
+def unit_test_discovery():
+    ''' Yield paths of *.test files under the unittests directory, sorted within each directory '''
+    import os
+    unit_test_dir = Path(script_directory) / 'unittests'
+    for dirpath, dirnames, filenames in os.walk(unit_test_dir):
+        yield from sorted(
+            Path(dirpath) / filename
+            for filename in filenames
+            if filename.endswith('.test')
+        )
+
+def parse_expectation(text):
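+    ''' Parse one "problem_type:line_number" token into a (str, int) tuple. '''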
+    problem_type, line_no_str = str.split(text, ':')
+    return problem_type, int(line_no_str)
+
+def load_unit_test(unit_test_path, *, name, context):
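+    ''' Load a unit test: an LDraw file whose header carries a manifest.
+
+    The manifest is read line by line and consists of
+        0 Testing: <problem_type> [<problem_type> ...]
+        0 Expecting: <problem_type>:<line_number> [...]
+    or "0 Expecting: none" when the model is expected to check clean.
+    '''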
+    with open(unit_test_path, 'rb') as device:
+        import re
+        problem_types = set()
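+        # None distinguishes "no Expecting line seen yet" from an empty expectation set.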
+        expecting = None
+        while True:
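+            # Remember the stream position so the first non-manifest line can be rewound.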
+            pos = device.tell()
+            line = bytes.decode(device.readline())
+            if not line:
+                raise ValueError('unit test ended unexpectedly')
+            match = re.match('^0 Testing: (.+)', line)
+            if match:
+                set.update(problem_types, match.group(1).split())
+            elif str.strip(line) == '0 Expecting: none':
+                expecting = set()
+            else:
+                match = re.match('^0 Expecting: (.+)', line)
+                if match:
+                    if expecting is None:
+                        expecting = set()
+                    set.update(expecting, map(parse_expectation, match.group(1).split()))
+                else:
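+                    # Not a manifest line: rewind so read_ldraw sees it.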
+                    device.seek(pos)
+                    break
+        if not problem_types or expecting is None:
+            raise ValueError(str.format(
+                'Unit test {name} does not have a proper manifest',
+                name = name,
+            ))
+        return {
+            'problem_types': problem_types,
+            'expecting': expecting,
+            'model': read_ldraw(
+                device,
+                name = name,
+                context = context
+            ),
+        }
+
+def parse_problem(problem):
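+    ''' Reduce a reported problem to a (problem type name, line number) tuple. '''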
+    return problem.problem_class.name, problem.line_number
+
+def run_unit_test(unit_test_path, *, context, test_suite):
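+    ''' Run a single unit test and compare reported problems against its manifest. '''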
+    from os.path import basename
+    unit_test = load_unit_test(
+        unit_test_path,
+        name = basename(unit_test_path),
+        context = context,
+    )
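+    # Every problem type named in the manifest must exist in the test suite.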
+    bad_problems = set.difference(
+        unit_test['problem_types'],
+        all_problem_type_names(test_suite)
+    )
+    if bad_problems:
+        raise ValueError(str.format(
+            'unknown problem type(s): {names}',
+            names = ' '.join(sorted(bad_problems))
+        ))
+    problem_types = unit_test['problem_types']
+    report = check_model(unit_test['model'], test_suite)
+    expected_problems = unit_test['expecting']
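+    # Keep only reported problems whose type the test declares; others are out of scope.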
+    problems = set(
+        filter(
+            lambda problem: problem[0] in problem_types,
+            map(
+                parse_problem,
+                report['problems']
+            )
+        )
+    )
+    return {
+        'passed': problems == expected_problems,
+        'unexpected': set.difference(problems, expected_problems),
+        'missing': set.difference(expected_problems, problems),
+        'problem_types': problem_types,
+    }
+
+def format_problem_tuple(problem_tuple):
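+    ''' Render a (problem type, line number) tuple back into "type:line" form. '''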
+    return problem_tuple[0] + ':' + str(problem_tuple[1])
+
+def run_unit_test_suite():
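+    ''' Discover and run all unit tests, then summarise results and problem type coverage. '''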
+    from argparse import ArgumentParser
+    parser = ArgumentParser()
+    parser.add_argument('-d', '--debug', action = 'store_true')
+    args = parser.parse_args()
+    from ldcheck import LDrawContext
+    context = LDrawContext()
+    test_suite = load_tests()
+    num_tested = 0
+    num_passed = 0
+    all_problem_types = all_problem_type_names(test_suite)
+    problem_types_tested = set()
+    print('Running unit test suite.')
+    for unit_test_path in unit_test_discovery():
+        try:
+            unit_test_report = run_unit_test(
+                unit_test_path,
+                context = context,
+                test_suite = test_suite
+            )
+        except Exception as error:
+            if args.debug:
+                raise
+            else:
+                print(str.format(
+                    'Error running {test_name}: {error}',
+                    test_name = unit_test_path.name,
+                    error = str(error),
+                ))
+        else:
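+            # Indexing the tuple with a bool: False selects 'FAILED', True selects 'PASSED'.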
+            print(str.format(
+                '{name}: {verdict}',
+                name = unit_test_path.name,
+                verdict = ('FAILED', 'PASSED')[unit_test_report['passed']],
+            ))
+            num_tested += 1
+            num_passed += int(unit_test_report['passed'])
+            set.update(problem_types_tested, unit_test_report['problem_types'])
+            if not unit_test_report['passed']:
+                def format_problem_list(key):
+                    return str.join(
+                        ' ',
+                        map(format_problem_tuple, unit_test_report[key])
+                    )
+                print('\tunexpected:', format_problem_list('unexpected'))
+                print('\tmissing:', format_problem_list('missing'))
+    print(str.format(
+        '{num_tested} tests run, {num_passed} tests passed.',
+        num_tested = num_tested,
+        num_passed = num_passed,
+    ))
+    untested_problem_types = set.difference(all_problem_types, problem_types_tested)
+    if untested_problem_types:
+        print('The following problem types were not tested:')
+        for problem_type in sorted(untested_problem_types):
+            print('\t' + problem_type)
+
+if __name__ == '__main__':
+    run_unit_test_suite()
