added unit testing

author      Teemu Piippo <teemu@hecknology.net>
date        Fri, 18 Sep 2020 20:22:22 +0300
changeset   117:178d6e54694f
parent      116:60cac583b5df
child       118:01ce74b830c6

files:
    testsuite.py
    unittest.py
    unittests/collinearity.test
    unittests/concave.test
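
This changeset adds a small unit testing facility: a new all_problem_type_names helper in testsuite.py, a unittest.py runner, and two initial test cases. Each unit test under unittests/ is an LDraw file that opens with a manifest: a "0 Testing:" line naming the problem types under test, followed by a "0 Expecting:" line listing the expected problems as name:line pairs, where the line number appears to be counted from the first model line after the manifest. As a purely illustrative sketch (the coordinates here are made up), a minimal test for the collinear check could look like:

    0 Testing: collinear
    0 Expecting: collinear:1
    3 16 0 0 0 0 100 0 0.01 100 0

The runner checks the whole model, filters the reported problems down to the manifest's problem types, and compares the result against the expectations.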
--- a/testsuite.py	Fri Sep 18 19:47:42 2020 +0300
+++ b/testsuite.py	Fri Sep 18 20:22:22 2020 +0300
@@ -139,6 +139,12 @@
         key = lambda problem_type: problem_type.name
     )
 
+def all_problem_type_names(test_suite):
+    return set(
+        problem_type.name
+        for problem_type in iterate_problems(test_suite)
+    )
+
 if __name__ == '__main__':
     from pprint import pprint
     pprint(load_tests())
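
The all_problem_type_names helper above returns the set of known problem type names; the unit test runner below uses it to reject test manifests that name unknown problem types.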
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/unittest.py	Fri Sep 18 20:22:22 2020 +0300
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+from ldcheck import appname, version, version_string
+from ldcheck import load_config, load_colours, find_ldconfig_ldr_paths
+from ldcheck import script_directory
+from pathlib import Path
+from parse import read_ldraw
+from testsuite import load_tests, check_model, problem_text, all_problem_type_names
+
+def unit_test_discovery():
+    ''' Yields unit test paths '''
+    import os
+    unit_test_dir = Path(script_directory) / 'unittests'
+    for dirpath, dirnames, filenames in os.walk(unit_test_dir):
+        yield from (
+            Path(dirpath) / filename
+            for filename in filenames
+            if filename.endswith('.test')
+        )
+
+def parse_expectation(text):
+    problem_type, line_no_str = str.split(text, ':')
+    return problem_type, int(line_no_str)
+
+def load_unit_test(unit_test_path, *, name, ldraw_directories):
+    with open(unit_test_path, 'rb') as device:
+        import re
+        problem_types = set()
+        expecting = set()
+        while True:
+            pos = device.tell()
+            line = bytes.decode(device.readline())
+            if not line:
+                raise ValueError('unit test ended unexpectedly')
+            match = re.match('^0 Testing: (.+)', line)
+            if match:
+                set.update(problem_types, match.group(1).split())
+            else:
+                match = re.match('^0 Expecting: (.+)', line)
+                if match:
+                    set.update(expecting, map(parse_expectation, match.group(1).split()))
+                else:
+                    device.seek(pos)
+                    break
+        if not problem_types or not expecting:
+            raise ValueError(str.format(
+                'Unit test {name} does not have a proper manifest',
+                name = name,
+            ))
+        return {
+            'problem_types': problem_types,
+            'expecting': expecting,
+            'model': read_ldraw(
+                device,
+                name = name,
+                ldraw_directories = ldraw_directories
+            ),
+        }
+
+def parse_problem(problem):
+    return problem.problem_class.name, problem.line_number
+
+def run_unit_test(unit_test_path, *, config, test_suite):
+    from os.path import basename
+    unit_test = load_unit_test(
+        unit_test_path,
+        name = basename(unit_test_path),
+        ldraw_directories = config['libraries'],
+    )
+    bad_problems = set.difference(
+        unit_test['problem_types'],
+        all_problem_type_names(test_suite)
+    )
+    if bad_problems:
+        raise ValueError(str.format(
+            'unknown problem type: {names}',
+            names = ' '.join(sorted(bad_problems))
+        ))
+    problem_types = unit_test['problem_types']
+    report = check_model(unit_test['model'], test_suite)
+    expected_problems = unit_test['expecting']
+    problems = set(
+        filter(
+            lambda problem: problem[0] in problem_types,
+            map(
+                parse_problem,
+                report['problems']
+            )
+        )
+    )
+    return {
+        'passed': problems == expected_problems,
+        'unexpected': set.difference(problems, expected_problems),
+        'missing': set.difference(expected_problems, problems),
+    }
+
+def format_problem_tuple(problem_tuple):
+    return problem_tuple[0] + ':' + str(problem_tuple[1])
+
+def run_unit_test_suite():
+    config = load_config()
+    test_suite = load_tests()
+    num_tested = 0
+    num_passed = 0
+    print('Running unit test suite.')
+    for unit_test_path in unit_test_discovery():
+        try:
+            unit_test_report = run_unit_test(
+                unit_test_path,
+                config = config,
+                test_suite = test_suite
+            )
+        except Exception as error:
+            print(str.format(
+                'Error running {test_name}: {error}',
+                test_name = unit_test_path.name,
+                error = str(error),
+            ))
+        else:
+            print(str.format(
+                '{name}: {verdict}',
+                name = unit_test_path.name,
+                verdict = ('FAILED', 'PASSED')[unit_test_report['passed']],
+            ))
+            num_tested += 1
+            num_passed += int(unit_test_report['passed'])
+            if not unit_test_report['passed']:
+                def format_problem_list(key):
+                    return str.join(
+                        ' ',
+                        map(format_problem_tuple, unit_test_report[key])
+                    )
+                print('\tunexpected:', format_problem_list('unexpected'))
+                print('\tmissing:', format_problem_list('missing'))
+    print(str.format(
+        '{num_tested} tests run, {num_passed} tests passed.',
+        num_tested = num_tested,
+        num_passed = num_passed,
+    ))
+
+if __name__ == '__main__':
+    run_unit_test_suite()
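
Assuming a working ldcheck configuration whose 'libraries' entry points at the LDraw libraries, the runner is invoked directly. A sketch of a run in which both tests pass (the verdicts depend on the actual check results):

    $ python3 unittest.py
    Running unit test suite.
    collinearity.test: PASSED
    concave.test: PASSED
    2 tests run, 2 tests passed.

A test that raises an error is reported but not counted as run, and a failing test additionally prints its unexpected and missing problems as name:line pairs.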
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/unittests/collinearity.test	Fri Sep 18 20:22:22 2020 +0300
@@ -0,0 +1,4 @@
+0 Testing: collinear
+0 Expecting: collinear:1 collinear:2
+3 16 0 0 0 0 100 0 0.01745 100 0
+3 16 0 0 0 0.0436 100 0 0.0436 -100 0
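
Both triangles in this test are deliberately almost degenerate: each has three nearly collinear vertices, with x offsets of 0.01745 and 0.0436 that are tiny compared to the 100 LDU extent along the y axis. The collinear check is therefore expected to flag model lines 1 and 2, matching the manifest.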
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/unittests/concave.test	Fri Sep 18 20:22:22 2020 +0300
@@ -0,0 +1,3 @@
+0 Testing: concave
+0 Expecting: concave:1
+4 16 0 0 0 -1 2 0 0 1 0 1 2 0
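
Here the quad's third vertex (0, 1, 0) lies inside the triangle spanned by its other three vertices, producing an arrowhead shape, so the concave check is expected to report model line 1.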