Fri, 18 Sep 2020 20:22:22 +0300
added unit testing
#!/usr/bin/env python3
from ldcheck import appname, version, version_string
from ldcheck import load_config, load_colours, find_ldconfig_ldr_paths
from ldcheck import script_directory
from pathlib import Path
from parse import read_ldraw
from testsuite import load_tests, check_model, problem_text, all_problem_type_names

def unit_test_discovery():
    ''' Yields unit test paths '''
    import os
    unit_test_dir = Path(script_directory) / 'unittests'
    for dirpath, dirnames, filenames in os.walk(unit_test_dir):
        yield from (
            Path(dirpath) / filename
            for filename in filenames
            if filename.endswith('.test')
        )

def parse_expectation(text):
    ''' Parses an expectation token of the form
    "<problem type>:<line number>" into a (problem type, line number)
    tuple. '''
    problem_type, line_no_str = str.split(text, ':')
    return problem_type, int(line_no_str)

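# For illustration, an expectation token pairs a problem type name with the
# line number where that problem should be reported, separated by a colon
# (the problem type name below is hypothetical):
#
#     parse_expectation('unknown-colour:7')  ->  ('unknown-colour', 7)
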
def load_unit_test(unit_test_path, *, name, ldraw_directories):
    ''' Loads a unit test file: reads the manifest at the top and parses
    the remainder as an LDraw model. '''
    with open(unit_test_path, 'rb') as device:
        import re
        problem_types = set()
        expecting = set()
        while True:
            pos = device.tell()
            line = bytes.decode(device.readline())
            if not line:
                raise ValueError('unit test ended unexpectedly')
            match = re.match('^0 Testing: (.+)', line)
            if match:
                set.update(problem_types, match.group(1).split())
            else:
                match = re.match('^0 Expecting: (.+)', line)
                if match:
                    set.update(expecting, map(parse_expectation, match.group(1).split()))
                else:
                    # Not a manifest line: rewind so that read_ldraw sees
                    # the model from this line onwards.
                    device.seek(pos)
                    break
        if not problem_types or not expecting:
            raise ValueError(str.format(
                'Unit test {name} does not have a proper manifest',
                name = name,
            ))
        return {
            'problem_types': problem_types,
            'expecting': expecting,
            'model': read_ldraw(
                device,
                name = name,
                ldraw_directories = ldraw_directories
            ),
        }

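# A sketch of the unit test file layout that load_unit_test expects: a
# manifest of "0 Testing:" and "0 Expecting:" lines, followed by ordinary
# LDraw data. The problem type names here are illustrative only:
#
#     0 Testing: unknown-colour
#     0 Expecting: unknown-colour:3 unknown-colour:4
#     0 Name: example.test
#     1 16 0 0 0 1 0 0 0 1 0 0 0 1 3001.dat
#
# The manifest lines are consumed above; device.seek(pos) rewinds to the
# first non-manifest line so that read_ldraw parses the model from there.
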
def parse_problem(problem):
    ''' Reduces a reported problem to a (problem type name, line number)
    tuple, comparable against the expectations from the manifest. '''
    return problem.problem_class.name, problem.line_number

def run_unit_test(unit_test_path, *, config, test_suite):
    from os.path import basename
    unit_test = load_unit_test(
        unit_test_path,
        name = basename(unit_test_path),
        ldraw_directories = config['libraries'],
    )
    # Guard against typos in the manifest: every problem type being tested
    # must be known to the test suite.
    bad_problems = set.difference(
        unit_test['problem_types'],
        all_problem_type_names(test_suite)
    )
    if bad_problems:
        raise ValueError(str.format(
            'unknown problem types: {names}',
            names = ' '.join(sorted(bad_problems))
        ))
    problem_types = unit_test['problem_types']
    report = check_model(unit_test['model'], test_suite)
    expected_problems = unit_test['expecting']
    # Only consider problems of the types under test, so that unrelated
    # problems do not cause spurious failures.
    problems = set(
        filter(
            lambda problem: problem[0] in problem_types,
            map(
                parse_problem,
                report['problems']
            )
        )
    )
    return {
        'passed': problems == expected_problems,
        'unexpected': set.difference(problems, expected_problems),
        'missing': set.difference(expected_problems, problems),
    }

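# Minimal sketch of checking a single test by hand (the path below is
# hypothetical). In the returned report, 'unexpected' holds problems that
# were reported but not listed in the manifest, and 'missing' holds
# expected problems that were never reported:
#
#     report = run_unit_test(
#         Path('unittests/example.test'),
#         config = load_config(),
#         test_suite = load_tests(),
#     )
#     print(report['passed'], report['unexpected'], report['missing'])
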
def format_problem_tuple(problem_tuple):
    return problem_tuple[0] + ':' + str(problem_tuple[1])

def run_unit_test_suite():
    config = load_config()
    test_suite = load_tests()
    num_tested = 0
    num_passed = 0
    print('Running unit test suite.')
    for unit_test_path in unit_test_discovery():
        try:
            unit_test_report = run_unit_test(
                unit_test_path,
                config = config,
                test_suite = test_suite
            )
        except Exception as error:
            print(str.format(
                'Error running {test_name}: {error}',
                test_name = unit_test_path.name,
                error = str(error),
            ))
        else:
            print(str.format(
                '{name}: {verdict}',
                name = unit_test_path.name,
                # Indexing with a bool picks 'FAILED' for False, 'PASSED' for True.
                verdict = ('FAILED', 'PASSED')[unit_test_report['passed']],
            ))
            num_tested += 1
            num_passed += int(unit_test_report['passed'])
            if not unit_test_report['passed']:
                def format_problem_list(key):
                    return str.join(
                        ' ',
                        map(format_problem_tuple, unit_test_report[key])
                    )
                print('\tunexpected:', format_problem_list('unexpected'))
                print('\tmissing:', format_problem_list('missing'))
    print(str.format(
        '{num_tested} tests run, {num_passed} tests passed.',
        num_tested = num_tested,
        num_passed = num_passed,
    ))

if __name__ == '__main__':
    run_unit_test_suite()