#!/usr/bin/env python3
from ldcheck import appname, version, version_string
from ldcheck import script_directory
from pathlib import Path
from parse import read_ldraw
from testsuite import load_tests, check_model, problem_text, all_problem_type_names

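# Unit tests live in the 'unittests' directory next to this script; each test is
# a '.test' file holding a small manifest followed by an ordinary LDraw model.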
def unit_test_discovery():
    ''' Yields unit test paths '''
    import os
    unit_test_dir = Path(script_directory) / 'unittests'
    for dirpath, dirnames, filenames in os.walk(unit_test_dir):
        yield from sorted(
            Path(dirpath) / filename
            for filename in filenames
            if filename.endswith('.test')
        )

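# An expectation token has the form <problem type>:<line number>.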
def parse_expectation(text):
    problem_type, line_no_str = str.split(text, ':')
    return problem_type, int(line_no_str)

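# A test file starts with a manifest of meta-commands: '0 Testing: ...' names the
# problem types the test exercises, and '0 Expecting: ...' lists the problems the
# checker should report ('0 Expecting: none' means a clean result). A hypothetical
# manifest might look like:
#     0 Testing: some-problem-type
#     0 Expecting: some-problem-type:5
# where 'some-problem-type' is a placeholder; real names come from the test suite.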
def load_unit_test(unit_test_path, *, name, context):
    with open(unit_test_path, 'rb') as device:
        import re
        problem_types = set()
        expecting = None
        while True:
            pos = device.tell()
            line = bytes.decode(device.readline())
            if not line:
                # EOF while still reading the manifest
                raise ValueError('unit test ended unexpectedly')
            match = re.match('^0 Testing: (.+)', line)
            if match:
                set.update(problem_types, match.group(1).split())
            elif str.strip(line) == '0 Expecting: none':
                expecting = set()
            else:
                match = re.match('^0 Expecting: (.+)', line)
                if match:
                    if not expecting:
                        expecting = set()
                    set.update(expecting, map(parse_expectation, match.group(1).split()))
                else:
                    # Not a manifest line: rewind and hand the rest of the file
                    # to the LDraw parser.
                    device.seek(pos)
                    break
        if not problem_types or expecting is None:
            raise ValueError(str.format(
                'Unit test {name} does not have a proper manifest',
                name = name,
            ))
        return {
            'problem_types': problem_types,
            'expecting': expecting,
            'model': read_ldraw(
                device,
                name = name,
                context = context
            ),
        }

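# Reduce a reported problem to a (problem type name, line number) pair, the same
# shape produced by parse_expectation.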
def parse_problem(problem):
    return problem.problem_class.name, problem.line_number

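# Run one unit test against the given test suite. The result dict carries the
# verdict under 'passed', plus the sets of unexpected and missing problems for
# diagnostics.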
def run_unit_test(unit_test_path, *, context, test_suite):
    from os.path import basename
    unit_test = load_unit_test(
        unit_test_path,
        name = basename(unit_test_path),
        context = context,
    )
    bad_problems = set.difference(
        unit_test['problem_types'],
        all_problem_type_names(test_suite)
    )
    if bad_problems:
        raise ValueError(str.format(
            'unknown problem type: {names}',
            names = ' '.join(sorted(bad_problems))
        ))
    problem_types = unit_test['problem_types']
    report = check_model(unit_test['model'], test_suite)
    expected_problems = unit_test['expecting']
    # Only consider problems of the types this test declares in its manifest.
    problems = set(
        filter(
            lambda problem: problem[0] in problem_types,
            map(
                parse_problem,
                report['problems']
            )
        )
    )
    return {
        'passed': problems == expected_problems,
        'unexpected': set.difference(problems, expected_problems),
        'missing': set.difference(expected_problems, problems),
        'problem_types': problem_types,
    }

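# Format a (problem type, line number) pair back into the 'type:line' notation
# used by the manifests.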
def format_problem_tuple(problem_tuple):
    return problem_tuple[0] + ':' + str(problem_tuple[1])

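# Entry point: run every discovered unit test, print a per-test verdict, and
# finish with a summary plus a list of problem types no test covered.
# The -d/--debug flag re-raises errors instead of just reporting them.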
def run_unit_test_suite():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-d', '--debug', action = 'store_true')
    args = parser.parse_args()
    from ldcheck import LDrawContext
    context = LDrawContext()
    test_suite = load_tests()
    num_tested = 0
    num_passed = 0
    all_problem_types = all_problem_type_names(test_suite)
    problem_types_tested = set()
    print('Running unit test suite.')
    for unit_test_path in unit_test_discovery():
        try:
            unit_test_report = run_unit_test(
                unit_test_path,
                context = context,
                test_suite = test_suite
            )
        except Exception as error:
            # With --debug, let the traceback through instead of summarising it.
            if args.debug:
                raise
            else:
                print(str.format(
                    'Error running {test_name}: {error}',
                    test_name = unit_test_path.name,
                    error = str(error),
                ))
        else:
            print(str.format(
                '{name}: {verdict}',
                name = unit_test_path.name,
                verdict = ('FAILED', 'PASSED')[unit_test_report['passed']],
            ))
            num_tested += 1
            num_passed += int(unit_test_report['passed'])
            set.update(problem_types_tested, unit_test_report['problem_types'])
            if not unit_test_report['passed']:
                def format_problem_list(key):
                    return str.join(
                        ' ',
                        map(format_problem_tuple, unit_test_report[key])
                    )
                print('\tunexpected:', format_problem_list('unexpected'))
                print('\tmissing:', format_problem_list('missing'))
    print(str.format(
        '{num_tested} tests run, {num_passed} tests passed.',
        num_tested = num_tested,
        num_passed = num_passed,
    ))
    untested_problem_types = set.difference(all_problem_types, problem_types_tested)
    if untested_problem_types:
        print('The following problem types were not tested:')
        for problem_type in sorted(untested_problem_types):
            print('\t' + problem_type)

if __name__ == '__main__':
    run_unit_test_suite()