testsuite.py

author      Teemu Piippo <teemu@hecknology.net>
date        Mon, 24 Jun 2019 17:31:47 +0300
changeset   92:b8d72909d593

improved the mirrored stud check to catch cases where a subfile that contains studs is mirrored

from warnings import warn

class ProblemType:
    severities = ['hold', 'warning'] # in descending order
    def __init__(self, name, severity, message):
        if severity not in ProblemType.severities:
            raise ValueError(str.format(
                'bad severity {severity!r}',
                severity = severity,
            ))
        self.name = name
        self.severity = severity
        self.message = message
    def __call__(self, bad_object, **args):
        return Problem(
            problem_class = self,
            bad_object = bad_object,
            **args,
        )
    def placeholder_message(self):
        # render the message with every parameter replaced by a generic
        # '<parameter name>' placeholder, e.g. for listing problem types
        if callable(self.message):
            import inspect
            spec = inspect.getfullargspec(self.message)
            args = {}
            assert not spec.varargs and not spec.varkw
            for argname in spec.args + spec.kwonlyargs:
                args[argname] = '<' + argname.replace('_', ' ') + '>'
            return self.message(**args)
        else:
            return self.message
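
def _example_placeholder_message():
    # Illustrative sketch only (not part of the suite): a ProblemType whose
    # message is callable gets each parameter filled with a '<parameter name>'
    # placeholder. The type name and message below are hypothetical.
    example_type = ProblemType(
        name = 'example-moved-file',
        severity = 'warning',
        message = lambda new_name: 'file has moved to ' + new_name,
    )
    # evaluates to: 'file has moved to <new name>'
    return example_type.placeholder_message()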

class Problem:
    def __init__(self, problem_class, bad_object, **args):
        self.problem_class = problem_class
        self.severity = problem_class.severity
        self.object = bad_object
        self.args = args
    def __str__(self):
        if callable(self.problem_class.message):
            return self.problem_class.message(**self.args)
        else:
            return self.problem_class.message

def problem_type(problem_name, **args):
    '''
        Decorator that creates a ProblemType under the given name and
        registers it on the decorated test function.
    '''
    def wrapper(function):
        if not hasattr(function, 'ldcheck_problem_types'):
            function.ldcheck_problem_types = {}
        new_type = ProblemType(name = problem_name, **args)
        function.ldcheck_problem_types[problem_name] = new_type
        return function
    return wrapper

def report_problem(problem_name, *, bad_object, **args):
    '''
        Builds the problem descriptor that test functions yield; check_model
        turns it into a Problem object via build_problem.
    '''
    return {'type': problem_name, 'bad-object': bad_object, 'args': args}
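
def _example_test_module_function():
    # Illustrative sketch only: how a test module would pair the problem_type
    # decorator with report_problem. The problem name, message and the
    # 'is_bad' attribute are hypothetical, not real checks from this project.
    @problem_type(
        'example-bad-element',
        severity = 'warning',
        message = lambda reason: 'bad element: ' + reason,
    )
    def bad_element_test(model):
        for element in model.body:
            if getattr(element, 'is_bad', False):
                yield report_problem(
                    'example-bad-element',
                    bad_object = element,
                    reason = 'flagged by the example check',
                )
    # a test module would export such functions through its manifest:
    #     manifest = {'tests': [bad_element_test]}
    return bad_element_test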

def name_of_package(package):
    # walk_packages may yield either plain tuples or ModuleInfo objects;
    # both carry the module name
    if isinstance(package, tuple):
        return package[1]
    else:
        return package.name

def test_discovery():
    '''
        Finds all test modules and yields their names.
    '''
    from pkgutil import walk_packages
    import tests
    yield from sorted(
        'tests.' + name_of_package(result)
        for result in walk_packages(tests.__path__)
    )

def load_tests():
    '''
        Imports test modules and combines their manifests into a test suite.
    '''
    test_suite = {'tests': []}
    from importlib import import_module
    for module_name in test_discovery():
        module = import_module(module_name)
        if hasattr(module, 'manifest'):
            # Merge the data from the manifest
            test_suite['tests'] += module.manifest['tests']
        else:
            warn(str.format('Module {} does not have a manifest', module_name))
    test_suite['tests'].sort(key = lambda f: f.__name__)
    return test_suite

def problem_key(problem):
    rank = ProblemType.severities.index(problem.severity) # sort by severity
    return (rank, problem.line_number)

def build_problem(test_function, problem_params):
    problem_name = problem_params['type']
    problem_type = test_function.ldcheck_problem_types[problem_name]
    problem_object = problem_type(
        bad_object = problem_params['bad-object'],
        **problem_params['args'],
    )
    return problem_object

def check_model(model, test_suite = None):
    '''
        Runs every test in the test suite against the model and returns a
        report of the problems that were found.
    '''
    if not test_suite:
        test_suite = load_tests()
    problems = []
    # map each element of the model body to its (body index, line number) pair
    line_numbers = {
        element: (i, i + 1)
        for i, element in enumerate(model.body)
    }
    for test_function in test_suite['tests']:
        for problem_params in test_function(model):
            problem = build_problem(test_function, problem_params)
            # add line numbers to the problem
            problem.body_index, problem.line_number \
                = line_numbers[problem.object]
            # the element reference is no longer needed once its position is recorded
            problem.object = None
            problems.append(problem)
    return {
        'passed': not any(
            problem.severity == 'hold'
            for problem in problems
        ),
        'problems': sorted(problems, key = problem_key),
    }

def problem_text(problem, test_suite):
    message = problem.problem_class.message
    if callable(message):
        message = message(**problem.args)
    return message

def format_report_html(report, model, test_suite):
    messages = []
    for problem in report['problems']:
        ldraw_code = model.body[problem.body_index].textual_representation()
        message = str.format(
            '<li class="{problem_type}">{model_name}:{line_number}:'
            '{problem_type}: {message}<br />{ldraw_code}</li>',
            model_name = model.name,
            line_number = problem.line_number,
            problem_type = problem.severity,
            message = problem_text(problem, test_suite),
            ldraw_code = ldraw_code,
        )
        messages.append(message)
    return '\n'.join(messages)

def format_report(report, model, test_suite):
    import colorama
    colorama.init()
    messages = []
    for problem in report['problems']:
        if problem.severity == 'hold':
            text_colour = colorama.Fore.LIGHTRED_EX
        elif problem.severity == 'warning':
            text_colour = colorama.Fore.LIGHTBLUE_EX
        else:
            text_colour = ''
        ldraw_code = model.body[problem.body_index].textual_representation()
        message = str.format(
            '{text_colour}{model_name}:{line_number}: {problem_type}: {message}'
            '{colour_reset}\n\t{ldraw_code}',
            text_colour = text_colour,
            model_name = model.name,
            line_number = problem.line_number,
            problem_type = problem.severity,
            message = problem_text(problem, test_suite),
            colour_reset = colorama.Fore.RESET,
            ldraw_code = ldraw_code,
        )
        messages.append(message)
    return '\n'.join(messages)
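
def _example_check_model_usage(model):
    # Illustrative sketch only: runs the whole suite against a model object
    # and prints a plain-text report. The model is assumed to provide .name
    # and a .body of elements with a textual_representation() method, as
    # check_model and format_report expect.
    test_suite = load_tests()
    report = check_model(model, test_suite)
    print(format_report(report, model, test_suite))
    return report['passed']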

def iterate_problems(test_suite):
    for test_function in test_suite['tests']:
        yield from test_function.ldcheck_problem_types.values()
    
def all_problem_types(test_suite):
    return sorted(
        iterate_problems(test_suite),
        key = lambda problem_type: problem_type.name
    )
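
def _example_problem_type_listing():
    # Illustrative sketch only: combines all_problem_types with
    # placeholder_message to list every known problem name together with a
    # generic rendering of its message, e.g. for documentation output.
    test_suite = load_tests()
    return [
        (ptype.name, ptype.placeholder_message())
        for ptype in all_problem_types(test_suite)
    ]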
    

if __name__ == '__main__':
    from pprint import pprint
    pprint(load_tests())
