X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=test%2Fpy%2Fconftest.py;h=3fe91e874606b58de55e2da485fd2fa2c377d1f4;hb=c631e150feb150cff744c12d5e54b609ba62a227;hp=3012c8e4951341a8b7fdeb50d853964fa2aaad40;hpb=9ef2835f26652092a61b0cb0551ef4f36be27946;p=u-boot diff --git a/test/py/conftest.py b/test/py/conftest.py index 3012c8e495..3fe91e8746 100644 --- a/test/py/conftest.py +++ b/test/py/conftest.py @@ -17,10 +17,10 @@ import atexit import errno import os import os.path -import pexpect import pytest from _pytest.runner import runtestprotocol import ConfigParser +import re import StringIO import sys @@ -179,6 +179,7 @@ def pytest_configure(config): ubconfig.board_type = board_type ubconfig.board_identity = board_identity ubconfig.gdbserver = gdbserver + ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb' env_vars = ( 'board_type', @@ -192,15 +193,49 @@ def pytest_configure(config): for v in env_vars: os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v) - if board_type == 'sandbox': + if board_type.startswith('sandbox'): import u_boot_console_sandbox console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig) else: import u_boot_console_exec_attach console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig) -def pytest_generate_tests(metafunc): - """pytest hook: parameterize test functions based on custom rules. +re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$') +def generate_ut_subtest(metafunc, fixture_name): + """Provide parametrization for a ut_subtest fixture. + + Determines the set of unit tests built into a U-Boot binary by parsing the + list of symbols generated by the build process. Provides this information + to test functions by parameterizing their ut_subtest fixture parameter. + + Args: + metafunc: The pytest test function. + fixture_name: The fixture name to test. + + Returns: + Nothing. + """ + + fn = console.config.build_dir + '/u-boot.sym' + try: + with open(fn, 'rt') as f: + lines = f.readlines() + except: + lines = [] + lines.sort() + + vals = [] + for l in lines: + m = re_ut_test_list.search(l) + if not m: + continue + vals.append(m.group(1) + ' ' + m.group(2)) + + ids = ['ut_' + s.replace(' ', '_') for s in vals] + metafunc.parametrize(fixture_name, vals, ids=ids) + +def generate_config(metafunc, fixture_name): + """Provide parametrization for {env,brd}__ fixtures. If a test function takes parameter(s) (fixture names) of the form brd__xxx or env__xxx, the brd and env configuration dictionaries are consulted to @@ -209,6 +244,7 @@ def pytest_generate_tests(metafunc): Args: metafunc: The pytest test function. + fixture_name: The fixture name to test. Returns: Nothing. @@ -218,30 +254,75 @@ def pytest_generate_tests(metafunc): 'brd': console.config.brd, 'env': console.config.env, } + parts = fixture_name.split('__') + if len(parts) < 2: + return + if parts[0] not in subconfigs: + return + subconfig = subconfigs[parts[0]] + vals = [] + val = subconfig.get(fixture_name, []) + # If that exact name is a key in the data source: + if val: + # ... use the dict value as a single parameter value. + vals = (val, ) + else: + # ... otherwise, see if there's a key that contains a list of + # values to use instead. 
+        vals = subconfig.get(fixture_name + 's', [])
+    def fixture_id(index, val):
+        try:
+            return val['fixture_id']
+        except:
+            return fixture_name + str(index)
+    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
+    metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def pytest_generate_tests(metafunc):
+    """pytest hook: parameterize test functions based on custom rules.
+
+    Check each test function parameter (fixture name) to see if it is one of
+    our custom names, and if so, provide the correct parametrization for that
+    parameter.
+
+    Args:
+        metafunc: The pytest test function.
+
+    Returns:
+        Nothing.
+    """
+
     for fn in metafunc.fixturenames:
-        parts = fn.split('__')
-        if len(parts) < 2:
-            continue
-        if parts[0] not in subconfigs:
+        if fn == 'ut_subtest':
+            generate_ut_subtest(metafunc, fn)
             continue
-        subconfig = subconfigs[parts[0]]
-        vals = []
-        val = subconfig.get(fn, [])
-        # If that exact name is a key in the data source:
-        if val:
-            # ... use the dict value as a single parameter value.
-            vals = (val, )
-        else:
-            # ... otherwise, see if there's a key that contains a list of
-            # values to use instead.
-            vals = subconfig.get(fn + 's', [])
-        def fixture_id(index, val):
-            try:
-                return val["fixture_id"]
-            except:
-                return fn + str(index)
-        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
-        metafunc.parametrize(fn, vals, ids=ids)
+        generate_config(metafunc, fn)
+
+@pytest.fixture(scope='session')
+def u_boot_log(request):
+    """Generate the value of a test's log fixture.
+
+    Args:
+        request: The pytest request.
+
+    Returns:
+        The fixture value.
+    """
+
+    return console.log
+
+@pytest.fixture(scope='session')
+def u_boot_config(request):
+    """Generate the value of a test's u_boot_config fixture.
+
+    Args:
+        request: The pytest request.
+
+    Returns:
+        The fixture value.
+    """
+
+    return console.config
 
 @pytest.fixture(scope='function')
 def u_boot_console(request):
@@ -258,12 +339,12 @@ def u_boot_console(request):
     return console
 
 anchors = {}
-tests_not_run = set()
-tests_failed = set()
-tests_xpassed = set()
-tests_xfailed = set()
-tests_skipped = set()
-tests_passed = set()
+tests_not_run = []
+tests_failed = []
+tests_xpassed = []
+tests_xfailed = []
+tests_skipped = []
+tests_passed = []
 
 def pytest_itemcollected(item):
     """pytest hook: Called once for each test found during collection.
@@ -278,7 +359,7 @@ def pytest_itemcollected(item):
         Nothing.
     """
 
-    tests_not_run.add(item.name)
+    tests_not_run.append(item.name)
 
 def cleanup():
     """Clean up all global state.
@@ -348,12 +429,12 @@ def setup_boardspec(item):
     for board in mark.args:
         if board.startswith('!'):
             if ubconfig.board_type == board[1:]:
-                pytest.skip('board not supported')
+                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                 return
         else:
             required_boards.append(board)
     if required_boards and ubconfig.board_type not in required_boards:
-        pytest.skip('board not supported')
+        pytest.skip('board "%s" not supported' % ubconfig.board_type)
 
 def setup_buildconfigspec(item):
     """Process any 'buildconfigspec' marker for a test.
@@ -374,7 +455,38 @@ def setup_buildconfigspec(item): return for option in mark.args: if not ubconfig.buildconfig.get('config_' + option.lower(), None): - pytest.skip('.config feature not enabled') + pytest.skip('.config feature "%s" not enabled' % option.lower()) + +def tool_is_in_path(tool): + for path in os.environ["PATH"].split(os.pathsep): + fn = os.path.join(path, tool) + if os.path.isfile(fn) and os.access(fn, os.X_OK): + return True + return False + +def setup_requiredtool(item): + """Process any 'requiredtool' marker for a test. + + Such a marker lists some external tool (binary, executable, application) + that the test requires. If tests are being executed on a system that + doesn't have the required tool, the test is marked to be skipped. + + Args: + item: The pytest test item. + + Returns: + Nothing. + """ + + mark = item.get_marker('requiredtool') + if not mark: + return + for tool in mark.args: + if not tool_is_in_path(tool): + pytest.skip('tool "%s" not in $PATH' % tool) + +def start_test_section(item): + anchors[item.name] = log.start_section(item.name) def pytest_runtest_setup(item): """pytest hook: Configure (set up) a test item. @@ -389,9 +501,10 @@ def pytest_runtest_setup(item): Nothing. """ - anchors[item.name] = log.start_section(item.name) + start_test_section(item) setup_boardspec(item) setup_buildconfigspec(item) + setup_requiredtool(item) def pytest_runtest_protocol(item, nextitem): """pytest hook: Called to execute a test. @@ -409,6 +522,14 @@ def pytest_runtest_protocol(item, nextitem): reports = runtestprotocol(item, nextitem=nextitem) + # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if + # the test is skipped. That call is required to create the test's section + # in the log file. The call to log.end_section() requires that the log + # contain a section for this test. Create a section for the test if it + # doesn't already exist. + if not item.name in anchors: + start_test_section(item) + failure_cleanup = False test_list = tests_passed msg = 'OK' @@ -439,7 +560,7 @@ def pytest_runtest_protocol(item, nextitem): if failure_cleanup: console.drain_console() - test_list.add(item.name) + test_list.append(item.name) tests_not_run.remove(item.name) try:
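
For readers unfamiliar with this test infrastructure, the following sketch shows how a test module might consume the fixtures and markers that the hunks above wire up. It is not part of the patch: u_boot_console.run_command() is assumed from U-Boot's test/py console classes, the 'Failures: 0' check loosely mirrors test/py/tests/test_ut.py, and the test names, the 'cmd_memory' option and the env__hypothetical_server key are invented for illustration.

# Illustrative only; not part of the patch above. A minimal sketch of how a
# test module might use the fixtures and markers this conftest.py wires up.
import pytest

# 'ut_subtest' is parametrized by generate_ut_subtest() from u-boot.sym
# symbols matching _u_boot_list_2_<suite>_test_2_<suite>_test_<name>, giving
# one test case per unit test compiled into the U-Boot binary.
def test_ut(u_boot_console, ut_subtest):
    output = u_boot_console.run_command('ut ' + ut_subtest)
    assert output.endswith('Failures: 0')

# setup_buildconfigspec() skips this test unless CONFIG_CMD_MEMORY is enabled
# in .config, setup_requiredtool() skips it if 'dtc' is not in $PATH, and
# setup_boardspec() skips it when running on the sandbox board.
@pytest.mark.buildconfigspec('cmd_memory')
@pytest.mark.requiredtool('dtc')
@pytest.mark.boardspec('!sandbox')
def test_example_markers(u_boot_console):
    u_boot_console.run_command('base')

# 'env__hypothetical_server' (a made-up name) is filled in by generate_config()
# from the env config module: either a single dict stored under that exact key,
# or one entry from a list stored under 'env__hypothetical_servers'.
def test_example_env(u_boot_console, env__hypothetical_server):
    if not env__hypothetical_server.get('enabled', False):
        pytest.skip('server not configured in test environment')
    u_boot_console.run_command('echo ' + env__hypothetical_server['name'])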