xref: /netbsd-src/external/mpl/bind/dist/bin/tests/system/conftest.py (revision c9055873d0546e63388f027d3d7f85381cde0545)
1# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
2#
3# SPDX-License-Identifier: MPL-2.0
4#
5# This Source Code Form is subject to the terms of the Mozilla Public
6# License, v. 2.0.  If a copy of the MPL was not distributed with this
7# file, you can obtain one at https://mozilla.org/MPL/2.0/.
8#
9# See the COPYRIGHT file distributed with this work for additional
10# information regarding copyright ownership.
11
12from functools import partial
13import logging
14import os
15from pathlib import Path
16import re
17import shutil
18import subprocess
19import tempfile
20import time
21from typing import Any, Dict, List, Optional
22
23import pytest
24
25
# Enable pytest's assertion introspection (detailed failure messages) for
# assertions inside the isctest helper package.
pytest.register_assert_rewrite("isctest")
27
28
29# Silence warnings caused by passing a pytest fixture to another fixture.
30# pylint: disable=redefined-outer-name
31
32
33# ----------------- Older pytest / xdist compatibility -------------------
34# As of 2023-01-11, the minimal supported pytest / xdist versions are
35# determined by what is available in EL8/EPEL8:
36# - pytest 3.4.2
37# - pytest-xdist 1.24.1
38_pytest_ver = pytest.__version__.split(".")
39_pytest_major_ver = int(_pytest_ver[0])
40if _pytest_major_ver < 7:
41    # pytest.Stash/pytest.StashKey mechanism has been added in 7.0.0
42    # for older versions, use regular dictionary with string keys instead
43    FIXTURE_OK = "fixture_ok"  # type: Any
44else:
45    FIXTURE_OK = pytest.StashKey[bool]()  # pylint: disable=no-member
46
47# ----------------------- Globals definition -----------------------------
48
49LOG_FORMAT = "%(asctime)s %(levelname)7s:%(name)s  %(message)s"
50XDIST_WORKER = os.environ.get("PYTEST_XDIST_WORKER", "")
51FILE_DIR = os.path.abspath(Path(__file__).parent)
52ENV_RE = re.compile(b"([^=]+)=(.*)")
53PORT_MIN = 5001
54PORT_MAX = 32767
55PORTS_PER_TEST = 20
56PRIORITY_TESTS = [
57    # Tests that are scheduled first. Speeds up parallel execution.
58    "dupsigs/",
59    "rpz/",
60    "rpzrecurse/",
61    "serve-stale/",
62    "timeouts/",
63    "upforwd/",
64]
65PRIORITY_TESTS_RE = re.compile("|".join(PRIORITY_TESTS))
66CONFTEST_LOGGER = logging.getLogger("conftest")
67SYSTEM_TEST_DIR_GIT_PATH = "bin/tests/system"
68SYSTEM_TEST_NAME_RE = re.compile(f"{SYSTEM_TEST_DIR_GIT_PATH}" + r"/([^/]+)")
69SYMLINK_REPLACEMENT_RE = re.compile(r"/tests(_.*)\.py")
70
71# ---------------------- Module initialization ---------------------------
72
73
def init_pytest_conftest_logger(conftest_logger):
    """
    Set up the conftest logger which records pytest setup and
    configuration activity before any tests execute -- i.e. all logging
    in this file that is _not_ specific to a single test module.
    """
    handler = logging.FileHandler("pytest.conftest.log.txt")
    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    handler.setLevel(logging.DEBUG)
    conftest_logger.setLevel(logging.DEBUG)
    conftest_logger.addHandler(handler)
85

# Configure the conftest logger right at import time so that all subsequent
# module-level setup (e.g. reading conf.sh below) is captured in the log.
init_pytest_conftest_logger(CONFTEST_LOGGER)
88
89
def avoid_duplicated_logs():
    """
    Remove direct root logger output to file descriptors.

    The default stream output causes duplicates, because all of our
    messages also go through regular logging and would thus be displayed
    twice.
    """
    # Beware: As of pytest 7.2.2, the LiveLogging and LogCapture handlers
    # inherit from logging.StreamHandler, so an exact class comparison is
    # used on purpose to leave those in place.
    direct_handlers = [
        handler
        for handler in logging.root.handlers
        if type(handler) is logging.StreamHandler
    ]
    for handler in direct_handlers:
        logging.root.handlers.remove(handler)
104
105
def parse_env(env_bytes):
    """Parse the POSIX env format into Python dictionary."""
    parsed = {}
    for line in env_bytes.splitlines():
        match = ENV_RE.match(line)
        if match is None:
            continue
        name, value = match.groups()
        # EL8+ workaround for https://access.redhat.com/solutions/6994985
        # FUTURE: can be removed when we no longer need to parse env vars
        if name in (b"which_declare", b"BASH_FUNC_which%%"):
            continue
        parsed[name] = value
    return parsed
118
119
def get_env_bytes(cmd):
    """
    Run *cmd* through the shell (from FILE_DIR) and return its stdout
    parsed as POSIX env output (see parse_env).

    Raises subprocess.CalledProcessError when the command fails.
    """
    try:
        proc = subprocess.run(
            [cmd],
            shell=True,
            check=True,
            cwd=FILE_DIR,
            stdout=subprocess.PIPE,
        )
    except subprocess.CalledProcessError as exc:
        CONFTEST_LOGGER.error("failed to get shell env: %s", exc)
        raise exc
    return parse_env(proc.stdout)
134
135
# Read common environment variables for running tests from conf.sh.
# FUTURE: Remove conf.sh entirely and define all variables in pytest only.
# Note: os.environb is used because the parsed values are raw bytes.
CONF_ENV = get_env_bytes(". ./conf.sh && env")
os.environb.update(CONF_ENV)
CONFTEST_LOGGER.debug("variables in env: %s", ", ".join([str(key) for key in CONF_ENV]))
141
142# --------------------------- pytest hooks -------------------------------
143
144
def pytest_addoption(parser):
    """Register the --noclean command line option with pytest."""
    parser.addoption(
        "--noclean",
        action="store_true",
        default=False,
        help="don't remove the temporary test directories with artifacts",
    )
152
153
def pytest_configure(config):
    """Pick an xdist scheduler compatible with module-scoped fixtures."""
    # Ensure this hook only runs on the main pytest instance if xdist is
    # used to spawn other workers.
    if XDIST_WORKER:
        return
    if not config.pluginmanager.has_plugin("xdist"):
        return
    if not config.option.numprocesses:
        return
    # System tests depend on module scope for setup & teardown --
    # enforce use of the "loadscope" scheduler or disable parallelism.
    try:
        import xdist.scheduler.loadscope  # pylint: disable=unused-import
    except ImportError:
        CONFTEST_LOGGER.debug(
            "xdist is too old and does not have "
            "scheduler.loadscope, disabling parallelism"
        )
        config.option.dist = "no"
    else:
        config.option.dist = "loadscope"
171
172
def pytest_ignore_collect(path):
    """Return True for paths which must be skipped during collection."""
    # System tests are executed in temporary directories inside
    # bin/tests/system which contain copies of all test files - including
    # tests_*.py. Those copies (and the convenience symlinks pointing at
    # them) must be ignored during the collection phase, otherwise test
    # artifacts from previous runs could mess with the runner. Both cases
    # are recognized by an underscore in the system test name (directory),
    # which is otherwise an invalid character for a system test name.
    match = SYSTEM_TEST_NAME_RE.search(str(path))
    if match is None:
        CONFTEST_LOGGER.warning("unexpected test path: %s (ignored)", path)
        return True
    return "_" in match.group(1)
188
189
def pytest_collection_modifyitems(items):
    """Schedule long-running tests first to get more benefit from parallelism."""
    # list.sort() is stable, so the relative order within the priority
    # group and within the remaining tests is preserved; priority tests
    # (key False -> 0) simply float to the front.
    items.sort(key=lambda item: PRIORITY_TESTS_RE.search(item.nodeid) is None)
199    items[:] = priority + other
200
201
class NodeResult:
    """Aggregated outcome and failure messages for a single test node.

    The setup/call/teardown reports of one test are merged into a single
    result: any non-"passed" outcome overrides an earlier "passed" one.
    """

    def __init__(self, report=None):
        # Overall outcome string ("passed" / "failed" / "skipped" / ...).
        self.outcome = None
        # Collected longrepr texts (tracebacks etc.) from the reports.
        self.messages = []
        if report is not None:
            self.update(report)

    def update(self, report):
        """Merge another test report into this result."""
        first_report = self.outcome is None
        if first_report or report.outcome != "passed":
            self.outcome = report.outcome
        if report.longreprtext:
            self.messages.append(report.longreprtext)
214
215
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item):
    """Hook that is used to expose test results to session (for use in fixtures)."""
    # execute all other hooks to obtain the report object
    outcome = yield
    report = outcome.get_result()

    # Store the test outcome on the session object so it can be looked up
    # from a module-level fixture via nodeid. This hook fires three times
    # per test (setup, call, teardown); all three reports are merged into
    # one NodeResult which preserves whether the test passed overall.
    if not hasattr(item.session, "test_results"):
        item.session.test_results = {}
    results = item.session.test_results
    results.setdefault(item.nodeid, NodeResult()).update(report)
235
236
237# --------------------------- Fixtures -----------------------------------
238
239
@pytest.fixture(scope="session")
def modules():
    """
    Sorted list of ALL modules.

    The list includes even test modules that are not tested in the current
    session. It is used to determine port distribution. Using a complete
    list of all possible test modules allows independent concurrent pytest
    invocations.
    """
    candidates = (
        f"{dirpath}/{filename}"
        for dirpath, _subdirs, filenames in os.walk(FILE_DIR)
        for filename in filenames
        if filename.startswith("tests_") and filename.endswith(".py")
    )
    return sorted(mod for mod in candidates if not pytest_ignore_collect(mod))
258
259
@pytest.fixture(scope="session")
def module_base_ports(modules):
    """
    Dictionary containing assigned base port for every module.

    The port numbers are deterministically assigned before any testing
    starts. This fixture MUST return the same value when called again
    during the same test session. When running tests in parallel, this is
    exactly what happens - every worker thread will call this fixture to
    determine test ports.
    """
    highest_base = PORT_MAX - len(modules) * PORTS_PER_TEST
    if highest_base < PORT_MIN:
        raise RuntimeError("not enough ports to assign unique port set to each module")

    # Rotate the base port value over time to detect possible test issues
    # with using random ports. This introduces a very slight race condition
    # risk. If this value changes between pytest invocation and spawning
    # worker threads, multiple tests may have same port values assigned. If
    # these tests are then executed simultaneously, the test results will
    # be misleading.
    hours_since_epoch = int(time.time() // 3600)
    base_port = hours_since_epoch % (highest_base - PORT_MIN) + PORT_MIN

    assigned = {}
    for index, mod in enumerate(modules):
        assigned[mod] = base_port + index * PORTS_PER_TEST
    return assigned
285
286
@pytest.fixture(scope="module")
def base_port(request, module_base_ports):
    """Start of the port range assigned to a particular test module."""
    return module_base_ports[request.fspath]
292
293
@pytest.fixture(scope="module")
def ports(base_port):
    """Dictionary containing port names and their assigned values."""
    # The order of names determines the offset from base_port.
    port_names = [
        "PORT",
        "TLSPORT",
        "HTTPPORT",
        "HTTPSPORT",
        "EXTRAPORT1",
        "EXTRAPORT2",
        "EXTRAPORT3",
        "EXTRAPORT4",
        "EXTRAPORT5",
        "EXTRAPORT6",
        "EXTRAPORT7",
        "EXTRAPORT8",
        "CONTROLPORT",
    ]
    return {name: base_port + offset for offset, name in enumerate(port_names)}
312
313
@pytest.fixture(scope="module")
def named_port(ports):
    """Port number assigned to this module as the main DNS port (PORT)."""
    return ports["PORT"]
317
318
@pytest.fixture(scope="module")
def named_tlsport(ports):
    """Port number assigned to this module for TLS (TLSPORT)."""
    return ports["TLSPORT"]
322
323
@pytest.fixture(scope="module")
def named_httpsport(ports):
    """Port number assigned to this module for HTTPS (HTTPSPORT)."""
    return ports["HTTPSPORT"]
327
328
@pytest.fixture(scope="module")
def control_port(ports):
    """Port number assigned to this module for rndc (CONTROLPORT)."""
    return ports["CONTROLPORT"]
332
333
@pytest.fixture(scope="module")
def env(ports):
    """Dictionary containing environment variables for the test."""
    # Start from the current process environment and overlay the assigned
    # port numbers (as strings, since env values must be strings).
    test_env = {**os.environ}
    test_env.update({name: str(number) for name, number in ports.items()})
    test_env["builddir"] = f"{test_env['TOP_BUILDDIR']}/{SYSTEM_TEST_DIR_GIT_PATH}"
    test_env["srcdir"] = f"{test_env['TOP_SRCDIR']}/{SYSTEM_TEST_DIR_GIT_PATH}"
    return test_env
343
344
@pytest.fixture(scope="module")
def system_test_name(request):
    """Name of the system test directory."""
    return Path(request.fspath).parent.name
350
351
@pytest.fixture(scope="module")
def mlogger(system_test_name):
    """Logging facility specific to this test module."""
    # Remove direct root stream handlers first, otherwise messages emitted
    # through this logger would be displayed twice.
    avoid_duplicated_logs()
    return logging.getLogger(system_test_name)
357
358
@pytest.fixture
def logger(request, system_test_name):
    """Logging facility specific to a particular test."""
    # Child logger of the module logger, named after the test itself.
    test_logger_name = f"{system_test_name}.{request.node.name}"
    return logging.getLogger(test_logger_name)
363
364
@pytest.fixture(scope="module")
def system_test_dir(
    request, env, system_test_name, mlogger
):  # pylint: disable=too-many-statements,too-many-locals
    """
    Temporary directory for executing the test.

    This fixture is responsible for creating (and potentially removing) a
    copy of the system test directory which is used as a temporary
    directory for the test execution.

    FUTURE: This removes the need to have clean.sh scripts.
    """

    def get_test_result():
        """Aggregate test results from all individual tests from this module
        into a single result: failed > skipped > passed."""
        try:
            all_test_results = request.session.test_results
        except AttributeError:
            # This may happen if pytest execution is interrupted and
            # pytest_runtest_makereport() is never called.
            mlogger.debug("can't obtain test results, test run was interrupted")
            return "error"
        # Restrict the session-wide results to the tests of this module.
        test_results = {
            node.nodeid: all_test_results[node.nodeid]
            for node in request.node.collect()
            if node.nodeid in all_test_results
        }
        assert len(test_results)
        messages = []
        for node, result in test_results.items():
            mlogger.debug("%s %s", result.outcome.upper(), node)
            messages.extend(result.messages)
        for message in messages:
            mlogger.debug("\n" + message)
        failed = any(res.outcome == "failed" for res in test_results.values())
        skipped = any(res.outcome == "skipped" for res in test_results.values())
        if failed:
            return "failed"
        if skipped:
            return "skipped"
        assert all(res.outcome == "passed" for res in test_results.values())
        return "passed"

    def unlink(path):
        # Remove a file, tolerating its absence.
        try:
            path.unlink()  # missing_ok=True isn't available on Python 3.6
        except FileNotFoundError:
            pass

    # Create a temporary directory with a copy of the original system test dir contents
    system_test_root = Path(f"{env['TOP_BUILDDIR']}/{SYSTEM_TEST_DIR_GIT_PATH}")
    testdir = Path(
        tempfile.mkdtemp(prefix=f"{system_test_name}_tmp_", dir=system_test_root)
    )
    # mkdtemp() is only used to atomically reserve a unique name; the empty
    # directory it created is replaced with a full copy of the test dir.
    shutil.rmtree(testdir)
    shutil.copytree(system_test_root / system_test_name, testdir)

    # Create a convenience symlink with a stable and predictable name
    module_name = SYMLINK_REPLACEMENT_RE.sub(r"\1", request.node.name)
    symlink_dst = system_test_root / module_name
    unlink(symlink_dst)
    symlink_dst.symlink_to(os.path.relpath(testdir, start=system_test_root))

    # Configure logger to write to a file inside the temporary test directory
    mlogger.handlers.clear()
    mlogger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(testdir / "pytest.log.txt", mode="w")
    formatter = logging.Formatter(LOG_FORMAT)
    handler.setFormatter(formatter)
    mlogger.addHandler(handler)

    # System tests are meant to be executed from their directory - switch to it.
    old_cwd = os.getcwd()
    os.chdir(testdir)
    mlogger.debug("switching to tmpdir: %s", testdir)
    try:
        yield testdir  # other fixtures / tests will execute here
    finally:
        os.chdir(old_cwd)
        mlogger.debug("changed workdir to: %s", old_cwd)

        result = get_test_result()

        # Clean temporary dir unless it should be kept
        keep = False
        if request.config.getoption("--noclean"):
            mlogger.debug(
                "--noclean requested, keeping temporary directory %s", testdir
            )
            keep = True
        elif result == "failed":
            mlogger.debug(
                "test failure detected, keeping temporary directory %s", testdir
            )
            keep = True
        elif not request.node.stash[FIXTURE_OK]:
            mlogger.debug(
                "test setup/teardown issue detected, keeping temporary directory %s",
                testdir,
            )
            keep = True

        if keep:
            mlogger.info(
                "test artifacts in: %s", symlink_dst.relative_to(system_test_root)
            )
        else:
            mlogger.debug("deleting temporary directory")
            # Close the log file handler before removing the directory
            # which contains the log file itself.
            handler.flush()
            handler.close()
            shutil.rmtree(testdir)
            unlink(symlink_dst)
479
480
481def _run_script(  # pylint: disable=too-many-arguments
482    env,
483    mlogger,
484    system_test_dir: Path,
485    interpreter: str,
486    script: str,
487    args: Optional[List[str]] = None,
488):
489    """Helper function for the shell / perl script invocations (through fixtures below)."""
490    if args is None:
491        args = []
492    path = Path(script)
493    if not path.is_absolute():
494        # make sure relative paths are always relative to system_dir
495        path = system_test_dir.parent / path
496    script = str(path)
497    cwd = os.getcwd()
498    if not path.exists():
499        raise FileNotFoundError(f"script {script} not found in {cwd}")
500    mlogger.debug("running script: %s %s %s", interpreter, script, " ".join(args))
501    mlogger.debug("  workdir: %s", cwd)
502    returncode = 1
503
504    cmd = [interpreter, script] + args
505    with subprocess.Popen(
506        cmd,
507        env=env,
508        stdout=subprocess.PIPE,
509        stderr=subprocess.STDOUT,
510        bufsize=1,
511        universal_newlines=True,
512        errors="backslashreplace",
513    ) as proc:
514        if proc.stdout:
515            for line in proc.stdout:
516                mlogger.info("    %s", line.rstrip("\n"))
517        proc.communicate()
518        returncode = proc.returncode
519        if returncode:
520            raise subprocess.CalledProcessError(returncode, cmd)
521        mlogger.debug("  exited with %d", returncode)
522
523
@pytest.fixture(scope="module")
def shell(env, system_test_dir, mlogger):
    """Function to call a shell script with arguments."""
    # Pre-bind everything except the script path and its arguments.
    return partial(_run_script, env, mlogger, system_test_dir, env["SHELL"])
528
529
@pytest.fixture(scope="module")
def perl(env, system_test_dir, mlogger):
    """Function to call a perl script with arguments."""
    # Pre-bind everything except the script path and its arguments.
    return partial(_run_script, env, mlogger, system_test_dir, env["PERL"])
534
535
@pytest.fixture(scope="module")
def run_tests_sh(system_test_dir, shell):
    """Utility function to execute tests.sh as a python test."""
    tests_script = f"{system_test_dir}/tests.sh"

    def run_tests():
        shell(tests_script)

    return run_tests
544
545
@pytest.fixture(scope="module", autouse=True)
def system_test(  # pylint: disable=too-many-arguments,too-many-statements
    request,
    env: Dict[str, str],
    mlogger,
    system_test_dir,
    shell,
    perl,
):
    """
    Driver of the test setup/teardown process. Used automatically for every test module.

    This is the most important one-fixture-to-rule-them-all. Note the
    autouse=True which causes this fixture to be loaded by every test
    module without the need to explicitly specify it.

    When this fixture is used, it utilizes other fixtures, such as
    system_test_dir, which handles the creation of the temporary test
    directory.

    Afterwards, it checks the test environment and takes care of starting
    the servers. When everything is ready, that's when the actual tests are
    executed. Once that is done, this fixture stops the servers and checks
    for any artifacts indicating an issue (e.g. coredumps).

    Finally, when this fixture reaches an end (or encounters an exception,
    which may be caused by fail/skip invocations), any fixtures which is
    used by this one are finalized - e.g. system_test_dir performs final
    checks and cleans up the temporary test directory.
    """

    def check_net_interfaces():
        # Probe the expected network interface aliases via testsock.pl;
        # skip the whole module if they are not available.
        try:
            perl("testsock.pl", ["-p", env["PORT"]])
        except subprocess.CalledProcessError as exc:
            mlogger.error("testsock.pl: exited with code %d", exc.returncode)
            pytest.skip("Network interface aliases not set up.")

    def check_prerequisites():
        # Run the test-specific prereq.sh (if any); skip the module on failure.
        try:
            shell(f"{system_test_dir}/prereq.sh")
        except FileNotFoundError:
            pass  # prereq.sh is optional
        except subprocess.CalledProcessError:
            pytest.skip("Prerequisites missing.")

    def setup_test():
        # Run the test-specific setup.sh (if any); fail the module on error.
        try:
            shell(f"{system_test_dir}/setup.sh")
        except FileNotFoundError:
            pass  # setup.sh is optional
        except subprocess.CalledProcessError as exc:
            mlogger.error("Failed to run test setup")
            pytest.fail(f"setup.sh exited with {exc.returncode}")

    def start_servers():
        try:
            perl("start.pl", ["--port", env["PORT"], system_test_dir.name])
        except subprocess.CalledProcessError as exc:
            mlogger.error("Failed to start servers")
            pytest.fail(f"start.pl exited with {exc.returncode}")

    def stop_servers():
        try:
            perl("stop.pl", [system_test_dir.name])
        except subprocess.CalledProcessError as exc:
            mlogger.error("Failed to stop servers")
            # Still collect any core dumps before failing.
            get_core_dumps()
            pytest.fail(f"stop.pl exited with {exc.returncode}")

    def get_core_dumps():
        # Fail the module if core dumps or sanitizer reports are found.
        try:
            shell("get_core_dumps.sh", [system_test_dir.name])
        except subprocess.CalledProcessError as exc:
            mlogger.error("Found core dumps or sanitizer reports")
            pytest.fail(f"get_core_dumps.sh exited with {exc.returncode}")

    os.environ.update(env)  # Ensure pytests have the same env vars as shell tests.
    mlogger.info(f"test started: {request.node.name}")
    port = int(env["PORT"])
    mlogger.info("using port range: <%d, %d>", port, port + PORTS_PER_TEST - 1)

    if not hasattr(request.node, "stash"):  # compatibility with pytest<7.0.0
        request.node.stash = {}  # use regular dict instead of pytest.Stash
    request.node.stash[FIXTURE_OK] = True

    # Perform checks which may skip this test.
    check_net_interfaces()
    check_prerequisites()

    # Store the fact that this fixture hasn't successfully finished yet.
    # This is checked before temporary directory teardown to decide whether
    # it's okay to remove the directory.
    request.node.stash[FIXTURE_OK] = False

    setup_test()
    try:
        start_servers()
        mlogger.debug("executing test(s)")
        yield
    finally:
        mlogger.debug("test(s) finished")
        stop_servers()
        get_core_dumps()
        request.node.stash[FIXTURE_OK] = True
651