From 22034ce21343610946bb2ea73620d48cb95c35fe Mon Sep 17 00:00:00 2001 From: Andrea Cervesato Date: Thu, 12 Jan 2023 16:52:59 +0100 Subject: [PATCH] Add altp package for parallel execution This patch adds altp package that is an alternative implementation of ltp package, using asyncio for parallel tests execution. In order to use it, please set ASYNC_RUN before run. The altp package also introduces a new UI for parallel execution and it replaces paramiko library with asyncssh, since paramiko doesn't support coroutines. Beware this is an EXPERIMENTAL support and it's not the ending version. --- .github/workflows/linting.yml | 4 +- .github/workflows/tests.yml | 6 +- Makefile | 2 + README.md | 21 + altp/__init__.py | 136 ++++++ altp/data.py | 209 ++++++++ altp/events.py | 162 ++++++ altp/export.py | 118 +++++ altp/host.py | 223 +++++++++ altp/ltx.py | 892 ++++++++++++++++++++++++++++++++++ altp/main.py | 363 ++++++++++++++ altp/qemu.py | 541 +++++++++++++++++++++ altp/results.py | 312 ++++++++++++ altp/scheduler.py | 668 +++++++++++++++++++++++++ altp/session.py | 312 ++++++++++++ altp/ssh.py | 305 ++++++++++++ altp/sut.py | 268 ++++++++++ altp/tempfile.py | 129 +++++ altp/tests/__init__.py | 12 + altp/tests/conftest.py | 20 + altp/tests/sut.py | 252 ++++++++++ altp/tests/test_data.py | 79 +++ altp/tests/test_events.py | 121 +++++ altp/tests/test_export.py | 190 ++++++++ altp/tests/test_host.py | 67 +++ altp/tests/test_ltx.py | 331 +++++++++++++ altp/tests/test_main.py | 394 +++++++++++++++ altp/tests/test_qemu.py | 88 ++++ altp/tests/test_scheduler.py | 669 +++++++++++++++++++++++++ altp/tests/test_session.py | 223 +++++++++ altp/tests/test_ssh.py | 164 +++++++ altp/tests/test_tempfile.py | 116 +++++ altp/ui.py | 388 +++++++++++++++ pytest.ini | 6 +- runltp-ng | 10 +- scripts/parallelizable.py | 128 +++++ 36 files changed, 7920 insertions(+), 9 deletions(-) create mode 100644 altp/__init__.py create mode 100644 altp/data.py create mode 100644 altp/events.py create mode 100644 altp/export.py create mode 100644 altp/host.py create mode 100644 altp/ltx.py create mode 100644 altp/main.py create mode 100644 altp/qemu.py create mode 100644 altp/results.py create mode 100644 altp/scheduler.py create mode 100644 altp/session.py create mode 100644 altp/ssh.py create mode 100644 altp/sut.py create mode 100644 altp/tempfile.py create mode 100644 altp/tests/__init__.py create mode 100644 altp/tests/conftest.py create mode 100644 altp/tests/sut.py create mode 100644 altp/tests/test_data.py create mode 100644 altp/tests/test_events.py create mode 100644 altp/tests/test_export.py create mode 100644 altp/tests/test_host.py create mode 100644 altp/tests/test_ltx.py create mode 100644 altp/tests/test_main.py create mode 100644 altp/tests/test_qemu.py create mode 100644 altp/tests/test_scheduler.py create mode 100644 altp/tests/test_session.py create mode 100644 altp/tests/test_ssh.py create mode 100644 altp/tests/test_tempfile.py create mode 100644 altp/ui.py create mode 100644 scripts/parallelizable.py diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 45fb836..8528f1b 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11.0"] steps: - name: Show OS @@ -31,4 +31,4 @@ jobs: run: python3 runltp-ng --help - name: Lint with pylint - run: pylint --rcfile=pylint.ini ltp + run: 
pylint --rcfile=pylint.ini ltp altp diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 0e5f283..5f957a5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11.0"] steps: - name: Show OS @@ -25,7 +25,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies - run: python3 -m pip install pytest pytest-xdist + run: python3 -m pip install asyncssh pytest pytest-asyncio - name: Test with pytest - run: python3 -m pytest -n 8 -m "not qemu and not ssh" + run: python3 -m pytest -m "not qemu and not ssh and not ltx" diff --git a/Makefile b/Makefile index 3d106df..6ab18cf 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,10 @@ INSTALL_DIR := $(BASE_DIR)/runltp-ng.d install: mkdir -p $(INSTALL_DIR)/ltp + mkdir -p $(INSTALL_DIR)/altp install -m 00644 $(top_srcdir)/tools/runltp-ng/ltp/*.py $(INSTALL_DIR)/ltp + install -m 00644 $(top_srcdir)/tools/runltp-ng/ltp/*.py $(INSTALL_DIR)/altp install -m 00775 $(top_srcdir)/tools/runltp-ng/runltp-ng $(BASE_DIR)/runltp-ng include $(top_srcdir)/include/mk/generic_leaf_target.mk diff --git a/README.md b/README.md index bce4011..87aa796 100644 --- a/README.md +++ b/README.md @@ -116,6 +116,27 @@ Once a new SUT class is implemented and placed inside the `ltp` package folder, `runltp-ng -s help` command can be used to see if application correctly recognise it. +Parallel execution +================== + +The tool now supports a new experimental feature that is implemented inside the +`altp` folder. This particular feature permits to execute multiple tests in +parallel when using host execution or SSH protocol. + +To enable the new parallel execution feature, please set the `ASYNC_RUN` flag +as following: + + # run syscalls testing suite in parallel on host using 16 workers + ASYNC_RUN=1 ./runltp-ng --run-suite syscalls --workers 16 + + # run syscalls testing suite in parallel via SSH using 16 workers + # NOTE: asyncssh package must be installed in the system + ./runltp-ng --sut=ssh:host myhost.com:user=root:key_file=myhost_id_rsa \ + --run-suite syscalls --workers 16 + +Unfortunately, `paramiko` doesn't support parallel commands execution, so we +switched into `asyncssh` library that is also way more easy to use. + Development =========== diff --git a/altp/__init__.py b/altp/__init__.py new file mode 100644 index 0000000..65bdd8d --- /dev/null +++ b/altp/__init__.py @@ -0,0 +1,136 @@ +""" +.. module:: __init__ + :platform: Linux + :synopsis: ltp package definition + +.. moduleauthor:: Andrea Cervesato +""" +import sys +import signal +import typing +import asyncio +from altp.events import EventsHandler + + +class LTPException(Exception): + """ + The most generic exception that is raised by any ltp package when + something bad happens. + """ + pass + + +events = EventsHandler() + + +def get_event_loop() -> asyncio.BaseEventLoop: + """ + Return the current asyncio event loop. + """ + loop = None + + try: + loop = asyncio.get_running_loop() + except (AttributeError, RuntimeError): + pass + + if not loop: + try: + loop = asyncio.get_event_loop() + except RuntimeError: + pass + + if not loop: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + return loop + + +def create_task(coro: typing.Coroutine) -> asyncio.Task: + """ + Create a new task. 
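+
+    A minimal usage sketch (illustrative; ``main()`` stands for any
+    user-defined coroutine):
+
+        task = altp.create_task(main())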
+ """ + loop = get_event_loop() + task = loop.create_task(coro) + + return task + + +def cancel_tasks(loop: asyncio.AbstractEventLoop) -> None: + """ + Cancel all asyncio running tasks. + """ + to_cancel = None + + # pylint: disable=no-member + if sys.version_info >= (3, 7): + to_cancel = asyncio.all_tasks(loop=loop) + else: + to_cancel = asyncio.Task.all_tasks(loop=loop) + + if not to_cancel: + return + + for task in to_cancel: + if task.cancelled(): + continue + + task.cancel() + + # pylint: disable=deprecated-argument + if sys.version_info >= (3, 10): + loop.run_until_complete( + asyncio.gather(*to_cancel, return_exceptions=True)) + else: + loop.run_until_complete( + asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) + + +def to_thread(coro: callable, *args: typing.Any) -> typing.Any: + """ + Run coroutine inside a thread. This is useful for blocking I/O operations. + """ + loop = get_event_loop() + return loop.run_in_executor(None, coro, *args) + + +def run(coro: typing.Coroutine) -> typing.Any: + """ + Run coroutine inside running event loop and it cancel all loop + tasks at the end. Useful when we want to run the main() function. + """ + loop = get_event_loop() + + def handler() -> None: + cancel_tasks(loop) + + # we don't have to handle signal again + loop.remove_signal_handler(signal.SIGTERM) + loop.add_signal_handler(signal.SIGINT, lambda: None) + + for sig in (signal.SIGTERM, signal.SIGINT): + loop.add_signal_handler(sig, handler) + + try: + return loop.run_until_complete(coro) + finally: + cancel_tasks(loop) + + +__all__ = [ + "LTPException", + "events", + "get_event_loop" +] diff --git a/altp/data.py b/altp/data.py new file mode 100644 index 0000000..485cf92 --- /dev/null +++ b/altp/data.py @@ -0,0 +1,209 @@ +""" +.. module:: data + :platform: Linux + :synopsis: module containing input data handling + +.. moduleauthor:: Andrea Cervesato +""" +import logging + +LOGGER = logging.getLogger("ltp.data") + + +class Suite: + """ + Testing suite definition class. + """ + + def __init__(self, name: str, tests: list) -> None: + """ + :param name: name of the testing suite + :type name: str + :param tests: tests of the suite + :type tests: list + """ + self._name = name + self._tests = tests + + def __repr__(self) -> str: + return \ + f"name: '{self._name}', " \ + f"tests: {self._tests}" + + @property + def name(self): + """ + Name of the testing suite. + """ + return self._name + + @property + def tests(self): + """ + Tests definitions. + """ + return self._tests + + +class Test: + """ + Test definition class. + """ + + def __init__( + self, + name: str, + cmd: str, + args: list, + parallelizable: bool = False) -> None: + """ + :param name: name of the test + :type name: str + :param cmd: command to execute + :type cmd: str + :param args: list of arguments + :type args: list(str) + :param parallelizable: if True, test can be run in parallel + :type parallelizable: bool + """ + self._name = name + self._cmd = cmd + self._args = args + self._parallelizable = parallelizable + + def __repr__(self) -> str: + return \ + f"name: '{self._name}', " \ + f"commmand: '{self._cmd}', " \ + f"arguments: {self._args}, " \ + f"parallelizable: {self._parallelizable}" + + @property + def name(self): + """ + Name of the test. 
+ """ + return self._name + + @property + def command(self): + """ + Command to execute test. + """ + return self._cmd + + @property + def arguments(self): + """ + Arguments of the command. + """ + return self._args + + @property + def parallelizable(self): + """ + If True, test can be run in parallel. + """ + return self._parallelizable + + +PARALLEL_BLACKLIST = [ + "needs_root", + "needs_device", + "mount_device", + "mntpoint", + "resource_file", + "format_device", + "save_restore", + "max_runtime" +] + + +# pylint: disable=too-many-locals +async def read_runtest( + suite_name: str, + content: str, + metadata: dict = None) -> Suite: + """ + It reads a runtest file content and it returns a Suite object. + :param suite_name: name of the test suite + :type suite_name: str + :param content: content of the runtest file + :type content: str + :param metadata: metadata JSON file content + :type metadata: dict + :returns: Suite + """ + if not content: + raise ValueError("content is empty") + + LOGGER.info("collecting testing suite: %s", suite_name) + + metadata_tests = None + if metadata: + LOGGER.info("Reading metadata content") + metadata_tests = metadata.get("tests", None) + + tests = [] + lines = content.split('\n') + for line in lines: + if not line.strip() or line.strip().startswith("#"): + continue + + LOGGER.debug("Test declaration: %s", line) + + parts = line.split() + if len(parts) < 2: + raise ValueError("Test declaration is not defining command") + + test_name = parts[0] + test_cmd = parts[1] + test_args = [] + + if len(parts) >= 3: + test_args = parts[2:] + + parallelizable = True + + if not metadata_tests: + # no metadata no party + parallelizable = False + else: + test_params = metadata_tests.get(test_name, None) + if test_params: + LOGGER.info("Found %s test params in metadata", test_name) + LOGGER.debug("params=%s", test_params) + + if test_params is None: + # this probably means test is not using new LTP API, + # so we can't decide if test can run in parallel or not + parallelizable = False + else: + for blacklist_param in PARALLEL_BLACKLIST: + if blacklist_param in test_params: + parallelizable = False + break + + if not parallelizable: + LOGGER.info("Test '%s' is not parallelizable", test_name) + else: + LOGGER.info("Test '%s' is parallelizable", test_name) + + test = Test( + test_name, + test_cmd, + test_args, + parallelizable=parallelizable) + + tests.append(test) + + LOGGER.debug("test: %s", test) + + LOGGER.debug("Collected tests: %d", len(tests)) + + suite = Suite(suite_name, tests) + + LOGGER.debug(suite) + LOGGER.info("Collected testing suite: %s", suite_name) + + return suite diff --git a/altp/events.py b/altp/events.py new file mode 100644 index 0000000..b8d6d1f --- /dev/null +++ b/altp/events.py @@ -0,0 +1,162 @@ +""" +.. module:: events + :platform: Linux + :synopsis: events handler implementation module + +.. moduleauthor:: Andrea Cervesato +""" +import typing +import logging +import asyncio + + +class EventsHandler: + """ + This class implements event loop and events handling. + """ + + def __init__(self) -> None: + self._logger = logging.getLogger("ltp.events") + self._tasks = asyncio.Queue() + self._lock = asyncio.Lock() + self._events = {} + self._stop = False + + def reset(self) -> None: + """ + Reset the entire events queue. + """ + self._logger.info("Reset events queue") + self._events.clear() + + def is_registered(self, event_name: str) -> bool: + """ + Returns True if ``event_name`` is registered. 
+ :param event_name: name of the event + :type event_name: str + :returns: True if registered, False otherwise + """ + if not event_name: + raise ValueError("event_name is empty") + + return event_name in self._events + + def register(self, event_name: str, coro: typing.Coroutine) -> None: + """ + Register an event with ``event_name``. + :param event_name: name of the event + :type event_name: str + :param coro: coroutine associated with ``event_name`` + :type coro: Coroutine + """ + if not event_name: + raise ValueError("event_name is empty") + + if not coro: + raise ValueError("coro is empty") + + self._logger.info("Register new event: %s", repr(event_name)) + + if not self.is_registered(event_name): + self._events[event_name] = [] + + self._events[event_name].append(coro) + + def unregister(self, event_name: str) -> None: + """ + Unregister an event with ``event_name``. + :param event_name: name of the event + :type event_name: str + """ + if not event_name: + raise ValueError("event_name is empty") + + if not self.is_registered(event_name): + raise ValueError(f"{event_name} is not registered") + + self._logger.info("Unregister event: %s", repr(event_name)) + + self._events.pop(event_name) + + async def fire(self, event_name: str, *args: list, **kwargs: dict) -> None: + """ + Fire a specific event. + :param event_name: name of the event + :type event_name: str + :param args: Arguments to be passed to callback functions execution. + :type args: list + :param kwargs: Keyword arguments to be passed to callback functions + execution. + :type kwargs: dict + """ + if not event_name: + raise ValueError("event_name is empty") + + coros = self._events.get(event_name, None) + if not coros: + return + + for coro in coros: + await self._tasks.put(coro(*args, **kwargs)) + + async def _consume(self) -> None: + """ + Consume the next event. + """ + # following await is a blocking I/O + # so we don't need to sleep before get() + task = await self._tasks.get() + if not task: + return + + # pylint: disable=broad-except + try: + await task + except Exception as exc: + if "internal_error" not in self._events: + return + + self._logger.info("Exception catched") + self._logger.error(exc) + + coros = self._events["internal_error"] + if len(coros) > 0: + coro = coros[0] + await coro(exc, coro.__name__) + + async def stop(self) -> None: + """ + Stop the event loop. + """ + self._logger.info("Stopping event loop") + + self._stop = True + + # indicate producer is done + await self._tasks.put(None) + + async with self._lock: + pass + + # consume the last tasks + while not self._tasks.empty(): + await self._consume() + + self._logger.info("Event loop stopped") + + async def start(self) -> None: + """ + Start the event loop. + """ + self._stop = False + + try: + async with self._lock: + self._logger.info("Starting event loop") + + while not self._stop: + await self._consume() + + self._logger.info("Event loop completed") + except asyncio.CancelledError: + await self.stop() diff --git a/altp/export.py b/altp/export.py new file mode 100644 index 0000000..4a55284 --- /dev/null +++ b/altp/export.py @@ -0,0 +1,118 @@ +""" +.. module:: export + :platform: Linux + :synopsis: module containing exporters definition + +.. moduleauthor:: Andrea Cervesato +""" +import os +import json +import logging +from ltp import LTPException + + +class ExporterError(LTPException): + """ + Raised when an error occurs during Exporter operations. + """ + + +class Exporter: + """ + A class used to export Results into report file. 
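+
+    Usage sketch (illustrative; ``results`` is a list of SuiteResults
+    produced by a completed run):
+
+        exporter = JSONExporter()
+        await exporter.save_file(results, "report.json")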
+ """ + + async def save_file(self, results: list, path: str) -> None: + """ + Save report into a file by taking information from SUT and testing + results. + :param results: list of suite results to export. + :type results: list(SuiteResults) + :param path: path of the file to save. + :type path: str + """ + raise NotImplementedError() + + +class JSONExporter(Exporter): + """ + Export testing results into a JSON file. + """ + + def __init__(self) -> None: + self._logger = logging.getLogger("ltp.json") + + # pylint: disable=too-many-locals + async def save_file(self, results: list, path: str) -> None: + if not results or len(results) == 0: + raise ValueError("results is empty") + + if not path: + raise ValueError("path is empty") + + if os.path.exists(path): + raise ExporterError(f"'{path}' already exists") + + self._logger.info("Exporting JSON report into %s", path) + + results_json = [] + + for result in results: + for test_report in result.tests_results: + status = "" + if test_report.return_code == 0: + status = "pass" + elif test_report.return_code == 2: + status = "brok" + elif test_report.return_code == 4: + status = "warn" + elif test_report.return_code == 32: + status = "conf" + else: + status = "fail" + + data_test = { + "test_fqn": test_report.test.name, + "status": status, + "test": { + "command": test_report.test.command, + "arguments": test_report.test.arguments, + "log": test_report.stdout, + "retval": [str(test_report.return_code)], + "duration": test_report.exec_time, + "failed": test_report.failed, + "passed": test_report.passed, + "broken": test_report.broken, + "skipped": test_report.skipped, + "warnings": test_report.warnings, + "result": status, + }, + } + + results_json.append(data_test) + + data = { + "results": results_json, + "stats": { + "runtime": results[0].exec_time, + "passed": results[0].passed, + "failed": results[0].failed, + "broken": results[0].broken, + "skipped": results[0].skipped, + "warnings": results[0].warnings + }, + "environment": { + "distribution": results[0].distro, + "distribution_version": results[0].distro_ver, + "kernel": results[0].kernel, + "arch": results[0].arch, + "cpu": results[0].cpu, + "swap": results[0].swap, + "RAM": results[0].ram, + }, + } + + with open(path, "w+", encoding='UTF-8') as outfile: + json.dump(data, outfile, indent=4) + + self._logger.info("Report exported") diff --git a/altp/host.py b/altp/host.py new file mode 100644 index 0000000..fc512c7 --- /dev/null +++ b/altp/host.py @@ -0,0 +1,223 @@ +""" +.. module:: host + :platform: Linux + :synopsis: module containing host SUT implementation + +.. moduleauthor:: Andrea Cervesato +""" +import os +import time +import signal +import asyncio +import logging +import contextlib +from asyncio.subprocess import Process +from altp.sut import SUT +from altp.sut import IOBuffer +from altp.sut import SUTError +from altp.sut import KernelPanicError + + +class HostSUT(SUT): + """ + SUT implementation using host's shell. 
+ """ + BUFFSIZE = 1024 + + def __init__(self) -> None: + self._logger = logging.getLogger("ltp.host") + self._fetch_lock = asyncio.Lock() + self._procs = [] + self._cwd = None + self._env = None + self._running = False + self._stop = False + + def setup(self, **kwargs: dict) -> None: + self._logger.info("Initialize SUT") + + self._cwd = kwargs.get('cwd', None) + self._env = kwargs.get('env', None) + + @property + def config_help(self) -> dict: + # cwd and env are given by default, so no options are needed + return {} + + @property + def name(self) -> str: + return "host" + + @property + def parallel_execution(self) -> bool: + return True + + @property + async def is_running(self) -> bool: + return self._running + + @staticmethod + async def _process_alive(proc: Process) -> bool: + """ + Return True if process is alive and running. + """ + with contextlib.suppress(asyncio.TimeoutError): + returncode = await asyncio.wait_for(proc.wait(), 1e-6) + if returncode is not None: + return False + + return True + + async def _kill_process(self, proc: Process) -> None: + """ + Kill a process and all its subprocesses. + """ + self._logger.info("Kill process %d", proc.pid) + + try: + os.killpg(os.getpgid(proc.pid), signal.SIGKILL) + except ProcessLookupError: + # process has been killed already + pass + + async def ping(self) -> float: + if not await self.is_running: + raise SUTError("SUT is not running") + + ret = await self.run_command("test .") + reply_t = ret["exec_time"] + + return reply_t + + async def communicate(self, iobuffer: IOBuffer = None) -> None: + if await self.is_running: + raise SUTError("SUT is running") + + self._running = True + + async def stop(self, iobuffer: IOBuffer = None) -> None: + if not await self.is_running: + return + + self._logger.info("Stopping SUT") + self._stop = True + + try: + if self._procs: + self._logger.info( + "Terminating %d process(es)", + len(self._procs)) + + for proc in self._procs: + await self._kill_process(proc) + + await asyncio.gather(*[ + proc.wait() for proc in self._procs + ]) + + self._logger.info("Process(es) terminated") + + if self._fetch_lock.locked(): + self._logging.info("Terminating data fetch") + + with await self._fetch_lock: + pass + finally: + self._stop = False + self._running = False + self._logger.info("SUT has stopped") + + async def run_command( + self, + command: str, + iobuffer: IOBuffer = None) -> dict: + if not command: + raise ValueError("command is empty") + + if not await self.is_running: + raise SUTError("SUT is not running") + + self._logger.info("Executing command: '%s'", command) + + ret = None + proc = None + t_end = 0 + stdout = "" + + try: + proc = await asyncio.create_subprocess_shell( + command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=self._cwd, + env=self._env, + preexec_fn=os.setsid) + + self._procs.append(proc) + + t_start = time.time() + panic = False + + while True: + line = await proc.stdout.read(self.BUFFSIZE) + sline = line.decode(encoding="utf-8", errors="ignore") + + if iobuffer: + await iobuffer.write(sline) + + stdout += sline + panic = "Kernel panic" in stdout[-2*self.BUFFSIZE:] + + if not await self._process_alive(proc): + break + + await proc.wait() + + t_end = time.time() - t_start + + if panic: + raise KernelPanicError() + finally: + if proc: + self._procs.remove(proc) + + await self._kill_process(proc) + await proc.wait() + + ret = { + "command": command, + "stdout": stdout, + "returncode": proc.returncode, + "exec_time": t_end, + } + + self._logger.debug("return 
data=%s", ret) + + self._logger.info("Command executed") + + return ret + + async def fetch_file(self, target_path: str) -> bytes: + if not target_path: + raise ValueError("target path is empty") + + if not os.path.isfile(target_path): + raise SUTError(f"'{target_path}' file doesn't exist") + + if not await self.is_running: + raise SUTError("SUT is not running") + + async with self._fetch_lock: + self._logger.info("Downloading '%s'", target_path) + + retdata = bytes() + + try: + with open(target_path, 'rb') as ftarget: + retdata = ftarget.read() + except IOError as err: + raise SUTError(err) + + self._logger.info("File copied") + + return retdata diff --git a/altp/ltx.py b/altp/ltx.py new file mode 100644 index 0000000..9ddde90 --- /dev/null +++ b/altp/ltx.py @@ -0,0 +1,892 @@ +""" +.. module:: ltx + :platform: Linux + :synopsis: module containing LTX communication class + +.. moduleauthor:: Andrea Cervesato +""" +import os +import time +import select +import asyncio +import logging +import importlib +import altp +from altp.sut import SUT +from altp.sut import SUTError +from altp.sut import IOBuffer + +try: + import msgpack +except ModuleNotFoundError: + pass + + +class LTXError(altp.LTPException): + """ + Raised when an error occurs during LTX execution. + """ + + +class Request: + """ + LTX request. + """ + ERROR = 0xff + VERSION = 0x00 + PING = 0x01 + PONG = 0x02 + GET_FILE = 0x03 + SET_FILE = 0x04 + ENV = 0x05 + CWD = 0x06 + EXEC = 0x07 + RESULT = 0x08 + LOG = 0x09 + DATA = 0xa0 + KILL = 0xa1 + MAX_SLOTS = 128 + ALL_SLOTS = 128 + MAX_ENVS = 16 + + def __init__(self, **kwargs: dict) -> None: + """ + :param args: request arguments + :type args: list + """ + self._logger = logging.getLogger("ltx.request") + self._request_id = None + self._args = kwargs.get("args", []) + self._completed = False + self._on_complete = None + + @property + def completed(self) -> bool: + """ + If True the request has been completed. + """ + return self._completed + + @property + def on_complete(self) -> callable: + """ + Get the `on_complete` event. + """ + return self._on_complete + + @on_complete.setter + def on_complete(self, callback: callable) -> None: + """ + Set the `on_complete` event. + """ + self._on_complete = callback + + def pack(self) -> bytes: + """ + Pack request to msgpack. + """ + msg = [] + msg.append(self._request_id) + msg.extend(self._args) + + data = msgpack.packb(msg) + + return data + + def _raise_complete(self, *args) -> None: + """ + Raise the complete callback with given data. + """ + if self._on_complete: + self._logger.info("Raising 'on_complete(self, %s)'", args) + self._on_complete(self, *args) + + self._completed = True + + def check_error(self, message: list) -> None: + """ + Check if given message is an error and eventually raise an error. + :param message: processed msgpack message + :type message: list + """ + if message[0] == self.ERROR: + raise LTXError(message[1]) + + def feed(self, message: list) -> None: + """ + Feed request queue with data and return when the request + has been completed. + :param message: processed msgpack message + :type message: list + """ + raise NotImplementedError() + + +def version() -> Request: + """ + Create VERSION request. + :returns: Request + """ + class _VersionRequest(Request): + """ + VERSION request. 
+ """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.VERSION + + def feed(self, message: list) -> None: + if self.completed: + return + + if message[0] == self._request_id: + ver = message[1] + self._logger.debug("version=%s", ver) + + self._raise_complete(ver) + self._completed = True + + return _VersionRequest() + + +def ping() -> Request: + """ + Create PING request. + :returns: Request + """ + class _PingRequest(Request): + """ + PING request. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._echoed = False + self._request_id = self.PING + + def feed(self, message: list) -> None: + if self.completed: + return + + if message[0] == self.PING: + self._logger.info("PING echoed back") + self._logger.info("Waiting for PONG") + self._echoed = True + elif message[0] == self.PONG: + if not self._echoed: + raise LTXError("PONG received without PING echo") + + end_t = message[1] + + self._logger.debug("end_t=%s", end_t) + + self._raise_complete(end_t) + self._completed = True + + return _PingRequest() + + +def env(slot_id: int, key: str, value: str) -> Request: + """ + Create ENV request. + :param slot_id: command table ID. Can be None if we want to apply the + same environment variables to all commands + :type slot_id: int + :param key: key of the environment variable + :type key: str + :param value: value of the environment variable + :type value: str + :returns: Request + """ + if not key: + raise ValueError("key is empty") + + if not value: + raise ValueError("value is empty") + + class _EnvRequest(Request): + """ + ENV request. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.ENV + self._slot_id = self._args[0] + + if self._slot_id and \ + (self._slot_id < 0 or self._slot_id > self.ALL_SLOTS): + raise ValueError(f"Out of bounds slot ID [0-{self.ALL_SLOTS}]") + + def feed(self, message: list) -> None: + if self.completed: + return + + if len(message) > 1 and message[1] != self._slot_id: + return + + if message[0] == self.ENV: + self._logger.info("ENV echoed back") + + self._raise_complete() + self._completed = True + + return _EnvRequest(args=[slot_id, key, value]) + + +def cwd(slot_id: int, path: str) -> Request: + """ + Create CWD request. + :param slot_id: command table ID. Can be None if we want to apply the + same environment variables to all commands + :type slot_id: int + :param path: current working path + :type path: str + :returns: Request + """ + if not path: + raise ValueError("path is empty") + + class _CwdRequest(Request): + """ + CWD request. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.CWD + self._slot_id = self._args[0] + + if self._slot_id and \ + (self._slot_id < 0 or self._slot_id > self.ALL_SLOTS): + raise ValueError(f"Out of bounds slot ID [0-{self.ALL_SLOTS}]") + + def feed(self, message: list) -> None: + if self.completed: + return + + if len(message) > 1 and message[1] != self._slot_id: + return + + if message[0] == self.CWD: + self._logger.info("CWD echoed back") + + self._raise_complete() + self._completed = True + + return _CwdRequest(args=[slot_id, path]) + + +def get_file(path: str) -> Request: + """ + Create GET_FILE request. + :param path: path of the file + :type path: str + :returns: Request + """ + if not path: + raise ValueError("path is empty") + + class _GetFileRequest(Request): + """ + GET_FILE request. 
+ """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.GET_FILE + self._data = [] + + def feed(self, message: list) -> None: + if self.completed: + return + + if message[0] == self.GET_FILE: + self._logger.info("GET_FILE echoed back") + self._completed = True + + self._raise_complete(b''.join(self._data)) + self._completed = True + elif message[0] == self.DATA: + self._logger.info("Data received") + self._data.append(message[1]) + + return _GetFileRequest(args=[path]) + + +def set_file(path: str, data: bytes) -> Request: + """ + Create SET_FILE request. + :param path: path of the file to write + :type path: str + :param data: data to write on file + :type data: bytes + :returns: Request + """ + if not path: + raise ValueError("path is empty") + + if not data: + raise ValueError("data is empty") + + class _SetFileRequest(Request): + """ + SET_FILE request. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.SET_FILE + + def feed(self, message: list) -> None: + if self.completed: + return + + if message[0] == self.SET_FILE and message[1] == self._args[0]: + self._logger.info("SETFILE echoed back") + + self._raise_complete() + self._completed = True + + return _SetFileRequest(args=[path, data]) + + +def execute(slot_id: int, + command: str, + stdout_callback: callable = None) -> Request: + """ + Create EXEC request. + :param slot_id: command table ID + :type slot_id: int + :param command: command to run + :type command: str + :param stdout_callback: called when new data arrives inside stdout + :type stdout_callback: callable + :returns: Request + """ + if not command: + raise ValueError("Command is empty") + + class _ExecRequest(Request): + """ + EXEC request. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._stdout_callback = kwargs.get("stdout_callback", None) + self._stdout = [] + self._echoed = False + self._request_id = self.EXEC + self._slot_id = self._args[0] + + if self._slot_id and \ + (self._slot_id < 0 or self._slot_id >= self.MAX_SLOTS): + raise ValueError(f"Out of bounds slot ID [0-{self.MAX_SLOTS}]") + + def feed(self, message: list) -> None: + if self.completed: + return + + if len(message) > 1 and message[1] != self._slot_id: + return + + if message[0] == self.EXEC: + self._logger.info("EXEC echoed back") + self._echoed = True + elif message[0] == self.LOG: + if not self._echoed: + raise LTXError("LOG received without EXEC echo") + + log = message[3] + + self._logger.info("LOG replied with data: %s", repr(log)) + self._stdout.append(log) + + if self._stdout_callback: + self._stdout_callback(log) + elif message[0] == self.RESULT: + if not self._echoed: + raise LTXError("RESULT received without EXEC echo") + + self._logger.info("RESULT received") + + stdout = "".join(self._stdout) + time_ns = message[2] + si_code = message[3] + si_status = message[4] + + self._logger.debug( + "time_ns=%s, si_code=%s, si_status=%s", + time_ns, + si_code, + si_status) + + self._raise_complete( + stdout, + time_ns, + si_code, + si_status) + + self._completed = True + + args = [slot_id, command] + + return _ExecRequest(stdout_callback=stdout_callback, args=args) + + +def kill(slot_id: int) -> Request: + """ + Create KILL request. + :param slot_id: command table ID + :type slot_id: int + :returns: Request + """ + class _KillRequest(Request): + """ + KILL request. 
+ """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + + self._request_id = self.KILL + self._slot_id = self._args[0] + + if self._slot_id and \ + (self._slot_id < 0 or self._slot_id >= self.MAX_SLOTS): + raise ValueError(f"Out of bounds slot ID [0-{self.MAX_SLOTS}]") + + def feed(self, message: list) -> None: + if self.completed: + return + + if len(message) > 1 and message[1] != self._slot_id: + return + + if message[0] == self.KILL: + self._logger.info("KILL echoed back") + + self._raise_complete() + self._completed = True + + return _KillRequest(args=[slot_id]) + + +class Session: + """ + This class communicates with LTX by processing given requests. + Typical usage is the following: + + with ltx.Session(stdin_fd, stdout_fd) as session: + # create requests + request1 = ltx.execute("echo 'hello world' > myfile") + request2 = ltx.get_file("myfile") + + # set the complete event + request1.on_complete = exec_complete_handler + request2.on_complete = get_file_complete_handler + + # send request + session.send([request1, request2]) + + # process exec_complete_handler/get_file_complete_handler output + ... + + """ + BUFFSIZE = 1 << 21 + + def __init__(self, stdin_fd: int, stdout_fd: int) -> None: + self._logger = logging.getLogger("ltx.session") + self._requests = [] + self._stop = False + self._connected = False + self._stdin_fd = stdin_fd + self._stdout_fd = stdout_fd + self._lock = asyncio.Lock() + + async def __aenter__(self) -> None: + """ + Connect to the LTX service. + """ + await self.connect() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: + """ + Disconnect from LTX service. + """ + await self.disconnect() + + def _blocking_read(self, size: int) -> bytes: + """ + Blocking I/O method to read from stdout. + """ + return os.read(self._stdout_fd, size) + + def _blocking_write(self, data: bytes) -> None: + """ + Blocking I/O method to write on stdin. + """ + towrite = len(data) + wrote = os.write(self._stdin_fd, data) + + if towrite != wrote: + raise altp.LTPException( + f"Wrote {wrote} bytes but expected {towrite}") + + def _feed_requests(self, data: list) -> None: + """ + Feed the list of requests with given data. + """ + # TODO: this method could be improved by using producer/consumer + # pattern and gathering multiple tasks according with the number + # of requests we have + pos = 0 + + while pos < len(self._requests): + request = self._requests[pos] + request.check_error(data) + request.feed(data) + + if request.completed: + del self._requests[pos] + else: + pos += 1 + + def _blocking_producer(self) -> None: + """ + Blocking I/O producer that reads messages from stdout. + """ + self._logger.info("Starting message polling") + self._connected = True + + poller = select.epoll() + poller.register(self._stdout_fd, select.EPOLLIN) + + # force utf-8 encoding by using raw=False + unpacker = msgpack.Unpacker(raw=False) + + while not self._stop: + events = poller.poll(0.1) + + for fdesc, _ in events: + if fdesc != self._stdout_fd: + continue + + data = self._blocking_read(self.BUFFSIZE) + if not data: + continue + + unpacker.feed(data) + + self._logger.debug("Unpacking bytes: %s", data) + + while True: + try: + msg = unpacker.unpack() + if msg: + self._logger.info("Received message: %s", msg) + self._feed_requests(msg) + except msgpack.OutOfData: + break + + self._connected = False + self._logger.info("Ending message polling") + + @property + def connected(self) -> bool: + """ + True if connected, False otherwise. 
+ """ + return self._connected + + async def connect(self) -> None: + """ + Connect to LTX. + """ + if self.connected: + return + + self._logger.info("Connecting to LTX") + + altp.to_thread(self._blocking_producer) + + while not self.connected: + await asyncio.sleep(0.01) + + self._logger.info("Connected") + + async def disconnect(self) -> None: + """ + Disconnect from LTX service. + """ + if not self.connected: + return + + self._logger.info("Disconnecting") + self._stop = True + + while self.connected: + await asyncio.sleep(0.01) + + self._logger.info("Disconnected") + + async def send(self, requests: list) -> None: + """ + Send requests to LTX service. The order is preserved during + requests execution. + :param requests: list of requests to send + :type requests: list + """ + if not requests: + raise ValueError("No requests given") + + if not self.connected: + raise LTXError("Client is not connected to LTX") + + with await self._lock: + self._logger.info("Sending requests") + self._requests.extend(requests) + + data = [req.pack() for req in requests] + tosend = b''.join(data) + + self._blocking_write(bytes(tosend)) + + async def gather(self, requests: list, timeout: float) -> dict: + """ + Gather multiple requests and wait for the response, then return all + rquests' replies inside a dictionary that maps requests with their + reply. Beware that this coroutine will override "on_complete" event for + all requests. + """ + req_len = len(requests) + replies = {} + + async def wait_for_completed(): + while len(replies) != req_len: + await asyncio.sleep(1e-3) + + def on_complete(req, *args): + replies[req] = args + + for req in requests: + req.on_complete = on_complete + + await asyncio.gather(*[ + self.send(requests), + asyncio.wait_for(wait_for_completed(), timeout=timeout), + ]) + + return replies + + +class LTXSUT(SUT): + """ + A SUT using LTX as executor. 
+ """ + + def __init__(self) -> None: + self._logger = logging.getLogger("altp.ltx") + self._release_lock = asyncio.Lock() + self._fetch_lock = asyncio.Lock() + self._stdout = '' + self._stdin = '' + self._stdout_fd = -1 + self._stdin_fd = -1 + self._tmpdir = None + self._env = None + self._cwd = None + self._ltx = None + self._slots = [] + + @property + def name(self) -> str: + return "ltx" + + @property + def config_help(self) -> dict: + return { + "stdin": "transport stdin file", + "stdout": "transport stdout file", + } + + def setup(self, **kwargs: dict) -> None: + if not importlib.util.find_spec('msgpack'): + raise SUTError("'msgpack' library is not available") + + self._logger.info("Initialize SUT") + + self._tmpdir = kwargs.get("tmpdir", None) + self._env = kwargs.get("env", None) + self._cwd = kwargs.get("cwd", None) + self._stdin = kwargs.get("stdin", None) + self._stdout = kwargs.get("stdout", None) + + @property + def parallel_execution(self) -> bool: + return True + + @property + async def is_running(self) -> bool: + if self._ltx: + return self._ltx.connected + + return False + + async def stop(self, iobuffer: IOBuffer = None) -> None: + if not await self.is_running: + return + + if self._slots: + requests = [] + for slot_id in self._slots: + requests.append(kill(slot_id)) + + if requests: + await self._ltx.gather(requests, timeout=360) + + while self._slots: + await asyncio.sleep(1e-2) + + await self._ltx.disconnect() + + while await self.is_running: + await asyncio.sleep(1e-2) + + if self._stdin_fd != -1: + os.close(self._stdin_fd) + + if self._stdout_fd != -1: + os.close(self._stdout_fd) + + async def _reserve_slot(self) -> int: + """ + Reserve an execution slot. + """ + async with self._release_lock: + slot_id = -1 + for i in range(0, Request.MAX_SLOTS): + if i not in self._slots: + slot_id = i + break + + if slot_id == -1: + raise SUTError("No execution slots available") + + self._slots.append(slot_id) + + return slot_id + + async def _release_slot(self, slot_id: int) -> None: + """ + Release an execution slot. 
+ """ + if slot_id in self._slots: + self._slots.remove(slot_id) + + async def ping(self) -> float: + if not await self.is_running: + raise SUTError("SUT is not running") + + req = ping() + start_t = time.monotonic() + replies = await self._ltx.gather([req], timeout=1) + + return (replies[req][0] * 1e-9) - start_t + + async def communicate(self, iobuffer: IOBuffer = None) -> None: + if await self.is_running: + raise SUTError("SUT is already running") + + self._stdin_fd = os.open(self._stdin, os.O_WRONLY) + self._stdout_fd = os.open(self._stdout, os.O_RDONLY) + + self._ltx = Session( + self._stdin_fd, + self._stdout_fd) + + await self._ltx.connect() + + requests = [] + requests.append(version()) + + if self._cwd: + requests.append(cwd(Request.ALL_SLOTS, self._cwd)) + + if self._env: + for key, value in self._env.items(): + requests.append(env(Request.ALL_SLOTS, key, value)) + + await self._ltx.gather(requests, timeout=10) + + async def run_command( + self, + command: str, + iobuffer: IOBuffer = None) -> dict: + if not command: + raise ValueError("command is empty") + + if not await self.is_running: + raise SUTError("SUT is not running") + + def _stdout_callback(data): + if iobuffer: + altp.to_thread(iobuffer.write(data)) + + self._logger.info("Running command: %s", repr(command)) + + slot_id = await self._reserve_slot() + ret = None + + try: + start_t = time.monotonic() + + req = execute( + slot_id, + command, + stdout_callback=_stdout_callback) + + replies = await self._ltx.gather([req], timeout=3600) + reply = replies[req] + + ret = { + "command": command, + "stdout": reply[0], + "exec_time": (reply[1] * 1e-9) - start_t, + "returncode": reply[3], + } + + self._logger.debug(ret) + finally: + await self._release_slot(slot_id) + + self._logger.info("Command executed") + + return ret + + async def fetch_file(self, target_path: str) -> bytes: + if not target_path: + raise ValueError("target path is empty") + + if not await self.is_running: + raise SUTError("SSH connection is not present") + + if not os.path.isfile(target_path): + raise SUTError("target path doesn't exist") + + with await self._fetch_lock: + req = get_file(target_path) + replies = await self._ltx.gather([req], timeout=3600) + reply = replies[req] + + return reply[0] diff --git a/altp/main.py b/altp/main.py new file mode 100644 index 0000000..fe0ef78 --- /dev/null +++ b/altp/main.py @@ -0,0 +1,363 @@ +""" +.. module:: main + :platform: Linux + :synopsis: main script + +.. moduleauthor:: Andrea Cervesato +""" +import os +import re +import asyncio +import inspect +import argparse +import importlib +import importlib.util +import altp +import altp.data +import altp.events +from altp import LTPException +from altp.sut import SUT +from altp.ui import SimpleUserInterface +from altp.ui import VerboseUserInterface +from altp.ui import ParallelUserInterface +from altp.session import Session + +# runtime loaded SUT(s) +LOADED_SUT = [] + +# return codes of the application +RC_OK = 0 +RC_ERROR = 1 +RC_INTERRUPT = 130 + + +def _from_params_to_config(params: list) -> dict: + """ + Return a configuration as dictionary according with input parameters + given to the commandline option. 
+ """ + config = {} + for param in params: + if '=' not in param: + raise argparse.ArgumentTypeError( + f"Missing '=' assignment in '{param}' parameter") + + data = param.split('=', 1) + key = data[0] + value = data[1] + + if not key: + raise argparse.ArgumentTypeError( + f"Empty key for '{param}' parameter") + + if not key: + raise argparse.ArgumentTypeError( + f"Empty value for '{param}' parameter") + + config[key] = value + + return config + + +def _sut_config(value: str) -> dict: + """ + Return a SUT configuration according with input string. + """ + if value == "help": + msg = "--sut option supports the following syntax:\n" + msg += "\n\t:=:=:..\n" + msg += "\nSupported SUT: | " + + for sut in LOADED_SUT: + msg += f"{sut.name} | " + + msg += '\n' + + for sut in LOADED_SUT: + if not sut.config_help: + msg += f"\n{sut.name} has not configuration\n" + else: + msg += f"\n{sut.name} configuration:\n" + for opt, desc in sut.config_help.items(): + msg += f"\t{opt}: {desc}\n" + + return {"help": msg} + + if not value: + raise argparse.ArgumentTypeError("SUT parameters can't be empty") + + params = value.split(':') + name = params[0] + + config = _from_params_to_config(params[1:]) + config['name'] = name + + return config + + +def _env_config(value: str) -> dict: + """ + Return an environment configuration dictionary, parsing strings such as + "key=value:key=value:key=value". + """ + if not value: + return None + + params = value.split(':') + config = _from_params_to_config(params) + + return config + + +def _discover_sut(folder: str) -> list: + """ + Discover new SUT implementations inside a specific folder. + """ + LOADED_SUT.clear() + + for myfile in os.listdir(folder): + if not myfile.endswith('.py'): + continue + + path = os.path.join(folder, myfile) + if not os.path.isfile(path): + continue + + spec = importlib.util.spec_from_file_location('sut', path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + members = inspect.getmembers(module, inspect.isclass) + for _, klass in members: + if klass.__module__ != module.__name__ or \ + klass is SUT or \ + klass in LOADED_SUT: + continue + + if issubclass(klass, SUT): + LOADED_SUT.append(klass()) + + if len(LOADED_SUT) > 0: + LOADED_SUT.sort(key=lambda x: x.name) + + +def _get_sut(sut_name: str) -> SUT: + """ + Return the SUT with name `sut_name`. + """ + sut = None + for mysut in LOADED_SUT: + if mysut.name == sut_name: + sut = mysut + break + + return sut + + +def _get_skip_tests(skip_tests: str, skip_file: str) -> str: + """ + Return the skipped tests regexp. + """ + skip = "" + + if skip_file: + lines = None + with open(skip_file, 'r', encoding="utf-8") as skip_file_data: + lines = skip_file_data.readlines() + + toskip = [ + line.rstrip() + for line in lines + if not re.search(r'^\s+#.*', line) + ] + skip = '|'.join(toskip) + + if skip_tests: + if skip_file: + skip += "|" + + skip += skip_tests + + return skip + + +def _start_session( + args: argparse.Namespace, + parser: argparse.ArgumentParser) -> None: + """ + Start the LTP session. 
+ """ + skip_tests = _get_skip_tests(args.skip_tests, args.skip_file) + if skip_tests: + try: + re.compile(skip_tests) + except re.error: + parser.error(f"'{skip_tests}' is not a valid regular expression") + + # get the current SUT communication object + sut_name = args.sut["name"] + sut = _get_sut(sut_name) + if not sut: + parser.error(f"'{sut_name}' is not an available SUT") + + # create session object + session = Session( + sut=sut, + sut_config=args.sut, + ltpdir=args.ltp_dir, + tmpdir=args.tmp_dir, + no_colors=args.no_colors, + exec_timeout=args.exec_timeout, + suite_timeout=args.suite_timeout, + skip_tests=skip_tests, + workers=args.workers, + env=args.env, + force_parallel=args.force_parallel) + + # initialize user interface + if args.workers > 1: + ParallelUserInterface(args.no_colors) + else: + if args.verbose: + VerboseUserInterface(args.no_colors) + else: + SimpleUserInterface(args.no_colors) + + # start event loop + exit_code = RC_OK + + async def session_run() -> None: + """ + Run session then stop events handler. + """ + await session.run( + command=args.run_command, + suites=args.run_suite, + report_path=args.json_report + ) + await altp.events.stop() + + try: + altp.run(asyncio.gather(*[ + altp.create_task(altp.events.start()), + session_run() + ])) + except KeyboardInterrupt: + exit_code = RC_INTERRUPT + except LTPException: + exit_code = RC_ERROR + + parser.exit(exit_code) + + +def run(cmd_args: list = None) -> None: + """ + Entry point of the application. + """ + _discover_sut(os.path.dirname(os.path.realpath(__file__))) + + parser = argparse.ArgumentParser(description='LTP next-gen runner') + parser.add_argument( + "--verbose", + "-v", + action="store_true", + help="Verbose mode") + parser.add_argument( + "--no-colors", + "-n", + action="store_true", + help="If defined, no colors are shown") + parser.add_argument( + "--ltp-dir", + "-l", + type=str, + default="/opt/ltp", + help="LTP install directory") + parser.add_argument( + "--tmp-dir", + "-d", + type=str, + default="/tmp", + help="LTP temporary directory") + parser.add_argument( + "--skip-tests", + "-i", + type=str, + help="Skip specific tests") + parser.add_argument( + "--skip-file", + "-I", + type=str, + help="Skip specific tests using a skip file (newline separated item)") + parser.add_argument( + "--suite-timeout", + "-T", + type=int, + default=3600, + help="Timeout before stopping the suite") + parser.add_argument( + "--exec-timeout", + "-t", + type=int, + default=3600, + help="Timeout before stopping a single execution") + parser.add_argument( + "--run-suite", + "-r", + nargs="*", + help="Suites to run") + parser.add_argument( + "--run-command", + "-c", + help="Command to run") + parser.add_argument( + "--sut", + "-s", + default="host", + type=_sut_config, + help="System Under Test parameters, for help see -s help") + parser.add_argument( + "--json-report", + "-j", + type=str, + help="JSON output report") + parser.add_argument( + "--workers", + "-w", + type=int, + default=1, + help="Number of workers to execute tests in parallel") + parser.add_argument( + "--force-parallel", + "-f", + action="store_true", + help="Force parallelization execution of all tests") + parser.add_argument( + "--env", + "-e", + type=_env_config, + help="List of key=value environment values separated by ':'") + + args = parser.parse_args(cmd_args) + + if args.sut and "help" in args.sut: + print(args.sut["help"]) + parser.exit(RC_OK) + + if args.json_report and os.path.exists(args.json_report): + parser.error(f"JSON report file already 
exists: {args.json_report}") + + if not args.run_suite and not args.run_command: + parser.error("--run-suite/--run-cmd are required") + + if args.skip_file and not os.path.isfile(args.skip_file): + parser.error(f"'{args.skip_file}' skip file doesn't exist") + + if args.tmp_dir and not os.path.isdir(args.tmp_dir): + parser.error(f"'{args.tmp_dir}' temporary folder doesn't exist") + + _start_session(args, parser) + + +if __name__ == "__main__": + run() diff --git a/altp/qemu.py b/altp/qemu.py new file mode 100644 index 0000000..3c9dee5 --- /dev/null +++ b/altp/qemu.py @@ -0,0 +1,541 @@ +""" +.. module:: qemu + :platform: Linux + :synopsis: module containing qemu SUT implementation + +.. moduleauthor:: Andrea Cervesato +""" +import os +import re +import time +import signal +import string +import shutil +import secrets +import logging +import asyncio +import contextlib +from altp.sut import SUT +from altp.sut import IOBuffer +from altp.sut import SUTError +from altp.sut import KernelPanicError + + +# pylint: disable=too-many-instance-attributes +class QemuSUT(SUT): + """ + Qemu SUT spawn a new VM using qemu and execute commands inside it. + This SUT implementation can be used to run commands inside + a protected, virtualized environment. + """ + + def __init__(self) -> None: + self._logger = logging.getLogger("ltp.qemu") + self._comm_lock = asyncio.Lock() + self._cmd_lock = asyncio.Lock() + self._fetch_lock = asyncio.Lock() + self._tmpdir = None + self._env = None + self._cwd = None + self._proc = None + self._stop = False + self._logged_in = False + self._last_pos = 0 + self._image = None + self._image_overlay = None + self._ro_image = None + self._password = None + self._ram = None + self._smp = None + self._virtfs = None + self._serial_type = None + self._qemu_cmd = None + self._opts = None + self._last_read = "" + self._panic = False + + @staticmethod + def _generate_string(length: int = 10) -> str: + """ + Generate a random string of the given length. + """ + out = ''.join(secrets.choice(string.ascii_letters + string.digits) + for _ in range(length)) + return out + + def _get_transport(self) -> str: + """ + Return a couple of transport_dev and transport_file used by + qemu instance for transport configuration. + """ + pid = os.getpid() + transport_file = os.path.join(self._tmpdir, f"transport-{pid}") + transport_dev = "" + + if self._serial_type == "isa": + transport_dev = "/dev/ttyS1" + elif self._serial_type == "virtio": + transport_dev = "/dev/vport1p1" + + return transport_dev, transport_file + + def _get_command(self) -> str: + """ + Return the full qemu command to execute. 
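+
+        The generated command looks roughly like the following
+        (illustrative; the exact options depend on the configuration):
+
+            qemu-system-x86_64 -enable-kvm -display none -m 2G -smp 2 \
+                -device virtio-rng-pci \
+                -drive if=virtio,cache=unsafe,file=image.qcow2 ...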
+ """ + pid = os.getpid() + tty_log = os.path.join(self._tmpdir, f"ttyS0-{pid}.log") + + image = self._image + if self._image_overlay: + shutil.copyfile( + self._image, + self._image_overlay) + image = self._image_overlay + + params = [] + params.append("-enable-kvm") + params.append("-display none") + params.append(f"-m {self._ram}") + params.append(f"-smp {self._smp}") + params.append("-device virtio-rng-pci") + params.append(f"-drive if=virtio,cache=unsafe,file={image}") + params.append(f"-chardev stdio,id=tty,logfile={tty_log}") + + if self._serial_type == "isa": + params.append("-serial chardev:tty") + params.append("-serial chardev:transport") + elif self._serial_type == "virtio": + params.append("-device virtio-serial") + params.append("-device virtconsole,chardev=tty") + params.append("-device virtserialport,chardev=transport") + else: + raise NotImplementedError( + f"Unsupported serial device type {self._serial_type}") + + _, transport_file = self._get_transport() + params.append(f"-chardev file,id=transport,path={transport_file}") + + if self._ro_image: + params.append( + "-drive read-only," + "if=virtio," + "cache=unsafe," + f"file={self._ro_image}") + + if self._virtfs: + params.append( + "-virtfs local," + f"path={self._virtfs}," + "mount_tag=host0," + "security_model=mapped-xattr," + "readonly=on") + + if self._opts: + params.append(self._opts) + + cmd = f"{self._qemu_cmd} {' '.join(params)}" + + return cmd + + def setup(self, **kwargs: dict) -> None: + self._logger.info("Initialize SUT") + + self._env = kwargs.get("env", None) + self._cwd = kwargs.get("cwd", None) + self._tmpdir = kwargs.get("tmpdir", None) + self._image = kwargs.get("image", None) + self._image_overlay = kwargs.get("image_overlay", None) + self._ro_image = kwargs.get("ro_image", None) + self._password = kwargs.get("password", "root") + self._ram = kwargs.get("ram", "2G") + self._smp = kwargs.get("smp", "2") + self._virtfs = kwargs.get("virtfs", None) + self._serial_type = kwargs.get("serial", "isa") + self._opts = kwargs.get("options", None) + + system = kwargs.get("system", "x86_64") + self._qemu_cmd = f"qemu-system-{system}" + + if not self._tmpdir or not os.path.isdir(self._tmpdir): + raise SUTError( + f"Temporary directory doesn't exist: {self._tmpdir}") + + if not self._image or not os.path.isfile(self._image): + raise SUTError( + f"Image location doesn't exist: {self._image}") + + if self._ro_image and not os.path.isfile(self._ro_image): + raise SUTError( + f"Read-only image location doesn't exist: {self._ro_image}") + + if not self._ram: + raise SUTError("RAM is not defined") + + if not self._smp: + raise SUTError("CPU is not defined") + + if self._virtfs and not os.path.isdir(self._virtfs): + raise SUTError( + f"Virtual FS directory doesn't exist: {self._virtfs}") + + if self._serial_type not in ["isa", "virtio"]: + raise SUTError("Serial protocol must be isa or virtio") + + @property + def config_help(self) -> dict: + return { + "image": "qcow2 image location", + "image_overlay": "image_overlay: image copy location", + "password": "root password (default: root)", + "system": "system architecture (default: x86_64)", + "ram": "RAM of the VM (default: 2G)", + "smp": "number of CPUs (default: 2)", + "serial": "type of serial protocol. 
isa|virtio (default: isa)", + "virtfs": "directory to mount inside VM", + "ro_image": "path of the image that will exposed as read only", + "options": "user defined options", + } + + @property + def name(self) -> str: + return "qemu" + + @property + def parallel_execution(self) -> bool: + return False + + @property + async def is_running(self) -> bool: + if self._proc is None: + return False + + with contextlib.suppress(asyncio.TimeoutError): + await asyncio.wait_for(self._proc.wait(), 1e-6) + + return self._proc.returncode is None + + async def ping(self) -> float: + if not await self.is_running: + raise SUTError("SUT is not running") + + _, _, exec_time = await self._exec("test .", None) + + return exec_time + + async def _read_stdout(self, size: int, iobuffer: IOBuffer) -> str: + """ + Read data from stdout. + """ + data = await self._proc.stdout.read(size) + rdata = data.decode(encoding="utf-8", errors="replace") + + # write on stdout buffers + if iobuffer: + await iobuffer.write(rdata) + + return rdata + + async def _write_stdin(self, data: str) -> None: + """ + Write data on stdin. + """ + if not await self.is_running: + return + + wdata = data.encode(encoding="utf-8") + try: + self._proc.stdin.write(wdata) + except BrokenPipeError as err: + if not self._stop: + raise SUTError(err) + + async def _wait_for(self, message: str, iobuffer: IOBuffer) -> str: + """ + Wait a string from stdout. + """ + if not await self.is_running: + return None + + stdout = self._last_read + self._panic = False + + while True: + if self._stop or self._panic: + break + + if not await self.is_running: + break + + message_pos = stdout.find(message) + if message_pos != -1: + self._last_read = stdout[message_pos + len(message):] + break + + data = await self._read_stdout(1024, iobuffer) + if data: + stdout += data + + if "Kernel panic" in stdout: + # give time to panic message coming out from serial + await asyncio.sleep(2) + + # read as much data as possible from stdout + data = await self._read_stdout(1024 * 1024, iobuffer) + stdout += data + + self._panic = True + + if self._panic: + # if we ended before raising Kernel panic, we raise the exception + raise KernelPanicError() + + return stdout + + async def _wait_lockers(self) -> None: + """ + Wait for SUT lockers to be released. + """ + async with self._comm_lock: + pass + + async with self._cmd_lock: + pass + + async with self._fetch_lock: + pass + + async def _exec(self, command: str, iobuffer: IOBuffer) -> set: + """ + Execute a command and return set(stdout, retcode, exec_time). 
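# A standalone sketch of the return-code handshake that _exec() implements
# in the body that follows: the command is chained with `echo $?-<marker>`
# and the reply is parsed with a named group; "retcode" is the group name
# that match.group("retcode") below relies on. The same reply stream is the
# one _wait_for() above scans for "Kernel panic". Marker and output values
# here are made up.
import re

def parse_reply(reply: str, marker: str) -> tuple:
    match = re.search(rf"(?P<retcode>\d+)-{marker}", reply)
    if not match:
        raise ValueError(f"can't read return code from {reply!r}")
    return reply[:match.start()], int(match.group("retcode"))

stdout, retcode = parse_reply("hello world\n0-kD93hf2Lq1", "kD93hf2Lq1")
print(repr(stdout), retcode)   # 'hello world\n' 0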
+ """ + self._logger.debug("Execute command: %s", repr(command)) + + code = self._generate_string() + + msg = f"echo $?-{code}\n" + if command and command.rstrip(): + msg = f"{command};" + msg + + self._logger.info("Sending %s", repr(msg)) + + t_start = time.time() + + await self._write_stdin(f"{command}; echo $?-{code}\n") + stdout = await self._wait_for(code, iobuffer) + + exec_time = time.time() - t_start + + retcode = -1 + + if not self._stop: + if stdout and stdout.rstrip(): + match = re.search(f"(?P\\d+)-{code}", stdout) + if not match and not self._stop: + raise SUTError( + f"Can't read return code from reply {repr(stdout)}") + + # first character is '\n' + stdout = stdout[1:match.start()] + + try: + retcode = int(match.group("retcode")) + except TypeError: + pass + + self._logger.debug( + "stdout=%s, retcode=%d, exec_time=%d", + repr(stdout), + retcode, + exec_time) + + return stdout, retcode, exec_time + + async def stop(self, iobuffer: IOBuffer = None) -> None: + if not await self.is_running: + return + + self._logger.info("Shutting down virtual machine") + self._stop = True + + try: + if not self._panic: + # stop command first + if self._cmd_lock.locked() or self._fetch_lock.locked(): + self._logger.info("Stop running command") + + # send interrupt character (equivalent of CTRL+C) + await self._write_stdin('\x03') + await self._wait_lockers() + + # logged in -> poweroff + if self._logged_in: + self._logger.info("Poweroff virtual machine") + + await self._write_stdin("poweroff\n") + + while await self.is_running: + await self._read_stdout(1024, iobuffer) + + await self._proc.wait() + except asyncio.TimeoutError: + pass + finally: + # still running -> stop process + if await self.is_running: + self._logger.info("Killing virtual machine") + + self._proc.kill() + + await self._wait_lockers() + await self._proc.wait() + + self._stop = False + + self._logger.info("Qemu process ended") + + async def communicate(self, iobuffer: IOBuffer = None) -> None: + if not shutil.which(self._qemu_cmd): + raise SUTError(f"Command not found: {self._qemu_cmd}") + + if await self.is_running: + raise SUTError("Virtual machine is already running") + + error = None + + async with self._comm_lock: + self._logged_in = False + + cmd = self._get_command() + + self._logger.info("Starting virtual machine") + self._logger.debug(cmd) + + # pylint: disable=consider-using-with + self._proc = await asyncio.create_subprocess_shell( + cmd, + stdout=asyncio.subprocess.PIPE, + stdin=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.STDOUT) + + try: + await self._wait_for("login:", iobuffer) + await self._write_stdin("root\n") + + if self._password: + await self._wait_for("Password:", iobuffer) + await self._write_stdin(f"{self._password}\n") + + await asyncio.sleep(0.2) + + await self._wait_for("#", iobuffer) + await asyncio.sleep(0.2) + + await self._write_stdin("stty -echo; stty cols 1024\n") + await self._wait_for("#", None) + + _, retcode, _ = await self._exec("export PS1=''", None) + if retcode != 0: + raise SUTError("Can't setup prompt string") + + if self._virtfs: + _, retcode, _ = await self._exec( + "mount -t 9p -o trans=virtio host0 /mnt", None) + if retcode != 0: + raise SUTError("Failed to mount virtfs") + + if self._cwd: + _, retcode, _ = await self._exec(f"cd {self._cwd}", None) + if retcode != 0: + raise SUTError("Can't setup current working directory") + + if self._env: + for key, value in self._env.items(): + _, retcode, _ = await self._exec( + f"export {key}={value}", None) + if retcode != 0: + 
raise SUTError(f"Can't setup env {key}={value}") + + self._logged_in = True + + self._logger.info("Virtual machine started") + except SUTError as err: + error = err + + if not self._stop and error: + # this can happen when shell is available but + # something happened during commands execution + await self.stop(iobuffer=iobuffer) + + raise SUTError(err) + + async def run_command( + self, + command: str, + iobuffer: IOBuffer = None) -> dict: + if not command: + raise ValueError("command is empty") + + if not await self.is_running: + raise SUTError("Virtual machine is not running") + + async with self._cmd_lock: + self._logger.info("Running command: %s", command) + + stdout, retcode, exec_time = await self._exec( + f"{command}", + iobuffer) + + ret = { + "command": command, + "returncode": retcode, + "stdout": stdout, + "exec_time": exec_time, + } + + self._logger.debug(ret) + + return ret + + async def fetch_file(self, target_path: str) -> bytes: + if not target_path: + raise ValueError("target path is empty") + + if not await self.is_running: + raise SUTError("Virtual machine is not running") + + async with self._fetch_lock: + self._logger.info("Downloading %s", target_path) + + _, retcode, _ = await self._exec(f'test -f {target_path}', None) + if retcode != 0: + raise SUTError(f"'{target_path}' doesn't exist") + + transport_dev, transport_path = self._get_transport() + + stdout, retcode, _ = await self._exec( + f"cat {target_path} > {transport_dev}", None) + + if self._stop: + return bytes() + + if retcode not in [0, signal.SIGHUP, signal.SIGKILL]: + raise SUTError( + f"Can't send file to {transport_dev}: {stdout}") + + # read back data and send it to the local file path + file_size = os.path.getsize(transport_path) + + retdata = bytes() + + with open(transport_path, "rb") as transport: + while not self._stop and self._last_pos < file_size: + transport.seek(self._last_pos) + data = transport.read(4096) + retdata += data + + self._last_pos = transport.tell() + + self._logger.info("File downloaded") + + return retdata diff --git a/altp/results.py b/altp/results.py new file mode 100644 index 0000000..f33c9d1 --- /dev/null +++ b/altp/results.py @@ -0,0 +1,312 @@ +""" +.. module:: data + :platform: Linux + :synopsis: module containing suites data definition + +.. moduleauthor:: Andrea Cervesato +""" +from altp.data import Test +from altp.data import Suite + + +class Results: + """ + Base class for results. + """ + + @property + def exec_time(self) -> float: + """ + Execution time. + :returns: float + """ + raise NotImplementedError() + + @property + def failed(self) -> int: + """ + Number of TFAIL. + :returns: int + """ + raise NotImplementedError() + + @property + def passed(self) -> int: + """ + Number of TPASS. + :returns: int + """ + raise NotImplementedError() + + @property + def broken(self) -> int: + """ + Number of TBROK. + :returns: int + """ + raise NotImplementedError() + + @property + def skipped(self) -> int: + """ + Number of TSKIP. + :returns: int + """ + raise NotImplementedError() + + @property + def warnings(self) -> int: + """ + Number of TWARN. + :returns: int + """ + raise NotImplementedError() + + +class TestResults(Results): + """ + Test results definition. 
+ """ + + def __init__(self, **kwargs) -> None: + """ + :param test: Test object declaration + :type test: Test + :param failed: number of TFAIL + :type failed: int + :param passed: number of TPASS + :type passed: int + :param broken: number of TBROK + :type broken: int + :param skipped: number of TSKIP + :type skipped: int + :param warnings: number of TWARN + :type warnings: int + :param exec_time: time for test's execution + :type exec_time: float + :param retcode: return code of the executed test + :type retcode: int + :param stdout: stdout of the test + :type stdout: str + """ + self._test = kwargs.get("test", None) + self._failed = max(kwargs.get("failed", 0), 0) + self._passed = max(kwargs.get("passed", 0), 0) + self._broken = max(kwargs.get("broken", 0), 0) + self._skipped = max(kwargs.get("skipped", 0), 0) + self._warns = max(kwargs.get("warnings", 0), 0) + self._exec_t = max(kwargs.get("exec_time", 0.0), 0.0) + self._retcode = kwargs.get("retcode", 0) + self._stdout = kwargs.get("stdout", None) + + if not self._test: + raise ValueError("Empty test object") + + def __repr__(self) -> str: + return \ + f"test: '{self._test}', " \ + f"failed: '{self._failed}', " \ + f"passed: {self._passed}, " \ + f"broken: {self._broken}, " \ + f"skipped: {self._skipped}, " \ + f"warnins: {self._warns}, " \ + f"exec_time: {self._exec_t}, " \ + f"retcode: {self._retcode}, " \ + f"stdout: {repr(self._stdout)}" + + @property + def test(self) -> Test: + """ + Test object declaration. + :returns: Test + """ + return self._test + + @property + def return_code(self) -> int: + """ + Return code after execution. + :returns: int + """ + return self._retcode + + @property + def stdout(self) -> str: + """ + Return the ending stdout. + :returns: str + """ + return self._stdout + + @property + def exec_time(self) -> float: + return self._exec_t + + @property + def failed(self) -> int: + return self._failed + + @property + def passed(self) -> int: + return self._passed + + @property + def broken(self) -> int: + return self._broken + + @property + def skipped(self) -> int: + return self._skipped + + @property + def warnings(self) -> int: + return self._warns + + +class SuiteResults(Results): + """ + Testing suite results definition. 
+ """ + + def __init__(self, **kwargs) -> None: + """ + :param suite: Test object declaration + :type suite: Suite + :param tests: List of the tests results + :type tests: list(TestResults) + :param exec_time: execution time + :type exec_time: float + :param distro: distribution name + :type distro: str + :param distro_ver: distribution version + :type distro_ver: str + :param kernel: kernel version + :type kernel: str + :param arch: OS architecture + :type arch: str + """ + self._suite = kwargs.get("suite", None) + self._tests = kwargs.get("tests", []) + self._exec_t = max(kwargs.get("exec_time", 0.0), 0.0) + self._distro = kwargs.get("distro", None) + self._distro_ver = kwargs.get("distro_ver", None) + self._kernel = kwargs.get("kernel", None) + self._arch = kwargs.get("arch", None) + self._cpu = kwargs.get("cpu", None) + self._swap = kwargs.get("swap", None) + self._ram = kwargs.get("ram", None) + + if not self._suite: + raise ValueError("Empty suite object") + + def __repr__(self) -> str: + return \ + f"suite: '{self._suite}', " \ + f"tests: '{self._tests}', " \ + f"exec_time: {self._exec_t}, " \ + f"distro: {self._distro}, " \ + f"distro_ver: {self._distro_ver}, " \ + f"kernel: {self._kernel}, " \ + f"arch: {self._arch}, " \ + f"cpu: {self._cpu}, " \ + f"swap: {self._swap}, " \ + f"ram: {self._ram}" + + @property + def suite(self) -> Suite: + """ + Suite object declaration. + :returns: Suite + """ + return self._suite + + @property + def tests_results(self) -> list: + """ + Results of all tests. + :returns: list(TestResults) + """ + return self._tests + + def _get_result(self, attr: str) -> int: + """ + Return the total number of results. + """ + res = 0 + for test in self._tests: + res += getattr(test, attr) + + return res + + @property + def distro(self) -> str: + """ + Distribution name. + """ + return self._distro + + @property + def distro_ver(self) -> str: + """ + Distribution version. + """ + return self._distro_ver + + @property + def kernel(self) -> str: + """ + Kernel version. + """ + return self._kernel + + @property + def arch(self) -> str: + """ + Operating system architecture. + """ + return self._arch + + @property + def cpu(self) -> str: + """ + Current CPU type. + """ + return self._cpu + + @property + def swap(self) -> str: + """ + Current swap memory occupation. + """ + return self._swap + + @property + def ram(self) -> str: + """ + Current RAM occupation. + """ + return self._ram + + @property + def exec_time(self) -> float: + return self._exec_t + + @property + def failed(self) -> int: + return self._get_result("failed") + + @property + def passed(self) -> int: + return self._get_result("passed") + + @property + def broken(self) -> int: + return self._get_result("broken") + + @property + def skipped(self) -> int: + return self._get_result("skipped") + + @property + def warnings(self) -> int: + return self._get_result("warnings") diff --git a/altp/scheduler.py b/altp/scheduler.py new file mode 100644 index 0000000..bf08868 --- /dev/null +++ b/altp/scheduler.py @@ -0,0 +1,668 @@ +""" +.. module:: runner + :platform: Linux + :synopsis: module containing Runner definition and implementation. + +.. 
moduleauthor:: Andrea Cervesato +""" +import os +import re +import sys +import time +import asyncio +import logging +import altp +import altp.data +from altp import LTPException +from altp.sut import SUT +from altp.sut import IOBuffer +from altp.sut import KernelPanicError +from altp.data import Test +from altp.data import Suite +from altp.results import TestResults +from altp.results import SuiteResults + + +class KernelTainedError(LTPException): + """ + Raised when kernel is tainted. + """ + + +class KernelTimeoutError(LTPException): + """ + Raised when kernel is not replying anymore. + """ + + +class Scheduler: + """ + Schedule jobs to run on target. + """ + + @property + def results(self) -> list: + """ + Current results. It's reset before every `schedule` call and + it's populated when a job completes the execution. + :returns: list(Results) + """ + raise NotImplementedError() + + async def stop(self) -> None: + """ + Stop all running jobs. + """ + raise NotImplementedError() + + async def schedule(self, jobs: list) -> None: + """ + Schedule and execute a list of jobs. + :param jobs: object containing jobs definition + :type jobs: list(object) + """ + raise NotImplementedError() + + +class RedirectTestStdout(IOBuffer): + """ + Redirect test stdout data to UI events and save it. + """ + + def __init__(self, test: Test) -> None: + self.stdout = "" + self._test = test + + async def write(self, data: str) -> None: + await altp.events.fire("test_stdout", self._test, data) + self.stdout += data + + +class RedirectSUTStdout(IOBuffer): + """ + Redirect SUT stdout data to UI events. + """ + + def __init__(self, sut: SUT) -> None: + self._sut = sut + + async def write(self, data: str) -> None: + await altp.events.fire("sut_stdout", self._sut.name, data) + + +class TestScheduler(Scheduler): + """ + Schedule and run LTP tests, taking into account status of the kernel + during their execution, as well as tests timeout. + """ + STATUS_OK = 0 + TEST_TIMEOUT = 1 + KERNEL_PANIC = 2 + KERNEL_TAINED = 3 + KERNEL_TIMEOUT = 4 + + def __init__(self, **kwargs: dict) -> None: + """ + :param sut: object to communicate with SUT + :type sut: SUT + :param timeout: timeout for tests execution + :type timeout: float + :param max_workers: maximum number of workers to schedule jobs + :type max_workers: int + :param force_parallel: Force parallel execution of all tests + :type force_parallel: bool + """ + self._logger = logging.getLogger("ltp.test_scheduler") + self._sut = kwargs.get("sut", None) + self._timeout = max(kwargs.get("timeout", 3600.0), 0.0) + self._max_workers = kwargs.get("max_workers", 1) + self._force_parallel = kwargs.get("force_parallel", False) + self._lock = asyncio.Lock() + self._results = [] + self._stop = False + self._tasks = [] + + if not self._sut: + raise ValueError("SUT object is empty") + + @ staticmethod + def _command_from_test(test: Test) -> str: + """ + Returns a command from test. + """ + cmd = test.command + if len(test.arguments) > 0: + cmd += ' ' + cmd += ' '.join(test.arguments) + + return cmd + + @ staticmethod + def _get_test_results( + test: Test, + test_data: dict, + error: bool = False) -> TestResults: + """ + Return test results accoding with runner output and Test definition. 
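# A minimal IOBuffer in the spirit of RedirectTestStdout above: it only
# accumulates whatever the SUT writes, without firing UI events, which is
# the kind of buffer a unit test can use. This sketch is not part of the
# patch; altp.sut.IOBuffer is the base class it introduces.
import asyncio
from altp.sut import IOBuffer

class CollectStdout(IOBuffer):
    def __init__(self) -> None:
        self.stdout = ""

    async def write(self, data: str) -> None:
        self.stdout += data

async def demo() -> None:
    buf = CollectStdout()
    await buf.write("TPASS: test works\n")
    print(buf.stdout, end="")

asyncio.run(demo())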
+ :param test: Test definition object + :type test: Test + :param test_data: output data from a runner execution + :type test_data: dict + :param error: if True, test will be considered broken by default + :type error: bool + :returns: TestResults + """ + stdout = test_data["stdout"] + + # get rid of colors from stdout + stdout = re.sub(r'\u001b\[[0-9;]+[a-zA-Z]', '', stdout) + + match = re.search( + r"Summary:\n" + r"passed\s*(?P\d+)\n" + r"failed\s*(?P\d+)\n" + r"broken\s*(?P\d+)\n" + r"skipped\s*(?P\d+)\n" + r"warnings\s*(?P\d+)\n", + stdout + ) + + passed = 0 + failed = 0 + skipped = 0 + broken = 0 + skipped = 0 + warnings = 0 + retcode = test_data["returncode"] + exec_time = test_data["exec_time"] + + if match: + passed = int(match.group("passed")) + failed = int(match.group("failed")) + skipped = int(match.group("skipped")) + broken = int(match.group("broken")) + skipped = int(match.group("skipped")) + warnings = int(match.group("warnings")) + else: + passed = stdout.count("TPASS") + failed = stdout.count("TFAIL") + skipped = stdout.count("TSKIP") + broken = stdout.count("TBROK") + warnings = stdout.count("TWARN") + + if passed == 0 and \ + failed == 0 and \ + skipped == 0 and \ + broken == 0 and \ + warnings == 0: + # if no results are given, this is probably an + # old test implementation that fails when return + # code is != 0 + if retcode == 0: + passed = 1 + elif retcode == 4: + warnings = 1 + elif retcode == 32: + skipped = 1 + elif not error: + failed = 1 + + if error: + broken = 1 + + result = TestResults( + test=test, + failed=failed, + passed=passed, + broken=broken, + skipped=skipped, + warnings=warnings, + exec_time=exec_time, + retcode=retcode, + stdout=stdout, + ) + + return result + + async def _get_tainted_status(self) -> tuple: + """ + Check tainted status of the Kernel. + """ + code, messages = await self._sut.get_tainted_info() + + for msg in messages: + if msg: + self._logger.debug("Kernel tainted: %s", msg) + await altp.events.fire("kernel_tainted", msg) + + return code, messages + + async def _write_kmsg(self, test: Test) -> None: + """ + If root, we write test information on /dev/kmsg. + """ + self._logger.info("Writing test information on /dev/kmsg") + + ret = await self._sut.run_command("id -u") + if ret["stdout"] != "0\n": + self._logger.info("Can't write on /dev/kmsg from user") + return + + cmd = f"{test.command}" + if len(test.arguments) > 0: + cmd += ' ' + cmd += ' '.join(test.arguments) + + message = f'{sys.argv[0]}[{os.getpid()}]: ' \ + f'starting test {test.name} ({cmd})\n' + + await self._sut.run_command(f'echo -n "{message}" > /dev/kmsg') + + @ property + def results(self) -> list: + return self._results + + async def stop(self) -> None: + if not self._tasks: + return + + self._logger.info("Stopping tests execution") + + self._stop = True + try: + for task in self._tasks: + if not task.cancelled(): + task.cancel() + + # wait until all tasks have been cancelled + asyncio.gather(*self._tasks, return_exceptions=True) + + async with self._lock: + pass + finally: + self._stop = False + + self._logger.info("Tests execution has stopped") + + # pylint: disable=too-many-statements + # pylint: disable=too-many-locals + async def _run_test(self, test: Test, sem: asyncio.Semaphore) -> None: + """ + Run a single test and populate the results array. 
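# A condensed sketch of the parsing done by _get_test_results() above: newer
# LTP tests print a "Summary:" block that is matched with named groups (the
# same names read back via match.group() in the hunk), while older tests
# only get their TPASS/TFAIL/... markers counted. The sample stdout is
# made up.
import re

SUMMARY_RE = re.compile(
    r"Summary:\n"
    r"passed\s*(?P<passed>\d+)\n"
    r"failed\s*(?P<failed>\d+)\n"
    r"broken\s*(?P<broken>\d+)\n"
    r"skipped\s*(?P<skipped>\d+)\n"
    r"warnings\s*(?P<warnings>\d+)\n")

def parse_results(stdout: str) -> dict:
    match = SUMMARY_RE.search(stdout)
    if match:
        return {key: int(val) for key, val in match.groupdict().items()}
    return {
        "passed": stdout.count("TPASS"),
        "failed": stdout.count("TFAIL"),
        "broken": stdout.count("TBROK"),
        "skipped": stdout.count("TSKIP"),
        "warnings": stdout.count("TWARN"),
    }

print(parse_results("Summary:\npassed 2\nfailed 0\nbroken 0\nskipped 1\nwarnings 0\n"))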
+ """ + async with sem: + if self._stop: + return None + + self._logger.info("Running test %s", test.name) + self._logger.debug(test) + + await altp.events.fire("test_started", test) + await self._write_kmsg(test) + + iobuffer = RedirectTestStdout(test) + cmd = self._command_from_test(test) + start_t = time.time() + exec_time = 0 + test_data = None + tainted_msg = None + status = self.STATUS_OK + + try: + tainted_code1, _ = await self._get_tainted_status() + + test_data = await asyncio.wait_for(self._sut.run_command( + cmd, + iobuffer=iobuffer), + timeout=self._timeout + ) + + tainted_code2, tainted_msg2 = await self._get_tainted_status() + if tainted_code2 != tainted_code1: + self._logger.info( + "Recognised Kernel tainted: %s", + tainted_msg2) + + tainted_msg = tainted_msg2 + status = self.KERNEL_TAINED + except altp.sut.KernelPanicError: + exec_time = time.time() - start_t + + self._logger.info("Recognised Kernel panic") + status = self.KERNEL_PANIC + except asyncio.TimeoutError: + exec_time = time.time() - start_t + status = self.TEST_TIMEOUT + + self._logger.info( + "Got test timeout. " + "Checking if SUT is still replying") + + try: + await asyncio.wait_for( + self._sut.ping(), + timeout=10 + ) + + self._logger.info("SUT replied") + except asyncio.TimeoutError: + status = self.KERNEL_TIMEOUT + + # create test results and save it + if status not in [self.STATUS_OK, self.KERNEL_TAINED]: + test_data = { + "name": test.name, + "command": test.command, + "stdout": iobuffer.stdout, + "returncode": -1, + "exec_time": exec_time, + } + + results = self._get_test_results( + test, + test_data, + error=test_data["returncode"] == -1) + + self._logger.debug("results=%s", results) + self._results.append(results) + + # raise kernel errors at the end so we can collect test results + if status == self.KERNEL_TAINED: + await altp.events.fire("kernel_tainted", tainted_msg) + raise KernelTainedError() + + if status == self.KERNEL_PANIC: + await altp.events.fire("kernel_panic") + raise KernelPanicError() + + if status == self.KERNEL_TIMEOUT: + await altp.events.fire("sut_not_responding") + raise KernelTimeoutError() + + await altp.events.fire("test_completed", results) + + self._logger.info("Test completed: %s", test.name) + self._logger.debug(results) + + async def _run_and_wait(self, tests: list) -> None: + """ + Run tests one after another. + """ + if not tests: + return + + sem = asyncio.Semaphore(1) + + self._logger.info("Scheduling %d tests on single worker", len(tests)) + + for test in tests: + task = altp.create_task(self._run_test(test, sem)) + self._tasks.append(task) + + await task + + async def _run_parallel(self, tests: list) -> None: + """ + Run tests in parallel. 
+ """ + if not tests: + return + + sem = asyncio.Semaphore(self._max_workers) + tasks = [asyncio.Task(self._run_test(test, sem)) for test in tests] + + self._logger.info( + "Scheduling %d tests on %d workers", + len(tasks), + self._max_workers) + + self._tasks.extend(tasks) + await asyncio.gather(*tasks) + + async def schedule(self, jobs: list) -> None: + if not jobs: + raise ValueError("jobs list is empty") + + for job in jobs: + if not isinstance(job, Test): + raise ValueError("jobs must be a list of Test") + + async with self._lock: + self._logger.info("Check what tests can be run in parallel") + + self._tasks.clear() + self._results.clear() + + try: + if self._force_parallel: + await self._run_parallel(jobs) + else: + await self._run_parallel([ + test for test in jobs if test.parallelizable + ]) + await self._run_and_wait([ + test for test in jobs if not test.parallelizable + ]) + except LTPException as err: + self._logger.info( + "%s caught. Cancel tasks", + err.__class__.__name__) + + self._logger.error(err) + + for task in self._tasks: + self._logger.info("Cancelling %d tasks", len(self._tasks)) + + if not task.done() and not task.cancelled(): + task.cancel() + + self._logger.info("Wait for tasks to be done") + asyncio.gather(*self._tasks, return_exceptions=True) + + raise err + except asyncio.CancelledError as err: + if not self._stop: + raise err + finally: + self._tasks.clear() + + +class SuiteScheduler(Scheduler): + """ + The Scheduler class implementation for LTP suites execution. + This is a special scheduler that schedules suites tests, checking for + kernel status and rebooting SUT if we have some issues with it + (i.e. kernel panic). + """ + + def __init__(self, **kwargs: dict) -> None: + """ + :param sut: object used to communicate with SUT + :type sut: SUT + :param suite_timeout: timeout before stopping testing suite + :type suite_timeout: float + :param exec_timeout: timeout before stopping single execution + :type exec_timeout: float + :param max_workers: maximum number of workers to schedule jobs + :type max_workers: int + :param skip_tests: regexp excluding tests from execution + :type skip_tests: str + :param force_parallel: Force parallel execution of all tests + :type force_parallel: bool + """ + self._logger = logging.getLogger("ltp.suite_scheduler") + self._sut = kwargs.get("sut", None) + self._suite_timeout = max(kwargs.get("suite_timeout", 3600.0), 0.0) + self._skip_tests = kwargs.get("skip_tests", None) + self._results = [] + self._stop = False + self._lock = asyncio.Lock() + + force_parallel = kwargs.get("force_parallel", False) + exec_timeout = max(kwargs.get("exec_timeout", 3600.0), 0.0) + + self._scheduler = TestScheduler( + sut=self._sut, + timeout=exec_timeout, + max_workers=kwargs.get("max_workers", 1), + force_parallel=force_parallel) + + if not self._sut: + raise ValueError("SUT is an empty object") + + @ property + def results(self) -> list: + return self._results + + async def stop(self) -> None: + if not self._lock.locked(): + return + + self._logger.info("Stopping suites execution") + + self._stop = True + try: + await self._scheduler.stop() + + async with self._lock: + pass + finally: + self._stop = False + + self._logger.info("Suites execution has stopped") + + async def _restart_sut(self) -> None: + """ + Reboot the SUT. 
+ """ + self._logger.info("Rebooting SUT") + + await altp.events.fire("sut_restart", self._sut.name) + + iobuffer = RedirectSUTStdout(self._sut) + + await self._scheduler.stop() + await self._sut.stop(iobuffer=iobuffer) + await self._sut.ensure_communicate(iobuffer=iobuffer) + + self._logger.info("SUT rebooted") + + async def _run_suite(self, suite: Suite) -> None: + """ + Run a single testing suite and populate the results array. + """ + self._logger.info("Running suite %s", suite.name) + self._logger.debug(suite) + + await altp.events.fire("suite_started", suite) + + tests_results = [] + tests = [] + tests_left = [] + timed_out = False + exec_times = [] + + # obtain the list of tests to execute + for test in suite.tests: + if self._skip_tests and re.search(self._skip_tests, test.name): + self._logger.info("Ignoring test: %s", test.name) + continue + + tests.append(test) + + # start the tests execution + tests_left.extend(tests) + + while not self._stop and tests_left: + try: + start_t = time.time() + await asyncio.wait_for( + self._scheduler.schedule(tests_left), + timeout=self._suite_timeout + ) + exec_times.append(time.time() - start_t) + except asyncio.TimeoutError: + self._logger.info("Testing suite timed out: %s", suite.name) + + await altp.events.fire( + "suite_timeout", + suite, + self._suite_timeout) + + timed_out = True + except (KernelPanicError, + KernelTainedError, + KernelTimeoutError): + # once we catch a kernel error, restart the SUT + await self._restart_sut() + finally: + tests_results.extend(self._scheduler.results) + + # tests_left array will be populated when SUT is + # rebooted after a kernel error + tests_left.clear() + + for test in tests: + found = False + for test_res in tests_results: + if test.name == test_res.test.name: + found = True + break + + if not found: + tests_left.append(test) + + if timed_out: + for test in tests_left: + tests_results.append( + TestResults( + test=test, + failed=0, + passed=0, + broken=0, + skipped=1, + warnings=0, + exec_time=0.0, + retcode=32, + stdout="" + ) + ) + + # no more tests need to be run + tests_left.clear() + break + + info = await self._sut.get_info() + + suite_results = SuiteResults( + suite=suite, + tests=tests_results, + distro=info["distro"], + distro_ver=info["distro_ver"], + kernel=info["kernel"], + arch=info["arch"], + cpu=info["cpu"], + swap=info["swap"], + ram=info["ram"], + exec_time=sum(exec_times)) + + await altp.events.fire("suite_completed", suite_results) + + self._logger.info("Suite completed") + self._logger.debug(suite_results) + + self._results.append(suite_results) + + async def schedule(self, jobs: list) -> None: + if not jobs: + raise ValueError("jobs list is empty") + + for job in jobs: + if not isinstance(job, Suite): + raise ValueError("jobs must be a list of Suite") + + async with self._lock: + self._results.clear() + + for suite in jobs: + await altp.create_task(self._run_suite(suite)) diff --git a/altp/session.py b/altp/session.py new file mode 100644 index 0000000..9afa5d8 --- /dev/null +++ b/altp/session.py @@ -0,0 +1,312 @@ +""" +.. module:: session + :platform: Linux + :synopsis: LTP session declaration + +.. 
moduleauthor:: Andrea Cervesato +""" +import os +import json +import logging +import asyncio +import altp +import altp.data +import altp.events +from altp import LTPException +from altp.sut import SUT +from altp.sut import IOBuffer +from altp.tempfile import TempDir +from altp.export import JSONExporter +from altp.scheduler import SuiteScheduler + + +class RedirectSUTStdout(IOBuffer): + """ + Redirect stdout data to UI events. + """ + + def __init__(self, sut: SUT, is_cmd: bool) -> None: + self._sut = sut + self._is_cmd = is_cmd + + async def write(self, data: str) -> None: + if self._is_cmd: + await altp.events.fire("run_cmd_stdout", data) + else: + await altp.events.fire("sut_stdout", self._sut.name, data) + + +class Session: + """ + The runltp session runner. + """ + + def __init__(self, **kwargs) -> None: + """ + :param tmpdir: temporary directory path + :type tmpdir: str + :param ltpdir: LTP directory path + :type ltpdir: str + :param sut: SUT communication object + :type sut: SUT + :param sut_config: SUT object configuration + :type sut_config: dict + :param no_colors: if True, it disables LTP tests colors + :type no_colors: bool + :param exec_timeout: test timeout + :type exec_timeout: float + :param suite_timeout: testing suite timeout + :type suite_timeout: float + :param skip_tests: regexp excluding tests from execution + :type skip_tests: str + :param workers: number of workers for testing suite scheduler + :type workers: int + :param env: SUT environment vairables to inject before execution + :type env: dict + :param force_parallel: Force parallel execution of all tests + :type force_parallel: bool + """ + self._logger = logging.getLogger("ltp.session") + self._tmpdir = TempDir(kwargs.get("tmpdir", "/tmp")) + self._ltpdir = kwargs.get("ltpdir", "/opt/ltp") + self._sut = kwargs.get("sut", None) + self._no_colors = kwargs.get("no_colors", False) + self._exec_timeout = kwargs.get("exec_timeout", 3600.0) + self._env = kwargs.get("env", None) + + suite_timeout = kwargs.get("suite_timeout", 3600.0) + skip_tests = kwargs.get("skip_tests", "") + workers = kwargs.get("workers", 1) + force_parallel = kwargs.get("force_parallel", False) + + self._scheduler = SuiteScheduler( + sut=self._sut, + suite_timeout=suite_timeout, + exec_timeout=self._exec_timeout, + max_workers=workers, + skip_tests=skip_tests, + force_parallel=force_parallel) + + if not self._sut: + raise ValueError("sut is empty") + + self._sut_config = self._get_sut_config(kwargs.get("sut_config", {})) + self._setup_debug_log() + + if not self._sut.parallel_execution: + self._logger.info( + "SUT doesn't support parallel execution. " + "Forcing workers=1.") + self._workers = 1 + + metadata_path = os.path.join(self._ltpdir, "metadata", "ltp.json") + self._metadata_json = None + if os.path.isfile(metadata_path): + with open(metadata_path, 'r', encoding='utf-8') as metadata: + self._metadata_json = json.loads(metadata.read()) + + def _setup_debug_log(self) -> None: + """ + Set logging module so we save a log file with debugging information + inside the temporary path. 
+ """ + if not self._tmpdir.abspath: + return + + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + + debug_file = os.path.join(self._tmpdir.abspath, "debug.log") + handler = logging.FileHandler(debug_file, encoding="utf8") + handler.setLevel(logging.DEBUG) + + formatter = logging.Formatter( + "%(asctime)s - %(name)s:%(lineno)s - %(levelname)s - %(message)s") + handler.setFormatter(formatter) + logger.addHandler(handler) + + def _get_sut_config(self, sut_config: dict) -> dict: + """ + Create the SUT configuration. The dictionary is usually passed to the + `setup` method of the SUT, in order to setup the environment before + running tests. + """ + testcases = os.path.join(self._ltpdir, "testcases", "bin") + + env = {} + env["PATH"] = "/sbin:/usr/sbin:/usr/local/sbin:" + \ + f"/root/bin:/usr/local/bin:/usr/bin:/bin:{testcases}" + env["LTPROOT"] = self._ltpdir + env["TMPDIR"] = self._tmpdir.root if self._tmpdir.root else "/tmp" + env["LTP_TIMEOUT_MUL"] = str((self._exec_timeout * 0.9) / 300.0) + + if self._no_colors: + env["LTP_COLORIZE_OUTPUT"] = "0" + else: + env["LTP_COLORIZE_OUTPUT"] = "1" + + if self._env: + for key, value in self._env.items(): + if key in env: + continue + + self._logger.info("Set environment variable %s=%s", key, value) + env[key] = value + + config = sut_config.copy() + config['env'] = env + config['cwd'] = testcases + config['tmpdir'] = self._tmpdir.abspath + + return config + + async def _start_sut(self) -> None: + """ + Start communicating with SUT. + """ + self._sut.setup(**self._sut_config) + + await altp.events.fire("sut_start", self._sut.name) + await self._sut.ensure_communicate( + iobuffer=RedirectSUTStdout(self._sut, False)) + + async def _stop_sut(self) -> None: + """ + Stop the SUT. + """ + if not await self._sut.is_running: + return + + await altp.events.fire("sut_stop", self._sut.name) + await self._sut.stop(iobuffer=RedirectSUTStdout(self._sut, False)) + + async def _download_suites(self, suites: list) -> list: + """ + Download all testing suites and return suites objects list. + """ + if not os.path.isdir(os.path.join(self._tmpdir.abspath, "runtest")): + self._tmpdir.mkdir("runtest") + + async def _download(suite: str) -> None: + """ + Download a single suite inside temporary folder. + """ + target = os.path.join(self._ltpdir, "runtest", suite) + + await altp.events.fire( + "suite_download_started", + suite, + target) + + data = await self._sut.fetch_file(target) + data_str = data.decode(encoding="utf-8", errors="ignore") + + self._tmpdir.mkfile(os.path.join("runtest", suite), data_str) + + await altp.events.fire( + "suite_download_completed", + suite, + target) + + suite = await altp.data.read_runtest( + suite, + data_str, + metadata=self._metadata_json) + + return suite + + suites_obj = await asyncio.gather(*[ + _download(suite) + for suite in suites + ]) + + return suites_obj + + async def _exec_command(self, command: str) -> None: + """ + Execute a single command on SUT. + """ + try: + await altp.events.fire("run_cmd_start", command) + + ret = await asyncio.wait_for( + self._sut.run_command( + command, + iobuffer=RedirectSUTStdout(self._sut, True)), + timeout=self._exec_timeout + ) + + await altp.events.fire( + "run_cmd_stop", + command, + ret["stdout"], + ret["returncode"]) + except asyncio.TimeoutError: + raise LTPException(f"Command timeout: {repr(command)}") + + async def stop(self) -> None: + """ + Stop the current session. 
+ """ + await self._scheduler.stop() + await self._stop_sut() + + async def run( + self, + command: str = None, + suites: list = None, + report_path: str = None) -> None: + """ + Run a new session and store results inside a JSON file. + :param command: single command to run before suites + :type command: str + :param suites: name of the testing suites to run + :type suites: list(str) + :param report_path: JSON report path + :type report_path: str + """ + await altp.events.fire( + "session_started", + self._tmpdir.abspath) + + try: + await self._start_sut() + + if command: + await self._exec_command(command) + + if suites: + suites = await self._download_suites(suites) + await self._scheduler.schedule(suites) + + exporter = JSONExporter() + + tasks = [] + tasks.append( + exporter.save_file( + self._scheduler.results, + os.path.join( + self._tmpdir.abspath, + "results.json") + )) + + if report_path: + tasks.append( + exporter.save_file( + self._scheduler.results, + report_path + )) + + await asyncio.gather(*tasks) + + await altp.events.fire( + "session_completed", + self._scheduler.results) + except asyncio.CancelledError: + await altp.events.fire("session_stopped") + except LTPException as err: + self._logger.exception(err) + await altp.events.fire("session_error", str(err)) + raise err + finally: + await self.stop() diff --git a/altp/ssh.py b/altp/ssh.py new file mode 100644 index 0000000..ca6ff30 --- /dev/null +++ b/altp/ssh.py @@ -0,0 +1,305 @@ +""" +.. module:: ssh + :platform: Linux + :synopsis: module defining SSH SUT + +.. moduleauthor:: Andrea Cervesato +""" +import time +import asyncio +import logging +import importlib +import contextlib +from altp.sut import SUT +from altp.sut import SUTError +from altp.sut import IOBuffer +from altp.sut import KernelPanicError + +try: + import asyncssh +except ModuleNotFoundError: + pass + + +# pylint: disable=too-many-instance-attributes +class SSHSUT(SUT): + """ + A SUT that is using SSH protocol con communicate and transfer data. + """ + + def __init__(self) -> None: + self._logger = logging.getLogger("ltp.ssh") + self._tmpdir = None + self._host = None + self._port = None + self._reset_cmd = None + self._env = None + self._cwd = None + self._user = None + self._password = None + self._key_file = None + self._sudo = False + self._session_sem = None + self._stop = False + self._conn = None + self._downloader = None + self._procs = [] + + @property + def name(self) -> str: + return "ssh" + + @property + def config_help(self) -> dict: + return { + "host": "IP address of the SUT (default: localhost)", + "port": "TCP port of the service (default: 22)", + "user": "name of the user (default: root)", + "password": "root password", + "timeout": "connection timeout in seconds (default: 10)", + "key_file": "private key location", + "reset_command": "command to reset the remote SUT", + "sudo": "use sudo to access to root shell (default: 0)", + } + + async def _reset(self, iobuffer: IOBuffer = None) -> None: + """ + Run the reset command on host. 
+ """ + if not self._reset_cmd: + return + + self._logger.info("Executing reset command: %s", repr(self._reset_cmd)) + + proc = await asyncio.create_subprocess_shell( + self._reset_cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE) + + while True: + line = await proc.stdout.read(1024) + if line: + sline = line.decode(encoding="utf-8", errors="ignore") + + if iobuffer: + await iobuffer.write(sline) + + with contextlib.suppress(asyncio.TimeoutError): + returncode = await asyncio.wait_for(proc.wait(), 1e-6) + if returncode is not None: + break + + await proc.wait() + + self._logger.info("Reset command has been executed") + + def _create_command(self, cmd: str) -> str: + """ + Create command to send to SSH client. + """ + script = "" + + if self._cwd: + script += f"cd {self._cwd};" + + if self._env: + for key, value in self._env.items(): + script += f"export {key}={value};" + + script += cmd + + if self._sudo: + script = f"sudo /bin/sh -c '{script}'" + + return script + + def setup(self, **kwargs: dict) -> None: + if not importlib.util.find_spec('asyncssh'): + raise SUTError("'asyncssh' library is not available") + + self._logger.info("Initialize SUT") + + self._tmpdir = kwargs.get("tmpdir", None) + self._host = kwargs.get("host", "localhost") + self._port = kwargs.get("port", 22) + self._reset_cmd = kwargs.get("reset_cmd", None) + self._env = kwargs.get("env", None) + self._cwd = kwargs.get("cwd", None) + self._user = kwargs.get("user", "root") + self._password = kwargs.get("password", None) + self._key_file = kwargs.get("key_file", None) + + try: + self._port = int(kwargs.get("port", "22")) + + if 1 > self._port > 65535: + raise ValueError() + except ValueError: + raise SUTError("'port' must be an integer between 1-65535") + + try: + self._sudo = int(kwargs.get("sudo", 0)) == 1 + except ValueError: + raise SUTError("'sudo' must be 0 or 1") + + @property + def parallel_execution(self) -> bool: + return True + + @property + async def is_running(self) -> bool: + return self._conn is not None + + async def communicate(self, iobuffer: IOBuffer = None) -> None: + if await self.is_running: + raise SUTError("SUT is already running") + + self._conn = None + if self._key_file: + priv_key = asyncssh.read_private_key(self._key_file) + + self._conn = await asyncssh.connect( + host=self._host, + port=self._port, + username=self._user, + client_keys=[priv_key]) + else: + self._conn = await asyncssh.connect( + host=self._host, + port=self._port, + username=self._user, + password=self._password) + + # read maximum number of sessions and limit `run_command` + # concurrent calls to that by using a semaphore + ret = await self._conn.run( + r'sed -n "s/^MaxSessions\s*\([[:digit:]]*\)/\1/p" ' + '/etc/ssh/sshd_config') + + max_sessions = ret.stdout or 10 + + self._logger.info("Maximum SSH sessions: %d", max_sessions) + self._session_sem = asyncio.Semaphore(max_sessions) + + async def stop(self, iobuffer: IOBuffer = None) -> None: + if not await self.is_running: + return + + self._stop = True + try: + if self._procs: + self._logger.info("Killing %d process(es)", len(self._procs)) + + for proc in self._procs: + proc.kill() + await proc.wait() + + self._procs.clear() + + if self._downloader: + await self._downloader.close() + + self._logger.info("Closing connection") + self._conn.close() + self._logger.info("Connection closed") + + await self._reset(iobuffer=iobuffer) + finally: + self._stop = False + self._conn = None + + async def ping(self) -> float: + if not await self.is_running: + raise 
SUTError("SUT is not running") + + start_t = time.time() + + self._logger.info("Ping %s:%d", self._host, self._port) + + try: + await self._conn.run("test .", check=True) + except asyncssh.Error as err: + raise SUTError(err) + + end_t = time.time() - start_t + + self._logger.info("SUT replied after %.3f seconds", end_t) + + return end_t + + async def run_command( + self, + command: str, + iobuffer: IOBuffer = None) -> dict: + if not command: + raise ValueError("command is empty") + + if not await self.is_running: + raise SUTError("SSH connection is not present") + + async with self._session_sem: + cmd = self._create_command(command) + ret = None + proc = None + start_t = 0 + + try: + self._logger.info("Running command: %s", repr(command)) + + proc = await self._conn.create_process(cmd) + self._procs.append(proc) + + start_t = time.time() + panic = False + stdout = "" + + async for data in proc.stdout: + stdout += data + + if iobuffer: + await iobuffer.write(data) + + if "Kernel panic" in data: + panic = True + finally: + if proc: + self._procs.remove(proc) + + if proc.returncode is None: + proc.kill() + + ret = { + "command": command, + "returncode": proc.returncode, + "exec_time": time.time() - start_t, + "stdout": stdout + } + + if panic: + raise KernelPanicError() + + self._logger.info("Command executed") + self._logger.debug(ret) + + return ret + + async def fetch_file(self, target_path: str) -> bytes: + if not target_path: + raise ValueError("target path is empty") + + if not await self.is_running: + raise SUTError("SSH connection is not present") + + data = None + try: + ret = await self._conn.run( + f"cat {target_path}", + check=True, + encoding=None) + + data = ret.stdout + except asyncssh.Error as err: + if not self._stop: + raise SUTError(err) + + return data diff --git a/altp/sut.py b/altp/sut.py new file mode 100644 index 0000000..c81ee54 --- /dev/null +++ b/altp/sut.py @@ -0,0 +1,268 @@ +""" +.. module:: sut + :platform: Linux + :synopsis: sut definition + +.. moduleauthor:: Andrea Cervesato +""" +import re +import asyncio +from altp import LTPException + + +class SUTError(LTPException): + """ + Raised when an error occurs in SUT. + """ + + +class KernelPanicError(SUTError): + """ + Raised during kernel panic. + """ + + +class IOBuffer: + """ + IO stdout buffer. The API is similar to ``IO`` types. + """ + + async def write(self, data: str) -> None: + """ + Write data. + """ + raise NotImplementedError() + + +TAINED_MSG = [ + "proprietary module was loaded", + "module was force loaded", + "kernel running on an out of specification system", + "module was force unloaded", + "processor reported a Machine Check Exception (MCE)", + "bad page referenced or some unexpected page flags", + "taint requested by userspace application", + "kernel died recently, i.e. there was an OOPS or BUG", + "ACPI table overridden by user", + "kernel issued warning", + "staging driver was loaded", + "workaround for bug in platform firmware applied", + "externally-built (“out-of-tree”) module was loaded", + "unsigned module was loaded", + "soft lockup occurred", + "kernel has been live patched", + "auxiliary taint, defined for and used by distros", + "kernel was built with the struct randomization plugin" +] + + +class SUT: + """ + SUT abstraction class. It could be a remote host, a local host, a virtual + machine instance, etc. + """ + + def setup(self, **kwargs: dict) -> None: + """ + Initialize SUT using configuration dictionary. 
+ :param kwargs: SUT configuration + :type kwargs: dict + """ + raise NotImplementedError() + + @property + def config_help(self) -> dict: + """ + Associate each configuration option with a help message. + This is used by the main menu application to generate --help message. + :returns: dict + """ + raise NotImplementedError() + + @property + def name(self) -> str: + """ + Name of the SUT. + """ + raise NotImplementedError() + + @property + def parallel_execution(self) -> bool: + """ + If True, SUT supports commands parallel execution. + """ + raise NotImplementedError() + + @property + async def is_running(self) -> bool: + """ + Return True if SUT is running. + """ + raise NotImplementedError() + + async def ping(self) -> float: + """ + If SUT is replying and it's available, ping will return time needed to + wait for SUT reply. + :returns: float + """ + raise NotImplementedError() + + async def communicate(self, iobuffer: IOBuffer = None) -> None: + """ + Start communicating with the SUT. + :param iobuffer: buffer used to write SUT stdout + :type iobuffer: IOBuffer + """ + raise NotImplementedError() + + async def stop(self, iobuffer: IOBuffer = None) -> None: + """ + Stop the current SUT session. + :param iobuffer: buffer used to write SUT stdout + :type iobuffer: IOBuffer + """ + raise NotImplementedError() + + async def run_command( + self, + command: str, + iobuffer: IOBuffer = None) -> dict: + """ + Coroutine to run command on target. + :param command: command to execute + :type command: str + :param iobuffer: buffer used to write SUT stdout + :type iobuffer: IOBuffer + :returns: dictionary containing command execution information + + { + "command": , + "returncode": , + "stdout": , + "exec_time": , + } + + If None is returned, then callback failed. + """ + raise NotImplementedError() + + async def fetch_file(self, target_path: str) -> bytes: + """ + Fetch file from target path and return data from target path. + :param target_path: path of the file to download from target + :type target_path: str + :returns: bytes contained in target_path + """ + raise NotImplementedError() + + async def ensure_communicate( + self, + iobuffer: IOBuffer = None, + retries: int = 10) -> None: + """ + Ensure that `communicate` is completed, retrying as many times we + want in case of `LTPException` error. After each `communicate` error + the SUT is stopped and a new communication is tried. + :param iobuffer: buffer used to write SUT stdout + :type iobuffer: IOBuffer + :param retries: number of times we retry communicating with SUT + :type retries: int + """ + retries = max(retries, 1) + + for retry in range(retries): + try: + await self.communicate(iobuffer=iobuffer) + break + except LTPException as err: + if retry >= retries - 1: + raise err + + await self.stop(iobuffer=iobuffer) + + async def get_info(self) -> dict: + """ + Return SUT information. + :returns: dict + + { + "distro": str, + "distro_ver": str, + "kernel": str, + "arch": str, + "cpu" : str, + "swap" : str, + "ram" : str, + } + + """ + # create suite results + async def _run_cmd(cmd: str) -> str: + """ + Run command, check for returncode and return command's stdout. + """ + ret = await self.run_command(cmd) + if ret["returncode"] != 0: + raise SUTError(f"Can't read information from SUT: {cmd}") + + stdout = ret["stdout"].rstrip() + + return stdout + + distro, \ + distro_ver, \ + kernel, \ + arch, \ + cpu, \ + meminfo = await asyncio.gather(*[ + _run_cmd(". /etc/os-release; echo \"$ID\""), + _run_cmd(". 
/etc/os-release; echo \"$VERSION_ID\""), + _run_cmd("uname -s -r -v"), + _run_cmd("uname -m"), + _run_cmd("uname -p"), + _run_cmd("cat /proc/meminfo") + ]) + + swap_m = re.search(r'SwapTotal:\s+(?P\d+\s+kB)', meminfo) + if not swap_m: + raise SUTError("Can't read swap information from /proc/meminfo") + + mem_m = re.search(r'MemTotal:\s+(?P\d+\s+kB)', meminfo) + if not mem_m: + raise SUTError("Can't read memory information from /proc/meminfo") + + ret = { + "distro": distro, + "distro_ver": distro_ver, + "kernel": kernel, + "arch": arch, + "cpu": cpu, + "swap": swap_m.group('swap'), + "ram": mem_m.group('memory') + } + + return ret + + async def get_tainted_info(self) -> tuple: + """ + Return information about kernel if tainted. + :returns: set(int, list[str]), + """ + ret = await self.run_command("cat /proc/sys/kernel/tainted") + if ret["returncode"] != 0: + raise SUTError("Can't read tainted kernel information") + + stdout = ret["stdout"].rstrip() + + tainted_num = len(TAINED_MSG) + code = int(stdout.rstrip()) + bits = format(code, f"0{tainted_num}b")[::-1] + + messages = [] + for i in range(0, tainted_num): + if bits[i] == "1": + msg = TAINED_MSG[i] + messages.append(msg) + + return code, messages diff --git a/altp/tempfile.py b/altp/tempfile.py new file mode 100644 index 0000000..f169f56 --- /dev/null +++ b/altp/tempfile.py @@ -0,0 +1,129 @@ +""" +.. module:: tempfile + :platform: Linux + :synopsis: module that contains LTP temporary files handling + +.. moduleauthor:: Andrea Cervesato +""" +import os +import pwd +import shutil +import pathlib +import tempfile + + +class TempDir: + """ + Temporary directory handler. + """ + SYMLINK_NAME = "latest" + FOLDER_PREFIX = "runltp." + + def __init__(self, root: str = None, max_rotate: int = 5) -> None: + """ + :param root: root directory (i.e. /tmp). If None, TempDir will handle + requests without adding any file or directory. + :type root: str | None + :param max_rotate: maximum number of temporary directories + :type max_rotate: int + """ + if root and not os.path.isdir(root): + raise ValueError(f"root folder doesn't exist: {root}") + + self._root = root + if root: + self._root = os.path.abspath(root) + + self._max_rotate = max(max_rotate, 0) + self._folder = self._rotate() + + def _rotate(self) -> str: + """ + Check for old folders and remove them, then create a new one and return + its full path. + """ + if not self._root: + return "" + + name = pwd.getpwuid(os.getuid()).pw_name + tmpbase = os.path.join(self._root, f"{self.FOLDER_PREFIX}{name}") + + os.makedirs(tmpbase, exist_ok=True) + + # delete the first max_rotate items + sorted_paths = sorted( + pathlib.Path(tmpbase).iterdir(), + key=os.path.getmtime) + + # don't consider latest symlink + num_paths = len(sorted_paths) - 1 + + if num_paths >= self._max_rotate: + max_items = num_paths - self._max_rotate + 1 + paths = sorted_paths[:max_items] + + for path in paths: + if path.name == self.SYMLINK_NAME: + continue + + shutil.rmtree(str(path.resolve())) + + # create a new folder + folder = tempfile.mkdtemp(dir=tmpbase) + + # create symlink to the latest temporary directory + latest = os.path.join(tmpbase, self.SYMLINK_NAME) + if os.path.islink(latest): + os.remove(latest) + + os.symlink( + folder, + os.path.join(tmpbase, self.SYMLINK_NAME), + target_is_directory=True) + + return folder + + @property + def root(self) -> str: + """ + The root folder. For example, if temporary folder is + "/tmp/runltp.pippo/tmpf547ftxv" the method will return "/tmp". 
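# A standalone sketch of the tainted-flag decoding in get_tainted_info()
# above: /proc/sys/kernel/tainted is a bitmask and each set bit selects the
# corresponding message, lowest bit first. Only the first few entries of the
# table are repeated here.
TAINTED = [
    "proprietary module was loaded",
    "module was force loaded",
    "kernel running on an out of specification system",
    "module was force unloaded",
]

def decode(code: int, table: list) -> list:
    bits = format(code, f"0{len(table)}b")[::-1]
    return [msg for bit, msg in zip(bits, table) if bit == "1"]

print(decode(5, TAINTED))
# ['proprietary module was loaded',
#  'kernel running on an out of specification system']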
+ If root folder has not been given during object creation, this + method returns an empty string. + """ + return self._root if self._root else "" + + @property + def abspath(self) -> str: + """ + Absolute path of the temporary directory. + """ + return self._folder + + def mkdir(self, path: str) -> None: + """ + Create a directory inside temporary directory. + :param path: path of the directory + :type path: str + :returns: folder path. + """ + if not self._folder: + return + + dpath = os.path.join(self._folder, path) + os.mkdir(dpath) + + def mkfile(self, path: str, content: bytes) -> None: + """ + Create a file inside temporary directory. + :param path: path of the file + :type path: str + :param content: file content + :type content: str + """ + if not self._folder: + return + + fpath = os.path.join(self._folder, path) + with open(fpath, "w+", encoding="utf-8") as mypath: + mypath.write(content) diff --git a/altp/tests/__init__.py b/altp/tests/__init__.py new file mode 100644 index 0000000..e420087 --- /dev/null +++ b/altp/tests/__init__.py @@ -0,0 +1,12 @@ +""" +.. module:: __init__ + :platform: Linux + :synopsis: Entry point of the testing suite +.. moduleauthor:: Andrea Cervesato +""" + +import os +import sys + +# include runltp-ng library +sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) diff --git a/altp/tests/conftest.py b/altp/tests/conftest.py new file mode 100644 index 0000000..8f7f7e7 --- /dev/null +++ b/altp/tests/conftest.py @@ -0,0 +1,20 @@ +""" +Generic stuff for pytest. +""" +import altp +import pytest + + +@pytest.fixture(scope="session") +def event_loop(): + """ + Current event loop. Keep it in session scope, otherwise tests which + will use same coroutines will be associated to different event_loop. + In this way, pytest-asyncio plugin will work properly. + """ + loop = altp.get_event_loop() + + yield loop + + if not loop.is_closed(): + loop.close() diff --git a/altp/tests/sut.py b/altp/tests/sut.py new file mode 100644 index 0000000..4f74200 --- /dev/null +++ b/altp/tests/sut.py @@ -0,0 +1,252 @@ +""" +Test AsyncSUT implementations. +""" +import os +import time +import asyncio +import logging +import pytest +import altp +from altp.sut import IOBuffer +from altp.sut import SUTError + + +pytestmark = pytest.mark.asyncio + + +class Printer(IOBuffer): + """ + stdout printer. + """ + + def __init__(self) -> None: + self._logger = logging.getLogger("test.host") + + async def write(self, data: str) -> None: + print(data, end="") + + +@pytest.fixture +def sut(): + """ + Expose the SUT implementation via this fixture in order to test it. + """ + raise NotImplementedError() + + +class _TestSUT: + """ + Generic tests for SUT implementation. + """ + + _logger = logging.getLogger("test.asyncsut") + + def test_config_help(self, sut): + """ + Test if config_help has the right type. + """ + assert isinstance(sut.config_help, dict) + + async def test_ping_no_running(self, sut): + """ + Test ping method with no running sut. + """ + with pytest.raises(SUTError): + await sut.ping() + + async def test_ping(self, sut): + """ + Test ping method. + """ + await sut.communicate(iobuffer=Printer()) + ping_t = await sut.ping() + assert ping_t > 0 + + async def test_get_info(self, sut): + """ + Test get_info method. 
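# A sketch of how the generic _TestSUT suite above is meant to be reused: a
# concrete test module overrides the `sut` fixture with a real SUT
# implementation. The altp.host.HostSUT import and its setup() arguments are
# assumptions based on this patch's file list, not code shown in these
# hunks.
import pytest

from altp.host import HostSUT          # assumed location of the host SUT
from altp.tests.sut import _TestSUT, Printer

class TestHostSUT(_TestSUT):
    @pytest.fixture
    async def sut(self, tmpdir):
        obj = HostSUT()
        obj.setup(tmpdir=str(tmpdir))   # assumed keyword, as for other SUTs
        yield obj
        if await obj.is_running:
            await obj.stop(iobuffer=Printer())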
+ """ + await sut.communicate(iobuffer=Printer()) + info = await sut.get_info() + + assert info["distro"] + assert info["distro_ver"] + assert info["kernel"] + assert info["arch"] + + async def test_get_tainted_info(self, sut): + """ + Test get_tainted_info. + """ + await sut.communicate(iobuffer=Printer()) + code, messages = await sut.get_tainted_info() + + assert code >= 0 + assert isinstance(messages, list) + + async def test_communicate(self, sut): + """ + Test communicate method. + """ + await sut.communicate(iobuffer=Printer()) + with pytest.raises(SUTError): + await sut.communicate(iobuffer=Printer()) + + async def test_ensure_communicate(self, sut): + """ + Test ensure_communicate method. + """ + await sut.ensure_communicate(iobuffer=Printer()) + with pytest.raises(SUTError): + await sut.ensure_communicate(iobuffer=Printer(), retries=1) + + @pytest.fixture + def sut_stop_sleep(self, request): + """ + Setup sleep time before calling stop after communicate. + By changing multiply factor it's possible to tweak stop sleep and + change the behaviour of `test_stop_communicate`. + """ + return request.param * 1.0 + + @pytest.mark.parametrize("sut_stop_sleep", [1, 2], indirect=True) + async def test_communicate_stop(self, sut, sut_stop_sleep): + """ + Test stop method when running communicate. + """ + async def stop(): + await asyncio.sleep(sut_stop_sleep) + await sut.stop(iobuffer=Printer()) + + await asyncio.gather(*[ + sut.communicate(iobuffer=Printer()), + stop() + ], return_exceptions=True) + + async def test_run_command(self, sut): + """ + Execute run_command once. + """ + await sut.communicate(iobuffer=Printer()) + res = await sut.run_command("echo 0") + + assert res["returncode"] == 0 + assert int(res["stdout"]) == 0 + assert 0 < res["exec_time"] < time.time() + + async def test_run_command_stop(self, sut): + """ + Execute run_command once, then call stop(). + """ + await sut.communicate(iobuffer=Printer()) + + async def stop(): + await asyncio.sleep(0.2) + await sut.stop(iobuffer=Printer()) + + async def test(): + res = await sut.run_command("sleep 2") + + assert res["returncode"] != 0 + assert 0 < res["exec_time"] < 2 + + await asyncio.gather(*[ + test(), + stop() + ]) + + async def test_run_command_parallel(self, sut): + """ + Execute run_command in parallel. + """ + if not sut.parallel_execution: + pytest.skip(reason="Parallel execution is not supported") + + await sut.communicate(iobuffer=Printer()) + + exec_count = os.cpu_count() + coros = [sut.run_command(f"echo {i}") + for i in range(exec_count)] + + results = await asyncio.gather(*coros) + + for data in results: + assert data["returncode"] == 0 + assert 0 <= int(data["stdout"]) < exec_count + assert 0 < data["exec_time"] < time.time() + + async def test_run_command_stop_parallel(self, sut): + """ + Execute multiple run_command in parallel, then call stop(). 
+ """ + if not sut.parallel_execution: + pytest.skip(reason="Parallel execution is not supported") + + await sut.communicate(iobuffer=Printer()) + + async def stop(): + await asyncio.sleep(0.2) + await sut.stop(iobuffer=Printer()) + + async def test(): + exec_count = os.cpu_count() + coros = [sut.run_command("sleep 2") + for i in range(exec_count)] + results = await asyncio.gather(*coros, return_exceptions=True) + + for data in results: + if not isinstance(data, dict): + # we also have stop() return + continue + + assert data["returncode"] != 0 + assert 0 < data["exec_time"] < 2 + + await asyncio.gather(*[ + test(), + stop() + ]) + + async def test_fetch_file_bad_args(self, sut): + """ + Test fetch_file method with bad arguments. + """ + await sut.communicate(iobuffer=Printer()) + + with pytest.raises(ValueError): + await sut.fetch_file(None) + + with pytest.raises(SUTError): + await sut.fetch_file('this_file_doesnt_exist') + + async def test_fetch_file(self, sut): + """ + Test fetch_file method. + """ + await sut.communicate(iobuffer=Printer()) + + for i in range(0, 5): + myfile = f"/tmp/myfile{i}" + await sut.run_command(f"echo -n 'runltp-ng tests' > {myfile}") + data = await sut.fetch_file(myfile) + + assert data == b"runltp-ng tests" + + async def test_fetch_file_stop(self, sut): + """ + Test stop method when running fetch_file. + """ + target = "/tmp/target_file" + await sut.communicate(iobuffer=Printer()) + + async def fetch(): + await sut.run_command(f"truncate -s {1024*1024*1024} {target}"), + await sut.fetch_file(target) + + async def stop(): + await asyncio.sleep(2) + await sut.stop(iobuffer=Printer()) + + altp.create_task(fetch()) + + await stop() diff --git a/altp/tests/test_data.py b/altp/tests/test_data.py new file mode 100644 index 0000000..2f55096 --- /dev/null +++ b/altp/tests/test_data.py @@ -0,0 +1,79 @@ +""" +Unittests for data module. +""" +import asyncio +import pytest +import altp.data + + +pytestmark = pytest.mark.asyncio + + +async def test_read_runtest_error(): + """ + Test read_runtest method when raising errors. + """ + with pytest.raises(ValueError): + await altp.data.read_runtest("suite", None) + + with pytest.raises(ValueError): + await altp.data.read_runtest("suite", "test01") + + +async def test_read_runtest(): + """ + Test read_runtest method. + """ + tasks = [] + for i in range(100): + content = "# this is a test file\ntest01 test -f .\ntest02 test -d .\n" + tasks.append(altp.data.read_runtest(f"suite{i}", content)) + + suites = await asyncio.gather(*tasks, return_exceptions=True) + + for suite in suites: + assert suite.tests[0].name == "test01" + assert suite.tests[0].command == "test" + assert suite.tests[0].arguments == ['-f', '.'] + assert not suite.tests[0].parallelizable + + assert suite.tests[1].name == "test02" + assert suite.tests[1].command == "test" + assert suite.tests[1].arguments == ['-d', '.'] + assert not suite.tests[1].parallelizable + + +async def test_read_runtest_metadata_blacklist(): + """ + Test read_runtest method using metadata to blacklist some tests. 
+ """ + tasks = [] + for param in altp.data.PARALLEL_BLACKLIST: + content = "# this is a test file\ntest01 test -f .\ntest02 test -d .\n" + metadata = { + "tests": { + "test01": { + param: "myvalue" + }, + "test02": {} + } + } + + tasks.append(altp.data.read_runtest( + "suite", + content, + metadata=metadata)) + + suites = await asyncio.gather(*tasks, return_exceptions=True) + + for suite in suites: + assert suite.name == "suite" + assert suite.tests[0].name == "test01" + assert suite.tests[0].command == "test" + assert suite.tests[0].arguments == ['-f', '.'] + assert not suite.tests[0].parallelizable + + assert suite.tests[1].name == "test02" + assert suite.tests[1].command == "test" + assert suite.tests[1].arguments == ['-d', '.'] + assert suite.tests[1].parallelizable diff --git a/altp/tests/test_events.py b/altp/tests/test_events.py new file mode 100644 index 0000000..b165867 --- /dev/null +++ b/altp/tests/test_events.py @@ -0,0 +1,121 @@ +""" +Unittest for events module. +""" +import asyncio +import pytest +import altp + + +pytestmark = pytest.mark.asyncio + + +def test_reset(): + """ + Test reset method. + """ + async def funct(): + pass + + altp.events.register("myevent", funct) + assert altp.events.is_registered("myevent") + + altp.events.reset() + assert not altp.events.is_registered("myevent") + + +def test_register_errors(): + """ + Test register method during errors. + """ + async def funct(): + pass + + with pytest.raises(ValueError): + altp.events.register(None, funct) + + with pytest.raises(ValueError): + altp.events.register("myevent", None) + + +def test_register(): + """ + Test register method. + """ + async def funct(): + pass + + altp.events.register("myevent", funct) + assert altp.events.is_registered("myevent") + + +def test_unregister_errors(): + """ + Test unregister method during errors. + """ + with pytest.raises(ValueError): + altp.events.unregister(None) + + +def test_unregister(): + """ + Test unregister method. + """ + async def funct(): + pass + + altp.events.register("myevent", funct) + assert altp.events.is_registered("myevent") + + altp.events.unregister("myevent") + assert not altp.events.is_registered("myevent") + + +async def test_fire_errors(): + """ + Test fire method during errors. + """ + with pytest.raises(ValueError): + await altp.events.fire(None, "prova") + + +async def test_fire(): + """ + Test fire method. + """ + times = 100 + called = [] + + async def diehard(error, name): + assert error is not None + assert name is not None + + async def tofire(param): + called.append(param) + + async def start(): + await altp.events.start() + + async def run(): + for i in range(times): + await altp.events.fire("myevent", i) + + while len(called) < times: + await asyncio.sleep(1e-3) + + await altp.events.stop() + + altp.events.register("myevent", tofire) + assert altp.events.is_registered("myevent") + + altp.events.register("internal_error", diehard) + assert altp.events.is_registered("internal_error") + + altp.create_task(start()) + await run() + + while len(called) < times: + asyncio.sleep(1e-3) + + called.sort() + for i in range(times): + assert called[i] == i diff --git a/altp/tests/test_export.py b/altp/tests/test_export.py new file mode 100644 index 0000000..a13a25e --- /dev/null +++ b/altp/tests/test_export.py @@ -0,0 +1,190 @@ +""" +Unit tests for Exporter implementations. 
+""" +import json +import asyncio +import pytest +from altp.data import Test +from altp.data import Suite +from altp.results import SuiteResults, TestResults +from altp.export import JSONExporter + + +pytestmark = pytest.mark.asyncio + + +class TestJSONExporter: + """ + Test JSONExporter class implementation. + """ + + async def test_save_file_bad_args(self): + """ + Test save_file method with bad arguments. + """ + exporter = JSONExporter() + + with pytest.raises(ValueError): + await exporter.save_file(list(), "") + + with pytest.raises(ValueError): + await exporter.save_file(None, "") + + with pytest.raises(ValueError): + await exporter.save_file([0, 1], None) + + async def test_save_file(self, tmpdir): + """ + Test save_file method. + """ + # create suite/test metadata objects + tests = [ + Test("ls0", "ls", ""), + Test("ls1", "ls", "-l"), + Test("ls2", "ls", "--error") + ] + suite0 = Suite("ls_suite0", tests) + + # create results objects + tests_res = [ + TestResults( + test=tests[0], + failed=0, + passed=1, + broken=0, + skipped=0, + warnings=0, + exec_time=1, + retcode=0, + stdout="folder\nfile.txt" + ), + TestResults( + test=tests[1], + failed=0, + passed=1, + broken=0, + skipped=0, + warnings=0, + exec_time=1, + retcode=0, + stdout="folder\nfile.txt" + ), + TestResults( + test=tests[2], + failed=1, + passed=0, + broken=0, + skipped=0, + warnings=0, + exec_time=1, + retcode=1, + stdout="" + ), + ] + + suite_res = [ + SuiteResults( + suite=suite0, + tests=tests_res, + distro="openSUSE-Leap", + distro_ver="15.3", + kernel="5.17", + arch="x86_64", + cpu="x86_64", + swap="10 kB", + ram="1000 kB", + exec_time=3), + ] + + exporter = JSONExporter() + tasks = [] + + for i in range(100): + output = tmpdir / f"output{i}.json" + tasks.append(exporter.save_file(suite_res, str(output))) + + await asyncio.gather(*tasks, return_exceptions=True) + + for i in range(100): + data = None + + output = tmpdir / f"output{i}.json" + with open(str(output), 'r') as json_data: + data = json.load(json_data) + + assert len(data["results"]) == 3 + assert data["results"][0] == { + "test": { + "command": "ls", + "arguments": "", + "failed": 0, + "passed": 1, + "broken": 0, + "skipped": 0, + "warnings": 0, + "duration": 1, + "result": "pass", + "log": "folder\nfile.txt", + "retval": [ + "0" + ], + }, + "status": "pass", + "test_fqn": "ls0", + } + assert data["results"][1] == { + "test": { + "command": "ls", + "arguments": "-l", + "failed": 0, + "passed": 1, + "broken": 0, + "skipped": 0, + "warnings": 0, + "duration": 1, + "result": "pass", + "log": "folder\nfile.txt", + "retval": [ + "0" + ], + }, + "status": "pass", + "test_fqn": "ls1", + } + assert data["results"][2] == { + "test": { + "command": "ls", + "arguments": "--error", + "failed": 1, + "passed": 0, + "broken": 0, + "skipped": 0, + "warnings": 0, + "duration": 1, + "result": "fail", + "log": "", + "retval": [ + "1" + ], + }, + "status": "fail", + "test_fqn": "ls2", + } + + assert data["environment"] == { + "distribution_version": "15.3", + "distribution": "openSUSE-Leap", + "kernel": "5.17", + "arch": "x86_64", + "cpu": "x86_64", + "swap": "10 kB", + "RAM": "1000 kB", + } + assert data["stats"] == { + "runtime": 3, + "passed": 2, + "failed": 1, + "broken": 0, + "skipped": 0, + "warnings": 0, + } diff --git a/altp/tests/test_host.py b/altp/tests/test_host.py new file mode 100644 index 0000000..1eef65a --- /dev/null +++ b/altp/tests/test_host.py @@ -0,0 +1,67 @@ +""" +Unittests for host SUT implementations. 
+""" +import pytest +from altp.host import HostSUT +from altp.tests.sut import _TestSUT +from altp.tests.sut import Printer + + +pytestmark = pytest.mark.asyncio + + +@pytest.fixture +async def sut(): + sut = HostSUT() + sut.setup() + + yield sut + + if await sut.is_running: + await sut.stop() + + +class TestHostSUT(_TestSUT): + """ + Test HostSUT implementation. + """ + + @pytest.fixture + def sut_stop_sleep(self, request): + """ + Host SUT test doesn't require time sleep in `test_stop_communicate`. + """ + return request.param * 0 + + async def test_cwd(self, tmpdir): + """ + Test CWD constructor argument. + """ + myfile = tmpdir / "myfile" + myfile.write("runltp-ng tests") + + sut = HostSUT() + sut.setup(cwd=str(tmpdir)) + await sut.communicate(iobuffer=Printer()) + + ret = await sut.run_command("cat myfile", iobuffer=Printer()) + assert ret["returncode"] == 0 + assert ret["stdout"] == "runltp-ng tests" + + async def test_env(self, tmpdir): + """ + Test ENV constructor argument. + """ + myfile = tmpdir / "myfile" + myfile.write("runltp-ng tests") + + sut = HostSUT() + sut.setup(cwd=str(tmpdir), env=dict(FILE=str(myfile))) + await sut.communicate(iobuffer=Printer()) + + ret = await sut.run_command("cat $FILE", iobuffer=Printer()) + assert ret["returncode"] == 0 + assert ret["stdout"] == "runltp-ng tests" + + async def test_fetch_file_stop(self): + pytest.skip(reason="Coroutines don't support I/O file handling") diff --git a/altp/tests/test_ltx.py b/altp/tests/test_ltx.py new file mode 100644 index 0000000..8569e43 --- /dev/null +++ b/altp/tests/test_ltx.py @@ -0,0 +1,331 @@ +""" +Unittests for ltx module. +""" +import os +import time +import signal +import subprocess +import pytest +import altp.ltx as ltx +from altp.ltx import LTXSUT +from altp.tests.sut import _TestSUT + +pytestmark = pytest.mark.asyncio + +TEST_LTX_BINARY = os.environ.get("TEST_LTX_BINARY", None) + + +@pytest.mark.ltx +@pytest.mark.skipif( + not TEST_LTX_BINARY or not os.path.isfile(TEST_LTX_BINARY), + reason="TEST_LTX_BINARY doesn't exist") +class TestLTX: + """ + Unittest for LTX class. + """ + + @pytest.fixture(scope="session") + async def handle(self): + """ + LTX session handler. + """ + with subprocess.Popen( + TEST_LTX_BINARY, + bufsize=0, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE) as proc: + async with ltx.Session( + proc.stdin.fileno(), + proc.stdout.fileno()) as handle: + yield handle + + async def test_version(self, handle): + """ + Test version request. + """ + req = ltx.version() + replies = await handle.gather([req], timeout=1) + assert replies[req][0] == "0.1" + + async def test_ping(self, handle): + """ + Test ping request. + """ + start_t = time.monotonic() + req = ltx.ping() + replies = await handle.gather([req], timeout=1) + assert start_t < replies[req][0] * 1e-9 < time.monotonic() + + async def test_ping_flood(self, handle): + """ + Test multiple ping request in a row. + """ + times = 100 + requests = [] + for _ in range(times): + requests.append(ltx.ping()) + + start_t = time.monotonic() + replies = await handle.gather(requests, timeout=10) + end_t = time.monotonic() + + for reply in replies: + assert start_t < replies[reply][0] * 1e-9 < end_t + + async def test_execute(self, handle): + """ + Test execute request. 
+ """ + stdout = [] + + def _stdout_callback(data): + stdout.append(data) + + start_t = time.monotonic() + req = ltx.execute(0, "uname", stdout_callback=_stdout_callback) + replies = await handle.gather([req], timeout=3) + reply = replies[req] + + assert ''.join(stdout) == "Linux\n" + assert reply[0] == "Linux\n" + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_execute_builtin(self, handle): + """ + Test execute request with builtin command. + """ + stdout = [] + + def _stdout_callback(data): + stdout.append(data) + + start_t = time.monotonic() + req = ltx.execute(0, "echo -n ciao", stdout_callback=_stdout_callback) + replies = await handle.gather([req], timeout=3) + reply = replies[req] + + assert ''.join(stdout) == "ciao" + assert reply[0] == "ciao" + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_execute_multiple(self, handle): + """ + Test multiple execute request in a row. + """ + times = os.cpu_count() + stdout = [] + + def _stdout_callback(data): + stdout.append(data) + + start_t = time.monotonic() + req = [] + for slot in range(times): + req.append(ltx.execute(slot, "echo -n ciao", + stdout_callback=_stdout_callback)) + + replies = await handle.gather(req, timeout=3) + end_t = time.monotonic() + + for reply in replies.values(): + assert reply[0] == "ciao" + assert start_t < reply[1] * 1e-9 < end_t + assert reply[2] == 1 + assert reply[3] == 0 + + for data in stdout: + assert data == "ciao" + + async def test_set_file(self, handle, tmp_path): + """ + Test set_file request. + """ + data = b'AaXa\x00\x01\x02Zz' * 1024 + pfile = tmp_path / 'file.bin' + + req = ltx.set_file(str(pfile), data) + await handle.gather([req], timeout=5) + + assert pfile.read_bytes() == data + + async def test_get_file(self, handle, tmp_path): + """ + Test get_file request. + """ + pfile = tmp_path / 'file.bin' + pfile.write_bytes(b'AaXa\x00\x01\x02Zz' * 1024) + + req = ltx.get_file(str(pfile)) + replies = await handle.gather([req], timeout=5) + + assert pfile.read_bytes() == replies[req][0] + + async def test_kill(self, handle): + """ + Test kill method. + """ + start_t = time.monotonic() + exec_req = ltx.execute(0, "sleep 1") + kill_req = ltx.kill(0) + replies = await handle.gather([exec_req, kill_req], timeout=3) + reply = replies[exec_req] + + assert reply[0] == "" + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 2 + assert reply[3] == signal.SIGKILL + + async def test_env(self, handle): + """ + Test env request. + """ + start_t = time.monotonic() + env_req = ltx.env(0, "LTPROOT", "/opt/ltp") + exec_req = ltx.execute(0, "echo -n $LTPROOT") + replies = await handle.gather([env_req, exec_req], timeout=3) + reply = replies[exec_req] + + assert reply[0] == "/opt/ltp" + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_env_multiple(self, handle): + """ + Test env request. + """ + start_t = time.monotonic() + env_req = ltx.env(128, "LTPROOT", "/opt/ltp") + exec_req = ltx.execute(0, "echo -n $LTPROOT") + replies = await handle.gather([env_req, exec_req], timeout=3) + reply = replies[exec_req] + + assert reply[0] == "/opt/ltp" + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_cwd(self, handle, tmpdir): + """ + Test cwd request. 
+ """ + path = str(tmpdir) + + start_t = time.monotonic() + env_req = ltx.cwd(0, path) + exec_req = ltx.execute(0, "echo -n $PWD") + replies = await handle.gather([env_req, exec_req], timeout=3) + reply = replies[exec_req] + + assert reply[0] == path + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_cwd_multiple(self, handle, tmpdir): + """ + Test cwd request on multiple slots. + """ + path = str(tmpdir) + + start_t = time.monotonic() + env_req = ltx.cwd(128, path) + exec_req = ltx.execute(0, "echo -n $PWD") + replies = await handle.gather([env_req, exec_req], timeout=3) + reply = replies[exec_req] + + assert reply[0] == path + assert start_t < reply[1] * 1e-9 < time.monotonic() + assert reply[2] == 1 + assert reply[3] == 0 + + async def test_all_together(self, handle, tmp_path): + """ + Test all requests together. + """ + data = b'AaXa\x00\x01\x02Zz' * 1024 + pfile = tmp_path / 'file.bin' + + requests = [] + requests.append(ltx.version()) + requests.append(ltx.set_file(str(pfile), data)) + requests.append(ltx.ping()) + requests.append(ltx.env(0, "LTPROOT", "/opt/ltp")) + requests.append(ltx.execute(0, "sleep 5")) + requests.append(ltx.kill(0)) + requests.append(ltx.get_file(str(pfile))) + + await handle.gather(requests, timeout=10) + + +@pytest.mark.ltx +@pytest.mark.skipif( + not TEST_LTX_BINARY or not os.path.isfile(TEST_LTX_BINARY), + reason="TEST_LTX_BINARY doesn't exist") +class TestLTXSUT(_TestSUT): + """ + Test HostSUT implementation. + """ + + @pytest.fixture + async def sut(self, tmpdir): + """ + LTXSUT instance object. + """ + stdin_path = str(tmpdir / 'transport.in') + stdout_path = str(tmpdir / 'transport.out') + + os.mkfifo(stdin_path) + os.mkfifo(stdout_path) + + stdin = os.open(stdin_path, os.O_RDONLY | os.O_NONBLOCK) + stdout = os.open(stdout_path, os.O_RDWR) + + proc = subprocess.Popen( + TEST_LTX_BINARY, + stdin=stdin, + stdout=stdout, + stderr=stdout, + bufsize=0, + shell=True) + + sut = LTXSUT() + sut.setup( + cwd=str(tmpdir), + env=dict(HELLO="WORLD"), + stdin=stdin_path, + stdout=stdout_path) + + yield sut + + if await sut.is_running: + await sut.stop() + + proc.kill() + + async def test_cwd(self, sut, tmpdir): + """ + Test CWD constructor argument. + """ + await sut.communicate() + + ret = await sut.run_command("echo -n $PWD") + assert ret["returncode"] == 0 + assert ret["stdout"] == str(tmpdir) + + async def test_env(self, sut): + """ + Test ENV constructor argument. + """ + await sut.communicate() + + ret = await sut.run_command("echo -n $HELLO") + assert ret["returncode"] == 0 + assert ret["stdout"] == "WORLD" + + async def test_fetch_file_stop(self): + pytest.skip(reason="LTX doesn't support stop for GET_FILE") diff --git a/altp/tests/test_main.py b/altp/tests/test_main.py new file mode 100644 index 0000000..638f56f --- /dev/null +++ b/altp/tests/test_main.py @@ -0,0 +1,394 @@ +""" +Unittests for main module. +""" +import os +import pwd +import time +import json +import pytest +import altp.main + + +class TestMain: + """ + The the main module entry point. + """ + # number of tests created inside temporary folder + TESTS_NUM = 6 + + @pytest.fixture(autouse=True) + def prepare_tmpdir(self, tmpdir): + """ + Prepare the temporary directory adding runtest folder. 
+ """ + # create simple testing suites + content = "" + for i in range(self.TESTS_NUM): + content += f"test0{i} echo ciao\n" + + tmpdir.mkdir("testcases").mkdir("bin") + runtest = tmpdir.mkdir("runtest") + + for i in range(3): + suite = runtest / f"suite{i}" + suite.write(content) + + # create a suite that is executing slower than the others + content = "" + for i in range(self.TESTS_NUM, self.TESTS_NUM * 2): + content += f"test0{i} sleep 0.05\n" + + suite = runtest / f"slow_suite" + suite.write(content) + + # enable parallelization for 'slow_suite' + tests = {} + for index in range(self.TESTS_NUM, self.TESTS_NUM * 2): + name = f"test0{index}" + tests[name] = {} + + metadata_d = {"tests": tests} + metadata = tmpdir.mkdir("metadata") / "ltp.json" + metadata.write(json.dumps(metadata_d)) + + # create a suite printing environment variables + suite = runtest / f"env_suite" + suite.write("test_env echo -n $VAR0:$VAR1:$VAR2") + + def read_report(self, temp, tests_num) -> dict: + """ + Check if report file contains the given number of tests. + """ + name = pwd.getpwuid(os.getuid()).pw_name + report = str(temp / f"runltp.{name}" / "latest" / "results.json") + assert os.path.isfile(report) + + # read report and check if all suite's tests have been executed + report_d = None + with open(report, 'r') as report_f: + report_d = json.loads(report_f.read()) + + assert len(report_d["results"]) == tests_num + + return report_d + + def test_sut_plugins(self, tmpdir): + """ + Test if SUT implementations are correctly loaded. + """ + suts = [] + suts.append(tmpdir / "sutA.py") + suts.append(tmpdir / "sutB.py") + suts.append(tmpdir / "sutC.txt") + + for index in range(0, len(suts)): + suts[index].write( + "from altp.sut import SUT\n\n" + f"class SUT{index}(SUT):\n" + " @property\n" + " def name(self) -> str:\n" + f" return 'mysut{index}'\n" + ) + + altp.main._discover_sut(str(tmpdir)) + + assert len(altp.main.LOADED_SUT) == 2 + + for index in range(0, len(altp.main.LOADED_SUT)): + assert altp.main.LOADED_SUT[index].name == f"mysut{index}" + + def test_wrong_options(self): + """ + Test wrong options. + """ + cmd_args = [ + "--run-command1234", "ls" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == 2 + + def test_run_command(self, tmpdir): + """ + Test --run-command option. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-command", "ls" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + def test_run_command_timeout(self, tmpdir): + """ + Test --run-command option with timeout. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-command", "ls", + "--exec-timeout", "0" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_ERROR + + def test_run_suite(self, tmpdir): + """ + Test --run-suite option. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", "suite1", "suite2" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + self.read_report(temp, self.TESTS_NUM * 3) + + def test_run_suite_timeout(self, tmpdir): + """ + Test --run-suite option with timeout. 
+ """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", + "--suite-timeout", "0" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + report_d = self.read_report(temp, self.TESTS_NUM) + for param in report_d["results"]: + assert param["test"]["passed"] == 0 + assert param["test"]["failed"] == 0 + assert param["test"]["broken"] == 0 + assert param["test"]["warnings"] == 0 + assert param["test"]["skipped"] == 1 + + def test_run_suite_verbose(self, tmpdir, capsys): + """ + Test --run-suite option with --verbose. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", + "--verbose", + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + captured = capsys.readouterr() + assert "ciao\n" in captured.out + + @pytest.mark.xfail(reason="This test passes if run alone. capsys bug?") + def test_run_suite_no_colors(self, tmpdir, capsys): + """ + Test --run-suite option with --no-colors. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", + "--no-colors", + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + out, _ = capsys.readouterr() + assert "test00: pass" in out + + def test_json_report(self, tmpdir): + """ + Test --json-report option. + """ + temp = tmpdir.mkdir("temp") + report = str(tmpdir / "report.json") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite1", + "--json-report", report + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + assert os.path.isfile(report) + + report_a = self.read_report(temp, self.TESTS_NUM) + report_b = None + with open(report, 'r') as report_f: + report_b = json.loads(report_f.read()) + + assert report_a == report_b + + def test_skip_tests(self, tmpdir): + """ + Test --skip-tests option. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", "suite2", + "--skip-tests", "test0[01]" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + self.read_report(temp, (self.TESTS_NUM - 2) * 2) + + def test_skip_file(self, tmpdir): + """ + Test --skip-file option. + """ + skipfile = tmpdir / "skipfile" + skipfile.write("test01\ntest02") + + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", "suite2", + "--skip-file", str(skipfile) + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + self.read_report(temp, (self.TESTS_NUM - 2) * 2) + + def test_skip_tests_and_file(self, tmpdir): + """ + Test --skip-file option with --skip-tests. 
+ """ + skipfile = tmpdir / "skipfile" + skipfile.write("test02\ntest03") + + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "suite0", "suite2", + "--skip-tests", "test0[01]", + "--skip-file", str(skipfile) + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + self.read_report(temp, (self.TESTS_NUM - 4) * 2) + + def test_workers(self, tmpdir): + """ + Test --workers option. + """ + temp = tmpdir.mkdir("temp") + + # run on single worker + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "slow_suite", + "--workers", "1", + ] + + first_t = 0 + start_t = time.time() + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + first_t = time.time() - start_t + + assert excinfo.value.code == altp.main.RC_OK + self.read_report(temp, self.TESTS_NUM) + + # run on multiple workers + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "slow_suite", + "--workers", str(os.cpu_count()), + ] + + second_t = 0 + start_t = time.time() + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + second_t = time.time() - start_t + + assert excinfo.value.code == altp.main.RC_OK + self.read_report(temp, self.TESTS_NUM) + + assert second_t < first_t + + def test_sut_help(self): + """ + Test "--sut help" command and check if SUT class(es) are loaded. + """ + cmd_args = [ + "--sut", "help" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + assert len(altp.main.LOADED_SUT) > 0 + + def test_env(self, tmpdir): + """ + Test --env option. + """ + temp = tmpdir.mkdir("temp") + cmd_args = [ + "--ltp-dir", str(tmpdir), + "--tmp-dir", str(temp), + "--run-suite", "env_suite", + "--env", "VAR0=0:VAR1=1:VAR2=2" + ] + + with pytest.raises(SystemExit) as excinfo: + altp.main.run(cmd_args=cmd_args) + + assert excinfo.value.code == altp.main.RC_OK + + report_d = self.read_report(temp, 1) + assert report_d["results"][0]["test"]["log"] == "0:1:2" diff --git a/altp/tests/test_qemu.py b/altp/tests/test_qemu.py new file mode 100644 index 0000000..204cc59 --- /dev/null +++ b/altp/tests/test_qemu.py @@ -0,0 +1,88 @@ +""" +Test SUT implementations. +""" +import os +import pytest +from altp.qemu import QemuSUT +from altp.sut import KernelPanicError +from altp.tests.sut import _TestSUT +from altp.tests.sut import Printer + +pytestmark = pytest.mark.asyncio + +TEST_QEMU_IMAGE = os.environ.get("TEST_QEMU_IMAGE", None) +TEST_QEMU_PASSWORD = os.environ.get("TEST_QEMU_PASSWORD", None) + + +@pytest.mark.qemu +@pytest.mark.skipif( + TEST_QEMU_IMAGE is None, + reason="TEST_QEMU_IMAGE is not defined") +@pytest.mark.skipif( + TEST_QEMU_PASSWORD is None, + reason="TEST_QEMU_PASSWORD is not defined") +class _TestQemuSUT(_TestSUT): + """ + Test Qemu SUT implementation. + """ + + async def test_kernel_panic(self, sut): + """ + Test kernel panic recognition. 
+ """ + iobuff = Printer() + + await sut.communicate(iobuffer=iobuff) + await sut.run_command( + "echo 'Kernel panic\nThis is a generic message' > /tmp/panic.txt", + iobuffer=iobuff) + + with pytest.raises(KernelPanicError): + await sut.run_command( + "cat /tmp/panic.txt", + iobuffer=iobuff) + + async def test_fetch_file_stop(self): + pytest.skip(reason="Coroutines don't support I/O file handling") + + +class TestQemuSUTISA(_TestQemuSUT): + """ + Test QemuSUT implementation. + """ + + @pytest.fixture + async def sut(self, tmpdir): + iobuff = Printer() + + runner = QemuSUT() + runner.setup( + tmpdir=str(tmpdir), + image=TEST_QEMU_IMAGE, + password=TEST_QEMU_PASSWORD, + serial="isa") + + yield runner + + if await runner.is_running: + await runner.stop(iobuffer=iobuff) + + +class TestQemuSUTVirtIO(_TestQemuSUT): + """ + Test QemuSUT implementation. + """ + + @pytest.fixture + async def sut(self, tmpdir): + runner = QemuSUT() + runner.setup( + tmpdir=str(tmpdir), + image=TEST_QEMU_IMAGE, + password=TEST_QEMU_PASSWORD, + serial="virtio") + + yield runner + + if await runner.is_running: + await runner.stop() diff --git a/altp/tests/test_scheduler.py b/altp/tests/test_scheduler.py new file mode 100644 index 0000000..3021802 --- /dev/null +++ b/altp/tests/test_scheduler.py @@ -0,0 +1,669 @@ +""" +Unittests for runner module. +""" +import re +import time +import asyncio +import pytest +import altp.data +from altp.host import HostSUT +from altp.scheduler import TestScheduler +from altp.scheduler import SuiteScheduler +from altp.scheduler import KernelTainedError +from altp.scheduler import KernelTimeoutError +from altp.scheduler import KernelPanicError + +pytestmark = pytest.mark.asyncio + + +class MockHostSUT(HostSUT): + """ + HostSUT mock. + """ + + async def get_info(self) -> dict: + return { + "distro": "openSUSE", + "distro_ver": "15.3", + "kernel": "5.10", + "arch": "x86_64", + "cpu": "x86_64", + "swap": "0", + "ram": "1M", + } + + async def get_tainted_info(self) -> tuple: + return 0, [""] + + +class MockTestScheduler(TestScheduler): + """ + TestScheduler mock that is not checking for tainted kernel + and it doesn't write into /dev/kmsg + """ + + async def _write_kmsg(self, test) -> None: + pass + + +class MockSuiteScheduler(SuiteScheduler): + """ + SuiteScheduler mock that traces SUT reboots. + """ + + def __init__(self, **kwargs: dict) -> None: + super().__init__(**kwargs) + self._scheduler = MockTestScheduler( + sut=kwargs.get("sut", None), + timeout=kwargs.get("exec_timeout", 3600), + max_workers=kwargs.get("max_workers", 1) + ) + self._rebooted = 0 + + async def _restart_sut(self) -> None: + self._logger.info("Rebooting the SUT") + + await self._scheduler.stop() + await self._sut.stop() + await self._sut.communicate() + + self._rebooted += 1 + + @property + def rebooted(self) -> int: + return self._rebooted + + +@pytest.fixture +async def sut(): + """ + SUT object. + """ + obj = MockHostSUT() + obj.setup() + await obj.communicate() + yield obj + await obj.stop() + + +def make_parallelizable(suite): + """ + Make an entire suite parallel. + """ + for test in suite.tests: + test._parallelizable = True + + +class TestTestScheduler: + """ + Tests for TestScheduler. 
+ """ + + @pytest.fixture + async def create_runner(self, sut): + def _callback( + timeout: float = 3600.0, + max_workers: int = 1) -> TestScheduler: + obj = MockTestScheduler( + sut=sut, + timeout=timeout, + max_workers=max_workers) + + return obj + + yield _callback + + async def test_schedule(self, create_runner): + """ + Test the schedule method. + """ + tests_num = 10 + content = "" + for i in range(tests_num): + content += f"test{i} sleep 0.02; echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=1) + + # single worker + start = time.time() + await runner.schedule(suite.tests) + end_single = time.time() - start + + assert len(runner.results) == tests_num + + # check completed tests + matcher = re.compile(r"test(?P\d+)") + numbers = list(range(tests_num)) + + for res in runner.results: + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + match = matcher.search(res.test.name) + assert match is not None + + number = int(match.group("number")) + numbers.remove(number) + + assert len(numbers) == 0 + + # multiple workers + runner = create_runner(max_workers=tests_num) + + start = time.time() + await runner.schedule(suite.tests) + end_multi = time.time() - start + + assert len(runner.results) == tests_num + assert end_multi < end_single + + async def test_schedule_stop(self, create_runner): + """ + Test the schedule method when stop is called. + """ + tests_num = 10 + content = "test0 echo ciao\n" + for i in range(1, tests_num): + content += f"test{i} sleep 1\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=tests_num) + + async def stop(): + await asyncio.sleep(0.5) + await runner.stop() + + await asyncio.gather(*[ + runner.schedule(suite.tests), + stop() + ]) + + assert len(runner.results) == 1 + res = runner.results[0] + + assert res.test.name == "test0" + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + async def test_schedule_kernel_tainted(self, create_runner): + """ + Test the schedule method when kernel is tainted. + """ + tainted = [] + + async def mock_tainted(): + if tainted: + tainted.clear() + return 1, ["proprietary module was loaded"] + + tainted.append(1) + return 0, [""] + + runner = create_runner(max_workers=1) + runner._get_tainted_status = mock_tainted + + content = "" + for i in range(2): + content += f"test{i} echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + with pytest.raises(KernelTainedError): + await runner.schedule(suite.tests) + + assert len(runner.results) == 1 + res = runner.results[0] + + assert res.test.name == "test0" + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + async def test_schedule_kernel_panic(self, create_runner): + """ + Test the schedule method on kernel panic. It runs some tests in + parallel then it generates a Kernel panic, it verifies that only one + test has been executed and it failed. 
+ """ + content = "test0 echo Kernel panic\n" + content += "test1 echo ciao; sleep 3\n" + content += "test2 echo ciao; sleep 3\n" + content += "test3 echo ciao; sleep 3\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=10) + + with pytest.raises(KernelPanicError): + await runner.schedule(suite.tests) + + assert len(runner.results) == 1 + res = runner.results[0] + + assert res.test.name == "test0" + assert res.passed == 0 + assert res.failed == 0 + assert res.broken == 1 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == -1 + assert res.stdout == "Kernel panic\n" + + async def test_schedule_kernel_timeout(self, sut, create_runner): + """ + Test the schedule method on kernel timeout. + """ + async def kernel_timeout(command, iobuffer=None) -> dict: + raise asyncio.TimeoutError() + + sut.run_command = kernel_timeout + + content = "" + for i in range(2): + content += f"test{i} echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=1) + + with pytest.raises(KernelTimeoutError): + await runner.schedule(suite.tests) + + assert len(runner.results) == 1 + res = runner.results[0] + + assert res.passed == 0 + assert res.failed == 0 + assert res.broken == 1 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == -1 + assert res.stdout == "" + + async def test_schedule_test_timeout(self, create_runner): + """ + Test the schedule method on test timeout. + """ + content = "test0 echo ciao; sleep 2\n" + content += "test1 echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(timeout=0.5, max_workers=2) + + await runner.schedule(suite.tests) + + assert len(runner.results) == 2 + + assert runner.results[0].test.name == "test1" + assert runner.results[0].passed == 1 + assert runner.results[0].failed == 0 + assert runner.results[0].broken == 0 + assert runner.results[0].skipped == 0 + assert runner.results[0].warnings == 0 + assert 0 < runner.results[0].exec_time < 1 + assert runner.results[0].return_code == 0 + assert runner.results[0].stdout == "ciao\n" + + assert runner.results[1].test.name == "test0" + assert runner.results[1].passed == 0 + assert runner.results[1].failed == 0 + assert runner.results[1].broken == 1 + assert runner.results[1].skipped == 0 + assert runner.results[1].warnings == 0 + assert 0 < runner.results[1].exec_time < 2 + assert runner.results[1].return_code == -1 + assert runner.results[1].stdout == "ciao\n" + + +class TestSuiteScheduler: + """ + Tests for SuiteScheduler. + """ + + @pytest.fixture + async def create_runner(self, sut): + def _callback( + suite_timeout: float = 3600.0, + exec_timeout: float = 3600.0, + max_workers: int = 1) -> SuiteScheduler: + obj = MockSuiteScheduler( + sut=sut, + suite_timeout=suite_timeout, + exec_timeout=exec_timeout, + max_workers=max_workers) + + return obj + + yield _callback + + async def test_schedule(self, create_runner): + """ + Test the schedule method. 
+ """ + tests_num = 10 + content = "" + for i in range(tests_num): + content += f"test{i} sleep 0.02; echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + # single worker + runner = create_runner(max_workers=1) + + start = time.time() + await runner.schedule([suite]) + end_single = time.time() - start + + assert len(runner.results) == 1 + + assert runner.results[0].suite.name == "suite" + assert runner.results[0].distro is not None + assert runner.results[0].distro_ver is not None + assert runner.results[0].kernel is not None + assert runner.results[0].arch is not None + assert runner.results[0].cpu is not None + assert runner.results[0].swap is not None + assert runner.results[0].ram is not None + assert runner.results[0].passed == 10 + assert runner.results[0].failed == 0 + assert runner.results[0].broken == 0 + assert runner.results[0].skipped == 0 + assert runner.results[0].warnings == 0 + assert 0 < runner.results[0].exec_time < 10 + + # check completed tests + matcher = re.compile(r"test(?P\d+)") + numbers = list(range(tests_num)) + + for res in runner.results[0].tests_results: + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + match = matcher.search(res.test.name) + assert match is not None + + number = int(match.group("number")) + numbers.remove(number) + + assert len(numbers) == 0 + + # multiple workers + runner = create_runner(max_workers=tests_num) + + start = time.time() + await runner.schedule([suite]) + end_multi = time.time() - start + + assert len(runner.results) == 1 + assert end_multi < end_single + + async def test_schedule_stop(self, create_runner): + """ + Test the schedule method when stop is called. + """ + tests_num = 10 + content = "test0 echo ciao\n" + for i in range(1, tests_num): + content += f"test{i} sleep 1\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=tests_num) + + async def stop(): + await asyncio.sleep(0.5) + await runner.stop() + + await asyncio.gather(*[ + runner.schedule([suite]), + stop() + ]) + + assert len(runner.results) == 1 + suite_res = runner.results[0] + + assert len(suite_res.tests_results) == 1 + res = suite_res.tests_results[0] + + assert res.test.name == "test0" + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + async def test_schedule_kernel_tainted(self, sut, create_runner): + """ + Test the schedule method when kernel is tainted. 
+ """ + tainted = [] + + async def mock_tainted(): + if tainted: + tainted.clear() + return 1, ["proprietary module was loaded"] + + tainted.append(1) + return 0, [] + + tests_num = 4 + content = "" + for i in range(tests_num): + content += f"test{i} echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + sut.get_tainted_info = mock_tainted + runner = create_runner(max_workers=1) + + await runner.schedule([suite]) + + assert runner.rebooted == tests_num + assert len(runner.results) == 1 + assert len(runner.results[0].tests_results) == tests_num + + # check completed tests + matcher = re.compile(r"test(?P\d+)") + numbers = list(range(tests_num)) + + for res in runner.results[0].tests_results: + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + match = matcher.search(res.test.name) + assert match is not None + + number = int(match.group("number")) + numbers.remove(number) + + assert len(numbers) == 0 + + @pytest.mark.parametrize("max_workers", [1, 10]) + async def test_schedule_kernel_panic(self, create_runner, max_workers): + """ + Test the schedule method on kernel panic. + """ + tests_num = 3 + + content = "test0 echo Kernel panic\n" + content += "test1 echo ciao; sleep 0.3\n" + for i in range(2, tests_num): + content += f"test{i} echo ciao; sleep 0.3\n" + + suite = await altp.data.read_runtest("suite", content) + runner = create_runner(max_workers=max_workers) + + await runner.schedule([suite]) + make_parallelizable(suite) + + assert runner.rebooted == 1 + assert len(runner.results) == 1 + assert len(runner.results[0].tests_results) == tests_num + + res = runner.results[0].tests_results[0] + assert res.passed == 0 + assert res.failed == 0 + assert res.broken == 1 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == -1 + assert res.stdout == "Kernel panic\n" + + # check completed tests + matcher = re.compile(r"test(?P\d+)") + numbers = list(range(1, tests_num)) + + for res in runner.results[0].tests_results[1:]: + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 2 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + match = matcher.search(res.test.name) + assert match is not None + + number = int(match.group("number")) + numbers.remove(number) + + assert len(numbers) == 0 + + @pytest.mark.parametrize("max_workers", [1, 10]) + async def test_schedule_kernel_timeout( + self, + sut, + create_runner, + max_workers): + """ + Test the schedule method on kernel timeout. 
+ """ + async def kernel_timeout(command, iobuffer=None) -> dict: + raise asyncio.TimeoutError() + + sut.run_command = kernel_timeout + + content = "" + for i in range(max_workers): + content += f"test{i} echo ciao\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(max_workers=max_workers) + + await runner.schedule([suite]) + + assert runner.rebooted == 1 + assert len(runner.results) == 1 + assert len(runner.results[0].tests_results) == max_workers + + # check completed tests + matcher = re.compile(r"test(?P\d+)") + numbers = list(range(max_workers)) + + for res in runner.results[0].tests_results: + assert res.passed == 0 + assert res.failed == 0 + assert res.broken == 1 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == -1 + assert res.stdout == "" + + match = matcher.search(res.test.name) + assert match is not None + + number = int(match.group("number")) + numbers.remove(number) + + assert len(numbers) == 0 + + @pytest.mark.parametrize("max_workers", [1, 10]) + async def test_schedule_suite_timeout(self, create_runner, max_workers): + """ + Test the schedule method on suite timeout. + """ + content = "test0 echo ciao\n" + content += "test1 echo ciao; sleep 2\n" + + suite = await altp.data.read_runtest("suite", content) + make_parallelizable(suite) + + runner = create_runner(suite_timeout=0.5, max_workers=max_workers) + + await runner.schedule([suite]) + + assert len(runner.results) == 1 + res = runner.results[0].tests_results[0] + assert res.test.name == "test0" + assert res.passed == 1 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 0 + assert res.warnings == 0 + assert 0 < res.exec_time < 1 + assert res.return_code == 0 + assert res.stdout == "ciao\n" + + res = runner.results[0].tests_results[1] + assert res.test.name == "test1" + assert res.passed == 0 + assert res.failed == 0 + assert res.broken == 0 + assert res.skipped == 1 + assert res.warnings == 0 + assert res.exec_time == 0 + assert res.return_code == 32 + assert res.stdout == "" diff --git a/altp/tests/test_session.py b/altp/tests/test_session.py new file mode 100644 index 0000000..bc3f4ea --- /dev/null +++ b/altp/tests/test_session.py @@ -0,0 +1,223 @@ +""" +Unittests for the session module. +""" +import os +import json +import stat +import asyncio +import pytest +from altp.host import HostSUT +from altp.session import Session + + +pytestmark = pytest.mark.asyncio + +# number of tests created inside temporary folder +TESTS_NUM = 9 + + +@pytest.fixture(autouse=True) +def prepare_tmpdir(tmpdir): + """ + Prepare the temporary directory adding runtest folder. + """ + content = "" + for i in range(TESTS_NUM): + content += f"test0{i} echo ciao\n" + + tmpdir.mkdir("testcases").mkdir("bin") + runtest = tmpdir.mkdir("runtest") + + for i in range(3): + suite = runtest / f"suite{i}" + suite.write(content) + + +@pytest.fixture +async def sut_config(): + """ + SUT Configuration. + """ + yield {} + + +@pytest.fixture +async def sut(sut_config): + """ + SUT communication object. + """ + obj = HostSUT() + obj.setup(*sut_config) + await obj.communicate() + yield obj + await obj.stop() + + +class TestSession: + """ + Test for Session class. + """ + + @pytest.fixture + async def session(self, tmpdir, sut): + """ + Session communication object. 
+ """ + session = Session( + tmpdir=str(tmpdir), + ltpdir=str(tmpdir), + sut=sut) + + yield session + + await asyncio.wait_for(session.stop(), timeout=30) + + async def test_run(self, tmpdir, session): + """ + Test run method when executing suites. + """ + await session.run(suites=["suite0", "suite1", "suite2"]) + + for i in range(3): + assert os.path.isfile(str(tmpdir / "runtest" / f"suite{i}")) + + async def test_run_skip_tests(self, tmpdir, sut): + """ + Test run method when executing suites. + """ + report = str(tmpdir / "report.json") + session = Session( + tmpdir=str(tmpdir), + ltpdir=str(tmpdir), + sut=sut, + skip_tests="test0[01]|test0[45]" + ) + + try: + await session.run(suites=["suite0"], report_path=report) + finally: + await asyncio.wait_for(session.stop(), timeout=30) + + assert os.path.isfile(report) + + report_data = None + with open(report, "r") as report_file: + report_data = json.loads(report_file.read()) + + assert len(report_data["results"]) == TESTS_NUM - 4 + + async def test_run_with_report(self, tmpdir, session): + """ + Test run method when generating report file. + """ + report = str(tmpdir / "report.json") + await session.run(suites=["suite0"], report_path=report) + + assert os.path.isfile(report) + + report_data = None + with open(report, "r") as report_file: + report_data = json.loads(report_file.read()) + + assert len(report_data["results"]) == TESTS_NUM + + async def test_run_stop(self, tmpdir, session): + """ + Test stop method during run. + """ + suite = tmpdir / "runtest" / "suite0" + + content = "test0 echo ciao\n" + content += "test1 echo ciao\n" + content += "test2 sleep 1; echo ciao\n" + suite.write(content) + + async def stop(): + await asyncio.sleep(0.2) + await session.stop() + + report = str(tmpdir / "report.json") + await asyncio.gather(*[ + session.run(suites=["suite0"], report_path=report), + stop(), + ]) + + assert os.path.isfile(report) + + report_data = None + with open(report, "r") as report_file: + report_data = json.loads(report_file.read()) + + assert len(report_data["results"]) == 2 + + async def test_run_command(self, sut, session): + """ + Test run method when running a single command. + """ + temp_file = "/tmp/file" + + await session.run(command=f"touch {temp_file}") + + await sut.ensure_communicate() + ret = await sut.run_command(f"test {temp_file}") + + assert ret["returncode"] == 0 + + async def test_run_command_stop(self, tmpdir, sut): + """ + Test stop when runnig a command. + """ + session = Session( + tmpdir=str(tmpdir), + ltpdir=str(tmpdir), + sut=sut) + + async def stop(): + await asyncio.sleep(0.2) + await asyncio.wait_for(session.stop(), timeout=30) + + await asyncio.gather(*[ + session.run(command="sleep 1"), + stop() + ]) + + async def test_env(self, tmpdir, sut): + """ + Test environment variables injected in the SUT by session object. 
+ """ + # create runtest file + suite = tmpdir / "runtest" / "envsuite" + suite.write("test script.sh") + + # create test script + script_sh = tmpdir / "testcases" / "bin" / "script.sh" + script_sh.write("#!/bin/sh\necho -n $VAR0:$VAR1") + + st = os.stat(str(script_sh)) + os.chmod(str(script_sh), st.st_mode | stat.S_IEXEC) + + # run session with environment variables and save report + report_path = tmpdir / "report.json" + session = Session( + tmpdir=str(tmpdir), + ltpdir=str(tmpdir), + sut=sut, + env=dict(VAR0="0", VAR1="1") + ) + + try: + await session.run( + report_path=report_path, + suites=["envsuite"]) + finally: + await asyncio.wait_for(session.stop(), timeout=30) + + assert os.path.isfile(report_path) + + # read report and check if all tests have been executed + report_d = None + with open(report_path, 'r') as report_f: + report_d = json.loads(report_f.read()) + + assert len(report_d["results"]) == 1 + assert report_d["results"][0]["test"]["log"] == "0:1" diff --git a/altp/tests/test_ssh.py b/altp/tests/test_ssh.py new file mode 100644 index 0000000..5fb1cf4 --- /dev/null +++ b/altp/tests/test_ssh.py @@ -0,0 +1,164 @@ +""" +Unittests for ssh module. +""" +import os +import asyncio +import pytest +from altp.sut import IOBuffer +from altp.sut import KernelPanicError +from altp.ssh import SSHSUT +from altp.tests.sut import _TestSUT + +pytestmark = pytest.mark.asyncio + + +TEST_SSH_USERNAME = os.environ.get("TEST_SSH_USERNAME", None) +TEST_SSH_PASSWORD = os.environ.get("TEST_SSH_PASSWORD", None) +TEST_SSH_KEY_FILE = os.environ.get("TEST_SSH_KEY_FILE", None) + + +@pytest.mark.ssh +class _TestSSHSUT(_TestSUT): + """ + Test SSHSUT implementation using username/password. + """ + + @pytest.fixture + def config(self): + """ + Base configuration to connect to SUT. + """ + raise NotImplementedError() + + @pytest.fixture + async def sut(self, config): + sut = SSHSUT() + sut.setup(**config) + + yield sut + + if await sut.is_running: + await sut.stop() + + async def test_cwd(self, config): + """ + Test CWD constructor argument. + """ + kwargs = dict(cwd="/etc") + kwargs.update(config) + + sut = SSHSUT() + sut.setup(**kwargs) + await sut.communicate() + + ret = await sut.run_command("test -f fstab") + assert ret["returncode"] == 0 + + async def test_env(self, config): + """ + Test ENV constructor argument. + """ + kwargs = dict(env=dict(BOOOOOH="myfile")) + kwargs.update(config) + + sut = SSHSUT() + sut.setup(**kwargs) + await sut.communicate() + + ret = await sut.run_command("echo -n $BOOOOOH") + assert ret["returncode"] == 0 + assert ret["stdout"] == "myfile" + + async def test_reset_command(self, config): + """ + Test reset_command option. + """ + kwargs = dict(reset_cmd="echo ciao") + kwargs.update(config) + + sut = SSHSUT() + sut.setup(**kwargs) + await sut.communicate() + + class MyBuffer(IOBuffer): + data = "" + + async def write(self, data: str) -> None: + self.data = data + # wait for data inside the buffer + await asyncio.sleep(0.1) + + buffer = MyBuffer() + await sut.stop(iobuffer=buffer) + + assert buffer.data == 'ciao\n' + + @pytest.mark.parametrize("enable", ["0", "1"]) + async def test_sudo(self, config, enable): + """ + Test sudo parameter. 
+        """
+        kwargs = dict(sudo=enable)
+        kwargs.update(config)
+
+        sut = SSHSUT()
+        sut.setup(**kwargs)
+        await sut.communicate()
+        ret = await sut.run_command("whoami")
+
+        if enable == "1":
+            assert ret["stdout"] == "root\n"
+        else:
+            assert ret["stdout"] != "root\n"
+
+    async def test_kernel_panic(self, sut):
+        """
+        Test kernel panic recognition.
+        """
+        await sut.communicate()
+
+        with pytest.raises(KernelPanicError):
+            await sut.run_command(
+                "echo 'Kernel panic\nThis is a generic message'")
+
+
+@pytest.mark.skipif(
+    TEST_SSH_USERNAME is None,
+    reason="TEST_SSH_USERNAME is not defined")
+@pytest.mark.skipif(
+    TEST_SSH_PASSWORD is None,
+    reason="TEST_SSH_PASSWORD is not defined")
+class TestSSHSUTPassword(_TestSSHSUT):
+    """
+    Test SSHSUT implementation using username/password.
+    """
+
+    @pytest.fixture
+    def config(self, tmpdir):
+        return dict(
+            tmpdir=str(tmpdir),
+            host="localhost",
+            port=22,
+            user=TEST_SSH_USERNAME,
+            password=TEST_SSH_PASSWORD)
+
+
+@pytest.mark.skipif(
+    TEST_SSH_USERNAME is None,
+    reason="TEST_SSH_USERNAME is not defined")
+@pytest.mark.skipif(
+    TEST_SSH_KEY_FILE is None,
+    reason="TEST_SSH_KEY_FILE is not defined")
+class TestSSHSUTKeyfile(_TestSSHSUT):
+    """
+    Test SSHSUT implementation using username/key file.
+    """
+
+    @pytest.fixture
+    def config(self, tmpdir):
+        return dict(
+            tmpdir=str(tmpdir),
+            host="localhost",
+            port=22,
+            user=TEST_SSH_USERNAME,
+            key_file=TEST_SSH_KEY_FILE)
diff --git a/altp/tests/test_tempfile.py b/altp/tests/test_tempfile.py
new file mode 100644
index 0000000..30854f1
--- /dev/null
+++ b/altp/tests/test_tempfile.py
@@ -0,0 +1,116 @@
+"""
+Unittests for tempfile module.
+"""
+import os
+import pytest
+from altp.tempfile import TempDir
+
+
+class TestTempDir:
+    """
+    Test the TempDir class implementation.
+    """
+
+    def test_constructor(self):
+        """
+        Test TempDir constructor.
+        """
+        with pytest.raises(ValueError):
+            TempDir(root="this_folder_doesnt_exist")
+
+    # For some reason, the following test fails on systems that are slow
+    # to release directories after removal (in particular remote containers),
+    # even after os.sync or time.sleep, so we XFAIL this test by default
+    @pytest.mark.xfail
+    def test_rotate(self, tmpdir):
+        """
+        Test folders rotation.
+        """
+        max_rotate = 5
+        plus_rotate = 5
+
+        currdir = str(tmpdir)
+
+        tempdir = None
+        for _ in range(0, max_rotate + plus_rotate):
+            tempdir = TempDir(currdir, max_rotate=max_rotate)
+
+            assert tempdir.abspath is not None
+            assert tempdir.abspath == os.readlink(
+                os.path.join(tempdir.abspath, "..", tempdir.SYMLINK_NAME))
+
+        os.sync()
+
+        total = 0
+        for _, dirs, _ in os.walk(os.path.join(tempdir.abspath, "..")):
+            for mydir in dirs:
+                if mydir != "latest":
+                    total += 1
+
+        assert total == max_rotate
+
+    def test_rotate_empty_root(self):
+        """
+        Test folders rotation with empty root.
+        """
+        tempdir = TempDir(None)
+        assert not os.path.isdir(tempdir.abspath)
+
+    def test_mkdir(self, tmpdir):
+        """
+        Test mkdir method.
+        """
+        tempdir = TempDir(str(tmpdir))
+        tempdir.mkdir("myfolder")
+        assert os.path.isdir(os.path.join(tempdir.abspath, "myfolder"))
+
+        for i in range(0, 10):
+            tempdir.mkdir(f"myfolder/{i}")
+            assert os.path.isdir(os.path.join(
+                tempdir.abspath, f"myfolder/{i}"))
+
+    def test_mkdir_no_root(self):
+        """
+        Test mkdir method without root.
+        """
+        tempdir = TempDir(None)
+        tempdir.mkdir("myfolder")
+        assert not os.path.isdir(os.path.join(tempdir.abspath, "myfolder"))
+
+    def test_mkfile(self, tmpdir):
+        """
+        Test mkfile method.
+        """
+        content = "runltp-ng stuff"
+        tempdir = TempDir(str(tmpdir))
+
+        for i in range(0, 10):
+            tempdir.mkfile(f"myfile{i}", content)
+
+            pos = os.path.join(tempdir.abspath, f"myfile{i}")
+            assert os.path.isfile(pos)
+            assert open(pos, "r").read() == "runltp-ng stuff"
+
+    def test_mkfile_no_root(self):
+        """
+        Test mkfile method without root.
+        """
+        content = "runltp-ng stuff"
+        tempdir = TempDir(None)
+
+        tempdir.mkfile("myfile", content)
+        assert not os.path.isfile(os.path.join(tempdir.abspath, "myfile"))
+
+    def test_mkdir_mkfile(self, tmpdir):
+        """
+        Test mkfile after mkdir.
+        """
+        content = "runltp-ng stuff"
+        tempdir = TempDir(str(tmpdir))
+
+        tempdir.mkdir("mydir")
+        tempdir.mkfile("mydir/myfile", content)
+
+        pos = os.path.join(tempdir.abspath, "mydir", "myfile")
+        assert os.path.isfile(pos)
+        assert open(pos, "r").read() == "runltp-ng stuff"
diff --git a/altp/ui.py b/altp/ui.py
new file mode 100644
index 0000000..5146226
--- /dev/null
+++ b/altp/ui.py
@@ -0,0 +1,388 @@
+"""
+.. module:: ui
+    :platform: Linux
+    :synopsis: module that contains user interface
+
+.. moduleauthor:: Andrea Cervesato
+"""
+import platform
+import traceback
+import altp
+import altp.events
+from altp.data import Test
+from altp.data import Suite
+from altp.results import TestResults
+from altp.results import SuiteResults
+
+# pylint: disable=missing-function-docstring
+# pylint: disable=unused-argument
+
+
+class ConsoleUserInterface:
+    """
+    Console based user interface.
+    """
+
+    GREEN = "\033[1;32m"
+    YELLOW = "\033[1;33m"
+    RED = "\033[1;31m"
+    CYAN = "\033[1;36m"
+    RESET_COLOR = "\033[0m"
+    RESET_SCREEN = "\033[2J"
+
+    def __init__(self, no_colors: bool = False) -> None:
+        self._no_colors = no_colors
+        self._line = ""
+
+        altp.events.register("session_started", self.session_started)
+        altp.events.register("session_stopped", self.session_stopped)
+        altp.events.register("sut_start", self.sut_start)
+        altp.events.register("sut_stop", self.sut_stop)
+        altp.events.register("sut_restart", self.sut_restart)
+        altp.events.register("run_cmd_start", self.run_cmd_start)
+        altp.events.register("run_cmd_stdout", self.run_cmd_stdout)
+        altp.events.register("run_cmd_stop", self.run_cmd_stop)
+        altp.events.register("suite_download_started",
+                             self.suite_download_started)
+        altp.events.register("suite_started", self.suite_started)
+        altp.events.register("suite_completed", self.suite_completed)
+        altp.events.register("session_error", self.session_error)
+        altp.events.register("internal_error", self.internal_error)
+
+    def _print(self, msg: str, color: str = None, end: str = "\n"):
+        """
+        Print a message.
+        """
+        msg = msg.replace(self.RESET_SCREEN, '')
+        msg = msg.replace('\r', '')
+
+        if color and not self._no_colors:
+            print(f"{color}{msg}{self.RESET_COLOR}", end=end, flush=True)
+        else:
+            print(msg, end=end, flush=True)
+
+    @staticmethod
+    def _user_friendly_duration(duration: float) -> str:
+        """
+        Return a user-friendly duration time from seconds.
+        For example, "3670.234" becomes "1h 1m 10s".
+        """
+        minutes, seconds = divmod(duration, 60)
+        hours, minutes = divmod(minutes, 60)
+        uf_time = ""
+
+        if hours > 0:
+            uf_time = f"{hours:.0f}h {minutes:.0f}m {seconds:.0f}s"
+        elif minutes > 0:
+            uf_time = f"{minutes:.0f}m {seconds:.0f}s"
+        else:
+            uf_time = f"{seconds:.3f}s"
+
+        return uf_time
+
+    async def session_started(self, tmpdir: str) -> None:
+        uname = platform.uname()
+        message = "Host information\n\n"
+        message += f"\tSystem: {uname.system}\n"
+        message += f"\tNode: {uname.node}\n"
+        message += f"\tKernel Release: {uname.release}\n"
+        message += f"\tKernel Version: {uname.version}\n"
+        message += f"\tMachine Architecture: {uname.machine}\n"
+        message += f"\tProcessor: {uname.processor}\n"
+        message += f"\n\tTemporary directory: {tmpdir}\n"
+
+        self._print(message)
+
+    async def session_stopped(self) -> None:
+        self._print("Session stopped")
+
+    async def sut_start(self, sut: str) -> None:
+        self._print(f"Connecting to SUT: {sut}")
+
+    async def sut_stop(self, sut: str) -> None:
+        self._print(f"\nDisconnecting from SUT: {sut}")
+
+    async def sut_restart(self, sut: str) -> None:
+        self._print(f"Restarting SUT: {sut}")
+
+    async def run_cmd_start(self, cmd: str) -> None:
+        self._print(f"{cmd}", color=self.CYAN)
+
+    async def run_cmd_stdout(self, data: str) -> None:
+        self._print(data)
+
+    async def run_cmd_stop(
+            self,
+            command: str,
+            stdout: str,
+            returncode: int) -> None:
+        self._print(f"\nExit code: {returncode}\n")
+
+    async def suite_download_started(
+            self,
+            name: str,
+            target: str) -> None:
+        self._print(f"Downloading suite: {name}")
+
+    async def suite_started(self, suite: Suite) -> None:
+        self._print(f"Starting suite: {suite.name}")
+
+    async def suite_completed(self, results: SuiteResults) -> None:
+        duration = self._user_friendly_duration(results.exec_time)
+
+        message = "\n"
+        message += f"Suite Name: {results.suite.name}" + " " * 32 + "\n"
+        message += f"Total Run: {len(results.suite.tests)}\n"
+        message += f"Elapsed Time: {duration}\n"
+        message += f"Passed Tests: {results.passed}\n"
+        message += f"Failed Tests: {results.failed}\n"
+        message += f"Skipped Tests: {results.skipped}\n"
+        message += f"Broken Tests: {results.broken}\n"
+        message += f"Warnings: {results.warnings}\n"
+        message += f"Kernel Version: {results.kernel}\n"
+        message += f"CPU: {results.cpu}\n"
+        message += f"Machine Architecture: {results.arch}\n"
+        message += f"RAM: {results.ram}\n"
+        message += f"Swap memory: {results.swap}\n"
+        message += f"Distro: {results.distro}\n"
+        message += f"Distro Version: {results.distro_ver}\n"
+
+        self._print(message)
+
+    async def suite_timeout(self, suite: Suite, timeout: float) -> None:
+        self._print(
+            f"Suite '{suite.name}' timed out after {timeout} seconds",
+            color=self.RED)
+
+    async def session_error(self, error: str) -> None:
+        self._print(f"Error: {error}", color=self.RED)
+
+    async def internal_error(self, exc: BaseException, func_name: str) -> None:
+        self._print(
+            f"\nUI error in function '{func_name}': {exc}\n",
+            color=self.RED)
+
+        traceback.print_exc()
+
+
+class SimpleUserInterface(ConsoleUserInterface):
+    """
+    Console based user interface without fancy output formatting.
+ """ + + def __init__(self, no_colors: bool = False) -> None: + super().__init__(no_colors=no_colors) + + self._sut_not_responding = False + self._kernel_panic = False + self._kernel_tainted = None + self._timed_out = False + + altp.events.register("sut_not_responding", self.sut_not_responding) + altp.events.register("kernel_panic", self.kernel_panic) + altp.events.register("kernel_tainted", self.kernel_tainted) + altp.events.register("test_timed_out", self.test_timed_out) + altp.events.register("test_started", self.test_started) + altp.events.register("test_completed", self.test_completed) + + async def sut_not_responding(self) -> None: + self._sut_not_responding = True + # this message will replace ok/fail message + self._print("SUT not responding", color=self.RED) + + async def kernel_panic(self) -> None: + self._kernel_panic = True + # this message will replace ok/fail message + self._print("kernel panic", color=self.RED) + + async def kernel_tainted(self, message: str) -> None: + self._kernel_tainted = message + + async def test_timed_out(self, _: Test, timeout: int) -> None: + self._timed_out = True + # this message will replace ok/fail message + self._print("timed out", color=self.RED) + + async def test_started(self, test: Test) -> None: + self._print(f"{test.name}: ", end="") + + async def test_completed(self, results: TestResults) -> None: + if self._timed_out or self._sut_not_responding or self._kernel_panic: + self._sut_not_responding = False + self._kernel_panic = False + self._timed_out = False + return + + msg = "pass" + col = self.GREEN + + if results.failed > 0: + msg = "fail" + col = self.RED + elif results.skipped > 0: + msg = "skip" + col = self.YELLOW + elif results.broken > 0: + msg = "broken" + col = self.CYAN + + self._print(msg, color=col, end="") + + if self._kernel_tainted: + self._print(" | ", end="") + self._print("tainted", color=self.YELLOW, end="") + self._kernel_tainted = None + + uf_time = self._user_friendly_duration(results.exec_time) + self._print(f" ({uf_time})") + + +class VerboseUserInterface(ConsoleUserInterface): + """ + Verbose console based user interface. 
+    """
+
+    def __init__(self, no_colors: bool = False) -> None:
+        super().__init__(no_colors=no_colors)
+
+        self._timed_out = False
+
+        altp.events.register("sut_stdout", self.sut_stdout)
+        altp.events.register("kernel_tainted", self.kernel_tainted)
+        altp.events.register("test_timed_out", self.test_timed_out)
+        altp.events.register("test_started", self.test_started)
+        altp.events.register("test_completed", self.test_completed)
+        altp.events.register("test_stdout", self.test_stdout)
+
+    async def sut_stdout(self, _: str, data: str) -> None:
+        self._print(data, end='')
+
+    async def kernel_tainted(self, message: str) -> None:
+        self._print(f"Tainted kernel: {message}", color=self.YELLOW)
+
+    async def test_timed_out(self, _: Test, timeout: int) -> None:
+        self._timed_out = True
+
+    async def test_started(self, test: Test) -> None:
+        self._print("\n===== ", end="")
+        self._print(test.name, color=self.CYAN, end="")
+        self._print(" =====")
+        self._print("command: ", end="")
+        self._print(f"{test.command} {' '.join(test.arguments)}")
+
+    async def test_completed(self, results: TestResults) -> None:
+        if self._timed_out:
+            self._print("Test timed out", color=self.RED)
+
+        self._timed_out = False
+
+        if "Summary:" not in results.stdout:
+            self._print("\nSummary:")
+            self._print(f"passed {results.passed}")
+            self._print(f"failed {results.failed}")
+            self._print(f"broken {results.broken}")
+            self._print(f"skipped {results.skipped}")
+            self._print(f"warnings {results.warnings}")
+
+        uf_time = self._user_friendly_duration(results.exec_time)
+        self._print(f"\nDuration: {uf_time}\n")
+
+    async def test_stdout(self, _: Test, data: str) -> None:
+        self._print(data, end='')
+
+
+class ParallelUserInterface(ConsoleUserInterface):
+    """
+    Console based user interface for parallel execution of the tests.
+ """ + LINE_UP = '\033[1A' + + def __init__(self, no_colors: bool = False) -> None: + super().__init__(no_colors=no_colors) + + self._sut_not_responding = False + self._kernel_panic = False + self._kernel_tainted = None + self._timed_out = False + self._running = [] + + altp.events.register("sut_not_responding", self.sut_not_responding) + altp.events.register("kernel_panic", self.kernel_panic) + altp.events.register("kernel_tainted", self.kernel_tainted) + altp.events.register("test_timed_out", self.test_timed_out) + altp.events.register("test_started", self.test_started) + altp.events.register("test_completed", self.test_completed) + + def _refresh_running_tests(self) -> None: + tests_num = len(self._running) + + self._print(" " * 64, end='\r') + self._print("") + self._print(f"*** {tests_num} background test(s) ***") + + for test_name in self._running: + self._print(f"- {test_name}") + + # move back at the very beginning so the next time + # we will override current running tests status + for _ in range(tests_num + 2): + self._print(self.LINE_UP, end='') + + async def sut_not_responding(self) -> None: + self._sut_not_responding = True + + async def kernel_panic(self) -> None: + self._kernel_panic = True + + async def kernel_tainted(self, message: str) -> None: + self._kernel_tainted = message + + async def test_timed_out(self, _: Test, timeout: int) -> None: + self._timed_out = True + + async def test_started(self, test: Test) -> None: + self._running.append(test.name) + self._refresh_running_tests() + + async def test_completed(self, results: TestResults) -> None: + self._print(f"{results.test.name}: ", end="") + + if self._timed_out: + self._print("timed out", color=self.RED) + elif self._sut_not_responding: + # this message will replace ok/fail message + self._print("SUT not responding", color=self.RED) + elif self._kernel_panic: + # this message will replace ok/fail message + self._print("kernel panic", color=self.RED) + else: + msg = "pass" + col = self.GREEN + + if results.failed > 0: + msg = "fail" + col = self.RED + elif results.skipped > 0: + msg = "skip" + col = self.YELLOW + elif results.broken > 0: + msg = "broken" + col = self.CYAN + + self._print(msg, color=col, end="") + + if self._kernel_tainted: + self._print(" | ", end="") + self._print("tainted", color=self.YELLOW, end="") + + uf_time = self._user_friendly_duration(results.exec_time) + self._print(f" ({uf_time})", end='') + self._print(" " * 16) # cleanup message that was there before + + self._sut_not_responding = False + self._kernel_panic = False + self._kernel_tainted = None + self._timed_out = False + + self._running.remove(results.test.name) + self._refresh_running_tests() diff --git a/pytest.ini b/pytest.ini index 9584ac8..0d6b53e 100644 --- a/pytest.ini +++ b/pytest.ini @@ -3,10 +3,12 @@ [pytest] ; default pytest parameters addopts = -v -W ignore::DeprecationWarning -W ignore::pytest.PytestCollectionWarning -testpaths = ltp/tests +testpaths = ltp/tests altp/tests +asyncio_mode = auto ; logging options log_cli = true log_level = DEBUG markers = ssh: marks tests using ssh (deselect with '-m "not ssh"') - qemu: marks tests using qemu (deselect with '-m "not qemu"') \ No newline at end of file + qemu: marks tests using qemu (deselect with '-m "not qemu"') + ltx: marks tests using ltx (deselect with '-m "not ltx"') \ No newline at end of file diff --git a/runltp-ng b/runltp-ng index e8f3d69..befcad4 100755 --- a/runltp-ng +++ b/runltp-ng @@ -11,6 +11,8 @@ import os import sys +ASYNC_RUN = 
os.environ.get("ASYNC_RUN", None) + # include ltp library sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) sys.path.insert(0, os.path.abspath(os.path.join( @@ -19,5 +21,9 @@ sys.path.insert(0, os.path.abspath(os.path.join( )) if __name__ == "__main__": - import ltp.main - ltp.main.run() + if ASYNC_RUN: + import altp.main + altp.main.run() + else: + import ltp.main + ltp.main.run() diff --git a/scripts/parallelizable.py b/scripts/parallelizable.py new file mode 100644 index 0000000..9534b34 --- /dev/null +++ b/scripts/parallelizable.py @@ -0,0 +1,128 @@ +""" +.. module:: parallelizable + :platform: Linux + :synopsis: Script that checks how many LTP tests can run in parallel +.. moduleauthor:: Andrea Cervesato +""" +import os +import sys +import json +import argparse +import asyncio + +sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) + +import altp +import altp.data + + +async def get_suites(args: argparse.Namespace) -> list: + """ + Read runtest files and return a list of suites. + """ + # read runtest names + runtest_path = os.path.join(args.ltp_dir, "runtest") + runtest_names = [] + if args.runtest: + runtest_names.extend(args.runtest) + else: + for (_, _, filenames) in os.walk(runtest_path): + runtest_names.extend(filenames) + break + + runtests = [ + os.path.join(runtest_path, runtest) + for runtest in runtest_names + ] + + # read metadata + metadata = os.path.join(args.ltp_dir, "metadata", "ltp.json") + metadata_content = None + with open(metadata, 'r') as metadata_f: + metadata_content = json.loads(metadata_f.read()) + + # create tasks + tasks = [] + for runtest in runtests: + with open(runtest, 'r') as runtest_f: + task = altp.data.read_runtest( + os.path.basename(runtest), + runtest_f.read(), + metadata=metadata_content) + + tasks.append(task) + + # execute tasks + suites = await asyncio.gather(*tasks) + return suites + + +async def print_results(suites: list) -> None: + """ + Print results on console. + """ + suites_tests = 0 + suites_parallel = 0 + + for suite in suites: + parallel = 0 + + for test in suite.tests: + parallel += 1 if test.parallelizable else 0 + + suites_tests += len(suite.tests) + suites_parallel += parallel + + print(f"Suite: {suite.name}") + print(f"Total tests: {len(suite.tests)}") + print(f"Parallelizable tests: {parallel}") + print() + + percent = (suites_parallel * 100.0) / suites_tests + + print("-------------------------------") + print(f"Total tests: {suites_tests}") + print(f"Parallelizable tests: {suites_parallel}") + print() + print(f"{percent:.2f}% of the tests are parallelizable") + print() + + +async def main(args: argparse.Namespace) -> None: + """ + Main function of the script. + """ + suites = await get_suites(args) + await print_results(suites) + + +if __name__ == "__main__": + """ + Script entry point. + """ + parser = argparse.ArgumentParser( + description='Parallel testing analysis script for LTP') + parser.add_argument( + "--ltp-dir", + "-l", + type=str, + default="/opt/ltp", + help="LTP install directory") + parser.add_argument( + "--runtest", + "-r", + nargs="*", + help="List of runtest files path to analyse") + + args = parser.parse_args() + + if not os.path.isdir(args.ltp_dir): + parser.error("LTP directory doesn't exist") + + if args.runtest: + for runtest in args.runtest: + if not os.path.isfile( + os.path.join(args.ltp_dir, "runtest", runtest)): + parser.error(f"'{runtest}' runtest file doesn't exist") + + altp.run(main(args))
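+
+# Illustrative usage sketch (not part of the patch's logic): the invocations
+# below assume the script is launched from the runltp-ng checkout and that an
+# LTP installation providing runtest/ and metadata/ltp.json is available;
+# /opt/ltp is only the script's default prefix and the runtest file names are
+# examples.
+#
+#   python3 scripts/parallelizable.py --ltp-dir /opt/ltp
+#   python3 scripts/parallelizable.py --ltp-dir /opt/ltp --runtest syscalls mm
+#
+# The second form restricts the analysis to the listed runtest files instead
+# of walking the whole runtest directory.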