#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
# Copyright (c) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Zephyr Test Runner (twister)
|
|
|
|
Also check the "User and Developer Guides" at https://docs.zephyrproject.org/
|
|
|
|
This script scans for the set of unit test applications in the git
|
|
repository and attempts to execute them. By default, it tries to
|
|
build each test case on one platform per architecture, using a precedence
|
|
list defined in an architecture configuration file, and if possible
|
|
run the tests in any available emulators or simulators on the system.
|
|
|
|
Test cases are detected by the presence of a 'testcase.yaml' or a sample.yaml
|
|
files in the application's project directory. This file may contain one or more
|
|
blocks, each identifying a test scenario. The title of the block is a name for
|
|
the test case, which only needs to be unique for the test cases specified in
|
|
that testsuite meta-data. The full canonical name for each test case is <path to
|
|
test case>/<block>.
|
|
|
|
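For example, a scenario block might look like the following (the names and
values are purely illustrative; scenario blocks are typically nested under a
top-level "tests:" key):

  tests:
    kernel.timer.basic:
      tags: kernel
      min_ram: 16

The canonical name of that test case would be
<path to test case>/kernel.timer.basic.
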
Each test block in the testsuite metadata can define the following key/value
pairs:

  tags: <list of tags> (required)
    A set of string tags for the testsuite. Usually pertains to
    functional domains but can be anything. Command line invocations
    of this script can filter the set of tests to run based on tag.

  skip: <True|False> (default False)
    skip testsuite unconditionally. This can be used for broken tests.

  slow: <True|False> (default False)
    Don't build or run this test case unless --enable-slow was passed
    in on the command line. Intended for time-consuming test cases
    that are only run under certain circumstances, like daily
    builds.

  extra_args: <list of extra arguments>
    Extra cache entries to pass to CMake when building or running the
    test case.

  extra_configs: <list of extra configurations>
    Extra configuration options to be merged with a master prj.conf
    when building or running the test case.

  build_only: <True|False> (default False)
    If true, don't try to run the test even if the selected platform
    supports it.

  build_on_all: <True|False> (default False)
    If true, attempt to build the test on all available platforms.

  depends_on: <list of features>
    A board or platform can announce which features it supports; this option
    enables the test only on platforms that provide all of the listed features.

  min_ram: <integer>
    minimum amount of RAM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  min_flash: <integer>
    minimum amount of ROM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  modules: <list of modules>
    List of modules needed for this sample to build and run.

  timeout: <number of seconds>
    Length of time to run the test in an emulator before automatically
    killing it. Defaults to 60 seconds.

  arch_allow: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should only be run for.

  arch_exclude: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should not run on.

  platform_allow: <list of platforms>
    Set of platforms that this test case should only be run for.

  platform_exclude: <list of platforms>
    Set of platforms that this test case should not run on.

  extra_sections: <list of extra binary sections>
    When computing sizes, twister will report errors if it finds
    extra, unexpected sections in the Zephyr binary unless they are named
    here. They will not be included in the size calculation.

  filter: <expression>
    Filter whether the testsuite should be run by evaluating an expression
    against an environment containing the following values:

      { ARCH : <architecture>,
        PLATFORM : <platform>,
        <all CONFIG_* key/value pairs in the test's generated defconfig>,
        <all DT_* key/value pairs in the test's generated device tree file>,
        <all CMake key/value pairs in the test's generated CMakeCache.txt file>,
        *<env>: any environment variable available
      }

    The grammar for the expression language is as follows:

      expression ::= expression "and" expression
                   | expression "or" expression
                   | "not" expression
                   | "(" expression ")"
                   | symbol "==" constant
                   | symbol "!=" constant
                   | symbol "<" number
                   | symbol ">" number
                   | symbol ">=" number
                   | symbol "<=" number
                   | symbol "in" list
                   | symbol ":" string
                   | symbol

      list ::= "[" list_contents "]"

      list_contents ::= constant
                      | list_contents "," constant

      constant ::= number
                 | string

    For the case where expression ::= symbol, it evaluates to true
    if the symbol is defined to a non-empty string.

    Operator precedence, starting from lowest to highest:

      or (left associative)
      and (left associative)
      not (right associative)
      all comparison operators (non-associative)

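    For example, given these rules (the symbols below are only illustrative)

      filter = not CONFIG_FOO and CONFIG_BAR == 1

    groups as (not CONFIG_FOO) and (CONFIG_BAR == 1), since "not" binds more
    tightly than "and".
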
    arch_allow, arch_exclude, platform_allow, platform_exclude
    are all syntactic sugar for these expressions. For instance

      arch_exclude = x86 arc

    Is the same as:

      filter = not ARCH in ["x86", "arc"]

    The ':' operator compiles the string argument as a regular expression,
    and then returns a true value only if the symbol's value in the environment
    matches. For example, if CONFIG_SOC="stm32f107xc" then

      filter = CONFIG_SOC : "stm.*"

    Would match it.

The set of test cases that actually run depends on directives in the testsuite
files and options passed in on the command line. If there is any confusion,
running with -v or examining the test plan report (testplan.json)
can help show why particular test cases were skipped.

To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line breaks instead of white space.

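For example, a file "twister_args.txt" (the file name and flags here are
illustrative) containing

  -v
  --build-only

could be passed as ./scripts/twister +twister_args.txt.
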
Most everyday users will run with no arguments.

"""

import os
import sys
import logging
import time
import shutil
import colorama
from colorama import Fore
from pathlib import Path


ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    # This file has been zephyr/scripts/twister for years,
    # and that is not going to change anytime soon. Let the user
    # run this script as ./scripts/twister without making them
    # set ZEPHYR_BASE.
    ZEPHYR_BASE = str(Path(__file__).resolve().parents[1])

    # Propagate this decision to child processes.
    os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE

    print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"')

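# Make the in-tree twister library importable when running this script
# directly from a Zephyr checkout.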
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister/"))

from twisterlib.testplan import TestPlan
from twisterlib.environment import TwisterEnv, parse_arguments
from twisterlib.reports import Reporting
from twisterlib.hardwaremap import HardwareMap
from twisterlib.coverage import run_coverage
from twisterlib.runner import TwisterRunner

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)


def setup_logging(outdir, log_file, verbose, timestamps):
    # create file handler which logs even debug messages
    if log_file:
        fh = logging.FileHandler(log_file)
    else:
        fh = logging.FileHandler(os.path.join(outdir, "twister.log"))

    fh.setLevel(logging.DEBUG)

    # create console handler with a higher log level
    ch = logging.StreamHandler()

    if verbose > 1:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.INFO)

    # create formatter and add it to the handlers
    if timestamps:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    else:
        formatter = logging.Formatter('%(levelname)-7s - %(message)s')

    formatter_file = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter_file)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)


def init_color(colorama_strip):
    colorama.init(strip=colorama_strip)


def main():
    start_time = time.time()

    options = parse_arguments(sys.argv[1:])

    # Configure color output
    color_strip = False if options.force_color else None

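    # strip=False (set when options.force_color is given) forces ANSI colors
    # through even if stdout is not a TTY; strip=None lets colorama decide.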
    colorama.init(strip=color_strip)
    init_color(colorama_strip=color_strip)

    previous_results = None
    # Cleanup
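    # Decide what to do with an existing output directory:
    #  - keep it untouched when reusing previous artifacts
    #    (no_clean / only_failed / test_only),
    #  - load the previous twister.json when comparing against the last metrics,
    #  - otherwise delete it (clobber_output) or rename it out of the way.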
    if options.no_clean or options.only_failed or options.test_only:
        if os.path.exists(options.outdir):
            print("Keeping artifacts untouched")
    elif options.last_metrics:
        ls = os.path.join(options.outdir, "twister.json")
        if os.path.exists(ls):
            with open(ls, "r") as fp:
                previous_results = fp.read()
        else:
            sys.exit(f"Can't compare metrics with non-existing file {ls}")
    elif os.path.exists(options.outdir):
        if options.clobber_output:
            print("Deleting output directory {}".format(options.outdir))
            shutil.rmtree(options.outdir)
        else:
            for i in range(1, 100):
                new_out = options.outdir + ".{}".format(i)
                if not os.path.exists(new_out):
                    print("Renaming output directory to {}".format(new_out))
                    shutil.move(options.outdir, new_out)
                    break

    previous_results_file = None
    os.makedirs(options.outdir, exist_ok=True)
    if options.last_metrics and previous_results:
        previous_results_file = os.path.join(options.outdir, "baseline.json")
        with open(previous_results_file, "w") as fp:
            fp.write(previous_results)

    VERBOSE = options.verbose
    setup_logging(options.outdir, options.log_file, VERBOSE, options.timestamps)

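    # Build the shared execution environment; it is handed to the hardware
    # map, test plan, reporting and runner stages below.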
    env = TwisterEnv(options)
    env.discover()

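    # Discover any connected hardware for device testing. A return value of 0
    # means the hardware-map handling already did everything that was asked
    # for, so exit successfully.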
    hwm = HardwareMap(env)
    ret = hwm.discover()
    if ret == 0:
        return 0

    env.hwm = hwm

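    # Assemble the test plan: discover the available test suites, then load
    # and filter them against the selected platforms and command line options.
    # Both steps report configuration problems by raising RuntimeError.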
    tplan = TestPlan(env)
    try:
        tplan.discover()
    except RuntimeError as e:
        logger.error(f"{e}")
        return 1

    if tplan.report() == 0:
        return 0

    try:
        tplan.load()
    except RuntimeError as e:
        logger.error(f"{e}")
        return 1

    if options.list_tests and options.platform:
        tplan.report_platform_tests(options.platform)
        return 0

    if VERBOSE > 1:
        # if we are using command line platform filter, no need to list every
        # other platform as excluded, we know that already.
        # Show only the discards that apply to the selected platforms on the
        # command line

        for i in tplan.instances.values():
            if i.status == "filtered":
                if options.platform and i.platform.name not in options.platform:
                    continue
                logger.debug(
                    "{:<25} {:<50} {}SKIPPED{}: {}".format(
                        i.platform.name,
                        i.testsuite.name,
                        Fore.YELLOW,
                        Fore.RESET,
                        i.reason))

    if options.report_excluded:
        tplan.report_excluded_tests()
        return 0

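    # Write the resolved test plan to testplan.json; this is the report the
    # docstring above suggests inspecting to see why test cases were skipped.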
    report = Reporting(tplan, env)
    report.json_report(os.path.join(options.outdir, "testplan.json"))

    if options.save_tests:
        report.json_report(options.save_tests)
        return 0

    if options.device_testing and not options.build_only:
        print("\nDevice testing on:")
        hwm.dump(filtered=tplan.selected_platforms)
        print("")

    if options.dry_run:
        duration = time.time() - start_time
        logger.info("Completed in %d seconds" % (duration))
        return 0

    if options.short_build_path:
        tplan.create_build_dir_links()

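    # Execute the plan: build every selected instance and, where possible, run
    # it on the connected DUTs, emulators or simulators.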
    runner = TwisterRunner(tplan.instances, tplan.testsuites, env)
    runner.duts = hwm.duts
    runner.run()

    # figure out which report to use for size comparison
    report_to_use = None
    if options.compare_report:
        report_to_use = options.compare_report
    elif options.last_metrics:
        report_to_use = previous_results_file

    report.footprint_reports(report_to_use,
                             options.show_footprint,
                             options.all_deltas,
                             options.footprint_threshold,
                             options.last_metrics)

    duration = time.time() - start_time

    runner.results.summary()

    report.summary(runner.results, options.disable_unrecognized_section_test, duration)

    if options.coverage:
        run_coverage(tplan, options)

    if options.device_testing and not options.build_only:
        hwm.summary(tplan.selected_platforms)

    report.save_reports(options.report_name,
                        options.report_suffix,
                        options.report_dir,
                        options.no_update,
                        options.platform_reports
                        )

    logger.info("Run completed")
    if runner.results.failed or runner.results.error or (tplan.warnings and options.warnings_as_errors):
        return 1

    return 0


if __name__ == "__main__":
    ret = 0
    try:
        ret = main()
    finally:
        if (os.name != "nt") and os.isatty(1):
            # (OS is not Windows) and (stdout is interactive)
            os.system("stty sane")

    sys.exit(ret)