From 7ec4b982006044e213adcb5a937176e2375a5653 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 4 Dec 2019 17:22:35 +0300 Subject: [PATCH 01/39] test.py: make name a positional argument. Accept multiple test names, treat test name as a substring, and if the same name is given multiple times, run the test multiple times. --- test.py | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/test.py b/test.py index 90b8ce66f7..b5f6f7ca51 100755 --- a/test.py +++ b/test.py @@ -62,6 +62,7 @@ def colorformat(msg, **kwargs): fmt.update(kwargs) return msg.format(**fmt) + def status_to_string(success): if success: status = colorformat("{green}PASSED{nocolor}") if os.isatty(sys.stdout.fileno()) else "PASSED" @@ -79,16 +80,18 @@ class UnitTest: opts = UnitTest.seastar_args self.id = test_no self.name = name + # Name within the suite + self.shortname = os.path.basename(name) self.mode = mode self.kind = kind - self.path = os.path.join('build', self.mode, 'test', self.kind, self.name) + self.path = os.path.join("build", self.mode, "test", self.name) self.args = opts.split() + UnitTest.standard_args if self.kind == 'boost': boost_args = [] if options.jenkins: mode = 'debug' if self.mode == 'debug' else 'release' - xmlout = options.jenkins + "." + mode + "." + self.name + "." + str(self.id) + ".boost.xml" + xmlout = options.jenkins + "." + mode + "." + self.shortname + "." + str(self.id) + ".boost.xml" boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] boost_args += ['--'] self.args = boost_args + self.args @@ -178,8 +181,17 @@ def parse_cmd_line(): default_num_jobs = min(default_num_jobs_mem, default_num_jobs_cpu) parser = argparse.ArgumentParser(description="Scylla test runner") - parser.add_argument('--name', action="store", - help="Run only test whose name contains given string") + parser.add_argument( + "name", + nargs="*", + action="store", + help="""Can be empty. 
List of test names, to look for in + suites. Each name is used as a substring to look for in the + path to test file, e.g. "mem" will run all tests that have + "mem" in their name in all suites, "boost/mem" will only enable + tests starting with "mem" in "boost" suite. Default: run all + tests in all suites.""", + ) parser.add_argument('--mode', choices=all_modes, action="append", dest="modes", help="Run only tests for given build mode(s)") parser.add_argument('--repeat', action="store", default="1", type=int, @@ -212,24 +224,25 @@ def find_tests(options): def add_test_list(kind, mode): lst = glob.glob(os.path.join("test", kind, "*_test.cc")) for t in lst: - t = os.path.splitext(os.path.basename(t))[0] - if mode not in ['release', 'dev'] and os.path.join(kind, t) in long_tests: + t = os.path.join(kind, os.path.splitext(os.path.basename(t))[0]) + if mode not in ["release", "dev"] and t in long_tests: continue - args = custom_test_args.get(os.path.join(kind, t)) + args = custom_test_args.get(t) if isinstance(args, (str, type(None))): - args = [ args ] + args = [args] for a in args: - tests_to_run.append((t, a, kind, mode)) + patterns = options.name if options.name else [t] + for p in patterns: + if p in t: + tests_to_run.append((t, a, kind, mode)) for mode in options.modes: add_test_list('unit', mode) add_test_list('boost', mode) - if options.name: - tests_to_run = [t for t in tests_to_run if options.name in t[0]] - if not tests_to_run: - print("Test {} not found".format(options.name)) - sys.exit(1) + if not tests_to_run: + print("Test {} not found".format(options.name)) + sys.exit(1) tests_to_run = [t for t in tests_to_run for _ in range(options.repeat)] tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)] From dab364c87d6c6340f3fbb7631e9dd57d7f6fd810 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Thu, 5 Dec 2019 12:19:29 +0300 Subject: [PATCH 02/39] test.py: sort imports --- test.py | 10 +++++----- 1 file changed, 5 
insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index b5f6f7ca51..16727bbebc 100755 --- a/test.py +++ b/test.py @@ -20,15 +20,15 @@ # You should have received a copy of the GNU General Public License # along with Scylla. If not, see . # +import argparse import asyncio import glob -import os -import sys -import signal -import argparse -import subprocess import io import multiprocessing +import os +import signal +import subprocess +import sys import xml.etree.ElementTree as ET # Apply custom options to these tests From caf742f9560be61e830d32486d150caa01235a76 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Tue, 17 Dec 2019 16:29:19 +0300 Subject: [PATCH 03/39] test.py: flake8 style fix --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 16727bbebc..d871498628 100755 --- a/test.py +++ b/test.py @@ -271,7 +271,7 @@ async def run_all_tests(tests_to_run, signaled, options): for coro in done: result = coro.result() if isinstance(result, bool): - continue # skip signaled task result + continue # skip signaled task result results.append(result) test, success, out = result cookie = print_progress(test, success, cookie, options.verbose) From 1de69228f18e03153fa472404a5e628e5f9272ec Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Sat, 7 Dec 2019 20:37:53 +0300 Subject: [PATCH 04/39] test.py: add --tmpdir It will be used for test log files. --- test.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test.py b/test.py index d871498628..5930893021 100755 --- a/test.py +++ b/test.py @@ -192,6 +192,13 @@ def parse_cmd_line(): tests starting with "mem" in "boost" suite. Default: run all tests in all suites.""", ) + parser.add_argument( + "--tmpdir", + action="store", + default="testlog", + help="""Path to temporary test data and log files. The data is + further segregated per build mode. 
Default: ./testlog.""", + ) parser.add_argument('--mode', choices=all_modes, action="append", dest="modes", help="Run only tests for given build mode(s)") parser.add_argument('--repeat', action="store", default="1", type=int, From cbee76fb956cc67c91ea1cd6f60e1f6690cd1357 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 01:13:04 +0300 Subject: [PATCH 05/39] test.py: gitignore the default ./test.py tmpdir, ./testlog --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 85ce9643a5..6f7695a36e 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ resources .pytest_cache /expressions.tokens tags +testlog/* From 879bea20ab3629038fece8ebf138b3eab82c525c Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Sat, 7 Dec 2019 20:48:28 +0300 Subject: [PATCH 06/39] test.py: add a log file Going forward I'd like to make terminal output brief&tabular, but some test details are necessary to preserve so that a failure is easy to debug. This information now goes to the log file. - open and truncate the log file on each harness start - log options of each invoked test in the log, so that a failure is easy to reproduce - log test result in the log Since tests are run concurrently, having an exact trace of concurrent execution also helps debugging flaky tests. 
--- test.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test.py b/test.py index 5930893021..6bb0299e06 100755 --- a/test.py +++ b/test.py @@ -24,8 +24,10 @@ import argparse import asyncio import glob import io +import logging import multiprocessing import os +import pathlib import signal import subprocess import sys @@ -123,6 +125,7 @@ async def run_test(test, options): success = False process = None stdout = None + logging.info("Starting test #%d: %s %s", test.id, test.path, " ".join(test.args)) try: process = await asyncio.create_subprocess_exec( test.path, @@ -153,6 +156,7 @@ async def run_test(test, options): except Exception as e: print(' with error {e}\n'.format(e=e), file=file) report_error(e) + logging.info("Test #%d %s", test.id, "passed" if success else "failed") return (test, success, file.getvalue()) def setup_signal_handlers(loop, signaled): @@ -251,6 +255,9 @@ def find_tests(options): print("Test {} not found".format(options.name)) sys.exit(1) + logging.info("Found %d tests, repeat count is %d", + len(tests_to_run), options.repeat) + tests_to_run = [t for t in tests_to_run for _ in range(options.repeat)] tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)] @@ -331,10 +338,26 @@ def write_xunit_report(options, results): with open(options.xunit, "w") as f: ET.ElementTree(xml_results).write(f, encoding="unicode") + +def open_log(tmpdir): + tmpdir = os.path.abspath(tmpdir) + pathlib.Path(tmpdir).mkdir(parents=True, exist_ok=True) + logging.basicConfig( + filename=os.path.join(tmpdir, "test.py.log"), + filemode="w", + level=logging.INFO, + format="%(asctime)s.%(msecs)03d %(levelname)s> %(message)s", + datefmt="%H:%M:%S", + ) + logging.critical("Started %s", " ".join(sys.argv)) + + async def main(): options = parse_cmd_line() + open_log(options.tmpdir) + tests_to_run = find_tests(options) signaled = asyncio.Event() From 233f921f9d49a814e138dab88971231e4d695087 Mon Sep 17 00:00:00 2001 From: 
Konstantin Osipov Date: Sun, 8 Dec 2019 10:56:45 +0300 Subject: [PATCH 07/39] test.py: make test output brief&tabular New format: % ./test.py --verbose --mode=release ================================================================================ [N/TOTAL] TEST MODE RESULT ------------------------------------------------------------------------------ [1/111] boost/UUID_test release [ PASS ] [2/111] boost/enum_set_test release [ PASS ] [3/111] boost/like_matcher_test release [ PASS ] [4/111] boost/observable_test release [ PASS ] [5/111] boost/allocation_strategy_test release [ PASS ] ^C % ./test.py foo ================================================================================ [N/TOTAL] TEST MODE RESULT ------------------------------------------------------------------------------ [3/3] unit/memory_footprint_test debug [ PASS ] ------------------------------------------------------------------------------ --- test.py | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/test.py b/test.py index 6bb0299e06..7a2b2af827 100755 --- a/test.py +++ b/test.py @@ -67,12 +67,13 @@ def colorformat(msg, **kwargs): def status_to_string(success): if success: - status = colorformat("{green}PASSED{nocolor}") if os.isatty(sys.stdout.fileno()) else "PASSED" + status = colorformat("{green}[ PASS ]{nocolor}") if os.isatty(sys.stdout.fileno()) else "[ PASS ]" else: - status = colorformat("{red}FAILED{nocolor}") if os.isatty(sys.stdout.fileno()) else "FAILED" + status = colorformat("{red}[ FAIL ]{nocolor}") if os.isatty(sys.stdout.fileno()) else "[ FAIL ]" return status + class UnitTest: standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split() seastar_args = '-c2 -m2G' @@ -99,13 +100,29 @@ class UnitTest: self.args = boost_args + self.args +def print_start_blurb(): + print("="*80) + print("{:7s} {:50s} {:^8s} {:8s}".format("[N/TOTAL]", "TEST", "MODE", "RESULT")) + 
print("-"*78) + + +def print_end_blurb(verbose): + if not verbose: + sys.stdout.write('\n') + print("-"*78) + + def print_progress(test, success, cookie, verbose): if isinstance(cookie, int): cookie = (0, 1, cookie) last_len, n, n_total = cookie - msg = "[{}/{}] {} {} {}".format(n, n_total, status_to_string(success), test.path, ' '.join(test.args)) - if verbose is False and sys.stdout.isatty(): + msg = "{:9s} {:50s} {:^8s} {:8s}".format( + "[{}/{}]".format(n, n_total), + test.name, test.mode[:8], + status_to_string(success) + ) + if verbose is False: print('\r' + ' ' * last_len, end='') last_len = len(msg) print('\r' + msg, end='') @@ -219,6 +236,9 @@ def parse_cmd_line(): help="Name of a file to write results of non-boost tests to in xunit format") args = parser.parse_args() + if not sys.stdout.isatty(): + args.verbose = True + if not args.modes: out = subprocess.Popen(['ninja', 'mode_list'], stdout=subprocess.PIPE).communicate()[0].decode() # [1/1] List configured modes @@ -291,6 +311,7 @@ async def run_all_tests(tests_to_run, signaled, options): cookie = print_progress(test, success, cookie, options.verbose) if not success: failed_tests.append((test, out)) + print_start_blurb() try: for test in tests_to_run: # +1 for 'signaled' event @@ -308,13 +329,13 @@ async def run_all_tests(tests_to_run, signaled, options): except asyncio.CancelledError: return None, None + print_end_blurb(options.verbose) + return failed_tests, results def print_summary(failed_tests, total_tests): - if not failed_tests: - print('\nOK.') - else: + if failed_tests: print('\n\nOutput of the failed tests:') for test, out in failed_tests: print("Test {} {} failed:\n{}".format(test.path, ' '.join(test.args), out)) @@ -323,6 +344,7 @@ def print_summary(failed_tests, total_tests): print(' {} {}'.format(test.path, ' '.join(test.args))) print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), total_tests)) + def write_xunit_report(options, results): unit_results = [r for r in 
results if r[0].kind != 'boost'] num_unit_failed = sum(1 for r in unit_results if not r[1]) From d2b546d464868a548e8846a7cdb1905f31887670 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Sun, 8 Dec 2019 10:59:55 +0300 Subject: [PATCH 08/39] test.py: output job count in the log --- test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 7a2b2af827..70a60674ab 100755 --- a/test.py +++ b/test.py @@ -275,8 +275,8 @@ def find_tests(options): print("Test {} not found".format(options.name)) sys.exit(1) - logging.info("Found %d tests, repeat count is %d", - len(tests_to_run), options.repeat) + logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs", + len(tests_to_run), options.repeat, options.jobs) tests_to_run = [t for t in tests_to_run for _ in range(options.repeat)] tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)] From f4efe03adeda7acb2cc0c79d7b10c02e139a5410 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Sun, 8 Dec 2019 11:45:30 +0300 Subject: [PATCH 09/39] test.py: always produce xml output, derive output paths from tmpdir It reduces the number of configurations to re-test when test.py is modified. and simplifies usage of test.py in build tools, since you no longer need to bother with extra arguments. 
--- test.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/test.py b/test.py index 70a60674ab..57f82ac12d 100755 --- a/test.py +++ b/test.py @@ -82,6 +82,7 @@ class UnitTest: if opts is None: opts = UnitTest.seastar_args self.id = test_no + # Name with test suite name self.name = name # Name within the suite self.shortname = os.path.basename(name) @@ -89,13 +90,13 @@ class UnitTest: self.kind = kind self.path = os.path.join("build", self.mode, "test", self.name) self.args = opts.split() + UnitTest.standard_args + # Unique file name, which is also readable by human, as filename prefix + self.uname = "{}.{}.{}".format(self.mode, self.shortname, self.id) if self.kind == 'boost': boost_args = [] - if options.jenkins: - mode = 'debug' if self.mode == 'debug' else 'release' - xmlout = options.jenkins + "." + mode + "." + self.shortname + "." + str(self.id) + ".boost.xml" - boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] + xmlout = os.path.join(options.jenkins, self.uname + ".boost.xml") + boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] boost_args += ['--'] self.args = boost_args + self.args @@ -226,14 +227,21 @@ def parse_cmd_line(): help="number of times to repeat test execution") parser.add_argument('--timeout', action="store", default="3000", type=int, help="timeout value for test execution") - parser.add_argument('--jenkins', action="store", - help="jenkins output file prefix") + parser.add_argument( + "--jenkins", + action="store", + help="""Jenkins output file prefix. 
Default: ${tmpdir}/xml""" + ) parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Verbose reporting') parser.add_argument('--jobs', '-j', action="store", default=default_num_jobs, type=int, help="Number of jobs to use for running the tests") - parser.add_argument('--xunit', action="store", - help="Name of a file to write results of non-boost tests to in xunit format") + parser.add_argument( + "--xunit", + action="store", + help="""Name of a file to write results of non-boost tests to in + xunit format. Default: ${tmpdir}/xml/xunit.xml""" + ) args = parser.parse_args() if not sys.stdout.isatty(): @@ -245,6 +253,16 @@ def parse_cmd_line(): # debug release dev args.modes = out.split('\n')[1].split(' ') + args.tmpdir = os.path.abspath(args.tmpdir) + pathlib.Path(args.tmpdir).mkdir(parents=True, exist_ok=True) + if not args.jenkins or not args.xunit: + xmldir = os.path.join(args.tmpdir, "xml") + pathlib.Path(xmldir).mkdir(parents=True, exist_ok=True) + if args.jenkins is None: + args.jenkins = xmldir + if args.xunit is None: + args.xunit = os.path.join(xmldir, "xunit.xml") + return args @@ -362,7 +380,6 @@ def write_xunit_report(options, results): def open_log(tmpdir): - tmpdir = os.path.abspath(tmpdir) pathlib.Path(tmpdir).mkdir(parents=True, exist_ok=True) logging.basicConfig( filename=os.path.join(tmpdir, "test.py.log"), @@ -392,8 +409,7 @@ async def main(): print_summary(failed_tests, len(tests_to_run)) - if options.xunit: - write_xunit_report(options, results) + write_xunit_report(options, results) return 0 if not failed_tests else -1 From b4aa4d35c36bfef13b1386ba170ba65f9046d1d3 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 9 Dec 2019 17:26:44 +0300 Subject: [PATCH 10/39] test.py: save test output in tmpdir It is handy to have it so that a reference of a failed test is available without re-running it. 
--- test.py | 61 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/test.py b/test.py index 57f82ac12d..04ccedc69a 100755 --- a/test.py +++ b/test.py @@ -92,6 +92,7 @@ class UnitTest: self.args = opts.split() + UnitTest.standard_args # Unique file name, which is also readable by human, as filename prefix self.uname = "{}.{}.{}".format(self.mode, self.shortname, self.id) + self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") if self.kind == 'boost': boost_args = [] @@ -145,15 +146,16 @@ async def run_test(test, options): stdout = None logging.info("Starting test #%d: %s %s", test.id, test.path, " ".join(test.args)) try: - process = await asyncio.create_subprocess_exec( - test.path, - *test.args, - stderr=asyncio.subprocess.STDOUT, - stdout=asyncio.subprocess.PIPE, - env=dict(os.environ, - UBSAN_OPTIONS='halt_on_error=1:abort_on_error=1', - ASAN_OPTIONS='disable_coredump=0:abort_on_error=1', - BOOST_TEST_CATCH_SYSTEM_ERRORS='no'), + with open(test.log_filename, "wb") as log: + process = await asyncio.create_subprocess_exec( + test.path, + *test.args, + stderr=log, + stdout=log, + env=dict(os.environ, + UBSAN_OPTIONS='halt_on_error=1:abort_on_error=1', + ASAN_OPTIONS='disable_coredump=0:abort_on_error=1', + BOOST_TEST_CATCH_SYSTEM_ERRORS="no"), preexec_fn=os.setsid, ) stdout, _ = await asyncio.wait_for(process.communicate(), options.timeout) @@ -175,7 +177,7 @@ async def run_test(test, options): print(' with error {e}\n'.format(e=e), file=file) report_error(e) logging.info("Test #%d %s", test.id, "passed" if success else "failed") - return (test, success, file.getvalue()) + return (test, success) def setup_signal_handlers(loop, signaled): @@ -253,11 +255,18 @@ def parse_cmd_line(): # debug release dev args.modes = out.split('\n')[1].split(' ') + def prepare_dir(dirname, pattern): + # Ensure the dir exists + pathlib.Path(dirname).mkdir(parents=True, exist_ok=True) + # Remove old 
artefacts + for p in glob.glob(os.path.join(dirname, pattern), recursive=True): + pathlib.Path(p).unlink() + args.tmpdir = os.path.abspath(args.tmpdir) - pathlib.Path(args.tmpdir).mkdir(parents=True, exist_ok=True) + prepare_dir(args.tmpdir, "*.log") if not args.jenkins or not args.xunit: xmldir = os.path.join(args.tmpdir, "xml") - pathlib.Path(xmldir).mkdir(parents=True, exist_ok=True) + prepare_dir(xmldir, "*.xml") if args.jenkins is None: args.jenkins = xmldir if args.xunit is None: @@ -325,10 +334,10 @@ async def run_all_tests(tests_to_run, signaled, options): if isinstance(result, bool): continue # skip signaled task result results.append(result) - test, success, out = result + test, success = result cookie = print_progress(test, success, cookie, options.verbose) if not success: - failed_tests.append((test, out)) + failed_tests.append(test) print_start_blurb() try: for test in tests_to_run: @@ -352,13 +361,26 @@ async def run_all_tests(tests_to_run, signaled, options): return failed_tests, results +def read_log(log_filename): + """Intelligently read test log output""" + try: + with open(log_filename, "r") as log: + msg = log.read() + return msg if len(msg) else "===Empty log output===" + except FileNotFoundError: + return "===Log {} not found===".format(log_filename) + except OSError as e: + return "===Error reading log {}===".format(e) + + def print_summary(failed_tests, total_tests): if failed_tests: print('\n\nOutput of the failed tests:') - for test, out in failed_tests: - print("Test {} {} failed:\n{}".format(test.path, ' '.join(test.args), out)) + for test in failed_tests: + print("Test {} {} failed:".format(test.path, " ".join(test.args))) + print(read_log(test.log_filename)) print('\n\nThe following test(s) have failed:') - for test, _ in failed_tests: + for test in failed_tests: print(' {} {}'.format(test.path, ' '.join(test.args))) print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), total_tests)) @@ -370,11 +392,12 @@ def 
write_xunit_report(options, results): xml_results = ET.Element('testsuite', name='non-boost tests', tests=str(len(unit_results)), failures=str(num_unit_failed), errors='0') - for test, success, out in unit_results: + for test, success in unit_results: xml_res = ET.SubElement(xml_results, 'testcase', name=test.path) if not success: xml_fail = ET.SubElement(xml_res, 'failure') - xml_fail.text = "Test {} {} failed:\n{}".format(test.path, ' '.join(test.args), out) + xml_fail.text = "Test {} {} failed:".format(test.path, " ".join(test.args)) + xml_fail.text += read_log(test.log_filename) with open(options.xunit, "w") as f: ET.ElementTree(xml_results).write(f, encoding="unicode") From ec9ad04c8a6767034a517a623bebeeb4d9a2ee3d Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 9 Dec 2019 19:35:34 +0300 Subject: [PATCH 11/39] test.py: move 'success' to TestUnit class There will be other success attributes: program return status 0 doesn't mean the test is successful for all tests. --- test.py | 50 ++++++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/test.py b/test.py index 04ccedc69a..80a8fe4ffc 100755 --- a/test.py +++ b/test.py @@ -93,6 +93,7 @@ class UnitTest: # Unique file name, which is also readable by human, as filename prefix self.uname = "{}.{}.{}".format(self.mode, self.shortname, self.id) self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") + self.success = None if self.kind == 'boost': boost_args = [] @@ -114,7 +115,7 @@ def print_end_blurb(verbose): print("-"*78) -def print_progress(test, success, cookie, verbose): +def print_progress(test, cookie, verbose): if isinstance(cookie, int): cookie = (0, 1, cookie) @@ -122,7 +123,7 @@ def print_progress(test, success, cookie, verbose): msg = "{:9s} {:50s} {:^8s} {:8s}".format( "[{}/{}]".format(n, n_total), test.name, test.mode[:8], - status_to_string(success) + status_to_string(test.success) ) if verbose is False: print('\r' + ' ' * 
last_len, end='') @@ -141,7 +142,6 @@ async def run_test(test, options): print('=== stdout START ===', file=file) print(out, file=file) print('=== stdout END ===', file=file) - success = False process = None stdout = None logging.info("Starting test #%d: %s %s", test.id, test.path, " ".join(test.args)) @@ -159,7 +159,7 @@ async def run_test(test, options): preexec_fn=os.setsid, ) stdout, _ = await asyncio.wait_for(process.communicate(), options.timeout) - success = process.returncode == 0 + test.success = process.returncode == 0 if process.returncode != 0: print(' with error code {code}\n'.format(code=process.returncode), file=file) report_error(stdout.decode(encoding='UTF-8')) @@ -176,8 +176,8 @@ async def run_test(test, options): except Exception as e: print(' with error {e}\n'.format(e=e), file=file) report_error(e) - logging.info("Test #%d %s", test.id, "passed" if success else "failed") - return (test, success) + logging.info("Test #%d %s", test.id, "passed" if test.success else "failed") + return test def setup_signal_handlers(loop, signaled): @@ -312,8 +312,6 @@ def find_tests(options): async def run_all_tests(tests_to_run, signaled, options): - failed_tests = [] - results = [] cookie = len(tests_to_run) signaled_task = asyncio.create_task(signaled.wait()) pending = set([signaled_task]) @@ -333,11 +331,7 @@ async def run_all_tests(tests_to_run, signaled, options): result = coro.result() if isinstance(result, bool): continue # skip signaled task result - results.append(result) - test, success = result - cookie = print_progress(test, success, cookie, options.verbose) - if not success: - failed_tests.append(test) + cookie = print_progress(result, cookie, options.verbose) print_start_blurb() try: for test in tests_to_run: @@ -354,12 +348,10 @@ async def run_all_tests(tests_to_run, signaled, options): await reap(done, pending, signaled) except asyncio.CancelledError: - return None, None + return print_end_blurb(options.verbose) - return failed_tests, results - 
def read_log(log_filename): """Intelligently read test log output""" @@ -373,7 +365,7 @@ def read_log(log_filename): return "===Error reading log {}===".format(e) -def print_summary(failed_tests, total_tests): +def print_summary(tests, failed_tests): if failed_tests: print('\n\nOutput of the failed tests:') for test in failed_tests: @@ -382,19 +374,19 @@ def print_summary(failed_tests, total_tests): print('\n\nThe following test(s) have failed:') for test in failed_tests: print(' {} {}'.format(test.path, ' '.join(test.args))) - print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), total_tests)) + print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), len(tests))) -def write_xunit_report(options, results): - unit_results = [r for r in results if r[0].kind != 'boost'] - num_unit_failed = sum(1 for r in unit_results if not r[1]) +def write_xunit_report(tests, options): + unit_tests = [t for t in tests if t.kind == "unit"] + num_unit_failed = sum(1 for t in unit_tests if not t.success) xml_results = ET.Element('testsuite', name='non-boost tests', - tests=str(len(unit_results)), failures=str(num_unit_failed), errors='0') + tests=str(len(unit_tests)), failures=str(num_unit_failed), errors='0') - for test, success in unit_results: + for test in unit_tests: xml_res = ET.SubElement(xml_results, 'testcase', name=test.path) - if not success: + if not test.success: xml_fail = ET.SubElement(xml_res, 'failure') xml_fail.text = "Test {} {} failed:".format(test.path, " ".join(test.args)) xml_fail.text += read_log(test.log_filename) @@ -420,19 +412,21 @@ async def main(): open_log(options.tmpdir) - tests_to_run = find_tests(options) + tests = find_tests(options) signaled = asyncio.Event() setup_signal_handlers(asyncio.get_event_loop(), signaled) - failed_tests, results = await run_all_tests(tests_to_run, signaled, options) + await run_all_tests(tests, signaled, options) if signaled.is_set(): return -signaled.signo - 
print_summary(failed_tests, len(tests_to_run)) + failed_tests = [t for t in tests if t.success is not True] - write_xunit_report(options, results) + print_summary(tests, failed_tests) + + write_xunit_report(tests, options) return 0 if not failed_tests else -1 From c1f8169cd4d1a4584d6d8ce4f69b479da6d0660a Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 9 Dec 2019 19:48:33 +0300 Subject: [PATCH 12/39] test.py: add suite.yaml to boost and unit tests The plan is to move suite-specific settings to the configuration file. --- test/boost/suite.yaml | 1 + test/unit/suite.yaml | 1 + 2 files changed, 2 insertions(+) create mode 100644 test/boost/suite.yaml create mode 100644 test/unit/suite.yaml diff --git a/test/boost/suite.yaml b/test/boost/suite.yaml new file mode 100644 index 0000000000..76292b1fb7 --- /dev/null +++ b/test/boost/suite.yaml @@ -0,0 +1 @@ +type: boost diff --git a/test/unit/suite.yaml b/test/unit/suite.yaml new file mode 100644 index 0000000000..b566c7ecf1 --- /dev/null +++ b/test/unit/suite.yaml @@ -0,0 +1 @@ +type: unit From f95c97667f6262e558dd1b0eb526519b42b23f7a Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 9 Dec 2019 20:56:19 +0300 Subject: [PATCH 13/39] test.py: support arbitrary number of test suites Scan entire test/ for folders that contain suite.yaml, and load tests from these folders. Skip the rest. Each folder with a suite.yaml is expected to have a valid suite configuration in the yaml file. A suite is a folder with test of the same type. E.g. it can be a folder with unit tests, boost tests, or CQL tests. The harness will use suite.yaml to create an appropriate suite test driver, to execute tests in different formats. 
--- test.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index 80a8fe4ffc..5f5dbbf6bf 100755 --- a/test.py +++ b/test.py @@ -32,6 +32,7 @@ import signal import subprocess import sys import xml.etree.ElementTree as ET +import yaml # Apply custom options to these tests custom_test_args = { @@ -279,7 +280,14 @@ def find_tests(options): tests_to_run = [] - def add_test_list(kind, mode): + def load_cfg(path): + with open(os.path.join(path, "suite.yaml"), "r") as cfg: + return yaml.safe_load(cfg.read()) + + + def add_test_list(path, mode): + cfg = load_cfg(path) + kind = cfg["type"] lst = glob.glob(os.path.join("test", kind, "*_test.cc")) for t in lst: t = os.path.join(kind, os.path.splitext(os.path.basename(t))[0]) @@ -294,9 +302,10 @@ def find_tests(options): if p in t: tests_to_run.append((t, a, kind, mode)) - for mode in options.modes: - add_test_list('unit', mode) - add_test_list('boost', mode) + for f in glob.glob(os.path.join("test", "*")): + if os.path.isdir(f) and os.path.isfile(os.path.join(f, "suite.yaml")): + for mode in options.modes: + add_test_list(f, mode) if not tests_to_run: print("Test {} not found".format(options.name)) From eed3201ca6c8574e475478a1f1a8716a64f3dd63 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 14:24:02 +0300 Subject: [PATCH 14/39] test.py: use path, rather than test kind, for search pattern Going forward there may be multiple suites of the same kind. 
--- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 5f5dbbf6bf..c735d2bc42 100755 --- a/test.py +++ b/test.py @@ -288,7 +288,7 @@ def find_tests(options): def add_test_list(path, mode): cfg = load_cfg(path) kind = cfg["type"] - lst = glob.glob(os.path.join("test", kind, "*_test.cc")) + lst = glob.glob(os.path.join(path, "*_test.cc")) for t in lst: t = os.path.join(kind, os.path.splitext(os.path.basename(t))[0]) if mode not in ["release", "dev"] and t in long_tests: From 44e1c4267cfb9cb465e0f0a6cf275b40c2719ca0 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 15:02:02 +0300 Subject: [PATCH 15/39] test.py: introduce test suites - UnitTestSuite - for test/unit tests - BoostTestSuite - a tweak on UnitTestSuite, with options to log xml test output to a dedicated file --- test.py | 69 +++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 12 deletions(-) diff --git a/test.py b/test.py index c735d2bc42..daf533c409 100755 --- a/test.py +++ b/test.py @@ -20,6 +20,7 @@ # You should have received a copy of the GNU General Public License # along with Scylla. If not, see . # +from abc import ABC, abstractmethod import argparse import asyncio import glob @@ -75,11 +76,60 @@ def status_to_string(success): return status +class TestSuite(ABC): + """A test suite is a folder with tests of the same type. + E.g. it can be unit tests, boost tests, or CQL tests.""" + + # All existing test suites, one suite per path. 
+ suites = dict() + + def __init__(self, path, cfg): + self.path = path + self.name = os.path.basename(self.path) + self.cfg = cfg + self.tests = [] + + @staticmethod + def load_cfg(path): + with open(os.path.join(path, "suite.yaml"), "r") as cfg_file: + cfg = yaml.safe_load(cfg_file.read()) + if not isinstance(cfg, dict): + raise RuntimeError("Failed to load tests in {}: suite.yaml is empty".format(path)) + return cfg + + @staticmethod + def opt_create(path): + """Return a subclass of TestSuite with name cfg["type"].title + TestSuite. + Ensures there is only one suite instance per path.""" + suite = TestSuite.suites.get(path) + if not suite: + cfg = TestSuite.load_cfg(path) + kind = cfg.get("type") + if kind is None: + raise RuntimeError("Failed to load tests in {}: suite.yaml has no suite type".format(path)) + SpecificTestSuite = globals().get(kind.title() + "TestSuite") + if not SpecificTestSuite: + raise RuntimeError("Failed to load tests in {}: suite type '{}' not found".format(path, kind)) + suite = SpecificTestSuite(path, cfg) + TestSuite.suites[path] = suite + return suite + + +class UnitTestSuite(TestSuite): + """TestSuite instantiation for non-boost unit tests""" + pass + + +class BoostTestSuite(UnitTestSuite): + """TestSuite for boost unit tests""" + pass + + class UnitTest: standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split() seastar_args = '-c2 -m2G' - def __init__(self, test_no, name, opts, kind, mode, options): + def __init__(self, test_no, name, opts, suite, mode, options): if opts is None: opts = UnitTest.seastar_args self.id = test_no @@ -88,7 +138,7 @@ class UnitTest: # Name within the suite self.shortname = os.path.basename(name) self.mode = mode - self.kind = kind + self.suite = suite self.path = os.path.join("build", self.mode, "test", self.name) self.args = opts.split() + UnitTest.standard_args # Unique file name, which is also readable by human, as filename prefix @@ -96,7 
+146,7 @@ class UnitTest: self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") self.success = None - if self.kind == 'boost': + if isinstance(suite, BoostTestSuite): boost_args = [] xmlout = os.path.join(options.jenkins, self.uname + ".boost.xml") boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] @@ -280,14 +330,9 @@ def find_tests(options): tests_to_run = [] - def load_cfg(path): - with open(os.path.join(path, "suite.yaml"), "r") as cfg: - return yaml.safe_load(cfg.read()) - - def add_test_list(path, mode): - cfg = load_cfg(path) - kind = cfg["type"] + suite = TestSuite.opt_create(path) + kind = suite.cfg["type"] lst = glob.glob(os.path.join(path, "*_test.cc")) for t in lst: t = os.path.join(kind, os.path.splitext(os.path.basename(t))[0]) @@ -300,7 +345,7 @@ def find_tests(options): patterns = options.name if options.name else [t] for p in patterns: if p in t: - tests_to_run.append((t, a, kind, mode)) + tests_to_run.append((t, a, suite, mode)) for f in glob.glob(os.path.join("test", "*")): if os.path.isdir(f) and os.path.isfile(os.path.join(f, "suite.yaml")): @@ -387,7 +432,7 @@ def print_summary(tests, failed_tests): def write_xunit_report(tests, options): - unit_tests = [t for t in tests if t.kind == "unit"] + unit_tests = [t for t in tests if isinstance(t.suite, UnitTestSuite)] num_unit_failed = sum(1 for t in unit_tests if not t.success) xml_results = ET.Element('testsuite', name='non-boost tests', From 34a1b49fc3221135f676e6a973860b67b6b56443 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 15:29:56 +0300 Subject: [PATCH 16/39] test.py: move add_test_list() to TestSuite --- test.py | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/test.py b/test.py index daf533c409..d2fe149654 100755 --- a/test.py +++ b/test.py @@ -114,6 +114,20 @@ class TestSuite(ABC): TestSuite.suites[path] = suite return suite + def add_test_list(self, mode, 
options, tests_to_run): + lst = glob.glob(os.path.join(self.path, "*_test.cc")) + for t in lst: + t = os.path.join(self.name, os.path.splitext(os.path.basename(t))[0]) + if mode not in ["release", "dev"] and t in long_tests: + continue + args = custom_test_args.get(t) + if isinstance(args, (str, type(None))): + args = [args] + patterns = options.name if options.name else [t] + for a in args: + for p in patterns: + if p in t: + tests_to_run.append((t, a, self, mode)) class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" @@ -330,27 +344,11 @@ def find_tests(options): tests_to_run = [] - def add_test_list(path, mode): - suite = TestSuite.opt_create(path) - kind = suite.cfg["type"] - lst = glob.glob(os.path.join(path, "*_test.cc")) - for t in lst: - t = os.path.join(kind, os.path.splitext(os.path.basename(t))[0]) - if mode not in ["release", "dev"] and t in long_tests: - continue - args = custom_test_args.get(t) - if isinstance(args, (str, type(None))): - args = [args] - for a in args: - patterns = options.name if options.name else [t] - for p in patterns: - if p in t: - tests_to_run.append((t, a, suite, mode)) - for f in glob.glob(os.path.join("test", "*")): if os.path.isdir(f) and os.path.isfile(os.path.join(f, "suite.yaml")): for mode in options.modes: - add_test_list(f, mode) + suite = TestSuite.opt_create(f) + suite.add_test_list(mode, options, tests_to_run) if not tests_to_run: print("Test {} not found".format(options.name)) sys.exit(1) From b5b49441118e5f4a4db40b8a2fdedc01b08ac7e0 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 15:34:25 +0300 Subject: [PATCH 17/39] test.py: move repeat handling to TestSuite This way we can avoid iterating over all tests to handle --repeat. Besides, going forward the tests will be stored in two places: in the global list of all tests, for the runner, and per suite, for suite-based reporting, so it's easier if TestSuite is fully responsible for finding and adding tests. 
--- test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index d2fe149654..2201db9ade 100755 --- a/test.py +++ b/test.py @@ -127,7 +127,8 @@ class TestSuite(ABC): for a in args: for p in patterns: if p in t: - tests_to_run.append((t, a, self, mode)) + for i in range(options.repeat): + tests_to_run.append((t, a, self, mode)) class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" @@ -357,7 +358,6 @@ def find_tests(options): logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs", len(tests_to_run), options.repeat, options.jobs) - tests_to_run = [t for t in tests_to_run for _ in range(options.repeat)] tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)] return tests_to_run From 32ffde91ba2ee5f5ddde97fac683993b114d8080 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 15:56:51 +0300 Subject: [PATCH 18/39] test.py: move test id assignment to TestSuite Going forward finding and creating tests will be a responsibility of TestSuite, so the id generator needs to be shared. --- test.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index 2201db9ade..91f003d977 100755 --- a/test.py +++ b/test.py @@ -82,6 +82,7 @@ class TestSuite(ABC): # All existing test suites, one suite per path. 
suites = dict() + _next_id = 0 def __init__(self, path, cfg): self.path = path @@ -89,6 +90,11 @@ class TestSuite(ABC): self.cfg = cfg self.tests = [] + @property + def next_id(self): + TestSuite._next_id += 1 + return TestSuite._next_id + @staticmethod def load_cfg(path): with open(os.path.join(path, "suite.yaml"), "r") as cfg_file: @@ -128,7 +134,9 @@ class TestSuite(ABC): for p in patterns: if p in t: for i in range(options.repeat): - tests_to_run.append((t, a, self, mode)) + test = UnitTest(self.next_id, t, a, self, mode, options) + tests_to_run.append(test) + class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" @@ -358,8 +366,6 @@ def find_tests(options): logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs", len(tests_to_run), options.repeat, options.jobs) - tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)] - return tests_to_run From 7e10bebcda1f28e83573bbd6e4fb1e8888381b75 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 16:51:31 +0300 Subject: [PATCH 19/39] test.py: move long test list to suite.yaml Use suite.yaml for long tests --- test.py | 14 ++++---------- test/unit/suite.yaml | 7 +++++++ 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/test.py b/test.py index 91f003d977..d9ce059f2a 100755 --- a/test.py +++ b/test.py @@ -51,14 +51,6 @@ custom_test_args = { 'unit/row_cache_stress_test': '-c1 -m1G --seconds 10', } -# Only run in dev, release configurations, skip in others -long_tests = set([ - 'unit/lsa_async_eviction_test', - 'unit/lsa_sync_eviction_test', - 'unit/row_cache_alloc_stress_test', - 'unit/row_cache_stress_test' -]) - CONCOLORS = {'green': '\033[1;32m', 'red': '\033[1;31m', 'nocolor': '\033[0m'} def colorformat(msg, **kwargs): @@ -122,10 +114,12 @@ class TestSuite(ABC): def add_test_list(self, mode, options, tests_to_run): lst = glob.glob(os.path.join(self.path, "*_test.cc")) + long_tests = 
set(self.cfg.get("long", [])) for t in lst: - t = os.path.join(self.name, os.path.splitext(os.path.basename(t))[0]) - if mode not in ["release", "dev"] and t in long_tests: + shortname = os.path.splitext(os.path.basename(t))[0] + if mode not in ["release", "dev"] and shortname in long_tests: continue + t = os.path.join(self.name, shortname) args = custom_test_args.get(t) if isinstance(args, (str, type(None))): args = [args] diff --git a/test/unit/suite.yaml b/test/unit/suite.yaml index b566c7ecf1..34c4a2576f 100644 --- a/test/unit/suite.yaml +++ b/test/unit/suite.yaml @@ -1 +1,8 @@ +# Suite test type. Supported types: unit, boost, cql type: unit +# A list of long tests, these are only run in dev and release modes +long: + - lsa_async_eviction_test + - lsa_sync_eviction_test + - row_cache_alloc_stress_test + - row_cache_stress_test From 4a20617be350cee1527fc51997e2969a24f0c626 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 17:38:30 +0300 Subject: [PATCH 20/39] test.py: introduce add_test(), which is suite-specific --- test.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index d9ce059f2a..be66060182 100755 --- a/test.py +++ b/test.py @@ -112,6 +112,10 @@ class TestSuite(ABC): TestSuite.suites[path] = suite return suite + @abstractmethod + def add_test(self, name, args, mode, options, tests_to_run): + pass + def add_test_list(self, mode, options, tests_to_run): lst = glob.glob(os.path.join(self.path, "*_test.cc")) long_tests = set(self.cfg.get("long", [])) @@ -128,13 +132,17 @@ class TestSuite(ABC): for p in patterns: if p in t: for i in range(options.repeat): - test = UnitTest(self.next_id, t, a, self, mode, options) - tests_to_run.append(test) + self.add_test(t, a, mode, options, tests_to_run) class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" - pass + + def add_test(self, name, args, mode, options, tests_to_run): + """Create a UnitTest class with 
possibly custom command line options + and add it to the list of tests""" + test = UnitTest(self.next_id, name, args, self, mode, options) + tests_to_run.append(test) class BoostTestSuite(UnitTestSuite): From ef6cebcbd2f580c40941640c4851abeb0032a121 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 17:44:17 +0300 Subject: [PATCH 21/39] test.py: move command line argument processing to UnitTestSuite --- test.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test.py b/test.py index be66060182..cefba865fc 100755 --- a/test.py +++ b/test.py @@ -124,25 +124,26 @@ class TestSuite(ABC): if mode not in ["release", "dev"] and shortname in long_tests: continue t = os.path.join(self.name, shortname) - args = custom_test_args.get(t) - if isinstance(args, (str, type(None))): - args = [args] patterns = options.name if options.name else [t] - for a in args: - for p in patterns: - if p in t: - for i in range(options.repeat): - self.add_test(t, a, mode, options, tests_to_run) + for p in patterns: + if p in t: + for i in range(options.repeat): + self.add_test(t, mode, options, tests_to_run) + class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" - def add_test(self, name, args, mode, options, tests_to_run): + def add_test(self, name, mode, options, tests_to_run): """Create a UnitTest class with possibly custom command line options and add it to the list of tests""" - test = UnitTest(self.next_id, name, args, self, mode, options) - tests_to_run.append(test) + args = custom_test_args.get(name) + if isinstance(args, (str, type(None))): + args = [args] + for a in args: + test = UnitTest(self.next_id, name, a, self, mode, options) + tests_to_run.append(test) class BoostTestSuite(UnitTestSuite): From d3126f08ed27fc40c23729685e7a2af3d2584d16 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 18:01:54 +0300 Subject: [PATCH 22/39] test.py: move custom unit test command 
line arguments to suite.yaml Load the command line arguments, if any, from suite.yaml, rather than keep them hard-coded in test.py. This allows operations team to have easier access to these. Note I had to sacrifice dynamic smp count for mutation_reader_test (the new smp count is fixed at 3) since this is part of test configuration now. --- test.py | 38 +++++++++++---------------------------- test/boost/suite.yaml | 10 ++++++++++ test/unit/suite.yaml | 12 ++++++++++++ 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/test.py b/test.py index cefba865fc..d49214383b 100755 --- a/test.py +++ b/test.py @@ -35,22 +35,6 @@ import sys import xml.etree.ElementTree as ET import yaml -# Apply custom options to these tests -custom_test_args = { - 'boost/mutation_reader_test': '-c{} -m2G'.format(min(os.cpu_count(), 3)), - 'boost/sstable_test': '-c1 -m2G', - 'boost/sstable_datafile_test': '-c1 -m2G', - 'boost/sstable_3_x_test': '-c1 -m2G', - 'unit/lsa_async_eviction_test': '-c1 -m200M --size 1024 --batch 3000 --count 2000000', - 'unit/lsa_sync_eviction_test': [ - '-c1 -m100M --count 10 --standard-object-size 3000000', - '-c1 -m100M --count 24000 --standard-object-size 2048', - '-c1 -m1G --count 4000000 --standard-object-size 128' - ], - 'unit/row_cache_alloc_stress_test': '-c1 -m2G', - 'unit/row_cache_stress_test': '-c1 -m1G --seconds 10', -} - CONCOLORS = {'green': '\033[1;32m', 'red': '\033[1;31m', 'nocolor': '\033[0m'} def colorformat(msg, **kwargs): @@ -81,6 +65,8 @@ class TestSuite(ABC): self.name = os.path.basename(self.path) self.cfg = cfg self.tests = [] + # Map of custom test command line arguments, if configured + self.custom_args = cfg.get("custom_args", {}) @property def next_id(self): @@ -128,21 +114,19 @@ class TestSuite(ABC): for p in patterns: if p in t: for i in range(options.repeat): - self.add_test(t, mode, options, tests_to_run) + self.add_test(shortname, mode, options, tests_to_run) class UnitTestSuite(TestSuite): """TestSuite instantiation 
for non-boost unit tests""" - def add_test(self, name, mode, options, tests_to_run): - """Create a UnitTest class with possibly custom command line options - and add it to the list of tests""" - args = custom_test_args.get(name) - if isinstance(args, (str, type(None))): - args = [args] + def add_test(self, shortname, mode, options, tests_to_run): + """Create a UnitTest class with possibly custom command line + arguments and add it to the list of tests""" + args = self.custom_args.get(shortname, [None]) for a in args: - test = UnitTest(self.next_id, name, a, self, mode, options) + test = UnitTest(self.next_id, shortname, a, self, mode, options) tests_to_run.append(test) @@ -155,14 +139,14 @@ class UnitTest: standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split() seastar_args = '-c2 -m2G' - def __init__(self, test_no, name, opts, suite, mode, options): + def __init__(self, test_no, shortname, opts, suite, mode, options): if opts is None: opts = UnitTest.seastar_args self.id = test_no # Name with test suite name - self.name = name + self.name = os.path.join(suite.name, shortname) # Name within the suite - self.shortname = os.path.basename(name) + self.shortname = shortname self.mode = mode self.suite = suite self.path = os.path.join("build", self.mode, "test", self.name) diff --git a/test/boost/suite.yaml b/test/boost/suite.yaml index 76292b1fb7..94bf4822c4 100644 --- a/test/boost/suite.yaml +++ b/test/boost/suite.yaml @@ -1 +1,11 @@ type: boost +# Custom command line arguments for some of the tests +custom_args: + mutation_reader_test: + - '-c3 -m2G' + sstable_test: + - '-c1 -m2G' + sstable_datafile_test: + - '-c1 -m2G' + sstable_3_x_test: + - '-c1 -m2G' diff --git a/test/unit/suite.yaml b/test/unit/suite.yaml index 34c4a2576f..885f15c51b 100644 --- a/test/unit/suite.yaml +++ b/test/unit/suite.yaml @@ -6,3 +6,15 @@ long: - lsa_sync_eviction_test - row_cache_alloc_stress_test - row_cache_stress_test +# 
Custom command line arguments for some of the tests +custom_args: + lsa_async_eviction_test: + - '-c1 -m200M --size 1024 --batch 3000 --count 2000000' + lsa_sync_eviction_test: + - '-c1 -m100M --count 10 --standard-object-size 3000000' + - '-c1 -m100M --count 24000 --standard-object-size 2048' + - '-c1 -m1G --count 4000000 --standard-object-size 128' + row_cache_alloc_stress_test: + - '-c1 -m2G' + row_cache_stress_test: + - '-c1 -m1G --seconds 10' From fd6897d53ea6243af0f52fc47de2f4546aed28d0 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 18:31:08 +0300 Subject: [PATCH 23/39] test.py: move the default arguments handling to UnitTestSuite Move UnitTest default seastar argument handling to UnitTestSuite (cleanup). --- test.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index d49214383b..2a811c1aa9 100755 --- a/test.py +++ b/test.py @@ -124,7 +124,10 @@ class UnitTestSuite(TestSuite): def add_test(self, shortname, mode, options, tests_to_run): """Create a UnitTest class with possibly custom command line arguments and add it to the list of tests""" - args = self.custom_args.get(shortname, [None]) + + # Default seastar arguments, if not provided in custom test options, + # are two cores and 2G of RAM + args = self.custom_args.get(shortname, ["-c2 -m2G"]) for a in args: test = UnitTest(self.next_id, shortname, a, self, mode, options) tests_to_run.append(test) @@ -137,11 +140,7 @@ class BoostTestSuite(UnitTestSuite): class UnitTest: standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split() - seastar_args = '-c2 -m2G' - def __init__(self, test_no, shortname, opts, suite, mode, options): - if opts is None: - opts = UnitTest.seastar_args self.id = test_no # Name with test suite name self.name = os.path.join(suite.name, shortname) From c171882b512cf4337e7571159083b36d61628577 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Fri, 13 Dec 
2019 22:51:12 +0300 Subject: [PATCH 24/39] test.py: introduce base class Test for CQL and Unit tests --- test.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index 2a811c1aa9..f7fa31b9b2 100755 --- a/test.py +++ b/test.py @@ -29,6 +29,7 @@ import logging import multiprocessing import os import pathlib +import shlex import signal import subprocess import sys @@ -138,9 +139,9 @@ class BoostTestSuite(UnitTestSuite): pass -class UnitTest: - standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split() - def __init__(self, test_no, shortname, opts, suite, mode, options): +class Test: + """Base class for CQL, Unit and Boost tests""" + def __init__(self, test_no, shortname, suite, mode, options): self.id = test_no # Name with test suite name self.name = os.path.join(suite.name, shortname) @@ -148,13 +149,20 @@ class UnitTest: self.shortname = shortname self.mode = mode self.suite = suite - self.path = os.path.join("build", self.mode, "test", self.name) - self.args = opts.split() + UnitTest.standard_args # Unique file name, which is also readable by human, as filename prefix self.uname = "{}.{}.{}".format(self.mode, self.shortname, self.id) self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") self.success = None + +class UnitTest(Test): + standard_args = shlex.split("--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0") + + def __init__(self, test_no, shortname, args, suite, mode, options): + super().__init__(test_no, shortname, suite, mode, options) + self.path = os.path.join("build", self.mode, "test", self.name) + self.args = shlex.split(args) + UnitTest.standard_args + if isinstance(suite, BoostTestSuite): boost_args = [] xmlout = os.path.join(options.jenkins, self.uname + ".boost.xml") From 21fbe5fa81cd4c25bac1d70e04c43b37ec3f6a38 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 
19:13:08 +0300 Subject: [PATCH 25/39] test.py: tidy up print_summary() Now that we have tabular output, make print_summary() more concise. --- test.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/test.py b/test.py index f7fa31b9b2..6db5018ff3 100755 --- a/test.py +++ b/test.py @@ -419,14 +419,12 @@ def read_log(log_filename): def print_summary(tests, failed_tests): if failed_tests: - print('\n\nOutput of the failed tests:') + print("The following test(s) have failed: {}".format( + " ".join([t.name for t in failed_tests]))) for test in failed_tests: - print("Test {} {} failed:".format(test.path, " ".join(test.args))) + print("Output of {} {}:".format(test.path, " ".join(test.args))) print(read_log(test.log_filename)) - print('\n\nThe following test(s) have failed:') - for test in failed_tests: - print(' {} {}'.format(test.path, ' '.join(test.args))) - print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), len(tests))) + print("Summary: {} of the total {} tests failed".format(len(failed_tests), len(tests))) def write_xunit_report(tests, options): From 18aafacfad50e6aa722178b13c362929e24962fe Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 19:14:39 +0300 Subject: [PATCH 26/39] test.py: ensure print_summary() is agnostic of test type Introduce a virtual Test.print_summary() to print a failed test summary. 
--- test.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 6db5018ff3..2c60064b71 100755 --- a/test.py +++ b/test.py @@ -154,6 +154,10 @@ class Test: self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") self.success = None + @abstractmethod + def print_summary(self): + pass + class UnitTest(Test): standard_args = shlex.split("--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0") @@ -170,6 +174,10 @@ class UnitTest(Test): boost_args += ['--'] self.args = boost_args + self.args + def print_summary(self): + print("Output of {} {}:".format(self.path, " ".join(self.args))) + print(read_log(self.log_filename)) + def print_start_blurb(): print("="*80) @@ -422,8 +430,7 @@ def print_summary(tests, failed_tests): print("The following test(s) have failed: {}".format( " ".join([t.name for t in failed_tests]))) for test in failed_tests: - print("Output of {} {}:".format(test.path, " ".join(test.args))) - print(read_log(test.log_filename)) + test.print_summary() print("Summary: {} of the total {} tests failed".format(len(failed_tests), len(tests))) From abcc182ab317de930209dec817c69c5340e7c3d5 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 20:29:34 +0300 Subject: [PATCH 27/39] test.py: virtualize write_xunit_report() Make sure any non-boost test can participate in the report. 
--- test.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/test.py b/test.py index 2c60064b71..45434e050d 100755 --- a/test.py +++ b/test.py @@ -132,6 +132,7 @@ class UnitTestSuite(TestSuite): for a in args: test = UnitTest(self.next_id, shortname, a, self, mode, options) tests_to_run.append(test) + self.tests.append(test) class BoostTestSuite(UnitTestSuite): @@ -434,19 +435,24 @@ def print_summary(tests, failed_tests): print("Summary: {} of the total {} tests failed".format(len(failed_tests), len(tests))) -def write_xunit_report(tests, options): - unit_tests = [t for t in tests if isinstance(t.suite, UnitTestSuite)] - num_unit_failed = sum(1 for t in unit_tests if not t.success) - - xml_results = ET.Element('testsuite', name='non-boost tests', - tests=str(len(unit_tests)), failures=str(num_unit_failed), errors='0') - - for test in unit_tests: - xml_res = ET.SubElement(xml_results, 'testcase', name=test.path) - if not test.success: +def write_xunit_report(options): + total = 0 + failed = 0 + xml_results = ET.Element("testsuite", name="non-boost tests", errors="0") + for suite in TestSuite.suites.values(): + if isinstance(suite, BoostTestSuite): + continue + for test in suite.tests: + total += 1 + xml_res = ET.SubElement(xml_results, 'testcase', name=test.uname) + if test.success is True: + continue + failed += 1 xml_fail = ET.SubElement(xml_res, 'failure') - xml_fail.text = "Test {} {} failed:".format(test.path, " ".join(test.args)) + xml_fail.text = "Test {} {} failed:\n".format(test.path, " ".join(test.args)) xml_fail.text += read_log(test.log_filename) + xml_results.set("tests", str(total)) + xml_results.set("failures", str(failed)) with open(options.xunit, "w") as f: ET.ElementTree(xml_results).write(f, encoding="unicode") @@ -483,7 +489,7 @@ async def main(): print_summary(tests, failed_tests) - write_xunit_report(tests, options) + write_xunit_report(options) return 0 if not failed_tests else -1 From 
d05f6c3cc7f1a0b9a31a0460b6cedb8d71297737 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 20:47:21 +0300 Subject: [PATCH 28/39] test.py: virtualize test search pattern per TestSuite CQL tests have .cql extension, while unit tests have .cc. --- test.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index 45434e050d..15031502ec 100755 --- a/test.py +++ b/test.py @@ -99,12 +99,17 @@ class TestSuite(ABC): TestSuite.suites[path] = suite return suite + @property + @abstractmethod + def pattern(self): + pass + @abstractmethod def add_test(self, name, args, mode, options, tests_to_run): pass def add_test_list(self, mode, options, tests_to_run): - lst = glob.glob(os.path.join(self.path, "*_test.cc")) + lst = glob.glob(os.path.join(self.path, self.pattern)) long_tests = set(self.cfg.get("long", [])) for t in lst: shortname = os.path.splitext(os.path.basename(t))[0] @@ -134,6 +139,10 @@ class UnitTestSuite(TestSuite): tests_to_run.append(test) self.tests.append(test) + @property + def pattern(self): + return "*_test.cc" + class BoostTestSuite(UnitTestSuite): """TestSuite for boost unit tests""" From 169128f80beffd16f933891967fb3c4647b8b97c Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 21:22:36 +0300 Subject: [PATCH 29/39] test.py: virtualize Test.run(), to introduce CqlTest.Run next --- test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 15031502ec..58953712da 100755 --- a/test.py +++ b/test.py @@ -188,6 +188,10 @@ class UnitTest(Test): print("Output of {} {}:".format(self.path, " ".join(self.args))) print(read_log(self.log_filename)) + async def run(self, options): + await run_test(self, options) + return self + def print_start_blurb(): print("="*80) @@ -263,7 +267,7 @@ async def run_test(test, options): print(' with error {e}\n'.format(e=e), file=file) report_error(e) logging.info("Test #%d %s", test.id, "passed" if 
test.success else "failed") - return test + def setup_signal_handlers(loop, signaled): @@ -410,7 +414,7 @@ async def run_all_tests(tests_to_run, signaled, options): # Wait for some task to finish done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) await reap(done, pending, signaled) - pending.add(asyncio.create_task(run_test(test, options))) + pending.add(asyncio.create_task(test.run(options))) # Wait & reap ALL tasks but signaled_task # Do not use asyncio.ALL_COMPLETED to print a nice progress report while len(pending) > 1: From 4095ab08c8ec4bad2c49fe2dcf52800eafbe97a3 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Sat, 14 Dec 2019 00:52:40 +0300 Subject: [PATCH 30/39] test.py: remove tests_to_run Avoid storing each test twice, use per-tests list to construct a global iterable. --- test.py | 49 +++++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/test.py b/test.py index 58953712da..4eb7b414e6 100755 --- a/test.py +++ b/test.py @@ -25,6 +25,7 @@ import argparse import asyncio import glob import io +import itertools import logging import multiprocessing import os @@ -74,6 +75,10 @@ class TestSuite(ABC): TestSuite._next_id += 1 return TestSuite._next_id + @staticmethod + def test_count(): + return TestSuite._next_id + @staticmethod def load_cfg(path): with open(os.path.join(path, "suite.yaml"), "r") as cfg_file: @@ -99,16 +104,21 @@ class TestSuite(ABC): TestSuite.suites[path] = suite return suite + @staticmethod + def tests(): + return itertools.chain(*[suite.tests for suite in + TestSuite.suites.values()]) + @property @abstractmethod def pattern(self): pass @abstractmethod - def add_test(self, name, args, mode, options, tests_to_run): + def add_test(self, name, args, mode, options): pass - def add_test_list(self, mode, options, tests_to_run): + def add_test_list(self, mode, options): lst = glob.glob(os.path.join(self.path, self.pattern)) long_tests = 
set(self.cfg.get("long", [])) for t in lst: @@ -120,14 +130,13 @@ class TestSuite(ABC): for p in patterns: if p in t: for i in range(options.repeat): - self.add_test(shortname, mode, options, tests_to_run) - + self.add_test(shortname, mode, options) class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" - def add_test(self, shortname, mode, options, tests_to_run): + def add_test(self, shortname, mode, options): """Create a UnitTest class with possibly custom command line arguments and add it to the list of tests""" @@ -136,7 +145,6 @@ class UnitTestSuite(TestSuite): args = self.custom_args.get(shortname, ["-c2 -m2G"]) for a in args: test = UnitTest(self.next_id, shortname, a, self, mode, options) - tests_to_run.append(test) self.tests.append(test) @property @@ -367,26 +375,22 @@ def parse_cmd_line(): def find_tests(options): - tests_to_run = [] - for f in glob.glob(os.path.join("test", "*")): if os.path.isdir(f) and os.path.isfile(os.path.join(f, "suite.yaml")): for mode in options.modes: suite = TestSuite.opt_create(f) - suite.add_test_list(mode, options, tests_to_run) + suite.add_test_list(mode, options) - if not tests_to_run: + if not TestSuite.test_count(): print("Test {} not found".format(options.name)) sys.exit(1) logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs", - len(tests_to_run), options.repeat, options.jobs) - - return tests_to_run + TestSuite.test_count(), options.repeat, options.jobs) -async def run_all_tests(tests_to_run, signaled, options): - cookie = len(tests_to_run) +async def run_all_tests(signaled, options): + cookie = TestSuite.test_count() signaled_task = asyncio.create_task(signaled.wait()) pending = set([signaled_task]) @@ -408,7 +412,7 @@ async def run_all_tests(tests_to_run, signaled, options): cookie = print_progress(result, cookie, options.verbose) print_start_blurb() try: - for test in tests_to_run: + for test in TestSuite.tests(): # +1 for 'signaled' event if len(pending) > 
options.jobs: # Wait for some task to finish @@ -439,13 +443,14 @@ def read_log(log_filename): return "===Error reading log {}===".format(e) -def print_summary(tests, failed_tests): +def print_summary(failed_tests): if failed_tests: print("The following test(s) have failed: {}".format( " ".join([t.name for t in failed_tests]))) for test in failed_tests: test.print_summary() - print("Summary: {} of the total {} tests failed".format(len(failed_tests), len(tests))) + print("Summary: {} of the total {} tests failed".format( + len(failed_tests), TestSuite.test_count())) def write_xunit_report(options): @@ -488,19 +493,19 @@ async def main(): open_log(options.tmpdir) - tests = find_tests(options) + find_tests(options) signaled = asyncio.Event() setup_signal_handlers(asyncio.get_event_loop(), signaled) - await run_all_tests(tests, signaled, options) + await run_all_tests(signaled, options) if signaled.is_set(): return -signaled.signo - failed_tests = [t for t in tests if t.success is not True] + failed_tests = [t for t in TestSuite.tests() if t.success is not True] - print_summary(tests, failed_tests) + print_summary(failed_tests) write_xunit_report(options) From 0165413405a62ac38e00b501b7beebfc7d4a677e Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 18 Dec 2019 23:05:52 +0300 Subject: [PATCH 31/39] test.py: split test output per test mode Store test temporary files and logs in ${testdir}/${mode}. Remove --jenkins and --xunit, and always write XML files at a predefined location: ${testdir}/${mode}/xml/. Use .xunit.xml extension for tests whose XML output is in xunit format, and junit.xml for an accumulated output of all non-boost tests in junit format. 
--- test.py | 57 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/test.py b/test.py index 4eb7b414e6..cce125ebfe 100755 --- a/test.py +++ b/test.py @@ -118,6 +118,10 @@ class TestSuite(ABC): def add_test(self, name, args, mode, options): pass + def junit_tests(self): + """Tests which participate in a consolidated junit report""" + return self.tests + def add_test_list(self, mode, options): lst = glob.glob(os.path.join(self.path, self.pattern)) long_tests = set(self.cfg.get("long", [])) @@ -154,7 +158,10 @@ class UnitTestSuite(TestSuite): class BoostTestSuite(UnitTestSuite): """TestSuite for boost unit tests""" - pass + + def junit_tests(self): + """Boost tests produce an own XML output, so are not included in a junit report""" + return [] class Test: @@ -168,8 +175,8 @@ class Test: self.mode = mode self.suite = suite # Unique file name, which is also readable by human, as filename prefix - self.uname = "{}.{}.{}".format(self.mode, self.shortname, self.id) - self.log_filename = os.path.join(options.tmpdir, self.uname + ".log") + self.uname = "{}.{}".format(self.shortname, self.id) + self.log_filename = os.path.join(options.tmpdir, self.mode, self.uname + ".log") self.success = None @abstractmethod @@ -187,7 +194,8 @@ class UnitTest(Test): if isinstance(suite, BoostTestSuite): boost_args = [] - xmlout = os.path.join(options.jenkins, self.uname + ".boost.xml") + xmlout = os.path.join(options.tmpdir, self.mode, "xml", + self.uname + ".xunit.xml") boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] boost_args += ['--'] self.args = boost_args + self.args @@ -327,21 +335,10 @@ def parse_cmd_line(): help="number of times to repeat test execution") parser.add_argument('--timeout', action="store", default="3000", type=int, help="timeout value for test execution") - parser.add_argument( - "--jenkins", - action="store", - help="""Jenkins output file prefix. 
Default: ${tmpdir}/xml""" - ) parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Verbose reporting') parser.add_argument('--jobs', '-j', action="store", default=default_num_jobs, type=int, help="Number of jobs to use for running the tests") - parser.add_argument( - "--xunit", - action="store", - help="""Name of a file to write results of non-boost tests to in - xunit format. Default: ${tmpdir}/xml/xunit.xml""" - ) args = parser.parse_args() if not sys.stdout.isatty(): @@ -362,13 +359,10 @@ def parse_cmd_line(): args.tmpdir = os.path.abspath(args.tmpdir) prepare_dir(args.tmpdir, "*.log") - if not args.jenkins or not args.xunit: - xmldir = os.path.join(args.tmpdir, "xml") - prepare_dir(xmldir, "*.xml") - if args.jenkins is None: - args.jenkins = xmldir - if args.xunit is None: - args.xunit = os.path.join(xmldir, "xunit.xml") + + for mode in args.modes: + prepare_dir(os.path.join(args.tmpdir, mode), "*.{log,reject}") + prepare_dir(os.path.join(args.tmpdir, mode, "xml"), "*.xml") return args @@ -453,25 +447,29 @@ def print_summary(failed_tests): len(failed_tests), TestSuite.test_count())) -def write_xunit_report(options): +def write_junit_report(tmpdir, mode): + junit_filename = os.path.join(tmpdir, mode, "xml", "junit.xml") total = 0 failed = 0 xml_results = ET.Element("testsuite", name="non-boost tests", errors="0") for suite in TestSuite.suites.values(): - if isinstance(suite, BoostTestSuite): - continue - for test in suite.tests: + for test in suite.junit_tests(): + if test.mode != mode: + continue total += 1 - xml_res = ET.SubElement(xml_results, 'testcase', name=test.uname) + xml_res = ET.SubElement(xml_results, 'testcase', + name="{}.{}.{}".format(test.shortname, mode, test.id)) if test.success is True: continue failed += 1 xml_fail = ET.SubElement(xml_res, 'failure') xml_fail.text = "Test {} {} failed:\n".format(test.path, " ".join(test.args)) xml_fail.text += read_log(test.log_filename) + if total == 0: + return 
xml_results.set("tests", str(total)) xml_results.set("failures", str(failed)) - with open(options.xunit, "w") as f: + with open(junit_filename, "w") as f: ET.ElementTree(xml_results).write(f, encoding="unicode") @@ -507,7 +505,8 @@ async def main(): print_summary(failed_tests) - write_xunit_report(options) + for mode in options.modes: + write_junit_report(options.tmpdir, mode) return 0 if not failed_tests else -1 From 0ec27267abdefa6e3d2048588c3c6d87520a2439 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 16 Dec 2019 21:11:41 +0300 Subject: [PATCH 32/39] test.py: remove custom colors and define a color palette Using a standard Python module improves readability, and allows using colors easily in other output. --- test.py | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/test.py b/test.py index cce125ebfe..9444d9774c 100755 --- a/test.py +++ b/test.py @@ -23,6 +23,7 @@ from abc import ABC, abstractmethod import argparse import asyncio +import colorama import glob import io import itertools @@ -37,21 +38,30 @@ import sys import xml.etree.ElementTree as ET import yaml -CONCOLORS = {'green': '\033[1;32m', 'red': '\033[1;31m', 'nocolor': '\033[0m'} -def colorformat(msg, **kwargs): - fmt = dict(CONCOLORS) - fmt.update(kwargs) - return msg.format(**fmt) +def create_formatter(*decorators): + """Return a function which decorates its argument with the given + color/style if stdout is a tty, and leaves intact otherwise.""" + def color(arg): + return "".join(decorators) + str(arg) + colorama.Style.RESET_ALL + + def nocolor(arg): + return str(arg) + return color if os.isatty(sys.stdout.fileno()) else nocolor -def status_to_string(success): - if success: - status = colorformat("{green}[ PASS ]{nocolor}") if os.isatty(sys.stdout.fileno()) else "[ PASS ]" - else: - status = colorformat("{red}[ FAIL ]{nocolor}") if os.isatty(sys.stdout.fileno()) else "[ FAIL ]" - - return status +class palette: + """Color 
palette for formatting terminal output""" + ok = create_formatter(colorama.Fore.GREEN, colorama.Style.BRIGHT) + fail = create_formatter(colorama.Fore.RED, colorama.Style.BRIGHT) + new = create_formatter(colorama.Fore.BLUE) + skip = create_formatter(colorama.Style.DIM) + path = create_formatter(colorama.Style.BRIGHT) + diff_in = create_formatter(colorama.Fore.GREEN) + diff_out = create_formatter(colorama.Fore.RED) + diff_mark = create_formatter(colorama.Fore.MAGENTA) + warn = create_formatter(colorama.Fore.YELLOW) + crit = create_formatter(colorama.Fore.RED, colorama.Style.BRIGHT) class TestSuite(ABC): @@ -229,7 +239,7 @@ def print_progress(test, cookie, verbose): msg = "{:9s} {:50s} {:^8s} {:8s}".format( "[{}/{}]".format(n, n_total), test.name, test.mode[:8], - status_to_string(test.success) + palette.ok("[ PASS ]") if test.success else palette.fail("[ FAIL ]") ) if verbose is False: print('\r' + ' ' * last_len, end='') @@ -376,7 +386,7 @@ def find_tests(options): suite.add_test_list(mode, options) if not TestSuite.test_count(): - print("Test {} not found".format(options.name)) + print("Test {} not found".format(palette.path(options.name[0]))) sys.exit(1) logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs", @@ -440,7 +450,7 @@ def read_log(log_filename): def print_summary(failed_tests): if failed_tests: print("The following test(s) have failed: {}".format( - " ".join([t.name for t in failed_tests]))) + palette.path(" ".join([t.name for t in failed_tests])))) for test in failed_tests: test.print_summary() print("Summary: {} of the total {} tests failed".format( @@ -511,6 +521,8 @@ async def main(): return 0 if not failed_tests else -1 if __name__ == "__main__": + colorama.init() + if sys.version_info < (3, 7): print("Python 3.7 or newer is required to run this program") sys.exit(-1) From b114bfe0bde27168f453a7416e5d800e339a6453 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 9 Dec 2019 21:26:38 +0300 Subject: [PATCH 33/39] 
test.py: initial import of CQL test driver, cql_repl cql_repl is a simple program which reads CQL from stdin, executes it, and writes results to stdout. It supports --input, --output and --log options. --log is directed to cql_test.log by default. --input is stdin by default, --output is stdout by default. The result set output is printed with a basic JSON visitor. --- configure.py | 1 + test/tools/cql_repl.cc | 218 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 test/tools/cql_repl.cc diff --git a/configure.py b/configure.py index 0b977af7b4..c5499c7ed4 100755 --- a/configure.py +++ b/configure.py @@ -383,6 +383,7 @@ scylla_tests = [ 'test/perf/perf_row_cache_update', 'test/perf/perf_simple_query', 'test/perf/perf_sstable', + 'test/tools/cql_repl', 'test/unit/lsa_async_eviction_test', 'test/unit/lsa_sync_eviction_test', 'test/unit/memory_footprint_test', diff --git a/test/tools/cql_repl.cc b/test/tools/cql_repl.cc new file mode 100644 index 0000000000..18c43451b0 --- /dev/null +++ b/test/tools/cql_repl.cc @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2019 ScyllaDB + */ + +/* + * This file is part of Scylla. + * + * Scylla is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * Scylla is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Scylla. If not, see . 
+ */ +#include +#include + +#include "test/lib/cql_test_env.hh" +#include "test/lib/cql_assertions.hh" + +#include +#include +#include +#include +#include "transport/messages/result_message.hh" +#include "types/user.hh" +#include "types/map.hh" +#include "types/list.hh" +#include "types/set.hh" +#include "db/config.hh" +#include "cql3/cql_config.hh" +#include "cql3/type_json.hh" +#include "test/lib/exception_utils.hh" + +static std::ofstream std_cout; + +// +// A helper class to serialize result set output to a formatted JSON +// +class json_visitor final : public cql_transport::messages::result_message::visitor { + Json::Value& _root; +public: + json_visitor(Json::Value& root) + : _root(root) + { + } + + virtual void visit(const cql_transport::messages::result_message::void_message&) override { + _root["status"] = "ok"; + } + + virtual void visit(const cql_transport::messages::result_message::set_keyspace& m) override { + _root["status"] = "ok"; + } + + virtual void visit(const cql_transport::messages::result_message::prepared::cql& m) override { + _root["status"] = "ok"; + } + + virtual void visit(const cql_transport::messages::result_message::prepared::thrift& m) override { + assert(false); + } + + virtual void visit(const cql_transport::messages::result_message::schema_change& m) override { + _root["status"] = "ok"; + } + + virtual void visit(const cql_transport::messages::result_message::bounce_to_shard& m) override { + assert(false); + } + + virtual void visit(const cql_transport::messages::result_message::rows& m) override { + Json::Value& output_rows = _root["rows"]; + const auto input_rows = m.rs().result_set().rows(); + const auto& meta = m.rs().result_set().get_metadata().get_names(); + for (auto&& in_row: input_rows) { + Json::Value out_row; + for (unsigned i = 0; i < meta.size(); ++i) { + const cql3::column_specification& col = *meta[i]; + const bytes_opt& cell = in_row[i]; + if (cell.has_value()) { + out_row[col.name->text()] = fmt::format("{}", 
to_json_string(*col.type, cell)); + } + } + output_rows.append(out_row); + } + } +}; + +// Prepare query_options with serial consistency +std::unique_ptr repl_options() { + const auto& so = cql3::query_options::specific_options::DEFAULT; + auto qo = std::make_unique( + db::consistency_level::ONE, + infinite_timeout_config, + std::vector{}, + // Ensure (optional) serial consistency is always specified. + cql3::query_options::specific_options{ + so.page_size, + so.state, + db::consistency_level::SERIAL, + so.timestamp, + } + ); + return qo; +} + +// Read-evaluate-print-loop for CQL +void repl(seastar::app_template& app) { + do_with_cql_env_thread([] (cql_test_env& e) { + + // Comments allowed by CQL - -- and // + const std::regex comment_re("^[[:space:]]*((--|//).*)?$"); + // A comment is not a delimiter even if ends with one + const std::regex delimiter_re("^(?![[:space:]]*(--|//)).*;[[:space:]]*$"); + + while (std::cin) { + std::string line; + std::ostringstream stmt; + if (!std::getline(std::cin, line)) { + break; + } + // Handle multiline input and comments + if (std::regex_match(line.begin(), line.end(), comment_re)) { + std_cout << line << std::endl; + continue; + } + stmt << line << std::endl; + while (!std::regex_match(line.begin(), line.end(), delimiter_re)) { + // Read the rest of input until delimiter or EOF + if (!std::getline(std::cin, line)) { + break; + } + stmt << line << std::endl; + } + // Print the statement + std_cout << stmt.str(); + Json::Value json; + try { + auto qo = repl_options(); + auto msg = e.execute_cql(stmt.str(), std::move(qo)).get0(); + json_visitor visitor(json); + msg->accept(visitor); + } catch (std::exception& e) { + json["status"] = "error"; + json["message"] = fmt::format("{}", e); + } + std_cout << json << std::endl; + } + }).get0(); +} + +// Reset stdin/stdout/log streams to locations pointed +// on the command line. 
+void apply_configuration(const boost::program_options::variables_map& cfg) { + + if (cfg.count("input")) { + static std::ifstream input(cfg["input"].as()); + std::cin.rdbuf(input.rdbuf()); + } + static std::ofstream log(cfg["log"].as()); + // Seastar always logs to std::cout, hack this around + // by redirecting std::cout to a file and capturing + // the old std::cout in std_cout + auto save_filebuf = std::cout.rdbuf(log.rdbuf()); + if (cfg.count("output")) { + std_cout.open(cfg["output"].as()); + } else { + std_cout.std::ios::rdbuf(save_filebuf); + } +} + +int main(int argc, char* argv[]) { + + namespace bpo = boost::program_options; + namespace fs = std::filesystem; + + seastar::app_template::config cfg; + cfg.name = fmt::format(R"({} - An embedded single-node version of Scylla. + +Runs read-evaluate-print loop, reading commands from stdin, +evaluating them and printing output, formatted as JSON, to stdout. +Creates a temporary database in /tmp and deletes it at exit. +Pre-configures a default keyspace, naturally, with replication +factor 1. + +Used in unit tests as a test driver for .test.cql files. + +Available )", argv[0]); + + seastar::app_template app(cfg); + + /* Define options for input, output and log file. */ + app.add_options() + ("input", bpo::value(), + "Input file with CQL, defaults to stdin") + ("output", bpo::value(), + "Output file for data, defaults to stdout") + ("log", bpo::value()->default_value( + fmt::format("{}.log", fs::path(argv[0]).stem().string())), + "Output file for Scylla log"); + + return app.run(argc, argv, [&app] { + + apply_configuration(app.configuration()); + + return seastar::async([&app] { + return repl(app); + }); + }); +} + From d3f9e64028836ae6d255e02fdf19d4989faf0418 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 22:44:10 +0300 Subject: [PATCH 34/39] test.py: add CqlTestSuite to run CQL tests Run the test and compare results. Manage temporary and .reject files. 
Now that there are CQL tests, improve logging. run_test success no longer means test success. --- test.py | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 83 insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index 9444d9774c..6bb35286dc 100755 --- a/test.py +++ b/test.py @@ -24,6 +24,7 @@ from abc import ABC, abstractmethod import argparse import asyncio import colorama +import filecmp import glob import io import itertools @@ -32,13 +33,13 @@ import multiprocessing import os import pathlib import shlex +import shutil import signal import subprocess import sys import xml.etree.ElementTree as ET import yaml - def create_formatter(*decorators): """Return a function which decorates its argument with the given color/style if stdout is a tty, and leaves intact otherwise.""" @@ -174,6 +175,19 @@ class BoostTestSuite(UnitTestSuite): return [] +class CqlTestSuite(TestSuite): + """TestSuite for CQL tests""" + + def add_test(self, shortname, mode, options): + """Create a CqlTest class and add it to the list""" + test = CqlTest(self.next_id, shortname, self, mode, options) + self.tests.append(test) + + @property + def pattern(self): + return "*_test.cql" + + class Test: """Base class for CQL, Unit and Boost tests""" def __init__(self, test_no, shortname, suite, mode, options): @@ -189,6 +203,10 @@ class Test: self.log_filename = os.path.join(options.tmpdir, self.mode, self.uname + ".log") self.success = None + @abstractmethod + async def run(self, options): + pass + @abstractmethod def print_summary(self): pass @@ -215,10 +233,70 @@ class UnitTest(Test): print(read_log(self.log_filename)) async def run(self, options): - await run_test(self, options) + self.success = await run_test(self, options) + logging.info("Test #%d %s", self.id, "succeeded" if self.success else "failed ") return self +class CqlTest(Test): + """Run the sequence of CQL commands stored in the file and check + output""" + + def __init__(self, test_no, shortname, 
suite, mode, options): + super().__init__(test_no, shortname, suite, mode, options) + # Path to cql_repl driver, in the given build mode + self.path = os.path.join("build", self.mode, "test/tools/cql_repl") + self.cql = os.path.join(suite.path, self.shortname + ".cql") + self.result = os.path.join(suite.path, self.shortname + ".result") + self.tmpfile = os.path.join(options.tmpdir, self.mode, self.uname + ".reject") + self.reject = os.path.join(suite.path, self.shortname + ".reject") + self.args = shlex.split("-c2 -m2G --input={} --output={} --log={}".format( + self.cql, self.tmpfile, self.log_filename)) + self.args += UnitTest.standard_args + self.is_executed_ok = False + self.is_new = False + self.is_equal_result = None + self.summary = "not run" + + async def run(self, options): + self.is_executed_ok = await run_test(self, options) + self.success = False + self.summary = "failed" + + def set_summary(summary): + self.summary = summary + logging.info("Test %d %s", self.id, summary) + + if not os.path.isfile(self.tmpfile): + set_summary("failed: no output file") + elif not os.path.isfile(self.result): + set_summary("failed: no result file") + self.is_new = True + else: + self.is_equal_result = filecmp.cmp(self.result, self.tmpfile) + if self.is_equal_result is False: + set_summary("failed: test output does not match expected result") + elif self.is_executed_ok: + self.success = True + set_summary("succeeded") + else: + set_summary("failed: correct output but non-zero return status.\nCheck test log.") + + if self.is_new or self.is_equal_result is False: + # Put a copy of the .reject file close to the .result file + # so that it's easy to analyze the diff or overwrite .result + # with .reject. Preserve the original .reject file: in + # multiple modes the copy .reject file may be overwritten. 
+ shutil.copyfile(self.tmpfile, self.reject) + elif os.path.exists(self.tmpfile): + pathlib.Path(self.tmpfile).unlink() + + return self + + def print_summary(self): + print("Test {} ({}) {}".format(self.name, self.mode, self.summary)) + + def print_start_blurb(): print("="*80) print("{:7s} {:50s} {:^8s} {:8s}".format("[N/TOTAL]", "TEST", "MODE", "RESULT")) @@ -252,6 +330,7 @@ def print_progress(test, cookie, verbose): async def run_test(test, options): + """Run test program, return True if success else False""" file = io.StringIO() def report_error(out): @@ -275,11 +354,10 @@ async def run_test(test, options): preexec_fn=os.setsid, ) stdout, _ = await asyncio.wait_for(process.communicate(), options.timeout) - test.success = process.returncode == 0 if process.returncode != 0: print(' with error code {code}\n'.format(code=process.returncode), file=file) report_error(stdout.decode(encoding='UTF-8')) - + return process.returncode == 0 except (asyncio.TimeoutError, asyncio.CancelledError) as e: if process is not None: process.kill() @@ -292,7 +370,7 @@ async def run_test(test, options): except Exception as e: print(' with error {e}\n'.format(e=e), file=file) report_error(e) - logging.info("Test #%d %s", test.id, "passed" if test.success else "failed") + return False def setup_signal_handlers(loop, signaled): From 4f64f0c652e02ce5de8980464cd27ed5a9f15d2d Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Tue, 17 Dec 2019 00:01:25 +0300 Subject: [PATCH 35/39] test.py: print a colored unidiff in case of test failure Print a colored unidiff between result and reject files in case of test failure. 
--- test.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index 6bb35286dc..b9060f8ac8 100755 --- a/test.py +++ b/test.py @@ -24,6 +24,7 @@ from abc import ABC, abstractmethod import argparse import asyncio import colorama +import difflib import filecmp import glob import io @@ -37,6 +38,7 @@ import shutil import signal import subprocess import sys +import time import xml.etree.ElementTree as ET import yaml @@ -294,7 +296,10 @@ class CqlTest(Test): return self def print_summary(self): - print("Test {} ({}) {}".format(self.name, self.mode, self.summary)) + print("Test {} ({}) {}".format(palette.path(self.name), self.mode, + self.summary)) + if self.is_equal_result is False: + print_unidiff(self.result, self.reject) def print_start_blurb(): @@ -531,10 +536,34 @@ def print_summary(failed_tests): palette.path(" ".join([t.name for t in failed_tests])))) for test in failed_tests: test.print_summary() + print("-"*78) print("Summary: {} of the total {} tests failed".format( len(failed_tests), TestSuite.test_count())) +def print_unidiff(fromfile, tofile): + with open(fromfile, "r") as frm, open(tofile, "r") as to: + diff = difflib.unified_diff( + frm.readlines(), + to.readlines(), + fromfile=fromfile, + tofile=tofile, + fromfiledate=time.ctime(os.stat(fromfile).st_mtime), + tofiledate=time.ctime(os.stat(tofile).st_mtime), + n=10) # Number of context lines + + for i, line in enumerate(diff): + if i > 60: + break + if line.startswith('+'): + line = palette.diff_in(line) + elif line.startswith('-'): + line = palette.diff_out(line) + elif line.startswith('@'): + line = palette.diff_mark(line) + sys.stdout.write(line) + + def write_junit_report(tmpdir, mode): junit_filename = os.path.join(tmpdir, mode, "xml", "junit.xml") total = 0 From 44d31db1fc27d21f9e5eea04898fb4166ae1106a Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 22:45:25 +0300 Subject: [PATCH 36/39] test.py: add CQL .reject 
files to gitignore To avoid accidental commit, add .reject files to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 6f7695a36e..08318faefe 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ resources /expressions.tokens tags testlog/* +test/*/*.reject From ba87e73f8ee275713e477bdef330f282e4d4e77e Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Wed, 11 Dec 2019 01:14:07 +0300 Subject: [PATCH 37/39] test.py: add a basic CQL test --- test/cql/lwt_test.cql | 5 +++++ test/cql/lwt_test.result | 28 ++++++++++++++++++++++++++++ test/cql/suite.yaml | 1 + 3 files changed, 34 insertions(+) create mode 100644 test/cql/lwt_test.cql create mode 100644 test/cql/lwt_test.result create mode 100644 test/cql/suite.yaml diff --git a/test/cql/lwt_test.cql b/test/cql/lwt_test.cql new file mode 100644 index 0000000000..39f7c8b71e --- /dev/null +++ b/test/cql/lwt_test.cql @@ -0,0 +1,5 @@ +create table t1 (a int primary key); +insert into t1 (a) values (1); +insert into t1 (a) values (2); +select * from t1 allow filtering; +drop table t1; diff --git a/test/cql/lwt_test.result b/test/cql/lwt_test.result new file mode 100644 index 0000000000..3e648bcb6b --- /dev/null +++ b/test/cql/lwt_test.result @@ -0,0 +1,28 @@ +create table t1 (a int primary key); +{ + "status" : "ok" +} +insert into t1 (a) values (1); +{ + "status" : "ok" +} +insert into t1 (a) values (2); +{ + "status" : "ok" +} +select * from t1 allow filtering; +{ + "rows" : + [ + { + "a" : "1" + }, + { + "a" : "2" + } + ] +} +drop table t1; +{ + "status" : "ok" +} diff --git a/test/cql/suite.yaml b/test/cql/suite.yaml new file mode 100644 index 0000000000..511b255ea4 --- /dev/null +++ b/test/cql/suite.yaml @@ -0,0 +1 @@ +type: CQL From a351ea57d552774943061f88f41cb42fcb3d1f17 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Mon, 30 Dec 2019 19:26:48 +0300 Subject: [PATCH 38/39] test.py: sort tests within a suite, and sort suites This makes it easier to 
navigate the test artefacts. No need to sort suites since they are already stored in a dict. --- test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test.py b/test.py index b9060f8ac8..c9af2b3248 100755 --- a/test.py +++ b/test.py @@ -137,6 +137,8 @@ class TestSuite(ABC): def add_test_list(self, mode, options): lst = glob.glob(os.path.join(self.path, self.pattern)) + if lst: + lst.sort() long_tests = set(self.cfg.get("long", [])) for t in lst: shortname = os.path.splitext(os.path.basename(t))[0] From a665fab30627ea29445099dbf634db2332c8f987 Mon Sep 17 00:00:00 2001 From: Konstantin Osipov Date: Tue, 14 Jan 2020 00:00:24 +0300 Subject: [PATCH 39/39] test.py: introduce BoostTest and virtualize custom boost arguments --- test.py | 36 +++++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/test.py b/test.py index c9af2b3248..6bfba62c8c 100755 --- a/test.py +++ b/test.py @@ -42,6 +42,7 @@ import time import xml.etree.ElementTree as ET import yaml + def create_formatter(*decorators): """Return a function which decorates its argument with the given color/style if stdout is a tty, and leaves intact otherwise.""" @@ -80,8 +81,6 @@ class TestSuite(ABC): self.name = os.path.basename(self.path) self.cfg = cfg self.tests = [] - # Map of custom test command line arguments, if configured - self.custom_args = cfg.get("custom_args", {}) @property def next_id(self): @@ -155,6 +154,14 @@ class TestSuite(ABC): class UnitTestSuite(TestSuite): """TestSuite instantiation for non-boost unit tests""" + def __init__(self, path, cfg): + super().__init__(path, cfg) + # Map of custom test command line arguments, if configured + self.custom_args = cfg.get("custom_args", {}) + + def create_test(self, *args, **kwargs): + return UnitTest(*args, **kwargs) + def add_test(self, shortname, mode, options): """Create a UnitTest class with possibly custom command line arguments and add it to the list of tests""" @@ -163,7 +170,7 @@ class 
UnitTestSuite(TestSuite): # are two cores and 2G of RAM args = self.custom_args.get(shortname, ["-c2 -m2G"]) for a in args: - test = UnitTest(self.next_id, shortname, a, self, mode, options) + test = self.create_test(self.next_id, shortname, a, self, mode, options) self.tests.append(test) @property @@ -174,6 +181,9 @@ class UnitTestSuite(TestSuite): class BoostTestSuite(UnitTestSuite): """TestSuite for boost unit tests""" + def create_test(self, *args, **kwargs): + return BoostTest(*args, **kwargs) + def junit_tests(self): """Boost tests produce an own XML output, so are not included in a junit report""" return [] @@ -224,14 +234,6 @@ class UnitTest(Test): self.path = os.path.join("build", self.mode, "test", self.name) self.args = shlex.split(args) + UnitTest.standard_args - if isinstance(suite, BoostTestSuite): - boost_args = [] - xmlout = os.path.join(options.tmpdir, self.mode, "xml", - self.uname + ".xunit.xml") - boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] - boost_args += ['--'] - self.args = boost_args + self.args - def print_summary(self): print("Output of {} {}:".format(self.path, " ".join(self.args))) print(read_log(self.log_filename)) @@ -242,6 +244,18 @@ class UnitTest(Test): return self +class BoostTest(UnitTest): + """A unit test which can produce its own XML output""" + + def __init__(self, test_no, shortname, args, suite, mode, options): + super().__init__(test_no, shortname, args, suite, mode, options) + boost_args = [] + xmlout = os.path.join(options.tmpdir, self.mode, "xml", self.uname + ".xunit.xml") + boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout] + boost_args += ['--'] + self.args = boost_args + self.args + + class CqlTest(Test): """Run the sequence of CQL commands stored in the file and check output"""