diff --git a/.gitignore b/.gitignore
index 85ce9643a5..08318faefe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,5 @@ resources
.pytest_cache
/expressions.tokens
tags
+testlog/*
+test/*/*.reject
diff --git a/configure.py b/configure.py
index 0b977af7b4..c5499c7ed4 100755
--- a/configure.py
+++ b/configure.py
@@ -383,6 +383,7 @@ scylla_tests = [
'test/perf/perf_row_cache_update',
'test/perf/perf_simple_query',
'test/perf/perf_sstable',
+ 'test/tools/cql_repl',
'test/unit/lsa_async_eviction_test',
'test/unit/lsa_sync_eviction_test',
'test/unit/memory_footprint_test',
diff --git a/test.py b/test.py
index 90b8ce66f7..6bfba62c8c 100755
--- a/test.py
+++ b/test.py
@@ -20,87 +20,327 @@
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#
-import asyncio
-import glob
-import os
-import sys
-import signal
+from abc import ABC, abstractmethod
import argparse
-import subprocess
+import asyncio
+import colorama
+import difflib
+import filecmp
+import glob
import io
+import itertools
+import logging
import multiprocessing
+import os
+import pathlib
+import shlex
+import shutil
+import signal
+import subprocess
+import sys
+import time
import xml.etree.ElementTree as ET
+import yaml
-# Apply custom options to these tests
-custom_test_args = {
- 'boost/mutation_reader_test': '-c{} -m2G'.format(min(os.cpu_count(), 3)),
- 'boost/sstable_test': '-c1 -m2G',
- 'boost/sstable_datafile_test': '-c1 -m2G',
- 'boost/sstable_3_x_test': '-c1 -m2G',
- 'unit/lsa_async_eviction_test': '-c1 -m200M --size 1024 --batch 3000 --count 2000000',
- 'unit/lsa_sync_eviction_test': [
- '-c1 -m100M --count 10 --standard-object-size 3000000',
- '-c1 -m100M --count 24000 --standard-object-size 2048',
- '-c1 -m1G --count 4000000 --standard-object-size 128'
- ],
- 'unit/row_cache_alloc_stress_test': '-c1 -m2G',
- 'unit/row_cache_stress_test': '-c1 -m1G --seconds 10',
-}
-# Only run in dev, release configurations, skip in others
-long_tests = set([
- 'unit/lsa_async_eviction_test',
- 'unit/lsa_sync_eviction_test',
- 'unit/row_cache_alloc_stress_test',
- 'unit/row_cache_stress_test'
-])
+def create_formatter(*decorators):
+ """Return a function which decorates its argument with the given
+ color/style if stdout is a tty, and leaves intact otherwise."""
+ def color(arg):
+ return "".join(decorators) + str(arg) + colorama.Style.RESET_ALL
-CONCOLORS = {'green': '\033[1;32m', 'red': '\033[1;31m', 'nocolor': '\033[0m'}
+ def nocolor(arg):
+ return str(arg)
+ return color if os.isatty(sys.stdout.fileno()) else nocolor
-def colorformat(msg, **kwargs):
- fmt = dict(CONCOLORS)
- fmt.update(kwargs)
- return msg.format(**fmt)
-def status_to_string(success):
- if success:
- status = colorformat("{green}PASSED{nocolor}") if os.isatty(sys.stdout.fileno()) else "PASSED"
- else:
- status = colorformat("{red}FAILED{nocolor}") if os.isatty(sys.stdout.fileno()) else "FAILED"
+class palette:
+ """Color palette for formatting terminal output"""
+ ok = create_formatter(colorama.Fore.GREEN, colorama.Style.BRIGHT)
+ fail = create_formatter(colorama.Fore.RED, colorama.Style.BRIGHT)
+ new = create_formatter(colorama.Fore.BLUE)
+ skip = create_formatter(colorama.Style.DIM)
+ path = create_formatter(colorama.Style.BRIGHT)
+ diff_in = create_formatter(colorama.Fore.GREEN)
+ diff_out = create_formatter(colorama.Fore.RED)
+ diff_mark = create_formatter(colorama.Fore.MAGENTA)
+ warn = create_formatter(colorama.Fore.YELLOW)
+ crit = create_formatter(colorama.Fore.RED, colorama.Style.BRIGHT)
- return status
-class UnitTest:
- standard_args = '--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0'.split()
- seastar_args = '-c2 -m2G'
+class TestSuite(ABC):
+ """A test suite is a folder with tests of the same type.
+ E.g. it can be unit tests, boost tests, or CQL tests."""
- def __init__(self, test_no, name, opts, kind, mode, options):
- if opts is None:
- opts = UnitTest.seastar_args
+ # All existing test suites, one suite per path.
+ suites = dict()
+ _next_id = 0
+
+ def __init__(self, path, cfg):
+ self.path = path
+ self.name = os.path.basename(self.path)
+ self.cfg = cfg
+ self.tests = []
+
+ @property
+ def next_id(self):
+ TestSuite._next_id += 1
+ return TestSuite._next_id
+
+ @staticmethod
+ def test_count():
+ return TestSuite._next_id
+
+ @staticmethod
+ def load_cfg(path):
+ with open(os.path.join(path, "suite.yaml"), "r") as cfg_file:
+ cfg = yaml.safe_load(cfg_file.read())
+ if not isinstance(cfg, dict):
+ raise RuntimeError("Failed to load tests in {}: suite.yaml is empty".format(path))
+ return cfg
+
+ @staticmethod
+ def opt_create(path):
+ """Return a subclass of TestSuite with name cfg["type"].title + TestSuite.
+ Ensures there is only one suite instance per path."""
+ suite = TestSuite.suites.get(path)
+ if not suite:
+ cfg = TestSuite.load_cfg(path)
+ kind = cfg.get("type")
+ if kind is None:
+ raise RuntimeError("Failed to load tests in {}: suite.yaml has no suite type".format(path))
+ SpecificTestSuite = globals().get(kind.title() + "TestSuite")
+ if not SpecificTestSuite:
+ raise RuntimeError("Failed to load tests in {}: suite type '{}' not found".format(path, kind))
+ suite = SpecificTestSuite(path, cfg)
+ TestSuite.suites[path] = suite
+ return suite
+
+ @staticmethod
+ def tests():
+ return itertools.chain(*[suite.tests for suite in
+ TestSuite.suites.values()])
+
+ @property
+ @abstractmethod
+ def pattern(self):
+ pass
+
+ @abstractmethod
+ def add_test(self, name, args, mode, options):
+ pass
+
+ def junit_tests(self):
+ """Tests which participate in a consolidated junit report"""
+ return self.tests
+
+ def add_test_list(self, mode, options):
+ lst = glob.glob(os.path.join(self.path, self.pattern))
+ if lst:
+ lst.sort()
+ long_tests = set(self.cfg.get("long", []))
+ for t in lst:
+ shortname = os.path.splitext(os.path.basename(t))[0]
+ if mode not in ["release", "dev"] and shortname in long_tests:
+ continue
+ t = os.path.join(self.name, shortname)
+ patterns = options.name if options.name else [t]
+ for p in patterns:
+ if p in t:
+ for i in range(options.repeat):
+ self.add_test(shortname, mode, options)
+
+
+class UnitTestSuite(TestSuite):
+ """TestSuite instantiation for non-boost unit tests"""
+
+ def __init__(self, path, cfg):
+ super().__init__(path, cfg)
+ # Map of custom test command line arguments, if configured
+ self.custom_args = cfg.get("custom_args", {})
+
+ def create_test(self, *args, **kwargs):
+ return UnitTest(*args, **kwargs)
+
+ def add_test(self, shortname, mode, options):
+ """Create a UnitTest class with possibly custom command line
+ arguments and add it to the list of tests"""
+
+ # Default seastar arguments, if not provided in custom test options,
+ # are two cores and 2G of RAM
+ args = self.custom_args.get(shortname, ["-c2 -m2G"])
+ for a in args:
+ test = self.create_test(self.next_id, shortname, a, self, mode, options)
+ self.tests.append(test)
+
+ @property
+ def pattern(self):
+ return "*_test.cc"
+
+
+class BoostTestSuite(UnitTestSuite):
+ """TestSuite for boost unit tests"""
+
+ def create_test(self, *args, **kwargs):
+ return BoostTest(*args, **kwargs)
+
+ def junit_tests(self):
+ """Boost tests produce an own XML output, so are not included in a junit report"""
+ return []
+
+
+class CqlTestSuite(TestSuite):
+ """TestSuite for CQL tests"""
+
+ def add_test(self, shortname, mode, options):
+ """Create a CqlTest class and add it to the list"""
+ test = CqlTest(self.next_id, shortname, self, mode, options)
+ self.tests.append(test)
+
+ @property
+ def pattern(self):
+ return "*_test.cql"
+
+
+class Test:
+ """Base class for CQL, Unit and Boost tests"""
+ def __init__(self, test_no, shortname, suite, mode, options):
self.id = test_no
- self.name = name
+ # Name with test suite name
+ self.name = os.path.join(suite.name, shortname)
+ # Name within the suite
+ self.shortname = shortname
self.mode = mode
- self.kind = kind
- self.path = os.path.join('build', self.mode, 'test', self.kind, self.name)
- self.args = opts.split() + UnitTest.standard_args
+ self.suite = suite
+ # Unique file name, which is also readable by human, as filename prefix
+ self.uname = "{}.{}".format(self.shortname, self.id)
+ self.log_filename = os.path.join(options.tmpdir, self.mode, self.uname + ".log")
+ self.success = None
- if self.kind == 'boost':
- boost_args = []
- if options.jenkins:
- mode = 'debug' if self.mode == 'debug' else 'release'
- xmlout = options.jenkins + "." + mode + "." + self.name + "." + str(self.id) + ".boost.xml"
- boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout]
- boost_args += ['--']
- self.args = boost_args + self.args
+ @abstractmethod
+ async def run(self, options):
+ pass
+
+ @abstractmethod
+ def print_summary(self):
+ pass
-def print_progress(test, success, cookie, verbose):
+class UnitTest(Test):
+ standard_args = shlex.split("--overprovisioned --unsafe-bypass-fsync 1 --blocked-reactor-notify-ms 2000000 --collectd 0")
+
+ def __init__(self, test_no, shortname, args, suite, mode, options):
+ super().__init__(test_no, shortname, suite, mode, options)
+ self.path = os.path.join("build", self.mode, "test", self.name)
+ self.args = shlex.split(args) + UnitTest.standard_args
+
+ def print_summary(self):
+ print("Output of {} {}:".format(self.path, " ".join(self.args)))
+ print(read_log(self.log_filename))
+
+ async def run(self, options):
+ self.success = await run_test(self, options)
+ logging.info("Test #%d %s", self.id, "succeeded" if self.success else "failed ")
+ return self
+
+
+class BoostTest(UnitTest):
+ """A unit test which can produce its own XML output"""
+
+ def __init__(self, test_no, shortname, args, suite, mode, options):
+ super().__init__(test_no, shortname, args, suite, mode, options)
+ boost_args = []
+ xmlout = os.path.join(options.tmpdir, self.mode, "xml", self.uname + ".xunit.xml")
+ boost_args += ['--report_level=no', '--logger=HRF,test_suite:XML,test_suite,' + xmlout]
+ boost_args += ['--']
+ self.args = boost_args + self.args
+
+
+class CqlTest(Test):
+ """Run the sequence of CQL commands stored in the file and check
+ output"""
+
+ def __init__(self, test_no, shortname, suite, mode, options):
+ super().__init__(test_no, shortname, suite, mode, options)
+ # Path to cql_repl driver, in the given build mode
+ self.path = os.path.join("build", self.mode, "test/tools/cql_repl")
+ self.cql = os.path.join(suite.path, self.shortname + ".cql")
+ self.result = os.path.join(suite.path, self.shortname + ".result")
+ self.tmpfile = os.path.join(options.tmpdir, self.mode, self.uname + ".reject")
+ self.reject = os.path.join(suite.path, self.shortname + ".reject")
+ self.args = shlex.split("-c2 -m2G --input={} --output={} --log={}".format(
+ self.cql, self.tmpfile, self.log_filename))
+ self.args += UnitTest.standard_args
+ self.is_executed_ok = False
+ self.is_new = False
+ self.is_equal_result = None
+ self.summary = "not run"
+
+ async def run(self, options):
+ self.is_executed_ok = await run_test(self, options)
+ self.success = False
+ self.summary = "failed"
+
+ def set_summary(summary):
+ self.summary = summary
+ logging.info("Test %d %s", self.id, summary)
+
+ if not os.path.isfile(self.tmpfile):
+ set_summary("failed: no output file")
+ elif not os.path.isfile(self.result):
+ set_summary("failed: no result file")
+ self.is_new = True
+ else:
+ self.is_equal_result = filecmp.cmp(self.result, self.tmpfile)
+ if self.is_equal_result is False:
+ set_summary("failed: test output does not match expected result")
+ elif self.is_executed_ok:
+ self.success = True
+ set_summary("succeeded")
+ else:
+ set_summary("failed: correct output but non-zero return status.\nCheck test log.")
+
+ if self.is_new or self.is_equal_result is False:
+ # Put a copy of the .reject file close to the .result file
+ # so that it's easy to analyze the diff or overwrite .result
+ # with .reject. Preserve the original .reject file: in
+ # multiple modes the copy .reject file may be overwritten.
+ shutil.copyfile(self.tmpfile, self.reject)
+ elif os.path.exists(self.tmpfile):
+ pathlib.Path(self.tmpfile).unlink()
+
+ return self
+
+ def print_summary(self):
+ print("Test {} ({}) {}".format(palette.path(self.name), self.mode,
+ self.summary))
+ if self.is_equal_result is False:
+ print_unidiff(self.result, self.reject)
+
+
+def print_start_blurb():
+ print("="*80)
+ print("{:7s} {:50s} {:^8s} {:8s}".format("[N/TOTAL]", "TEST", "MODE", "RESULT"))
+ print("-"*78)
+
+
+def print_end_blurb(verbose):
+ if not verbose:
+ sys.stdout.write('\n')
+ print("-"*78)
+
+
+def print_progress(test, cookie, verbose):
if isinstance(cookie, int):
cookie = (0, 1, cookie)
last_len, n, n_total = cookie
- msg = "[{}/{}] {} {} {}".format(n, n_total, status_to_string(success), test.path, ' '.join(test.args))
- if verbose is False and sys.stdout.isatty():
+ msg = "{:9s} {:50s} {:^8s} {:8s}".format(
+ "[{}/{}]".format(n, n_total),
+ test.name, test.mode[:8],
+ palette.ok("[ PASS ]") if test.success else palette.fail("[ FAIL ]")
+ )
+ if verbose is False:
print('\r' + ' ' * last_len, end='')
last_len = len(msg)
print('\r' + msg, end='')
@@ -111,33 +351,34 @@ def print_progress(test, success, cookie, verbose):
async def run_test(test, options):
+ """Run test program, return True if success else False"""
file = io.StringIO()
def report_error(out):
print('=== stdout START ===', file=file)
print(out, file=file)
print('=== stdout END ===', file=file)
- success = False
process = None
stdout = None
+ logging.info("Starting test #%d: %s %s", test.id, test.path, " ".join(test.args))
try:
- process = await asyncio.create_subprocess_exec(
- test.path,
- *test.args,
- stderr=asyncio.subprocess.STDOUT,
- stdout=asyncio.subprocess.PIPE,
- env=dict(os.environ,
- UBSAN_OPTIONS='halt_on_error=1:abort_on_error=1',
- ASAN_OPTIONS='disable_coredump=0:abort_on_error=1',
- BOOST_TEST_CATCH_SYSTEM_ERRORS='no'),
+ with open(test.log_filename, "wb") as log:
+ process = await asyncio.create_subprocess_exec(
+ test.path,
+ *test.args,
+ stderr=log,
+ stdout=log,
+ env=dict(os.environ,
+ UBSAN_OPTIONS='halt_on_error=1:abort_on_error=1',
+ ASAN_OPTIONS='disable_coredump=0:abort_on_error=1',
+ BOOST_TEST_CATCH_SYSTEM_ERRORS="no"),
preexec_fn=os.setsid,
)
stdout, _ = await asyncio.wait_for(process.communicate(), options.timeout)
- success = process.returncode == 0
if process.returncode != 0:
print(' with error code {code}\n'.format(code=process.returncode), file=file)
report_error(stdout.decode(encoding='UTF-8'))
-
+ return process.returncode == 0
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
if process is not None:
process.kill()
@@ -150,7 +391,8 @@ async def run_test(test, options):
except Exception as e:
print(' with error {e}\n'.format(e=e), file=file)
report_error(e)
- return (test, success, file.getvalue())
+ return False
+
def setup_signal_handlers(loop, signaled):
@@ -178,69 +420,80 @@ def parse_cmd_line():
default_num_jobs = min(default_num_jobs_mem, default_num_jobs_cpu)
parser = argparse.ArgumentParser(description="Scylla test runner")
- parser.add_argument('--name', action="store",
- help="Run only test whose name contains given string")
+ parser.add_argument(
+ "name",
+ nargs="*",
+ action="store",
+ help="""Can be empty. List of test names, to look for in
+ suites. Each name is used as a substring to look for in the
+ path to test file, e.g. "mem" will run all tests that have
+ "mem" in their name in all suites, "boost/mem" will only enable
+ tests starting with "mem" in "boost" suite. Default: run all
+ tests in all suites.""",
+ )
+ parser.add_argument(
+ "--tmpdir",
+ action="store",
+ default="testlog",
+ help="""Path to temporary test data and log files. The data is
+ further segregated per build mode. Default: ./testlog.""",
+ )
parser.add_argument('--mode', choices=all_modes, action="append", dest="modes",
help="Run only tests for given build mode(s)")
parser.add_argument('--repeat', action="store", default="1", type=int,
help="number of times to repeat test execution")
parser.add_argument('--timeout', action="store", default="3000", type=int,
help="timeout value for test execution")
- parser.add_argument('--jenkins', action="store",
- help="jenkins output file prefix")
parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Verbose reporting')
parser.add_argument('--jobs', '-j', action="store", default=default_num_jobs, type=int,
help="Number of jobs to use for running the tests")
- parser.add_argument('--xunit', action="store",
- help="Name of a file to write results of non-boost tests to in xunit format")
args = parser.parse_args()
+ if not sys.stdout.isatty():
+ args.verbose = True
+
if not args.modes:
out = subprocess.Popen(['ninja', 'mode_list'], stdout=subprocess.PIPE).communicate()[0].decode()
# [1/1] List configured modes
# debug release dev
args.modes = out.split('\n')[1].split(' ')
+ def prepare_dir(dirname, pattern):
+ # Ensure the dir exists
+ pathlib.Path(dirname).mkdir(parents=True, exist_ok=True)
+ # Remove old artefacts
+ for p in glob.glob(os.path.join(dirname, pattern), recursive=True):
+ pathlib.Path(p).unlink()
+
+ args.tmpdir = os.path.abspath(args.tmpdir)
+ prepare_dir(args.tmpdir, "*.log")
+
+ for mode in args.modes:
+ prepare_dir(os.path.join(args.tmpdir, mode), "*.{log,reject}")
+ prepare_dir(os.path.join(args.tmpdir, mode, "xml"), "*.xml")
+
return args
def find_tests(options):
- tests_to_run = []
+ for f in glob.glob(os.path.join("test", "*")):
+ if os.path.isdir(f) and os.path.isfile(os.path.join(f, "suite.yaml")):
+ for mode in options.modes:
+ suite = TestSuite.opt_create(f)
+ suite.add_test_list(mode, options)
- def add_test_list(kind, mode):
- lst = glob.glob(os.path.join("test", kind, "*_test.cc"))
- for t in lst:
- t = os.path.splitext(os.path.basename(t))[0]
- if mode not in ['release', 'dev'] and os.path.join(kind, t) in long_tests:
- continue
- args = custom_test_args.get(os.path.join(kind, t))
- if isinstance(args, (str, type(None))):
- args = [ args ]
- for a in args:
- tests_to_run.append((t, a, kind, mode))
+ if not TestSuite.test_count():
+ print("Test {} not found".format(palette.path(options.name[0])))
+ sys.exit(1)
- for mode in options.modes:
- add_test_list('unit', mode)
- add_test_list('boost', mode)
-
- if options.name:
- tests_to_run = [t for t in tests_to_run if options.name in t[0]]
- if not tests_to_run:
- print("Test {} not found".format(options.name))
- sys.exit(1)
-
- tests_to_run = [t for t in tests_to_run for _ in range(options.repeat)]
- tests_to_run = [UnitTest(test_no, *t, options) for test_no, t in enumerate(tests_to_run)]
-
- return tests_to_run
+ logging.info("Found %d tests, repeat count is %d, starting %d concurrent jobs",
+ TestSuite.test_count(), options.repeat, options.jobs)
-async def run_all_tests(tests_to_run, signaled, options):
- failed_tests = []
- results = []
- cookie = len(tests_to_run)
+async def run_all_tests(signaled, options):
+ cookie = TestSuite.test_count()
signaled_task = asyncio.create_task(signaled.wait())
pending = set([signaled_task])
@@ -258,20 +511,17 @@ async def run_all_tests(tests_to_run, signaled, options):
for coro in done:
result = coro.result()
if isinstance(result, bool):
- continue # skip signaled task result
- results.append(result)
- test, success, out = result
- cookie = print_progress(test, success, cookie, options.verbose)
- if not success:
- failed_tests.append((test, out))
+ continue # skip signaled task result
+ cookie = print_progress(result, cookie, options.verbose)
+ print_start_blurb()
try:
- for test in tests_to_run:
+ for test in TestSuite.tests():
# +1 for 'signaled' event
if len(pending) > options.jobs:
# Wait for some task to finish
done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
await reap(done, pending, signaled)
- pending.add(asyncio.create_task(run_test(test, options)))
+ pending.add(asyncio.create_task(test.run(options)))
# Wait & reap ALL tasks but signaled_task
# Do not use asyncio.ALL_COMPLETED to print a nice progress report
while len(pending) > 1:
@@ -279,60 +529,123 @@ async def run_all_tests(tests_to_run, signaled, options):
await reap(done, pending, signaled)
except asyncio.CancelledError:
- return None, None
+ return
- return failed_tests, results
+ print_end_blurb(options.verbose)
-def print_summary(failed_tests, total_tests):
- if not failed_tests:
- print('\nOK.')
- else:
- print('\n\nOutput of the failed tests:')
- for test, out in failed_tests:
- print("Test {} {} failed:\n{}".format(test.path, ' '.join(test.args), out))
- print('\n\nThe following test(s) have failed:')
- for test, _ in failed_tests:
- print(' {} {}'.format(test.path, ' '.join(test.args)))
- print('\nSummary: {} of the total {} tests failed'.format(len(failed_tests), total_tests))
+def read_log(log_filename):
+ """Intelligently read test log output"""
+ try:
+ with open(log_filename, "r") as log:
+ msg = log.read()
+ return msg if len(msg) else "===Empty log output==="
+ except FileNotFoundError:
+ return "===Log {} not found===".format(log_filename)
+ except OSError as e:
+ return "===Error reading log {}===".format(e)
-def write_xunit_report(options, results):
- unit_results = [r for r in results if r[0].kind != 'boost']
- num_unit_failed = sum(1 for r in unit_results if not r[1])
- xml_results = ET.Element('testsuite', name='non-boost tests',
- tests=str(len(unit_results)), failures=str(num_unit_failed), errors='0')
+def print_summary(failed_tests):
+ if failed_tests:
+ print("The following test(s) have failed: {}".format(
+ palette.path(" ".join([t.name for t in failed_tests]))))
+ for test in failed_tests:
+ test.print_summary()
+ print("-"*78)
+ print("Summary: {} of the total {} tests failed".format(
+ len(failed_tests), TestSuite.test_count()))
- for test, success, out in unit_results:
- xml_res = ET.SubElement(xml_results, 'testcase', name=test.path)
- if not success:
+
+def print_unidiff(fromfile, tofile):
+ with open(fromfile, "r") as frm, open(tofile, "r") as to:
+ diff = difflib.unified_diff(
+ frm.readlines(),
+ to.readlines(),
+ fromfile=fromfile,
+ tofile=tofile,
+ fromfiledate=time.ctime(os.stat(fromfile).st_mtime),
+ tofiledate=time.ctime(os.stat(tofile).st_mtime),
+ n=10) # Number of context lines
+
+ for i, line in enumerate(diff):
+ if i > 60:
+ break
+ if line.startswith('+'):
+ line = palette.diff_in(line)
+ elif line.startswith('-'):
+ line = palette.diff_out(line)
+ elif line.startswith('@'):
+ line = palette.diff_mark(line)
+ sys.stdout.write(line)
+
+
+def write_junit_report(tmpdir, mode):
+ junit_filename = os.path.join(tmpdir, mode, "xml", "junit.xml")
+ total = 0
+ failed = 0
+ xml_results = ET.Element("testsuite", name="non-boost tests", errors="0")
+ for suite in TestSuite.suites.values():
+ for test in suite.junit_tests():
+ if test.mode != mode:
+ continue
+ total += 1
+ xml_res = ET.SubElement(xml_results, 'testcase',
+ name="{}.{}.{}".format(test.shortname, mode, test.id))
+ if test.success is True:
+ continue
+ failed += 1
xml_fail = ET.SubElement(xml_res, 'failure')
- xml_fail.text = "Test {} {} failed:\n{}".format(test.path, ' '.join(test.args), out)
- with open(options.xunit, "w") as f:
+ xml_fail.text = "Test {} {} failed:\n".format(test.path, " ".join(test.args))
+ xml_fail.text += read_log(test.log_filename)
+ if total == 0:
+ return
+ xml_results.set("tests", str(total))
+ xml_results.set("failures", str(failed))
+ with open(junit_filename, "w") as f:
ET.ElementTree(xml_results).write(f, encoding="unicode")
+
+def open_log(tmpdir):
+ pathlib.Path(tmpdir).mkdir(parents=True, exist_ok=True)
+ logging.basicConfig(
+ filename=os.path.join(tmpdir, "test.py.log"),
+ filemode="w",
+ level=logging.INFO,
+ format="%(asctime)s.%(msecs)03d %(levelname)s> %(message)s",
+ datefmt="%H:%M:%S",
+ )
+ logging.critical("Started %s", " ".join(sys.argv))
+
+
async def main():
options = parse_cmd_line()
- tests_to_run = find_tests(options)
+ open_log(options.tmpdir)
+
+ find_tests(options)
signaled = asyncio.Event()
setup_signal_handlers(asyncio.get_event_loop(), signaled)
- failed_tests, results = await run_all_tests(tests_to_run, signaled, options)
+ await run_all_tests(signaled, options)
if signaled.is_set():
return -signaled.signo
- print_summary(failed_tests, len(tests_to_run))
+ failed_tests = [t for t in TestSuite.tests() if t.success is not True]
- if options.xunit:
- write_xunit_report(options, results)
+ print_summary(failed_tests)
+
+ for mode in options.modes:
+ write_junit_report(options.tmpdir, mode)
return 0 if not failed_tests else -1
if __name__ == "__main__":
+ colorama.init()
+
if sys.version_info < (3, 7):
print("Python 3.7 or newer is required to run this program")
sys.exit(-1)
diff --git a/test/boost/suite.yaml b/test/boost/suite.yaml
new file mode 100644
index 0000000000..94bf4822c4
--- /dev/null
+++ b/test/boost/suite.yaml
@@ -0,0 +1,11 @@
+type: boost
+# Custom command line arguments for some of the tests
+custom_args:
+ mutation_reader_test:
+ - '-c3 -m2G'
+ sstable_test:
+ - '-c1 -m2G'
+ sstable_datafile_test:
+ - '-c1 -m2G'
+ sstable_3_x_test:
+ - '-c1 -m2G'
diff --git a/test/cql/lwt_test.cql b/test/cql/lwt_test.cql
new file mode 100644
index 0000000000..39f7c8b71e
--- /dev/null
+++ b/test/cql/lwt_test.cql
@@ -0,0 +1,5 @@
+create table t1 (a int primary key);
+insert into t1 (a) values (1);
+insert into t1 (a) values (2);
+select * from t1 allow filtering;
+drop table t1;
diff --git a/test/cql/lwt_test.result b/test/cql/lwt_test.result
new file mode 100644
index 0000000000..3e648bcb6b
--- /dev/null
+++ b/test/cql/lwt_test.result
@@ -0,0 +1,28 @@
+create table t1 (a int primary key);
+{
+ "status" : "ok"
+}
+insert into t1 (a) values (1);
+{
+ "status" : "ok"
+}
+insert into t1 (a) values (2);
+{
+ "status" : "ok"
+}
+select * from t1 allow filtering;
+{
+ "rows" :
+ [
+ {
+ "a" : "1"
+ },
+ {
+ "a" : "2"
+ }
+ ]
+}
+drop table t1;
+{
+ "status" : "ok"
+}
diff --git a/test/cql/suite.yaml b/test/cql/suite.yaml
new file mode 100644
index 0000000000..511b255ea4
--- /dev/null
+++ b/test/cql/suite.yaml
@@ -0,0 +1 @@
+type: CQL
diff --git a/test/tools/cql_repl.cc b/test/tools/cql_repl.cc
new file mode 100644
index 0000000000..18c43451b0
--- /dev/null
+++ b/test/tools/cql_repl.cc
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2019 ScyllaDB
+ */
+
+/*
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <fstream>
+#include <regex>
+
+#include "test/lib/cql_test_env.hh"
+#include "test/lib/cql_assertions.hh"
+
+#include <seastar/core/app-template.hh>
+#include <seastar/core/thread.hh>
+#include <json/json.h>
+#include <boost/program_options.hpp>
+#include "transport/messages/result_message.hh"
+#include "types/user.hh"
+#include "types/map.hh"
+#include "types/list.hh"
+#include "types/set.hh"
+#include "db/config.hh"
+#include "cql3/cql_config.hh"
+#include "cql3/type_json.hh"
+#include "test/lib/exception_utils.hh"
+
+static std::ofstream std_cout;
+
+//
+// A helper class to serialize result set output to a formatted JSON
+//
+class json_visitor final : public cql_transport::messages::result_message::visitor {
+ Json::Value& _root;
+public:
+ json_visitor(Json::Value& root)
+ : _root(root)
+ {
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::void_message&) override {
+ _root["status"] = "ok";
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::set_keyspace& m) override {
+ _root["status"] = "ok";
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::prepared::cql& m) override {
+ _root["status"] = "ok";
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::prepared::thrift& m) override {
+ assert(false);
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::schema_change& m) override {
+ _root["status"] = "ok";
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::bounce_to_shard& m) override {
+ assert(false);
+ }
+
+ virtual void visit(const cql_transport::messages::result_message::rows& m) override {
+ Json::Value& output_rows = _root["rows"];
+ const auto input_rows = m.rs().result_set().rows();
+ const auto& meta = m.rs().result_set().get_metadata().get_names();
+ for (auto&& in_row: input_rows) {
+ Json::Value out_row;
+ for (unsigned i = 0; i < meta.size(); ++i) {
+ const cql3::column_specification& col = *meta[i];
+ const bytes_opt& cell = in_row[i];
+ if (cell.has_value()) {
+ out_row[col.name->text()] = fmt::format("{}", to_json_string(*col.type, cell));
+ }
+ }
+ output_rows.append(out_row);
+ }
+ }
+};
+
+// Prepare query_options with serial consistency
+std::unique_ptr<cql3::query_options> repl_options() {
+ const auto& so = cql3::query_options::specific_options::DEFAULT;
+    auto qo = std::make_unique<cql3::query_options>(
+ db::consistency_level::ONE,
+ infinite_timeout_config,
+        std::vector<cql3::raw_value>{},
+ // Ensure (optional) serial consistency is always specified.
+ cql3::query_options::specific_options{
+ so.page_size,
+ so.state,
+ db::consistency_level::SERIAL,
+ so.timestamp,
+ }
+ );
+ return qo;
+}
+
+// Read-evaluate-print-loop for CQL
+void repl(seastar::app_template& app) {
+ do_with_cql_env_thread([] (cql_test_env& e) {
+
+ // Comments allowed by CQL - -- and //
+ const std::regex comment_re("^[[:space:]]*((--|//).*)?$");
+ // A comment is not a delimiter even if ends with one
+ const std::regex delimiter_re("^(?![[:space:]]*(--|//)).*;[[:space:]]*$");
+
+ while (std::cin) {
+ std::string line;
+ std::ostringstream stmt;
+ if (!std::getline(std::cin, line)) {
+ break;
+ }
+ // Handle multiline input and comments
+ if (std::regex_match(line.begin(), line.end(), comment_re)) {
+ std_cout << line << std::endl;
+ continue;
+ }
+ stmt << line << std::endl;
+ while (!std::regex_match(line.begin(), line.end(), delimiter_re)) {
+ // Read the rest of input until delimiter or EOF
+ if (!std::getline(std::cin, line)) {
+ break;
+ }
+ stmt << line << std::endl;
+ }
+ // Print the statement
+ std_cout << stmt.str();
+ Json::Value json;
+ try {
+ auto qo = repl_options();
+ auto msg = e.execute_cql(stmt.str(), std::move(qo)).get0();
+ json_visitor visitor(json);
+ msg->accept(visitor);
+ } catch (std::exception& e) {
+ json["status"] = "error";
+ json["message"] = fmt::format("{}", e);
+ }
+ std_cout << json << std::endl;
+ }
+ }).get0();
+}
+
+// Reset stdin/stdout/log streams to locations pointed
+// on the command line.
+void apply_configuration(const boost::program_options::variables_map& cfg) {
+
+ if (cfg.count("input")) {
+        static std::ifstream input(cfg["input"].as<std::string>());
+ std::cin.rdbuf(input.rdbuf());
+ }
+    static std::ofstream log(cfg["log"].as<std::string>());
+ // Seastar always logs to std::cout, hack this around
+ // by redirecting std::cout to a file and capturing
+ // the old std::cout in std_cout
+ auto save_filebuf = std::cout.rdbuf(log.rdbuf());
+ if (cfg.count("output")) {
+        std_cout.open(cfg["output"].as<std::string>());
+ } else {
+ std_cout.std::ios::rdbuf(save_filebuf);
+ }
+}
+
+int main(int argc, char* argv[]) {
+
+ namespace bpo = boost::program_options;
+ namespace fs = std::filesystem;
+
+ seastar::app_template::config cfg;
+ cfg.name = fmt::format(R"({} - An embedded single-node version of Scylla.
+
+Runs read-evaluate-print loop, reading commands from stdin,
+evaluating them and printing output, formatted as JSON, to stdout.
+Creates a temporary database in /tmp and deletes it at exit.
+Pre-configures a default keyspace, naturally, with replication
+factor 1.
+
+Used in unit tests as a test driver for .test.cql files.
+
+Available )", argv[0]);
+
+ seastar::app_template app(cfg);
+
+ /* Define options for input, output and log file. */
+ app.add_options()
+        ("input", bpo::value<std::string>(),
+ "Input file with CQL, defaults to stdin")
+        ("output", bpo::value<std::string>(),
+ "Output file for data, defaults to stdout")
+        ("log", bpo::value<std::string>()->default_value(
+ fmt::format("{}.log", fs::path(argv[0]).stem().string())),
+ "Output file for Scylla log");
+
+ return app.run(argc, argv, [&app] {
+
+ apply_configuration(app.configuration());
+
+ return seastar::async([&app] {
+ return repl(app);
+ });
+ });
+}
+
diff --git a/test/unit/suite.yaml b/test/unit/suite.yaml
new file mode 100644
index 0000000000..885f15c51b
--- /dev/null
+++ b/test/unit/suite.yaml
@@ -0,0 +1,20 @@
+# Suite test type. Supported types: unit, boost, cql
+type: unit
+# A list of long tests, these are only run in dev and release modes
+long:
+ - lsa_async_eviction_test
+ - lsa_sync_eviction_test
+ - row_cache_alloc_stress_test
+ - row_cache_stress_test
+# Custom command line arguments for some of the tests
+custom_args:
+ lsa_async_eviction_test:
+ - '-c1 -m200M --size 1024 --batch 3000 --count 2000000'
+ lsa_sync_eviction_test:
+ - '-c1 -m100M --count 10 --standard-object-size 3000000'
+ - '-c1 -m100M --count 24000 --standard-object-size 2048'
+ - '-c1 -m1G --count 4000000 --standard-object-size 128'
+ row_cache_alloc_stress_test:
+ - '-c1 -m2G'
+ row_cache_stress_test:
+ - '-c1 -m1G --seconds 10'