
Misc test-runner improvements

- Added --disk/--trace/--output options for information-heavy debugging.

- Renamed --skip/--count/--every to --start/--stop/--step.

  This matches the common terminology for ranges, and frees up --skip
  to be used for skipping test cases in the future (see the sketch
  after this list).

- Better handling of SIGTERM: all tests are now killed, reported as
  failures, and testing is halted regardless of -k.

  This is a compromise: the rest of the tests get thrown away, which
  is exactly what -k normally avoids, but it prevents
  annoying-to-terminate processes when debugging, which is a very
  interactive process. (A standalone sketch of this shutdown flow
  follows the diffs below.)
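
For reference, --start/--stop/--step describe a half-open range with
the same semantics as Python's range(). A minimal sketch (the runs()
helper is hypothetical, but its predicate mirrors test_step_skip in
the diff below):

    # a permutation index i runs when it falls in the half-open range
    # [start, stop) and lands on a step boundary, i.e. the inverse of
    # test_step_skip() below
    def runs(i, start=0, stop=float('inf'), step=1):
        return (i >= start
                and i < stop
                and (i - start) % step == 0)

    # matches Python's built-in range()
    assert [i for i in range(10) if runs(i, 2, 8, 3)] == list(range(2, 8, 3))
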
Christopher Haster, 3 years ago
commit 5ee4b052ae
2 changed files with 72 insertions and 54 deletions
  1. runners/test_runner.c (+28, -28)
  2. scripts/test_.py (+44, -26)

runners/test_runner.c (+28, -28)

@@ -161,11 +161,11 @@ static const char *test_case = NULL;
 static size_t test_perm = -1;
 static const char *test_geometry = NULL;
 static test_types_t test_types = 0;
-static size_t test_skip = 0;
-static size_t test_count = -1;
-static size_t test_every = 1;
+static size_t test_start = 0;
+static size_t test_stop = -1;
+static size_t test_step = 1;
 
-static const char *test_persist = NULL;
+static const char *test_disk = NULL;
 FILE *test_trace = NULL;
 
 // note, these skips are different than filtered tests
@@ -188,9 +188,9 @@ static bool test_perm_skip(size_t perm) {
 }
 
 static bool test_step_skip(size_t step) {
-    return !(step >= test_skip
-            && (step-test_skip) < test_count
-            && (step-test_skip) % test_every == 0);
+    return !(step >= test_start
+            && step < test_stop
+            && (step-test_start) % test_step == 0);
 }
 
 static void test_case_permcount(
@@ -528,7 +528,7 @@ static void run(void) {
                     .power_cycles       = 0,
                 };
 
-                int err = lfs_testbd_createcfg(&cfg, test_persist, &bdcfg);
+                int err = lfs_testbd_createcfg(&cfg, test_disk, &bdcfg);
                 if (err) {
                     fprintf(stderr, "error: "
                             "could not create block device: %d\n", err);
@@ -572,10 +572,10 @@ enum opt_flags {
     OPT_NORMAL          = 'n',
     OPT_REENTRANT       = 'r',
     OPT_VALGRIND        = 'V',
-    OPT_SKIP            = 5,
-    OPT_COUNT           = 6,
-    OPT_EVERY           = 7,
-    OPT_PERSIST         = 'p',
+    OPT_START           = 5,
+    OPT_STEP            = 6,
+    OPT_STOP            = 7,
+    OPT_DISK            = 'd',
     OPT_TRACE           = 't',
 };
 
@@ -595,10 +595,10 @@ const struct option long_opts[] = {
     {"normal",          no_argument,       NULL, OPT_NORMAL},
     {"reentrant",       no_argument,       NULL, OPT_REENTRANT},
     {"valgrind",        no_argument,       NULL, OPT_VALGRIND},
-    {"skip",            required_argument, NULL, OPT_SKIP},
-    {"count",           required_argument, NULL, OPT_COUNT},
-    {"every",           required_argument, NULL, OPT_EVERY},
-    {"persist",         required_argument, NULL, OPT_PERSIST},
+    {"start",           required_argument, NULL, OPT_START},
+    {"stop",            required_argument, NULL, OPT_STOP},
+    {"step",            required_argument, NULL, OPT_STEP},
+    {"disk",            required_argument, NULL, OPT_DISK},
     {"trace",           required_argument, NULL, OPT_TRACE},
     {NULL, 0, NULL, 0},
 };
@@ -617,10 +617,10 @@ const char *const help_text[] = {
     "Filter for normal tests. Can be combined.",
     "Filter for reentrant tests. Can be combined.",
     "Filter for Valgrind tests. Can be combined.",
-    "Skip the first n tests.",
-    "Stop after n tests.",
-    "Only run every n tests, calculated after --skip and --stop.",
-    "Persist the disk to this file.",
+    "Start at the nth test.",
+    "Stop before the nth test.",
+    "Only run every n tests, calculated after --start and --stop.",
+    "Use this file as the disk.",
     "Redirect trace output to this file.",
 };
 
@@ -766,35 +766,35 @@ invalid_define:
             case OPT_VALGRIND:
                 test_types |= TEST_VALGRIND;
                 break;
-            case OPT_SKIP: {
+            case OPT_START: {
                 char *parsed = NULL;
-                test_skip = strtoumax(optarg, &parsed, 0);
+                test_start = strtoumax(optarg, &parsed, 0);
                 if (parsed == optarg) {
                     fprintf(stderr, "error: invalid skip: %s\n", optarg);
                     exit(-1);
                 }
                 break;
             }
-            case OPT_COUNT: {
+            case OPT_STOP: {
                 char *parsed = NULL;
-                test_count = strtoumax(optarg, &parsed, 0);
+                test_stop = strtoumax(optarg, &parsed, 0);
                 if (parsed == optarg) {
                     fprintf(stderr, "error: invalid count: %s\n", optarg);
                     exit(-1);
                 }
                 break;
             }
-            case OPT_EVERY: {
+            case OPT_STEP: {
                 char *parsed = NULL;
-                test_every = strtoumax(optarg, &parsed, 0);
+                test_step = strtoumax(optarg, &parsed, 0);
                 if (parsed == optarg) {
                     fprintf(stderr, "error: invalid every: %s\n", optarg);
                     exit(-1);
                 }
                 break;
             }
-            case OPT_PERSIST:
-                test_persist = optarg;
+            case OPT_DISK:
+                test_disk = optarg;
                 break;
             case OPT_TRACE:
                 if (strcmp(optarg, "-") == 0) {

scripts/test_.py (+44, -26)

@@ -570,17 +570,17 @@ class TestFailure(Exception):
         self.output = output
         self.assert_ = assert_
 
-def run_step(name, runner_, **args):
+def run_stage(name, runner_, **args):
     # get expected suite/case/perm counts
     expected_suite_perms, expected_case_perms, expected_perms, total_perms = (
         find_cases(runner_, **args))
 
-    # TODO persist/trace
     # TODO valgrind/gdb/exec
     passed_suite_perms = co.defaultdict(lambda: 0)
     passed_case_perms = co.defaultdict(lambda: 0)
     passed_perms = 0
     failures = []
+    killed = False
 
     pattern = re.compile('^(?:'
             '(?P<op>running|finished|skipped) '
@@ -598,14 +598,20 @@ def run_step(name, runner_, **args):
         nonlocal locals
 
         # run the tests!
-        cmd = runner_
+        cmd = runner_.copy()
+        if args.get('disk'):
+            cmd.append('--disk=%s' % args['disk'])
+        if args.get('trace'):
+            cmd.append('--trace=%s' % args['trace'])
         if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         mpty, spty = pty.openpty()
         proc = sp.Popen(cmd, stdout=spty, stderr=spty)
         os.close(spty)
-        mpty = os.fdopen(mpty, 'r', 1)
         children.add(proc)
+        mpty = os.fdopen(mpty, 'r', 1)
+        if args.get('output'):
+            output = openio(args['output'], 'w')
 
         last_id = None
         last_output = []
@@ -622,7 +628,9 @@ def run_step(name, runner_, **args):
                 if not line:
                     break
                 last_output.append(line)
-                if args.get('verbose'):
+                if args.get('output'):
+                    output.write(line)
+                elif args.get('verbose'):
                     sys.stdout.write(line)
 
                 m = pattern.match(line)
@@ -652,6 +660,8 @@ def run_step(name, runner_, **args):
         finally:
             children.remove(proc)
             mpty.close()
+            if args.get('output'):
+                output.close()
 
         proc.wait()
         if proc.returncode != 0:
@@ -661,16 +671,16 @@ def run_step(name, runner_, **args):
                 last_output,
                 last_assert)
 
-    def run_job(runner, skip=None, every=None):
+    def run_job(runner, start=None, step=None):
         nonlocal failures
         nonlocal locals
 
-        while (skip or 0) < total_perms:
+        while (start or 0) < total_perms:
             runner_ = runner.copy()
-            if skip is not None:
-                runner_.append('--skip=%d' % skip)
-            if every is not None:
-                runner_.append('--every=%d' % every)
+            if start is not None:
+                runner_.append('--start=%d' % start)
+            if step is not None:
+                runner_.append('--step=%d' % step)
 
             try:
                 # run the tests
@@ -684,13 +694,13 @@
 
                 failures.append(failure)
 
-                if args.get('keep_going'):
+                if args.get('keep_going') and not killed:
                     # resume after failed test
-                    skip = (skip or 0) + locals.seen_perms*(every or 1)
+                    start = (start or 0) + locals.seen_perms*(step or 1)
                     continue
                 else:
                     # stop other tests
-                    for child in children:
+                    for child in children.copy():
                         child.kill()
 
             break
@@ -733,6 +743,10 @@ def run_step(name, runner_, **args):
                             if failures else ''))
                 sys.stdout.flush()
                 needs_newline = True
+    except KeyboardInterrupt:
+        # this is handled by the runner threads, we just
+        # need to not abort here
+        killed = True
     finally:
         if needs_newline:
             print()
@@ -743,7 +757,8 @@ def run_step(name, runner_, **args):
     return (
         expected_perms,
         passed_perms,
-        failures)
+        failures,
+        killed)
 
 
 def run(**args):
@@ -767,41 +782,41 @@ def run(**args):
     if args.get('by_suites'):
         for type in ['normal', 'reentrant', 'valgrind']:
             for suite in expected_suite_perms.keys():
-                expected_, passed_, failures_ = run_step(
+                expected_, passed_, failures_, killed = run_stage(
                     '%s %s' % (type, suite),
                     runner_ + ['--%s' % type, suite],
                     **args)
                 expected += expected_
                 passed += passed_
                 failures.extend(failures_)
-                if failures and not args.get('keep_going'):
+                if (failures and not args.get('keep_going')) or killed:
                     break
-            if failures and not args.get('keep_going'):
+            if (failures and not args.get('keep_going')) or killed:
                 break
     elif args.get('by_cases'):
         for type in ['normal', 'reentrant', 'valgrind']:
             for case in expected_case_perms.keys():
-                expected_, passed_, failures_ = run_step(
+                expected_, passed_, failures_, killed = run_stage(
                     '%s %s' % (type, case),
                     runner_ + ['--%s' % type, case],
                     **args)
                 expected += expected_
                 passed += passed_
                 failures.extend(failures_)
-                if failures and not args.get('keep_going'):
+                if (failures and not args.get('keep_going')) or killed:
                     break
-            if failures and not args.get('keep_going'):
+            if (failures and not args.get('keep_going')) or killed:
                 break
     else:
         for type in ['normal', 'reentrant', 'valgrind']:
-            expected_, passed_, failures_ = run_step(
+            expected_, passed_, failures_, killed = run_stage(
                 '%s tests' % type,
                 runner_ + ['--%s' % type],
                 **args)
             expected += expected_
             passed += passed_
             failures.extend(failures_)
-            if failures and not args.get('keep_going'):
+            if (failures and not args.get('keep_going')) or killed:
                 break
 
     # show summary
@@ -867,7 +882,8 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Build and run tests.")
+        description="Build and run tests.",
+        conflict_handler='resolve')
     # TODO document test case/perm specifier
     parser.add_argument('test_paths', nargs='*',
         help="Description of testis to run. May be a directory, path, or \
@@ -900,10 +916,12 @@ if __name__ == "__main__":
         help="Filter for reentrant tests. Can be combined.")
     test_parser.add_argument('-V', '--valgrind', action='store_true',
         help="Filter for Valgrind tests. Can be combined.")
-    test_parser.add_argument('-p', '--persist',
-        help="Persist the disk to this file.")
+    test_parser.add_argument('-d', '--disk',
+        help="Use this file as the disk.")
     test_parser.add_argument('-t', '--trace',
         help="Redirect trace output to this file.")
+    test_parser.add_argument('-o', '--output',
+        help="Redirect stdout and stderr to this file.")
     test_parser.add_argument('--runner', default=[RUNNER_PATH],
         type=lambda x: x.split(),
         help="Path to runner, defaults to %r" % RUNNER_PATH)
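
For illustration, a standalone sketch of the halt-on-SIGTERM behavior
described in the commit message. The children set, the .copy() while
killing, and the killed flag come straight from the diff above; the
signal handler and its registration here are assumptions about how the
pieces fit together, not the script's actual code:

    import signal

    children = set()   # running runner subprocesses (sp.Popen objects)
    killed = False

    def handle_sigterm(signum, frame):
        global killed
        killed = True
        # kill a snapshot of the set, since children is mutated as
        # runners exit and are removed
        for child in children.copy():
            child.kill()

    signal.signal(signal.SIGTERM, handle_sigterm)

The killed flag is then checked alongside -k/--keep-going, so an
interrupted run halts immediately even though failures would normally
be skipped past.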