Преглед изворни кода

Cleaned up a number of small tweaks in the scripts

- Added the littlefs license note to the scripts.

- Adopted parse_intermixed_args everywhere for more consistent arg
  handling.

- Removed argparse's implicit help text formatting as it does not
  work with parse_intermixed_args and breaks sometimes.

- Used string concatenation for argparse everywhere, as backslashed
  line continuations only work with argparse because it strips
  redundant whitespace.

- Consistent argparse formatting.

- Consistent openio mode handling.

- Consistent color argument handling.

- Adopted functools.lru_cache in tracebd.py.

- Moved unicode printing behind --subscripts in tracebd.py, making all
  scripts ASCII by default.

- Renamed pretty_asserts.py -> prettyasserts.py.

- Renamed struct.py -> struct_.py, the original name conflicts with
  Python's built in struct module in horrible ways.
Christopher Haster пре 3 година
родитељ
комит
20ec0be875
11 измењених фајлова са 307 додато и 162 уклоњено
  1. 4 4
      Makefile
  2. 13 6
      scripts/code.py
  3. 15 11
      scripts/coverage.py
  4. 12 5
      scripts/data.py
  5. 27 9
      scripts/prettyasserts.py
  6. 9 3
      scripts/stack.py
  7. 10 4
      scripts/struct_.py
  8. 12 3
      scripts/summary.py
  9. 8 2
      scripts/tailpipe.py
  10. 141 63
      scripts/test.py
  11. 56 52
      scripts/tracebd.py

+ 4 - 4
Makefile

@@ -133,7 +133,7 @@ stack: $(CI)
 
 .PHONY: struct
 struct: $(OBJ)
-	./scripts/struct.py $^ -S $(STRUCTFLAGS)
+	./scripts/struct_.py $^ -S $(STRUCTFLAGS)
 
 .PHONY: coverage
 coverage: $(GCDA)
@@ -171,7 +171,7 @@ $(BUILDDIR)lfs.stack.csv: $(CI)
 	./scripts/stack.py $^ -q $(CODEFLAGS) -o $@
 
 $(BUILDDIR)lfs.struct.csv: $(OBJ)
-	./scripts/struct.py $^ -q $(CODEFLAGS) -o $@
+	./scripts/struct_.py $^ -q $(CODEFLAGS) -o $@
 
 $(BUILDDIR)lfs.coverage.csv: $(GCDA)
 	./scripts/coverage.py $^ -q $(COVERAGEFLAGS) -o $@
@@ -195,10 +195,10 @@ $(BUILDDIR)%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@
 
 $(BUILDDIR)%.a.c: %.c
-	./scripts/pretty_asserts.py -p LFS_ASSERT $< -o $@
+	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
 
 $(BUILDDIR)%.a.c: $(BUILDDIR)%.c
-	./scripts/pretty_asserts.py -p LFS_ASSERT $< -o $@
+	./scripts/prettyasserts.py -p LFS_ASSERT $< -o $@
 
 $(BUILDDIR)%.t.c: %.toml
 	./scripts/test.py -c $< $(TESTCFLAGS) -o $@

+ 13 - 6
scripts/code.py

@@ -1,9 +1,16 @@
 #!/usr/bin/env python3
 #
-# Script to find code size at the function level. Basically just a bit wrapper
+# Script to find code size at the function level. Basically just a big wrapper
 # around nm with some extra conveniences for comparing builds. Heavily inspired
 # by Linux's Bloat-O-Meter.
 #
+# Example:
+# ./scripts/code.py lfs.o lfs_util.o -S
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -126,7 +133,7 @@ class CodeResult(co.namedtuple('CodeResult', 'file,function,code_size')):
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -417,7 +424,7 @@ if __name__ == "__main__":
         nargs='*',
         default=OBJ_PATHS,
         help="Description of where to find *.o files. May be a directory "
-            "or a list of paths. Defaults to %(default)r.")
+            "or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument(
         '-v', '--verbose',
         action='store_true',
@@ -468,16 +475,16 @@ if __name__ == "__main__":
         '--type',
         default=TYPE,
         help="Type of symbols to report, this uses the same single-character "
-            "type-names emitted by nm. Defaults to %(default)r.")
+            "type-names emitted by nm. Defaults to %r." % TYPE)
     parser.add_argument(
         '--nm-tool',
         type=lambda x: x.split(),
         default=NM_TOOL,
-        help="Path to the nm tool to use. Defaults to %(default)r")
+        help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
     parser.add_argument(
         '--build-dir',
         help="Specify the relative build directory. Used to map object files "
             "to the correct source files.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 15 - 11
scripts/coverage.py

@@ -1,8 +1,13 @@
 #!/usr/bin/env python3
 #
-# Script to find test coverage. Basically just a big wrapper around gcov with
-# some extra conveniences for comparing builds. Heavily inspired by Linux's
-# Bloat-O-Meter.
+# Script to find coverage info after running tests.
+#
+# Example:
+# ./scripts/coverage.py lfs.t.a.gcda lfs_util.t.a.gcda -s
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
 #
 
 import collections as co
@@ -208,7 +213,7 @@ class CoverageResult(co.namedtuple('CoverageResult',
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -682,7 +687,7 @@ if __name__ == "__main__":
         nargs='*',
         default=GCDA_PATHS,
         help="Description of where to find *.gcda files. May be a directory "
-            "or a list of paths. Defaults to %(default)r.")
+            "or a list of paths. Defaults to %r." % GCDA_PATHS)
     parser.add_argument(
         '-v', '--verbose',
         action='store_true',
@@ -752,18 +757,17 @@ if __name__ == "__main__":
         '-c', '--context',
         type=lambda x: int(x, 0),
         default=3,
-        help="Show a additional lines of context. Defaults to %(default)r.")
+        help="Show a additional lines of context. Defaults to 3.")
     parser.add_argument(
         '-W', '--width',
         type=lambda x: int(x, 0),
         default=80,
-        help="Assume source is styled with this many columns. Defaults "
-            "to %(default)r.")
+        help="Assume source is styled with this many columns. Defaults to 80.")
     parser.add_argument(
         '--color',
         choices=['never', 'always', 'auto'],
         default='auto',
-        help="When to use terminal colors.")
+        help="When to use terminal colors. Defaults to 'auto'.")
     parser.add_argument(
         '-e', '--error-on-lines',
         action='store_true',
@@ -780,11 +784,11 @@ if __name__ == "__main__":
         '--gcov-tool',
         default=GCOV_TOOL,
         type=lambda x: x.split(),
-        help="Path to the gcov tool to use. Defaults to %(default)r.")
+        help="Path to the gcov tool to use. Defaults to %r." % GCOV_TOOL)
     parser.add_argument(
         '--build-dir',
         help="Specify the relative build directory. Used to map object files "
             "to the correct source files.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 12 - 5
scripts/data.py

@@ -4,6 +4,13 @@
 # around nm with some extra conveniences for comparing builds. Heavily inspired
 # by Linux's Bloat-O-Meter.
 #
+# Example:
+# ./scripts/data.py lfs.o lfs_util.o -S
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -126,7 +133,7 @@ class DataResult(co.namedtuple('DataResult', 'file,function,data_size')):
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -417,7 +424,7 @@ if __name__ == "__main__":
         nargs='*',
         default=OBJ_PATHS,
         help="Description of where to find *.o files. May be a directory "
-            "or a list of paths. Defaults to %(default)r.")
+            "or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument(
         '-v', '--verbose',
         action='store_true',
@@ -468,16 +475,16 @@ if __name__ == "__main__":
         '--type',
         default=TYPE,
         help="Type of symbols to report, this uses the same single-character "
-            "type-names emitted by nm. Defaults to %(default)r.")
+            "type-names emitted by nm. Defaults to %r." % TYPE)
     parser.add_argument(
         '--nm-tool',
         type=lambda x: x.split(),
         default=NM_TOOL,
-        help="Path to the nm tool to use. Defaults to %(default)r")
+        help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
     parser.add_argument(
         '--build-dir',
         help="Specify the relative build directory. Used to map object files "
             "to the correct source files.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 27 - 9
scripts/pretty_asserts.py → scripts/prettyasserts.py

@@ -1,4 +1,14 @@
 #!/usr/bin/env python3
+#
+# Preprocessor that makes asserts easier to debug.
+#
+# Example:
+# ./scripts/prettyasserts.py -p LFS_ASSERT lfs.c -o lfs.a.c
+#
+# Copyright (c) 2022, The littlefs authors.
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import re
 import sys
@@ -34,7 +44,7 @@ LEXEMES = {
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -414,17 +424,25 @@ if __name__ == "__main__":
     import argparse
     import sys
     parser = argparse.ArgumentParser(
-        description="Preprocessor that makes asserts easy to debug.")
-    parser.add_argument('input',
+        description="Preprocessor that makes asserts easier to debug.")
+    parser.add_argument(
+        'input',
         help="Input C file.")
-    parser.add_argument('-o', '--output', required=True,
+    parser.add_argument(
+        '-o', '--output',
+        required=True,
         help="Output C file.")
-    parser.add_argument('-p', '--pattern', action='append',
+    parser.add_argument(
+        '-p', '--pattern',
+        action='append',
         help="Regex patterns to search for starting an assert statement. This"
             " implicitly includes \"assert\" and \"=>\".")
-    parser.add_argument('-l', '--limit',
-        default=LIMIT, type=lambda x: int(x, 0),
-        help="Maximum number of characters to display in strcmp and memcmp.")
+    parser.add_argument(
+        '-l', '--limit',
+        type=lambda x: int(x, 0),
+        default=LIMIT,
+        help="Maximum number of characters to display in strcmp and memcmp. "
+            "Defaults to %r." % LIMIT)
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 9 - 3
scripts/stack.py

@@ -3,6 +3,12 @@
 # Script to find stack usage at the function level. Will detect recursion and
 # report as infinite stack usage.
 #
+# Example:
+# ./scripts/stack.py lfs.ci lfs_util.ci -S
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -124,7 +130,7 @@ class StackResult(co.namedtuple('StackResult',
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -425,7 +431,7 @@ def table(results, calls, diff_results=None, *,
                             prefixes[2+is_last] + "'-> ",
                             prefixes[2+is_last] + "|   ",
                             prefixes[2+is_last] + "    "))
-                
+
 
         table_calls(names, depth)
 
@@ -643,5 +649,5 @@ if __name__ == "__main__":
         help="Specify the relative build directory. Used to map object files "
             "to the correct source files.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 10 - 4
scripts/struct.py → scripts/struct_.py

@@ -2,6 +2,12 @@
 #
 # Script to find struct sizes.
 #
+# Example:
+# ./scripts/struct_.py lfs.o lfs_util.o -S
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -123,7 +129,7 @@ class StructResult(co.namedtuple('StructResult', 'file,struct,struct_size')):
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -461,7 +467,7 @@ if __name__ == "__main__":
         nargs='*',
         default=OBJ_PATHS,
         help="Description of where to find *.o files. May be a directory "
-            "or a list of paths. Defaults to %(default)r.")
+            "or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument(
         '-v', '--verbose',
         action='store_true',
@@ -512,11 +518,11 @@ if __name__ == "__main__":
         '--objdump-tool',
         type=lambda x: x.split(),
         default=OBJDUMP_TOOL,
-        help="Path to the objdump tool to use.")
+        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
     parser.add_argument(
         '--build-dir',
         help="Specify the relative build directory. Used to map object files "
             "to the correct source files.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 12 - 3
scripts/summary.py

@@ -2,6 +2,15 @@
 #
 # Script to summarize the outputs of other scripts. Operates on CSV files.
 #
+# Example:
+# ./scripts/code.py lfs.o lfs_util.o -q -o lfs.code.csv
+# ./scripts/data.py lfs.o lfs_util.o -q -o lfs.data.csv
+# ./scripts/summary.py lfs.code.csv lfs.data.csv -q -o lfs.csv
+# ./scripts/summary.py -Y lfs.csv -f code=code_size,data=data_size
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -43,7 +52,7 @@ MERGES = {
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -660,7 +669,7 @@ if __name__ == "__main__":
         nargs='*',
         default=CSV_PATHS,
         help="Description of where to find *.csv files. May be a directory "
-            "or list of paths. Defaults to %(default)r.")
+            "or list of paths. Defaults to %r." % CSV_PATHS)
     parser.add_argument(
         '-q', '--quiet',
         action='store_true',
@@ -722,5 +731,5 @@ if __name__ == "__main__":
         action='store_true',
         help="Only show the totals.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 8 - 2
scripts/tailpipe.py

@@ -2,6 +2,12 @@
 #
 # Efficiently displays the last n lines of a file/pipe.
 #
+# Example:
+# ./scripts/tailpipe.py trace -n5
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import os
 import sys
@@ -11,7 +17,7 @@ import time
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -111,5 +117,5 @@ if __name__ == "__main__":
         help="Reopen the pipe on EOF, useful when multiple "
             "processes are writing.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))

+ 141 - 63
scripts/test.py

@@ -2,6 +2,12 @@
 #
 # Script to compile and runs tests.
 #
+# Example:
+# ./scripts/test.py runners/test_runner -b
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
 import csv
@@ -114,7 +120,7 @@ class TestCase:
                         for x in range(start, stop, step):
                             yield from parse_define('%s(%d)%s' % (
                                 v_[:m.start()], x, v_[m.end():]))
-                    else:                            
+                    else:
                         yield v_
             # or a literal value
             else:
@@ -337,7 +343,7 @@ def compile(test_paths, **args):
                                     k+'_i', define_cbs[v]))
                             f.writeln(4*' '+'},')
                         f.writeln('};')
-                        f.writeln()    
+                        f.writeln()
 
                     # create case filter function
                     if suite.if_ is not None or case.if_ is not None:
@@ -505,7 +511,8 @@ def find_runner(runner, **args):
     cmd = runner.copy()
 
     # run under some external command?
-    cmd[:0] = args.get('exec', [])
+    if args.get('exec'):
+        cmd[:0] = args['exec']
 
     # run under valgrind?
     if args.get('valgrind'):
@@ -914,7 +921,7 @@ def run_stage(name, runner_, ids, output_, **args):
                     for child in children.copy():
                         child.kill()
                     break
-    
+
 
     # parallel jobs?
     runners = []
@@ -984,7 +991,7 @@ def run_stage(name, runner_, ids, output_, **args):
         powerlosses,
         failures,
         killed)
-    
+
 
 def run(runner, test_ids=[], **args):
     # query runner for tests
@@ -1176,102 +1183,173 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Build and run tests.",
         conflict_handler='ignore')
-    parser.add_argument('-v', '--verbose', action='store_true',
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true',
         help="Output commands that run behind the scenes.")
-    parser.add_argument('--color',
-        choices=['never', 'always', 'auto'], default='auto',
-        help="When to use terminal colors.")
+    parser.add_argument(
+        '--color',
+        choices=['never', 'always', 'auto'],
+        default='auto',
+        help="When to use terminal colors. Defaults to 'auto'.")
 
     # test flags
     test_parser = parser.add_argument_group('test options')
-    test_parser.add_argument('runner', nargs='?',
+    test_parser.add_argument(
+        'runner',
+        nargs='?',
         type=lambda x: x.split(),
         help="Test runner to use for testing. Defaults to %r." % RUNNER_PATH)
-    test_parser.add_argument('test_ids', nargs='*',
+    test_parser.add_argument(
+        'test_ids',
+        nargs='*',
         help="Description of tests to run.")
-    test_parser.add_argument('-Y', '--summary', action='store_true',
+    test_parser.add_argument(
+        '-Y', '--summary',
+        action='store_true',
         help="Show quick summary.")
-    test_parser.add_argument('-l', '--list-suites', action='store_true',
+    test_parser.add_argument(
+        '-l', '--list-suites',
+        action='store_true',
         help="List test suites.")
-    test_parser.add_argument('-L', '--list-cases', action='store_true',
+    test_parser.add_argument(
+        '-L', '--list-cases',
+        action='store_true',
         help="List test cases.")
-    test_parser.add_argument('--list-suite-paths', action='store_true',
+    test_parser.add_argument(
+        '--list-suite-paths',
+        action='store_true',
         help="List the path for each test suite.")
-    test_parser.add_argument('--list-case-paths', action='store_true',
+    test_parser.add_argument(
+        '--list-case-paths',
+        action='store_true',
         help="List the path and line number for each test case.")
-    test_parser.add_argument('--list-defines', action='store_true',
+    test_parser.add_argument(
+        '--list-defines',
+        action='store_true',
         help="List all defines in this test-runner.")
-    test_parser.add_argument('--list-permutation-defines', action='store_true',
+    test_parser.add_argument(
+        '--list-permutation-defines',
+        action='store_true',
         help="List explicit defines in this test-runner.")
-    test_parser.add_argument('--list-implicit-defines', action='store_true',
+    test_parser.add_argument(
+        '--list-implicit-defines',
+        action='store_true',
         help="List implicit defines in this test-runner.")
-    test_parser.add_argument('--list-geometries', action='store_true',
+    test_parser.add_argument(
+        '--list-geometries',
+        action='store_true',
         help="List the available disk geometries.")
-    test_parser.add_argument('--list-powerlosses', action='store_true',
+    test_parser.add_argument(
+        '--list-powerlosses',
+        action='store_true',
         help="List the available power-loss scenarios.")
-    test_parser.add_argument('-D', '--define', action='append',
+    test_parser.add_argument(
+        '-D', '--define',
+        action='append',
         help="Override a test define.")
-    test_parser.add_argument('-g', '--geometry',
-        help="Comma-separated list of disk geometries to test. \
-            Defaults to d,e,E,n,N.")
-    test_parser.add_argument('-p', '--powerloss',
-        help="Comma-separated list of power-loss scenarios to test. \
-            Defaults to 0,l.")
-    test_parser.add_argument('-d', '--disk',
+    test_parser.add_argument(
+        '-g', '--geometry',
+        help="Comma-separated list of disk geometries to test. "
+            "Defaults to d,e,E,n,N.")
+    test_parser.add_argument(
+        '-p', '--powerloss',
+        help="Comma-separated list of power-loss scenarios to test. "
+            "Defaults to 0,l.")
+    test_parser.add_argument(
+        '-d', '--disk',
         help="Direct block device operations to this file.")
-    test_parser.add_argument('-t', '--trace',
+    test_parser.add_argument(
+        '-t', '--trace',
         help="Direct trace output to this file.")
-    test_parser.add_argument('-O', '--stdout',
+    test_parser.add_argument(
+        '-O', '--stdout',
         help="Direct stdout to this file. Note stderr is already merged here.")
-    test_parser.add_argument('-o', '--output',
+    test_parser.add_argument(
+        '-o', '--output',
         help="CSV file to store results.")
-    test_parser.add_argument('--read-sleep',
+    test_parser.add_argument(
+        '--read-sleep',
         help="Artificial read delay in seconds.")
-    test_parser.add_argument('--prog-sleep',
+    test_parser.add_argument(
+        '--prog-sleep',
         help="Artificial prog delay in seconds.")
-    test_parser.add_argument('--erase-sleep',
+    test_parser.add_argument(
+        '--erase-sleep',
         help="Artificial erase delay in seconds.")
-    test_parser.add_argument('-j', '--jobs', nargs='?', type=int,
+    test_parser.add_argument(
+        '-j', '--jobs',
+        nargs='?',
+        type=lambda x: int(x, 0),
         const=len(os.sched_getaffinity(0)),
         help="Number of parallel runners to run.")
-    test_parser.add_argument('-k', '--keep-going', action='store_true',
+    test_parser.add_argument(
+        '-k', '--keep-going',
+        action='store_true',
         help="Don't stop on first error.")
-    test_parser.add_argument('-i', '--isolate', action='store_true',
+    test_parser.add_argument(
+        '-i', '--isolate',
+        action='store_true',
         help="Run each test permutation in a separate process.")
-    test_parser.add_argument('-b', '--by-suites', action='store_true',
+    test_parser.add_argument(
+        '-b', '--by-suites',
+        action='store_true',
         help="Step through tests by suite.")
-    test_parser.add_argument('-B', '--by-cases', action='store_true',
+    test_parser.add_argument(
+        '-B', '--by-cases',
+        action='store_true',
         help="Step through tests by case.")
-    test_parser.add_argument('--context', type=lambda x: int(x, 0),
-        help="Show this many lines of stdout on test failure. \
-            Defaults to 5.")
-    test_parser.add_argument('--gdb', action='store_true',
+    test_parser.add_argument(
+        '--context',
+        type=lambda x: int(x, 0),
+        default=5,
+        help="Show this many lines of stdout on test failure. "
+            "Defaults to 5.")
+    test_parser.add_argument(
+        '--gdb',
+        action='store_true',
         help="Drop into gdb on test failure.")
-    test_parser.add_argument('--gdb-case', action='store_true',
-        help="Drop into gdb on test failure but stop at the beginning \
-            of the failing test case.")
-    test_parser.add_argument('--gdb-main', action='store_true',
-        help="Drop into gdb on test failure but stop at the beginning \
-            of main.")
-    test_parser.add_argument('--exec', default=[], type=lambda e: e.split(),
+    test_parser.add_argument(
+        '--gdb-case',
+        action='store_true',
+        help="Drop into gdb on test failure but stop at the beginning "
+            "of the failing test case.")
+    test_parser.add_argument(
+        '--gdb-main',
+        action='store_true',
+        help="Drop into gdb on test failure but stop at the beginning "
+            "of main.")
+    test_parser.add_argument(
+        '--exec',
+        type=lambda e: e.split(),
         help="Run under another executable.")
-    test_parser.add_argument('--valgrind', action='store_true',
-        help="Run under Valgrind to find memory errors. Implicitly sets \
-            --isolate.")
+    test_parser.add_argument(
+        '--valgrind',
+        action='store_true',
+        help="Run under Valgrind to find memory errors. Implicitly sets "
+            "--isolate.")
 
     # compilation flags
     comp_parser = parser.add_argument_group('compilation options')
-    comp_parser.add_argument('test_paths', nargs='*',
-        help="Description of *.toml files to compile. May be a directory \
-            or a list of paths.")
-    comp_parser.add_argument('-c', '--compile', action='store_true',
+    comp_parser.add_argument(
+        'test_paths',
+        nargs='*',
+        help="Description of *.toml files to compile. May be a directory "
+            "or a list of paths.")
+    comp_parser.add_argument(
+        '-c', '--compile',
+        action='store_true',
         help="Compile a test suite or source file.")
-    comp_parser.add_argument('-s', '--source',
+    comp_parser.add_argument(
+        '-s', '--source',
         help="Source file to compile, possibly injecting internal tests.")
-    comp_parser.add_argument('--include', default=HEADER_PATH,
-        help="Inject this header file into every compiled test file. \
-            Defaults to %r." % HEADER_PATH)
-    comp_parser.add_argument('-o', '--output',
+    comp_parser.add_argument(
+        '--include',
+        default=HEADER_PATH,
+        help="Inject this header file into every compiled test file. "
+            "Defaults to %r." % HEADER_PATH)
+    comp_parser.add_argument(
+        '-o', '--output',
         help="Output file.")
 
     # runner + test_ids overlaps test_paths, so we need to do some munging here

+ 56 - 52
scripts/tracebd.py

@@ -2,8 +2,15 @@
 #
 # Display operations on block devices based on trace output
 #
+# Example:
+# ./scripts/tracebd.py trace
+#
+# Copyright (c) 2022, The littlefs authors.
+# SPDX-License-Identifier: BSD-3-Clause
+#
 
 import collections as co
+import functools as ft
 import itertools as it
 import math as m
 import os
@@ -15,7 +22,7 @@ import time
 
 def openio(path, mode='r'):
     if path == '-':
-        if 'r' in mode:
+        if mode == 'r':
             return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
         else:
             return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
@@ -23,11 +30,11 @@ def openio(path, mode='r'):
         return open(path, mode)
 
 # space filling Hilbert-curve
+#
+# note we memoize the last curve since this is a bit expensive
+#
+@ft.lru_cache(1)
 def hilbert_curve(width, height):
-    # memoize the last curve
-    if getattr(hilbert_curve, 'last', (None,))[0] == (width, height):
-        return hilbert_curve.last[1]
-
     # based on generalized Hilbert curves:
     # https://github.com/jakubcerveny/gilbert
     #
@@ -83,16 +90,14 @@ def hilbert_curve(width, height):
     else:
         curve = hilbert_(0, 0, 0, +height, +width, 0)
 
-    curve = list(curve)
-    hilbert_curve.last = ((width, height), curve)
-    return curve
+    return list(curve)
 
 # space filling Z-curve/Lebesgue-curve
+#
+# note we memoize the last curve since this is a bit expensive
+#
+@ft.lru_cache(1)
 def lebesgue_curve(width, height):
-    # memoize the last curve
-    if getattr(lebesgue_curve, 'last', (None,))[0] == (width, height):
-        return lebesgue_curve.last[1]
-
     # we create a truncated Z-curve by simply filtering out the points
     # that are outside our region
     curve = []
@@ -104,7 +109,6 @@ def lebesgue_curve(width, height):
         if x < width and y < height:
             curve.append((x, y))
 
-    lebesgue_curve.last = ((width, height), curve)
     return curve
 
 
@@ -151,29 +155,30 @@ class Block:
 
     def __add__(self, other):
         return Block(
-            max(self.wear, other.wear), 
+            max(self.wear, other.wear),
             self.readed | other.readed,
             self.proged | other.proged,
             self.erased | other.erased)
 
-    def draw(self,
-            ascii=False,
+    def draw(self, *,
+            subscripts=False,
             chars=None,
             wear_chars=None,
-            color='always',
+            color=True,
             read=True,
             prog=True,
             erase=True,
             wear=False,
             max_wear=None,
-            block_cycles=None):
+            block_cycles=None,
+            **_):
         if not chars: chars = '.rpe'
         c = chars[0]
         f = []
 
         if wear:
-            if not wear_chars and ascii: wear_chars = '0123456789'
-            elif not wear_chars:         wear_chars = '.₁₂₃₄₅₆789'
+            if not wear_chars and subscripts: wear_chars = '.₁₂₃₄₅₆789'
+            elif not wear_chars:              wear_chars = '0123456789'
 
             if block_cycles:
                 w = self.wear / block_cycles
@@ -183,8 +188,7 @@ class Block:
             c = wear_chars[min(
                 int(w*(len(wear_chars)-1)),
                 len(wear_chars)-1)]
-            if color == 'wear' or (
-                    color == 'always' and not read and not prog and not erase):
+            if color:
                 if w*9 >= 9:   f.append('\x1b[1;31m')
                 elif w*9 >= 7: f.append('\x1b[35m')
 
@@ -192,12 +196,12 @@ class Block:
         elif prog and self.proged: c = chars[2]
         elif read and self.readed: c = chars[1]
 
-        if color == 'ops' or color == 'always':
+        if color:
             if erase and self.erased:  f.append('\x1b[44m')
             elif prog and self.proged: f.append('\x1b[45m')
             elif read and self.readed: f.append('\x1b[42m')
 
-        if color in ['always', 'wear', 'ops'] and f:
+        if color:
             return '%s%c\x1b[m' % (''.join(f), c)
         else:
             return c
@@ -318,16 +322,13 @@ def main(path='-', *,
         prog=False,
         erase=False,
         wear=False,
-        reset=False,
-        ascii=False,
-        chars=None,
-        wear_chars=None,
         color='auto',
         block=(None,None),
         off=(None,None),
         block_size=None,
         block_count=None,
         block_cycles=None,
+        reset=False,
         width=None,
         height=1,
         scale=None,
@@ -336,13 +337,20 @@ def main(path='-', *,
         sleep=None,
         hilbert=False,
         lebesgue=False,
-        keep_open=False):
+        keep_open=False,
+        **args):
+    # exclusive wear or read/prog/erase by default
     if not read and not prog and not erase and not wear:
         read = True
         prog = True
         erase = True
+    # figure out what color should be
     if color == 'auto':
-        color = 'always' if sys.stdout.isatty() else 'never'
+        color = sys.stdout.isatty()
+    elif color == 'always':
+        color = True
+    else:
+        color = False
 
     block_start = block[0]
     block_stop = block[1] if len(block) > 1 else block[0]+1
@@ -438,7 +446,7 @@ def main(path='-', *,
             with lock:
                 if reset:
                     bd.reset()
-                    
+
                 # ignore the new values if block_stop/off_stop is explicit
                 bd.smoosh(
                     size=(size if off_stop is None
@@ -513,16 +521,14 @@ def main(path='-', *,
 
         def draw(b):
             return b.draw(
-                ascii=ascii,
-                chars=chars,
-                wear_chars=wear_chars,
-                color=color,
                 read=read,
                 prog=prog,
                 erase=erase,
                 wear=wear,
+                color=color,
                 max_wear=max_wear,
-                block_cycles=block_cycles)
+                block_cycles=block_cycles,
+                **args)
 
         # fold via a curve?
         if height > 1:
@@ -562,7 +568,7 @@ def main(path='-', *,
     def print_line():
         nonlocal last_rows
         if not lines:
-            return 
+            return
 
         # give ourself a canvas
         while last_rows < len(history)*height:
@@ -672,15 +678,9 @@ if __name__ == "__main__":
         action='store_true',
         help="Render wear.")
     parser.add_argument(
-        '-R',
-        '--reset',
-        action='store_true',
-        help="Reset wear on block device initialization.")
-    parser.add_argument(
-        '-A',
-        '--ascii',
-        action='store_true',
-        help="Don't use unicode characters.")
+        '--subscripts',
+        action='store_true',
+        help="Use unicode subscripts for showing wear.")
     parser.add_argument(
         '--chars',
         help="Characters to use for noop, read, prog, erase operations.")
@@ -689,8 +688,9 @@ if __name__ == "__main__":
         help="Characters to use to show wear.")
     parser.add_argument(
         '--color',
-        choices=['never', 'always', 'auto', 'ops', 'wear'],
-        help="When to use terminal colors, defaults to auto.")
+        choices=['never', 'always', 'auto'],
+        default='auto',
+        help="When to use terminal colors. Defaults to 'auto'.")
     parser.add_argument(
         '-b',
         '--block',
@@ -715,6 +715,11 @@ if __name__ == "__main__":
         '--block-cycles',
         type=lambda x: int(x, 0),
         help="Assumed maximum number of erase cycles when measuring wear.")
+    parser.add_argument(
+        '-R',
+        '--reset',
+        action='store_true',
+        help="Reset wear on block device initialization.")
     parser.add_argument(
         '-W',
         '--width',
@@ -735,13 +740,12 @@ if __name__ == "__main__":
         '-n',
         '--lines',
         type=lambda x: int(x, 0),
-        help="Number of lines to show, with 0 indicating no limit. "
-            "Defaults to 0.")
+        help="Number of lines to show.")
     parser.add_argument(
         '-c',
         '--coalesce',
         type=lambda x: int(x, 0),
-        help="Number of operations to coalesce together. Defaults to 1.")
+        help="Number of operations to coalesce together.")
     parser.add_argument(
         '-s',
         '--sleep',
@@ -765,5 +769,5 @@ if __name__ == "__main__":
         help="Reopen the pipe on EOF, useful when multiple "
             "processes are writing.")
     sys.exit(main(**{k: v
-        for k, v in vars(parser.parse_args()).items()
+        for k, v in vars(parser.parse_intermixed_args()).items()
         if v is not None}))