Browse Source

Merge pull request #669 from littlefs-project/devel

Minor release: v2.5
Christopher Haster 3 years ago
parent
commit
40dba4a556
24 changed files with 2441 additions and 514 deletions
  1. .github/workflows/post-release.yml (+1, -1)
  2. .github/workflows/release.yml (+61, -80)
  3. .github/workflows/status.yml (+1, -1)
  4. .github/workflows/test.yml (+104, -78)
  5. .gitignore (+2, -0)
  6. LICENSE.md (+1, -0)
  7. Makefile (+76, -17)
  8. bd/lfs_filebd.c (+14, -0)
  9. bd/lfs_filebd.h (+1, -0)
  10. bd/lfs_rambd.c (+1, -0)
  11. bd/lfs_rambd.h (+1, -0)
  12. bd/lfs_testbd.c (+1, -0)
  13. bd/lfs_testbd.h (+1, -0)
  14. lfs.c (+594, -228)
  15. lfs.h (+7, -1)
  16. lfs_util.c (+1, -0)
  17. lfs_util.h (+1, -0)
  18. scripts/code.py (+114, -44)
  19. scripts/coverage.py (+130, -61)
  20. scripts/data.py (+283, -0)
  21. scripts/stack.py (+430, -0)
  22. scripts/structs.py (+331, -0)
  23. scripts/summary.py (+279, -0)
  24. scripts/test.py (+6, -3)

+ 1 - 1
.github/workflows/post-release.yml

@@ -6,7 +6,7 @@ on:
 
 jobs:
   post-release:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     steps:
       # trigger post-release in dependency repo, this indirection allows the
       # dependency repo to be updated often without affecting this repo. At

+ 61 - 80
.github/workflows/release.yml

@@ -7,7 +7,7 @@ on:
 
 jobs:
   release:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
 
     # need to manually check for a couple things
     # - tests passed?
@@ -73,89 +73,70 @@ jobs:
          # previous results to compare against?
          [ -n "$LFS_PREV_VERSION" ] && curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
-              `status/$LFS_PREV_VERSION" \
+              `status/$LFS_PREV_VERSION?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
            >> prev-results.json \
            || true
 
-          # unfortunately these each have their own format
-          [ -e results/code-thumb.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / code").description
-                  | capture("Code size is (?<result>[0-9]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb.csv -s | awk '
-              NR==2 {printf "Code size,%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-readonly.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / code (readonly)").description
-                  | capture("Code size is (?<result>[0-9]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk '
-              NR==2 {printf "Code size<br/>(readonly),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-threadsafe.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / code (threadsafe)").description
-                  | capture("Code size is (?<result>[0-9]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk '
-              NR==2 {printf "Code size<br/>(threadsafe),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-migrate.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / code (migrate)").description
-                  | capture("Code size is (?<result>[0-9]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk '
-              NR==2 {printf "Code size<br/>(migrate),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/code-thumb-error-asserts.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / code (error-asserts)").description
-                  | capture("Code size is (?<result>[0-9]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk '
-              NR==2 {printf "Code size<br/>(error-asserts),%d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-          [ -e results/coverage.csv ] && ( \
-            export PREV="$(jq -re '
-                  select(.context == "results / coverage").description
-                  | capture("Coverage is (?<result>[0-9\\.]+)").result' \
-                prev-results.json || echo 0)"
-            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
-              NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
-              NR==2 {printf "\n"}' \
-            >> results.csv)
-
-          # transpose to GitHub table
-          [ -e results.csv ] || exit 0
-          awk -F ',' '
-            {label[NR]=$1; value[NR]=$2}
-            END {
-              for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
-              for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n";
-              for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
-            results.csv > results.txt
-          echo "RESULTS:"
+          # build table for GitHub
+          echo "<table>" >> results.txt
+          echo "<thead>" >> results.txt
+          echo "<tr>" >> results.txt
+          echo "<th align=left>Configuration</th>" >> results.txt
+          for r in Code Stack Structs Coverage
+          do
+            echo "<th align=right>$r</th>" >> results.txt
+          done
+          echo "</tr>" >> results.txt
+          echo "</thead>" >> results.txt
+
+          echo "<tbody>" >> results.txt
+          for c in "" readonly threadsafe migrate error-asserts
+          do
+            echo "<tr>" >> results.txt
+            c_or_default=${c:-default}
+            echo "<td align=left>${c_or_default^}</td>" >> results.txt
+            for r in code stack structs
+            do
+              # per-config results
+              echo "<td align=right>" >> results.txt
+              [ -e results/thumb${c:+-$c}.csv ] && ( \
+                export PREV="$(jq -re '
+                      select(.context == "'"results (thumb${c:+, $c}) / $r"'").description
+                      | capture("(?<result>[0-9∞]+)").result' \
+                    prev-results.json || echo 0)"
+                ./scripts/summary.py results/thumb${c:+-$c}.csv -f $r -Y | awk '
+                  NR==2 {printf "%s B",$2}
+                  NR==2 && ENVIRON["PREV"]+0 != 0 {
+                    printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+                  NR==2 {printf "\n"}' \
+                | sed -e 's/ /\&nbsp;/g' \
+                >> results.txt)
+              echo "</td>" >> results.txt
+            done
+            # coverage results
+            if [ -z $c ]
+            then
+              echo "<td rowspan=0 align=right>" >> results.txt
+              [ -e results/coverage.csv ] && ( \
+                export PREV="$(jq -re '
+                      select(.context == "results / coverage").description
+                      | capture("(?<result>[0-9\\.]+)").result' \
+                    prev-results.json || echo 0)"
+                ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
+                  NR==2 {printf "%.1f%% of %d lines",$4,$3}
+                  NR==2 && ENVIRON["PREV"]+0 != 0 {
+                    printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
+                  NR==2 {printf "\n"}' \
+                | sed -e 's/ /\&nbsp;/g' \
+                >> results.txt)
+              echo "</td>" >> results.txt
+            fi
+            echo "</tr>" >> results.txt
+          done
+          echo "</tbody>" >> results.txt
+          echo "</table>" >> results.txt
+
          cat results.txt
 
      # find changes from history

+ 1 - 1
.github/workflows/status.yml

@@ -6,7 +6,7 @@ on:
 
 jobs:
   status:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     steps:
       # custom statuses?
       - uses: dawidd6/action-download-artifact@v2

+ 104 - 78
.github/workflows/test.yml

@@ -8,7 +8,7 @@ env:
 jobs:
   # run tests
   test:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
     strategy:
       fail-fast: false
       matrix:
@@ -18,11 +18,27 @@ jobs:
      - uses: actions/checkout@v2
      - name: install
        run: |
-          # need toml, also pip3 isn't installed by default?
+          # need a few additional tools
+          #
+          # note this includes gcc-10, which is required for -fcallgraph-info=su
          sudo apt-get update -qq
-          sudo apt-get install -qq python3 python3-pip lcov
+          sudo apt-get install -qq gcc-10 python3 python3-pip lcov
          sudo pip3 install toml
-          gcc --version
+          echo "CC=gcc-10" >> $GITHUB_ENV
+          gcc-10 --version
+          lcov --version
+          python3 --version
+
+          # need newer lcov version for gcc-10
+          #sudo apt-get remove lcov
+          #wget https://launchpad.net/ubuntu/+archive/primary/+files/lcov_1.15-1_all.deb
+          #sudo apt install ./lcov_1.15-1_all.deb
+          #lcov --version
+          #which lcov
+          #ls -lha /usr/bin/lcov
+          wget https://github.com/linux-test-project/lcov/releases/download/v1.15/lcov-1.15.tar.gz
+          tar xf lcov-1.15.tar.gz
+          sudo make -C lcov-1.15 install
 
          # setup a ram-backed disk to speed up reentrant tests
          mkdir disks
@@ -41,36 +57,36 @@ jobs:
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
-            gcc-arm-linux-gnueabi \
+            gcc-10-arm-linux-gnueabi \
            libc6-dev-armel-cross \
            qemu-user
-          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
+          echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
-          arm-linux-gnueabi-gcc --version
+          arm-linux-gnueabi-gcc-10 --version
          qemu-arm -version
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
-            gcc-mips-linux-gnu \
+            gcc-10-mips-linux-gnu \
            libc6-dev-mips-cross \
            qemu-user
-          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
+          echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
-          mips-linux-gnu-gcc --version
+          mips-linux-gnu-gcc-10 --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
-            gcc-powerpc-linux-gnu \
+            gcc-10-powerpc-linux-gnu \
            libc6-dev-powerpc-cross \
            qemu-user
-          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
+          echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
-          powerpc-linux-gnu-gcc --version
+          powerpc-linux-gnu-gcc-10 --version
          qemu-ppc -version
 
      # make sure example can at least compile
@@ -148,102 +164,108 @@ jobs:
          retention-days: 1
 
      # update results
-      - name: results-code
+      - name: results
        run: |
          mkdir -p results
          make clean
-          make code \
+          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
-              -DLFS_NO_ERROR" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
-      - name: results-code-readonly
+              -DLFS_NO_ERROR"
+          cp lfs.csv results/${{matrix.arch}}.csv
+          ./scripts/summary.py results/${{matrix.arch}}.csv
+      - name: results-readonly
        run: |
          mkdir -p results
          make clean
-          make code \
+          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
-              -DLFS_READONLY" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
-      - name: results-code-threadsafe
+              -DLFS_READONLY"
+          cp lfs.csv results/${{matrix.arch}}-readonly.csv
+          ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
+      - name: results-threadsafe
        run: |
          mkdir -p results
          make clean
-          make code \
+          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
-              -DLFS_THREADSAFE" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
-      - name: results-code-migrate
+              -DLFS_THREADSAFE"
+          cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
+          ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
+      - name: results-migrate
        run: |
          mkdir -p results
          make clean
-          make code \
+          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
-              -DLFS_MIGRATE" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
-      - name: results-code-error-asserts
+              -DLFS_MIGRATE"
+          cp lfs.csv results/${{matrix.arch}}-migrate.csv
+          ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
+      - name: results-error-asserts
        run: |
          mkdir -p results
          make clean
-          make code \
+          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
-              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
-            CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv"
+              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
+          cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
+          ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results
-      # limit reporting to Thumb, otherwise there would be too many numbers
-      # flying around for the results to be easily readable
+
+      # create statuses with results
      - name: collect-status
-        if: ${{matrix.arch == 'thumb'}}
        run: |
          mkdir -p status
-          for f in $(shopt -s nullglob ; echo results/code*.csv)
+          for f in $(shopt -s nullglob ; echo results/*.csv)
          do
-            export STEP="results-code$(
-              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
-            export CONTEXT="results / code$(
-              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
-            export PREV="$(curl -sS \
-              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
-              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
-                | select(.context == env.CONTEXT).description
-                | capture("Code size is (?<result>[0-9]+)").result' \
-              || echo 0)"
-            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
-              NR==2 {printf "Code size is %d B",$2}
-              NR==2 && ENVIRON["PREV"]+0 != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
-            jq -n '{
-              state: "success",
-              context: env.CONTEXT,
-              description: env.DESCRIPTION,
-              target_job: "${{github.job}} (${{matrix.arch}})",
-              target_step: env.STEP}' \
-              | tee status/code$(
-                echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
+            export STEP="results$(
+              echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
+            for r in code stack structs
+            do
+              export CONTEXT="results (${{matrix.arch}}$(
+                echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
+              export PREV="$(curl -sS \
+                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
+                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+                  | select(.context == env.CONTEXT).description
+                  | capture("(?<result>[0-9∞]+)").result' \
+                || echo 0)"
+              export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
+                NR==2 {printf "%s B",$2}
+                NR==2 && ENVIRON["PREV"]+0 != 0 {
+                  printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
+              jq -n '{
+                state: "success",
+                context: env.CONTEXT,
+                description: env.DESCRIPTION,
+                target_job: "${{github.job}} (${{matrix.arch}})",
+                target_step: env.STEP}' \
+                | tee status/$r-${{matrix.arch}}$(
+                  echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
+            done
          done
      - name: upload-status
-        if: ${{matrix.arch == 'thumb'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
@@ -252,7 +274,7 @@ jobs:
 
  # run under Valgrind to check for memory errors
  valgrind:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - name: install
@@ -272,7 +294,7 @@ jobs:
 
  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
@@ -297,16 +319,18 @@
 
          # setup disk for littlefs-fuse
          mkdir mount
-          sudo chmod a+rw /dev/loop0
+          LOOP=$(sudo losetup -f)
+          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
-          losetup /dev/loop0 disk
+          losetup $LOOP disk
+          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
 
-          littlefs-fuse/lfs --format /dev/loop0
-          littlefs-fuse/lfs /dev/loop0 mount
+          littlefs-fuse/lfs --format $LOOP
+          littlefs-fuse/lfs $LOOP mount
 
          ls mount
          mkdir mount/littlefs
@@ -318,7 +342,7 @@
 
  # test migration using littlefs-fuse
  migrate:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
@@ -348,9 +372,11 @@
 
          # setup disk for littlefs-fuse
          mkdir mount
-          sudo chmod a+rw /dev/loop0
+          LOOP=$(sudo losetup -f)
+          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
-          losetup /dev/loop0 disk
+          losetup $LOOP disk
+          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
@@ -358,8 +384,8 @@
          make -C v2
 
          # run self-host test with v1
-          v1/lfs --format /dev/loop0
-          v1/lfs /dev/loop0 mount
+          v1/lfs --format $LOOP
+          v1/lfs $LOOP mount
 
          ls mount
          mkdir mount/littlefs
@@ -373,8 +399,8 @@
          cd ../..
          fusermount -u mount
 
-          v2/lfs --migrate /dev/loop0
-          v2/lfs /dev/loop0 mount
+          v2/lfs --migrate $LOOP
+          v2/lfs $LOOP mount
 
          # run self-host test with v2 right where we left off
          ls mount
@@ -385,7 +411,7 @@
 
  # collect coverage info
  coverage:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-20.04
    needs: [test]
    steps:
      - uses: actions/checkout@v2
@@ -421,14 +447,14 @@
          export STEP="results-coverage"
          export CONTEXT="results / coverage"
          export PREV="$(curl -sS \
-            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
              | select(.context == env.CONTEXT).description
-              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
+              | capture("(?<result>[0-9\\.]+)").result' \
            || echo 0)"
          export DESCRIPTION="$(
-            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
-              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
+            ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
+              NR==2 {printf "%.1f%% of %d lines",$4,$3}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
          jq -n '{
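
The results-error-asserts step above redefines LFS_ASSERT via CFLAGS so a failed assert becomes an error return instead of an abort. A minimal sketch of the idea, assuming it is only applied to functions that return an int error code; the helper function below is hypothetical, not part of this commit:

```c
#include <stddef.h>

// the same redefinition the workflow passes via CFLAGS
#define LFS_ASSERT(test) do {if (!(test)) {return -1;}} while (0)

// hypothetical helper: with error-asserts, a bad argument produces
// an error code rather than aborting the program
static int example_read(const unsigned char *buffer, unsigned size) {
    LFS_ASSERT(buffer != NULL);
    LFS_ASSERT(size > 0);
    return 0;
}
```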

+ 2 - 0
.gitignore

@@ -2,6 +2,8 @@
 *.o
 *.d
 *.a
+*.ci
+*.csv
 
 # Testing things
 blocks/

+ 1 - 0
LICENSE.md

@@ -1,3 +1,4 @@
+Copyright (c) 2022, The littlefs authors.  
 Copyright (c) 2017, Arm Limited. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification,

+ 76 - 17
Makefile

@@ -17,44 +17,63 @@ TARGET ?= $(BUILDDIR)lfs.a
 endif
 
 
-CC ?= gcc
-AR ?= ar
-SIZE ?= size
-CTAGS ?= ctags
-NM ?= nm
-LCOV ?= lcov
+CC      ?= gcc
+AR      ?= ar
+SIZE    ?= size
+CTAGS   ?= ctags
+NM      ?= nm
+OBJDUMP ?= objdump
+LCOV    ?= lcov
 
 SRC ?= $(wildcard *.c)
 OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
 DEP := $(SRC:%.c=$(BUILDDIR)%.d)
 ASM := $(SRC:%.c=$(BUILDDIR)%.s)
+CGI := $(SRC:%.c=$(BUILDDIR)%.ci)
 
 ifdef DEBUG
-override CFLAGS += -O0 -g3
+override CFLAGS += -O0
 else
 override CFLAGS += -Os
 endif
 ifdef TRACE
 override CFLAGS += -DLFS_YES_TRACE
 endif
+override CFLAGS += -g3
 override CFLAGS += -I.
 override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
 
 ifdef VERBOSE
-override TESTFLAGS += -v
-override CODEFLAGS += -v
+override TESTFLAGS     += -v
+override CALLSFLAGS    += -v
+override CODEFLAGS     += -v
+override DATAFLAGS     += -v
+override STACKFLAGS    += -v
+override STRUCTSFLAGS  += -v
 override COVERAGEFLAGS += -v
 endif
 ifdef EXEC
 override TESTFLAGS += --exec="$(EXEC)"
 endif
+ifdef COVERAGE
+override TESTFLAGS += --coverage
+endif
 ifdef BUILDDIR
-override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
-override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
+override TESTFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override CALLSFLAGS    += --build-dir="$(BUILDDIR:/=)"
+override CODEFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override DATAFLAGS     += --build-dir="$(BUILDDIR:/=)"
+override STACKFLAGS    += --build-dir="$(BUILDDIR:/=)"
+override STRUCTSFLAGS  += --build-dir="$(BUILDDIR:/=)"
+override COVERAGEFLAGS += --build-dir="$(BUILDDIR:/=)"
 endif
 ifneq ($(NM),nm)
 override CODEFLAGS += --nm-tool="$(NM)"
+override DATAFLAGS += --nm-tool="$(NM)"
+endif
+ifneq ($(OBJDUMP),objdump)
+override STRUCTSFLAGS += --objdump-tool="$(OBJDUMP)"
 endif
 
 
@@ -73,9 +92,9 @@ size: $(OBJ)
 tags:
 	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
 
-.PHONY: code
-code: $(OBJ)
-	./scripts/code.py $^ $(CODEFLAGS)
+.PHONY: calls
+calls: $(CGI)
+	./scripts/calls.py $^ $(CALLSFLAGS)
 
 .PHONY: test
 test:
@@ -84,9 +103,30 @@ test:
 test%: tests/test$$(firstword $$(subst \#, ,%)).toml
 	./scripts/test.py $@ $(TESTFLAGS)
 
+.PHONY: code
+code: $(OBJ)
+	./scripts/code.py $^ -S $(CODEFLAGS)
+
+.PHONY: data
+data: $(OBJ)
+	./scripts/data.py $^ -S $(DATAFLAGS)
+
+.PHONY: stack
+stack: $(CGI)
+	./scripts/stack.py $^ -S $(STACKFLAGS)
+
+.PHONY: structs
+structs: $(OBJ)
+	./scripts/structs.py $^ -S $(STRUCTSFLAGS)
+
 .PHONY: coverage
 coverage:
-	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
+	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info -s $(COVERAGEFLAGS)
+
+.PHONY: summary
+summary: $(BUILDDIR)lfs.csv
+	./scripts/summary.py -Y $^ $(SUMMARYFLAGS)
+
 
 # rules
 -include $(DEP)
@@ -95,20 +135,39 @@ coverage:
 $(BUILDDIR)lfs: $(OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
 
-$(BUILDDIR)%.a: $(OBJ)
+$(BUILDDIR)lfs.a: $(OBJ)
 	$(AR) rcs $@ $^
 
+$(BUILDDIR)lfs.csv: $(OBJ) $(CGI)
+	./scripts/code.py $(OBJ) -q $(CODEFLAGS) -o $@
+	./scripts/data.py $(OBJ) -q -m $@ $(DATAFLAGS) -o $@
+	./scripts/stack.py $(CGI) -q -m $@ $(STACKFLAGS) -o $@
+	./scripts/structs.py $(OBJ) -q -m $@ $(STRUCTSFLAGS) -o $@
+	$(if $(COVERAGE),\
+		./scripts/coverage.py $(BUILDDIR)tests/*.toml.info \
+			-q -m $@ $(COVERAGEFLAGS) -o $@)
+
 $(BUILDDIR)%.o: %.c
 	$(CC) -c -MMD $(CFLAGS) $< -o $@
 
 $(BUILDDIR)%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@
 
+# gcc depends on the output file for intermediate file names, so
+# we can't omit the .o output. We also need to serialize with the
+# normal .o rule because otherwise we can end up with multiprocess
+# problems with two instances of gcc modifying the same .o
+$(BUILDDIR)%.ci: %.c | $(BUILDDIR)%.o
+	$(CC) -c -MMD -fcallgraph-info=su $(CFLAGS) $< -o $|
+
 # clean everything
 .PHONY: clean
 clean:
-	rm -f $(TARGET)
+	rm -f $(BUILDDIR)lfs
+	rm -f $(BUILDDIR)lfs.a
+	rm -f $(BUILDDIR)lfs.csv
 	rm -f $(OBJ)
+	rm -f $(CGI)
 	rm -f $(DEP)
 	rm -f $(ASM)
 	rm -f $(BUILDDIR)tests/*.toml.*

+ 14 - 0
bd/lfs_filebd.c

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -10,6 +11,10 @@
 #include <unistd.h>
 #include <errno.h>
 
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
 int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
         const struct lfs_filebd_config *bdcfg) {
     LFS_FILEBD_TRACE("lfs_filebd_createcfg(%p {.context=%p, "
@@ -27,7 +32,12 @@ int lfs_filebd_createcfg(const struct lfs_config *cfg, const char *path,
     bd->cfg = bdcfg;
 
     // open file
+    #ifdef _WIN32
+    bd->fd = open(path, O_RDWR | O_CREAT | O_BINARY, 0666);
+    #else
     bd->fd = open(path, O_RDWR | O_CREAT, 0666);
+    #endif
+
     if (bd->fd < 0) {
         int err = -errno;
         LFS_FILEBD_TRACE("lfs_filebd_createcfg -> %d", err);
@@ -193,7 +203,11 @@ int lfs_filebd_sync(const struct lfs_config *cfg) {
     LFS_FILEBD_TRACE("lfs_filebd_sync(%p)", (void*)cfg);
     // file sync
     lfs_filebd_t *bd = cfg->context;
+    #ifdef _WIN32
+    int err = FlushFileBuffers((HANDLE) _get_osfhandle(bd->fd)) ? 0 : -1;
+    #else
     int err = fsync(bd->fd);
+    #endif
     if (err) {
         err = -errno;
         LFS_FILEBD_TRACE("lfs_filebd_sync -> %d", 0);

+ 1 - 0
bd/lfs_filebd.h

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in a file
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 1 - 0
bd/lfs_rambd.c

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 1 - 0
bd/lfs_rambd.h

@@ -1,6 +1,7 @@
 /*
  * Block device emulated in RAM
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 1 - 0
bd/lfs_testbd.c

@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 1 - 0
bd/lfs_testbd.h

@@ -2,6 +2,7 @@
  * Testing block device, wraps filebd and rambd while providing a bunch
  * of hooks for testing littlefs in various conditions.
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

File diff suppressed because it is too large
+ 594 - 228
lfs.c


+ 7 - 1
lfs.h

@@ -1,6 +1,7 @@
 /*
  * The little filesystem
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -22,7 +23,7 @@ extern "C"
 // Software library version
 // Major (top-nibble), incremented on backwards incompatible changes
 // Minor (bottom-nibble), incremented on feature additions
-#define LFS_VERSION 0x00020004
+#define LFS_VERSION 0x00020005
 #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16))
 #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >>  0))
 
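Since LFS_VERSION packs the major version in the top 16 bits and the minor version in the bottom 16, 0x00020005 decodes as v2.5. A quick sanity sketch, not part of the diff:

```c
#include <assert.h>
#include "lfs.h"

void check_lfs_version(void) {
    // 0x00020005: major in the top 16 bits, minor in the bottom 16
    assert(LFS_VERSION_MAJOR == 2);
    assert(LFS_VERSION_MINOR == 5);
}
```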
@@ -513,6 +514,7 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 
 /// File operations ///
 
+#ifndef LFS_NO_MALLOC
 // Open a file
 //
 // The mode that the file is opened in is determined by the flags, which
@@ -522,6 +524,10 @@ int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type);
 int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
         const char *path, int flags);
 
+// if LFS_NO_MALLOC is defined, lfs_file_open() will fail with LFS_ERR_NOMEM
+// thus use lfs_file_opencfg() with config.buffer set.
+#endif
+
 // Open a file with extra configuration
 //
 // The mode that the file is opened in is determined by the flags, which
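The new comment points LFS_NO_MALLOC users at lfs_file_opencfg(). A minimal sketch of that pattern, assuming a mounted filesystem and a cache_size of 256 in the lfs_config; the names here are illustrative, not from this commit:

```c
#include "lfs.h"

// static cache for the file, sized to match lfs_config.cache_size
static uint8_t file_buffer[256];
static const struct lfs_file_config file_cfg = {
    .buffer = file_buffer,
};

int open_without_malloc(lfs_t *lfs, lfs_file_t *file) {
    // under LFS_NO_MALLOC, lfs_file_open() would fail with LFS_ERR_NOMEM;
    // providing the buffer explicitly avoids any allocation
    return lfs_file_opencfg(lfs, file, "data.txt",
            LFS_O_RDWR | LFS_O_CREAT, &file_cfg);
}
```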

+ 1 - 0
lfs_util.c

@@ -1,6 +1,7 @@
 /*
  * lfs util functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 1 - 0
lfs_util.h

@@ -1,6 +1,7 @@
 /*
  * lfs utility functions
  *
+ * Copyright (c) 2022, The littlefs authors.
  * Copyright (c) 2017, Arm Limited. All rights reserved.
  * SPDX-License-Identifier: BSD-3-Clause
  */

+ 114 - 44
scripts/code.py

@@ -15,7 +15,7 @@ import csv
 import collections as co
 
 
-OBJ_PATHS = ['*.o', 'bd/*.o']
+OBJ_PATHS = ['*.o']
 
 def collect(paths, **args):
     results = co.defaultdict(lambda: 0)
@@ -31,7 +31,8 @@ def collect(paths, **args):
         proc = sp.Popen(cmd,
             stdout=sp.PIPE,
             stderr=sp.PIPE if not args.get('verbose') else None,
-            universal_newlines=True)
+            universal_newlines=True,
+            errors='replace')
         for line in proc.stdout:
             m = pattern.match(line)
             if m:
@@ -48,16 +49,30 @@ def collect(paths, **args):
         # map to source files
         if args.get('build_dir'):
             file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # replace .o with .c, different scripts report .o/.c, we need to
+        # choose one if we want to deduplicate csv files
+        file = re.sub('\.o$', '.c', file)
         # discard internal functions
-        if func.startswith('__'):
-            continue
+        if not args.get('everything'):
+            if func.startswith('__'):
+                continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
+
         flat_results.append((file, func, size))
 
     return flat_results
 
 def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
     # find sizes
     if not args.get('use', None):
         # find .o files
@@ -75,13 +90,14 @@ def main(**args):
 
         results = collect(paths, **args)
     else:
-        with open(args['use']) as f:
+        with openio(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
+                    result['name'],
+                    int(result['code_size']))
+                for result in r
+                if result.get('code_size') not in {None, ''}]
 
     total = 0
     for _, _, size in results:
@@ -89,13 +105,17 @@ def main(**args):
 
     # find previous results?
     if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['size']))
-                for result in r]
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['code_size']))
+                    for result in r
+                    if result.get('code_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
 
         prev_total = 0
         for _, _, size in prev_results:
@@ -103,14 +123,34 @@ def main(**args):
 
     # write results to CSV
     if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'size'])
-            for file, func, size in sorted(results):
-                w.writerow((file, func, size))
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('code_size', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, size in results:
+            merged_results[(file, func)]['code_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
 
     # print results
-    def dedup_entries(results, by='function'):
+    def dedup_entries(results, by='name'):
         entries = co.defaultdict(lambda: 0)
         for file, func, size in results:
             entry = (file if by == 'file' else func)
@@ -126,45 +166,67 @@ def main(**args):
             diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
         return diff
 
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %7s' % (by, 'size'))
         else:
             print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
 
-    def print_entries(by='function'):
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
         entries = dedup_entries(results, by=by)
 
         if not args.get('diff'):
             print_header(by=by)
-            for name, size in sorted(entries.items()):
-                print("%-36s %7d" % (name, size))
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
             print_header(by='%s (%d added, %d removed)' % (by,
                 sum(1 for old, _, _, _ in diff.values() if not old),
                 sum(1 for _, new, _, _ in diff.values() if not new)))
-            for name, (old, new, diff, ratio) in sorted(diff.items(),
-                    key=lambda x: (-x[1][3], x)):
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
                 if ratio or args.get('all'):
-                    print("%-36s %7s %7s %+7d%s" % (name,
-                        old or "-",
-                        new or "-",
-                        diff,
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                    print_diff_entry(name, old, new, diff, ratio)
 
     def print_totals():
         if not args.get('diff'):
-            print("%-36s %7d" % ('TOTAL', total))
+            print_entry('TOTAL', total)
         else:
-            ratio = (total-prev_total)/prev_total if prev_total else 1.0
-            print("%-36s %7s %7s %+7d%s" % (
-                'TOTAL',
-                prev_total if prev_total else '-',
-                total if total else '-',
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
                 total-prev_total,
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                ratio)
 
     if args.get('quiet'):
         pass
@@ -175,7 +237,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='function')
+        print_entries(by='name')
         print_totals()
 
 if __name__ == "__main__":
@@ -188,22 +250,30 @@ if __name__ == "__main__":
             or a list of paths. Defaults to %r." % OBJ_PATHS)
     parser.add_argument('-v', '--verbose', action='store_true',
         help="Output commands that run behind the scenes.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
     parser.add_argument('-o', '--output',
         help="Specify CSV file to store results.")
     parser.add_argument('-u', '--use',
         help="Don't compile and find code sizes, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--size-sort', action='store_true',
+        help="Sort by size.")
+    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+        help="Sort by size, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
         help="Show file-level code sizes. Note this does not include padding! "
             "So sizes may differ from other tools.")
-    parser.add_argument('-s', '--summary', action='store_true',
+    parser.add_argument('-Y', '--summary', action='store_true',
         help="Only show the total code size.")
-    parser.add_argument('-q', '--quiet', action='store_true',
-        help="Don't show anything, useful with -o.")
-    parser.add_argument('--type', default='tTrRdDbB',
+    parser.add_argument('--type', default='tTrRdD',
         help="Type of symbols to report, this uses the same single-character "
             "type-names emitted by nm. Defaults to %(default)r.")
     parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),

+ 130 - 61
scripts/coverage.py

@@ -55,8 +55,9 @@ def collect(paths, **args):
     for (file, func), (hits, count) in reduced_funcs.items():
         # discard internal/testing functions (test_* injected with
         # internal testing)
-        if func.startswith('__') or func.startswith('test_'):
-            continue
+        if not args.get('everything'):
+            if func.startswith('__') or func.startswith('test_'):
+                continue
         # discard .8449 suffixes created by optimizer
         func = re.sub('\.[0-9]+', '', func)
         results.append((file, func, hits, count))
@@ -65,6 +66,15 @@ def collect(paths, **args):
 
 
 def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
     # find coverage
     if not args.get('use'):
         # find *.info files
@@ -82,14 +92,16 @@ def main(**args):
 
         results = collect(paths, **args)
     else:
-        with open(args['use']) as f:
+        with openio(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
+                    result['name'],
+                    int(result['coverage_hits']),
+                    int(result['coverage_count']))
+                for result in r
+                if result.get('coverage_hits') not in {None, ''}
+                if result.get('coverage_count') not in {None, ''}]
 
     total_hits, total_count = 0, 0
     for _, _, hits, count in results:
@@ -98,14 +110,19 @@ def main(**args):
 
     # find previous results?
     if args.get('diff'):
-        with open(args['diff']) as f:
-            r = csv.DictReader(f)
-            prev_results = [
-                (   result['file'],
-                    result['function'],
-                    int(result['hits']),
-                    int(result['count']))
-                for result in r]
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['coverage_hits']),
+                        int(result['coverage_count']))
+                    for result in r
+                    if result.get('coverage_hits') not in {None, ''}
+                    if result.get('coverage_count') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
 
         prev_total_hits, prev_total_count = 0, 0
         for _, _, hits, count in prev_results:
@@ -114,14 +131,36 @@ def main(**args):
 
     # write results to CSV
     if args.get('output'):
-        with open(args['output'], 'w') as f:
-            w = csv.writer(f)
-            w.writerow(['file', 'function', 'hits', 'count'])
-            for file, func, hits, count in sorted(results):
-                w.writerow((file, func, hits, count))
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('coverage_hits', None)
+                        result.pop('coverage_count', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, hits, count in results:
+            merged_results[(file, func)]['coverage_hits'] = hits
+            merged_results[(file, func)]['coverage_count'] = count
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'coverage_hits', 'coverage_count'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
 
     # print results
-    def dedup_entries(results, by='function'):
+    def dedup_entries(results, by='name'):
         entries = co.defaultdict(lambda: (0, 0))
         for file, func, hits, count in results:
             entry = (file if by == 'file' else func)
@@ -147,23 +186,59 @@ def main(**args):
                     - (old_hits/old_count if old_count else 1.0)))
         return diff
 
+    def sorted_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('coverage_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        elif args.get('reverse_coverage_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][6], x))
+
     def print_header(by=''):
         if not args.get('diff'):
             print('%-36s %19s' % (by, 'hits/line'))
         else:
             print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
 
-    def print_entries(by='function'):
+    def print_entry(name, hits, count):
+        print("%-36s %11s %7s" % (name,
+            '%d/%d' % (hits, count)
+                if count else '-',
+            '%.1f%%' % (100*hits/count)
+                if count else '-'))
+
+    def print_diff_entry(name,
+            old_hits, old_count,
+            new_hits, new_count,
+            diff_hits, diff_count,
+            ratio):
+        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
+            '%d/%d' % (old_hits, old_count)
+                if old_count else '-',
+            '%.1f%%' % (100*old_hits/old_count)
+                if old_count else '-',
+            '%d/%d' % (new_hits, new_count)
+                if new_count else '-',
+            '%.1f%%' % (100*new_hits/new_count)
+                if new_count else '-',
+            '%+d/%+d' % (diff_hits, diff_count),
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
         entries = dedup_entries(results, by=by)
 
         if not args.get('diff'):
             print_header(by=by)
-            for name, (hits, count) in sorted(entries.items()):
-                print("%-36s %11s %7s" % (name,
-                    '%d/%d' % (hits, count)
-                        if count else '-',
-                    '%.1f%%' % (100*hits/count)
-                        if count else '-'))
+            for name, (hits, count) in sorted_entries(entries.items()):
+                print_entry(name, hits, count)
         else:
             prev_entries = dedup_entries(prev_results, by=by)
             diff = diff_entries(prev_entries, entries)
@@ -173,45 +248,28 @@ def main(**args):
             for name, (
                     old_hits, old_count,
                     new_hits, new_count,
-                    diff_hits, diff_count, ratio) in sorted(diff.items(),
-                        key=lambda x: (-x[1][6], x)):
+                    diff_hits, diff_count, ratio) in sorted_diff_entries(
+                        diff.items()):
                 if ratio or args.get('all'):
-                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
-                        '%d/%d' % (old_hits, old_count)
-                            if old_count else '-',
-                        '%.1f%%' % (100*old_hits/old_count)
-                            if old_count else '-',
-                        '%d/%d' % (new_hits, new_count)
-                            if new_count else '-',
-                        '%.1f%%' % (100*new_hits/new_count)
-                            if new_count else '-',
-                        '%+d/%+d' % (diff_hits, diff_count),
-                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+                    print_diff_entry(name,
+                        old_hits, old_count,
+                        new_hits, new_count,
+                        diff_hits, diff_count,
+                        ratio)
 
     def print_totals():
         if not args.get('diff'):
-            print("%-36s %11s %7s" % ('TOTAL',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-'))
+            print_entry('TOTAL', total_hits, total_count)
         else:
             ratio = ((total_hits/total_count
                     if total_count else 1.0)
                 - (prev_total_hits/prev_total_count
                     if prev_total_count else 1.0))
-            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
-                '%d/%d' % (prev_total_hits, prev_total_count)
-                    if prev_total_count else '-',
-                '%.1f%%' % (100*prev_total_hits/prev_total_count)
-                    if prev_total_count else '-',
-                '%d/%d' % (total_hits, total_count)
-                    if total_count else '-',
-                '%.1f%%' % (100*total_hits/total_count)
-                    if total_count else '-',
-                '%+d/%+d' % (total_hits-prev_total_hits,
-                    total_count-prev_total_count),
-                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+            print_diff_entry('TOTAL',
+                prev_total_hits, prev_total_count,
+                total_hits, total_count,
+                total_hits-prev_total_hits, total_count-prev_total_count,
+                ratio)
 
     if args.get('quiet'):
         pass
@@ -222,7 +280,7 @@ def main(**args):
         print_entries(by='file')
         print_totals()
     else:
-        print_entries(by='function')
+        print_entries(by='name')
         print_totals()
 
 if __name__ == "__main__":
@@ -243,12 +301,23 @@ if __name__ == "__main__":
         help="Don't do any work, instead use this CSV file.")
         help="Don't do any work, instead use this CSV file.")
     parser.add_argument('-d', '--diff',
     parser.add_argument('-d', '--diff',
         help="Specify CSV file to diff code size against.")
         help="Specify CSV file to diff code size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
     parser.add_argument('-a', '--all', action='store_true',
         help="Show all functions, not just the ones that changed.")
-    parser.add_argument('--files', action='store_true',
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--coverage-sort', action='store_true',
+        help="Sort by coverage.")
+    parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
+        help="Sort by coverage, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
         help="Show file-level coverage.")
         help="Show file-level coverage.")
-    parser.add_argument('-s', '--summary', action='store_true',
+    parser.add_argument('-Y', '--summary', action='store_true',
         help="Only show the total coverage.")
         help="Only show the total coverage.")
     parser.add_argument('-q', '--quiet', action='store_true',
     parser.add_argument('-q', '--quiet', action='store_true',
         help="Don't show anything, useful with -o.")
         help="Don't show anything, useful with -o.")
+    parser.add_argument('--build-dir',
+        help="Specify the relative build directory. Used to map object files \
+            to the correct source files.")
     sys.exit(main(**vars(parser.parse_args())))
     sys.exit(main(**vars(parser.parse_args())))
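
Two conventions now shared across all of these scripts are worth noting: results are keyed on a generic `name` column with measurement-specific fields ('coverage_hits'/'coverage_count' here instead of 'hits'/'count'), and the new -m/--merge flag folds fresh measurements into an existing CSV so several tools can accumulate columns in one results file. A self-contained sketch of the merge semantics (values invented for illustration):

import csv
import io

# an existing results.csv, e.g. written earlier by code.py
existing = io.StringIO(
    'file,name,code_size\n'
    'lfs.c,lfs_file_open,442\n')
# fresh coverage measurements keyed on (file, name)
fresh = {('lfs.c', 'lfs_file_open'): (12, 16)}

merged = {}
for row in csv.DictReader(existing):
    key = (row.pop('file'), row.pop('name'))
    row.pop('coverage_hits', None)   # drop stale copies of our own fields
    row.pop('coverage_count', None)
    merged[key] = row
for key, (hits, count) in fresh.items():
    merged.setdefault(key, {})['coverage_hits'] = hits
    merged.setdefault(key, {})['coverage_count'] = count

out = io.StringIO()
w = csv.DictWriter(out,
    ['file', 'name', 'code_size', 'coverage_hits', 'coverage_count'])
w.writeheader()
for (file, name), row in sorted(merged.items()):
    w.writerow({'file': file, 'name': name, **row})
print(out.getvalue())
# file,name,code_size,coverage_hits,coverage_count
# lfs.c,lfs_file_open,442,12,16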

+ 283 - 0
scripts/data.py

@@ -0,0 +1,283 @@
+#!/usr/bin/env python3
+#
+# Script to find data size at the function level. Basically just a thin wrapper
+# around nm with some extra conveniences for comparing builds. Heavily inspired
+# by Linux's Bloat-O-Meter.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o']
+
+def collect(paths, **args):
+    results = co.defaultdict(lambda: 0)
+    pattern = re.compile(
+        '^(?P<size>[0-9a-fA-F]+)' +
+        ' (?P<type>[%s])' % re.escape(args['type']) +
+        ' (?P<func>.+?)$')
+    for path in paths:
+        # note nm-tool may contain extra args
+        cmd = args['nm_tool'] + ['--size-sort', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace')
+        for line in proc.stdout:
+            m = pattern.match(line)
+            if m:
+                results[(path, m.group('func'))] += int(m.group('size'), 16)
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    flat_results = []
+    for (file, func), size in results.items():
+        # map to source files
+        if args.get('build_dir'):
+            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # replace .o with .c, different scripts report .o/.c, we need to
+        # choose one if we want to deduplicate csv files
+        file = re.sub('\.o$', '.c', file)
+        # discard internal functions
+        if not args.get('everything'):
+            if func.startswith('__'):
+                continue
+        # discard .8449 suffixes created by optimizer
+        func = re.sub('\.[0-9]+', '', func)
+        flat_results.append((file, func, size))
+
+    return flat_results
+
+def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .o files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['data_size']))
+                for result in r
+                if result.get('data_size') not in {None, ''}]
+
+    total = 0
+    for _, _, size in results:
+        total += size
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['data_size']))
+                    for result in r
+                    if result.get('data_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total = 0
+        for _, _, size in prev_results:
+            prev_total += size
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('data_size', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, size in results:
+            merged_results[(file, func)]['data_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        entries = co.defaultdict(lambda: 0)
+        for file, func, size in results:
+            entry = (file if by == 'file' else func)
+            entries[entry] += size
+        return entries
+
+    def diff_entries(olds, news):
+        diff = co.defaultdict(lambda: (0, 0, 0, 0))
+        for name, new in news.items():
+            diff[name] = (0, new, new, 1.0)
+        for name, old in olds.items():
+            _, new, _, _ = diff[name]
+            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s' % (by, 'size'))
+        else:
+            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for old, _, _, _ in diff.values() if not old),
+                sum(1 for _, new, _, _ in diff.values() if not new)))
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
+                if ratio or args.get('all'):
+                    print_diff_entry(name, old, new, diff, ratio)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
+                total-prev_total,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Find data size at the function level.")
+    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+        help="Description of where to find *.o files. May be a directory \
+            or a list of paths. Defaults to %r." % OBJ_PATHS)
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output commands that run behind the scenes.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    parser.add_argument('-o', '--output',
+        help="Specify CSV file to store results.")
+    parser.add_argument('-u', '--use',
+        help="Don't compile and find data sizes, instead use this CSV file.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff data size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all functions, not just the ones that changed.")
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--size-sort', action='store_true',
+        help="Sort by size.")
+    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+        help="Sort by size, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
+        help="Show file-level data sizes. Note this does not include padding! "
+            "So sizes may differ from other tools.")
+    parser.add_argument('-Y', '--summary', action='store_true',
+        help="Only show the total data size.")
+    parser.add_argument('--type', default='dDbB',
+        help="Type of symbols to report, this uses the same single-character "
+            "type-names emitted by nm. Defaults to %(default)r.")
+    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
+        help="Path to the nm tool to use.")
+    parser.add_argument('--build-dir',
+        help="Specify the relative build directory. Used to map object files \
+            to the correct source files.")
+    sys.exit(main(**vars(parser.parse_args())))
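
Each of these scripts also grew the small openio() helper so '-' can stand in for stdin/stdout, letting CSVs pipe from one tool into the next. Note that it dup()s the underlying file descriptor rather than returning sys.stdin/sys.stdout directly; presumably this is so the `with` blocks above can close what they opened without closing the process's real streams. A sketch of why that matters:

import os
import sys

def openio(path, mode='r'):
    # same helper as in the scripts above
    if path == '-':
        if 'r' in mode:
            return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
    else:
        return open(path, mode)

with openio('-', 'w') as f:
    f.write('first write\n')
# only the dup'd descriptor was closed, so stdout is still usable
with openio('-', 'w') as f:
    f.write('second write\n')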

+ 430 - 0
scripts/stack.py

@@ -0,0 +1,430 @@
+#!/usr/bin/env python3
+#
+# Script to find stack usage at the function level. Will detect recursion and
+# report as infinite stack usage.
+#
+
+import os
+import glob
+import itertools as it
+import re
+import csv
+import collections as co
+import math as m
+
+
+CI_PATHS = ['*.ci']
+
+def collect(paths, **args):
+    # parse the vcg format
+    k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
+    v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
+    def parse_vcg(rest):
+        def parse_vcg(rest):
+            node = []
+            while True:
+                rest = rest.lstrip()
+                m = k_pattern.match(rest)
+                if not m:
+                    return (node, rest)
+                k, rest = m.group(1), rest[m.end(0):]
+
+                rest = rest.lstrip()
+                if rest.startswith('{'):
+                    v, rest = parse_vcg(rest[1:])
+                    assert rest[0] == '}', "unexpected %r" % rest[0:1]
+                    rest = rest[1:]
+                    node.append((k, v))
+                else:
+                    m = v_pattern.match(rest)
+                    assert m, "unexpected %r" % rest[0:1]
+                    v, rest = m.group(1) or m.group(2), rest[m.end(0):]
+                    node.append((k, v))
+
+        node, rest = parse_vcg(rest)
+        assert rest == '', "unexpected %r" % rest[0:1]
+        return node
+
+    # collect into functions
+    results = co.defaultdict(lambda: (None, None, 0, set()))
+    f_pattern = re.compile(
+        r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
+    for path in paths:
+        with open(path) as f:
+            vcg = parse_vcg(f.read())
+        for k, graph in vcg:
+            if k != 'graph':
+                continue
+            for k, info in graph:
+                if k == 'node':
+                    info = dict(info)
+                    m = f_pattern.match(info['label'])
+                    if m:
+                        function, file, size, type = m.groups()
+                        if not args.get('quiet') and type != 'static':
+                            print('warning: found non-static stack for %s (%s)'
+                                % (function, type))
+                        _, _, _, targets = results[info['title']]
+                        results[info['title']] = (
+                            file, function, int(size), targets)
+                elif k == 'edge':
+                    info = dict(info)
+                    _, _, _, targets = results[info['sourcename']]
+                    targets.add(info['targetname'])
+                else:
+                    continue
+
+    if not args.get('everything'):
+        for source, (s_file, s_function, _, _) in list(results.items()):
+            # discard internal functions
+            if s_file.startswith('<') or s_file.startswith('/usr/include'):
+                del results[source]
+
+    # find maximum stack size recursively, this requires also detecting cycles
+    # (in case of recursion)
+    def find_limit(source, seen=None):
+        seen = seen or set()
+        if source not in results:
+            return 0
+        _, _, frame, targets = results[source]
+
+        limit = 0
+        for target in targets:
+            if target in seen:
+                # found a cycle
+                return float('inf')
+            limit_ = find_limit(target, seen | {target})
+            limit = max(limit, limit_)
+
+        return frame + limit
+
+    def find_deps(targets):
+        deps = set()
+        for target in targets:
+            if target in results:
+                t_file, t_function, _, _ = results[target]
+                deps.add((t_file, t_function))
+        return deps
+
+    # flatten into a list
+    flat_results = []
+    for source, (s_file, s_function, frame, targets) in results.items():
+        limit = find_limit(source)
+        deps = find_deps(targets)
+        flat_results.append((s_file, s_function, frame, limit, deps))
+
+    return flat_results
+
+def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find sizes
+    if not args.get('use', None):
+        # find .ci files
+        paths = []
+        for path in args['ci_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.ci'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .ci files found in %r?' % args['ci_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['stack_frame']),
+                    float(result['stack_limit']), # note limit can be inf
+                    set())
+                for result in r
+                if result.get('stack_frame') not in {None, ''}
+                if result.get('stack_limit') not in {None, ''}]
+
+    total_frame = 0
+    total_limit = 0
+    for _, _, frame, limit, _ in results:
+        total_frame += frame
+        total_limit = max(total_limit, limit)
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['stack_frame']),
+                        float(result['stack_limit']),
+                        set())
+                    for result in r
+                    if result.get('stack_frame') not in {None, ''}
+                    if result.get('stack_limit') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total_frame = 0
+        prev_total_limit = 0
+        for _, _, frame, limit, _ in prev_results:
+            prev_total_frame += frame
+            prev_total_limit = max(prev_total_limit, limit)
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        func = result.pop('name', '')
+                        result.pop('stack_frame', None)
+                        result.pop('stack_limit', None)
+                        merged_results[(file, func)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, func, frame, limit, _ in results:
+            merged_results[(file, func)]['stack_frame'] = frame
+            merged_results[(file, func)]['stack_limit'] = limit
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
+            w.writeheader()
+            for (file, func), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': func, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        entries = co.defaultdict(lambda: (0, 0, set()))
+        for file, func, frame, limit, deps in results:
+            entry = (file if by == 'file' else func)
+            entry_frame, entry_limit, entry_deps = entries[entry]
+            entries[entry] = (
+                entry_frame + frame,
+                max(entry_limit, limit),
+                entry_deps | {file if by == 'file' else func
+                    for file, func in deps})
+        return entries
+
+    def diff_entries(olds, news):
+        diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
+        for name, (new_frame, new_limit, deps) in news.items():
+            diff[name] = (
+                None, None,
+                new_frame, new_limit,
+                new_frame, new_limit,
+                1.0,
+                deps)
+        for name, (old_frame, old_limit, _) in olds.items():
+            _, _, new_frame, new_limit, _, _, _, deps = diff[name]
+            diff[name] = (
+                old_frame, old_limit,
+                new_frame, new_limit,
+                (new_frame or 0) - (old_frame or 0),
+                0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
+                    else (new_limit or 0) - (old_limit or 0),
+                0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
+                    else +float('inf') if m.isinf(new_limit or 0)
+                    else -float('inf') if m.isinf(old_limit or 0)
+                    else +0.0 if not old_limit and not new_limit
+                    else +1.0 if not old_limit
+                    else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
+                deps)
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('limit_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_limit_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        elif args.get('frame_sort'):
+            return sorted(entries, key=lambda x: (-x[1][0], x))
+        elif args.get('reverse_frame_sort'):
+            return sorted(entries, key=lambda x: (+x[1][0], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('limit_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
+        elif args.get('reverse_limit_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
+        elif args.get('frame_sort'):
+            return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
+        elif args.get('reverse_frame_sort'):
+            return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][6], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s %7s' % (by, 'frame', 'limit'))
+        else:
+            print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, frame, limit):
+        print("%-36s %7d %7s" % (name,
+            frame, '∞' if m.isinf(limit) else int(limit)))
+
+    def print_diff_entry(name,
+            old_frame, old_limit,
+            new_frame, new_limit,
+            diff_frame, diff_limit,
+            ratio):
+        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
+            old_frame if old_frame is not None else "-",
+            ('∞' if m.isinf(old_limit) else int(old_limit))
+                if old_limit is not None else "-",
+            new_frame if new_frame is not None else "-",
+            ('∞' if m.isinf(new_limit) else int(new_limit))
+                if new_limit is not None else "-",
+            diff_frame,
+            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
+                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
+                else '%+d' % diff_limit),
+            '' if not ratio
+                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
+                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
+                else ' (%+.1f%%)' % (100*ratio)))
+
+    def print_entries(by='name'):
+        # build optional tree of dependencies
+        def print_deps(entries, depth, print,
+                filter=lambda _: True,
+                prefixes=('', '', '', '')):
+            entries = entries if isinstance(entries, list) else list(entries)
+            filtered_entries = [(name, entry)
+                for name, entry in entries
+                if filter(name)]
+            for i, (name, entry) in enumerate(filtered_entries):
+                last = (i == len(filtered_entries)-1)
+                print(prefixes[0+last] + name, entry)
+
+                if depth > 0:
+                    deps = entry[-1]
+                    print_deps(entries, depth-1, print,
+                        lambda name: name in deps,
+                        (   prefixes[2+last] + "|-> ",
+                            prefixes[2+last] + "'-> ",
+                            prefixes[2+last] + "|   ",
+                            prefixes[2+last] + "    "))
+
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            print_deps(
+                sorted_entries(entries.items()),
+                args.get('depth') or 0,
+                lambda name, entry: print_entry(name, *entry[:-1]))
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
+                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
+            print_deps(
+                filter(
+                    lambda x: x[1][6] or args.get('all'),
+                    sorted_diff_entries(diff.items())),
+                args.get('depth') or 0,
+                lambda name, entry: print_diff_entry(name, *entry[:-1]))
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total_frame, total_limit)
+        else:
+            diff_frame = total_frame - prev_total_frame
+            diff_limit = (
+                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
+                    else (total_limit or 0) - (prev_total_limit or 0))
+            ratio = (
+                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
+                    else +float('inf') if m.isinf(total_limit or 0)
+                    else -float('inf') if m.isinf(prev_total_limit or 0)
+                    else 0.0 if not prev_total_limit and not total_limit
+                    else 1.0 if not prev_total_limit
+                    else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
+            print_diff_entry('TOTAL',
+                prev_total_frame, prev_total_limit,
+                total_frame, total_limit,
+                diff_frame, diff_limit,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Find stack usage at the function level.")
+    parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
+        help="Description of where to find *.ci files. May be a directory \
+            or a list of paths. Defaults to %r." % CI_PATHS)
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output commands that run behind the scenes.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    parser.add_argument('-o', '--output',
+        help="Specify CSV file to store results.")
+    parser.add_argument('-u', '--use',
+        help="Don't parse callgraph files, instead use this CSV file.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all functions, not just the ones that changed.")
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--limit-sort', action='store_true',
+        help="Sort by stack limit.")
+    parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
+        help="Sort by stack limit, but backwards.")
+    parser.add_argument('--frame-sort', action='store_true',
+        help="Sort by stack frame size.")
+    parser.add_argument('--reverse-frame-sort', action='store_true',
+        help="Sort by stack frame size, but backwards.")
+    parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
+        nargs='?', const=float('inf'),
+        help="Depth of dependencies to show.")
+    parser.add_argument('-F', '--files', action='store_true',
+        help="Show file-level calls.")
+    parser.add_argument('-Y', '--summary', action='store_true',
+        help="Only show the total stack size.")
+    parser.add_argument('--build-dir',
+        help="Specify the relative build directory. Used to map object files \
+            to the correct source files.")
+    sys.exit(main(**vars(parser.parse_args())))
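
The heart of stack.py is find_limit() above: a depth-first walk of the callgraph GCC emits into *.ci files, summing frame sizes along each path and reporting any cycle as an infinite limit. A toy rerun of that logic on an invented three-function call graph:

frames = {'a': 16, 'b': 32, 'c': 64}          # invented frame sizes
calls = {'a': {'b'}, 'b': {'c'}, 'c': set()}  # invented call graph

def find_limit(source, seen=None):
    # mirrors stack.py's find_limit, minus the vcg bookkeeping
    seen = seen or set()
    if source not in frames:
        return 0
    limit = 0
    for target in calls.get(source, set()):
        if target in seen:
            return float('inf')  # found a cycle
        limit = max(limit, find_limit(target, seen | {target}))
    return frames[source] + limit

print(find_limit('a'))  # 112: a(16) + b(32) + c(64)

calls['c'] = {'b'}      # introduce recursion: b -> c -> b
print(find_limit('a'))  # inf, reported as infinite stack usage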

+ 331 - 0
scripts/structs.py

@@ -0,0 +1,331 @@
+#!/usr/bin/env python3
+#
+# Script to find struct sizes.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o']
+
+def collect(paths, **args):
+    decl_pattern = re.compile(
+        '^\s+(?P<no>[0-9]+)'
+            '\s+(?P<dir>[0-9]+)'
+            '\s+.*'
+            '\s+(?P<file>[^\s]+)$')
+    struct_pattern = re.compile(
+        '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
+            '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
+            '|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
+            '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
+
+    results = co.defaultdict(lambda: 0)
+    for path in paths:
+        # find decl, we want to filter by structs in .h files
+        decls = {}
+        # note objdump-tool may contain extra args
+        cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace')
+        for line in proc.stdout:
+            # find file numbers
+            m = decl_pattern.match(line)
+            if m:
+                decls[int(m.group('no'))] = m.group('file')
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+        # collect structs as we parse dwarf info
+        found = False
+        name = None
+        decl = None
+        size = None
+
+        # note objdump-tool may contain extra args
+        cmd = args['objdump_tool'] + ['--dwarf=info', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True,
+            errors='replace')
+        for line in proc.stdout:
+            # state machine here to find structs
+            m = struct_pattern.match(line)
+            if m:
+                if m.group('tag'):
+                    if (name is not None
+                            and decl is not None
+                            and size is not None):
+                        decl = decls.get(decl, '?')
+                        results[(decl, name)] = size
+                    found = (m.group('tag') == 'structure_type')
+                    name = None
+                    decl = None
+                    size = None
+                elif found and m.group('name'):
+                    name = m.group('name')
+                elif found and name and m.group('decl'):
+                    decl = int(m.group('decl'))
+                elif found and name and m.group('size'):
+                    size = int(m.group('size'))
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    flat_results = []
+    for (file, struct), size in results.items():
+        # map to source files
+        if args.get('build_dir'):
+            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # only include structs declared in header files in the current
+        # directory, ignore internal-only structs (these are represented
+        # in other measurements)
+        if not args.get('everything'):
+            if not file.endswith('.h'):
+                continue
+        # replace .o with .c, different scripts report .o/.c, we need to
+        # choose one if we want to deduplicate csv files
+        file = re.sub('\.o$', '.c', file)
+
+        flat_results.append((file, struct, size))
+
+    return flat_results
+
+
+def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .o files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with openio(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['name'],
+                    int(result['struct_size']))
+                for result in r
+                if result.get('struct_size') not in {None, ''}]
+
+    total = 0
+    for _, _, size in results:
+        total += size
+
+    # find previous results?
+    if args.get('diff'):
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                prev_results = [
+                    (   result['file'],
+                        result['name'],
+                        int(result['struct_size']))
+                    for result in r
+                    if result.get('struct_size') not in {None, ''}]
+        except FileNotFoundError:
+            prev_results = []
+
+        prev_total = 0
+        for _, _, size in prev_results:
+            prev_total += size
+
+    # write results to CSV
+    if args.get('output'):
+        merged_results = co.defaultdict(lambda: {})
+        other_fields = []
+
+        # merge?
+        if args.get('merge'):
+            try:
+                with openio(args['merge']) as f:
+                    r = csv.DictReader(f)
+                    for result in r:
+                        file = result.pop('file', '')
+                        struct = result.pop('name', '')
+                        result.pop('struct_size', None)
+                        merged_results[(file, struct)] = result
+                        other_fields = result.keys()
+            except FileNotFoundError:
+                pass
+
+        for file, struct, size in results:
+            merged_results[(file, struct)]['struct_size'] = size
+
+        with openio(args['output'], 'w') as f:
+            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
+            w.writeheader()
+            for (file, struct), result in sorted(merged_results.items()):
+                w.writerow({'file': file, 'name': struct, **result})
+
+    # print results
+    def dedup_entries(results, by='name'):
+        entries = co.defaultdict(lambda: 0)
+        for file, struct, size in results:
+            entry = (file if by == 'file' else struct)
+            entries[entry] += size
+        return entries
+
+    def diff_entries(olds, news):
+        diff = co.defaultdict(lambda: (0, 0, 0, 0))
+        for name, new in news.items():
+            diff[name] = (0, new, new, 1.0)
+        for name, old in olds.items():
+            _, new, _, _ = diff[name]
+            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+        return diff
+
+    def sorted_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1], x))
+        else:
+            return sorted(entries)
+
+    def sorted_diff_entries(entries):
+        if args.get('size_sort'):
+            return sorted(entries, key=lambda x: (-x[1][1], x))
+        elif args.get('reverse_size_sort'):
+            return sorted(entries, key=lambda x: (+x[1][1], x))
+        else:
+            return sorted(entries, key=lambda x: (-x[1][3], x))
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s' % (by, 'size'))
+        else:
+            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+    def print_entry(name, size):
+        print("%-36s %7d" % (name, size))
+
+    def print_diff_entry(name, old, new, diff, ratio):
+        print("%-36s %7s %7s %+7d%s" % (name,
+            old or "-",
+            new or "-",
+            diff,
+            ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, size in sorted_entries(entries.items()):
+                print_entry(name, size)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for old, _, _, _ in diff.values() if not old),
+                sum(1 for _, new, _, _ in diff.values() if not new)))
+            for name, (old, new, diff, ratio) in sorted_diff_entries(
+                    diff.items()):
+                if ratio or args.get('all'):
+                    print_diff_entry(name, old, new, diff, ratio)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            ratio = (0.0 if not prev_total and not total
+                else 1.0 if not prev_total
+                else (total-prev_total)/prev_total)
+            print_diff_entry('TOTAL',
+                prev_total, total,
+                total-prev_total,
+                ratio)
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Find struct sizes.")
+    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+        help="Description of where to find *.o files. May be a directory \
+            or a list of paths. Defaults to %r." % OBJ_PATHS)
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output commands that run behind the scenes.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    parser.add_argument('-o', '--output',
+        help="Specify CSV file to store results.")
+    parser.add_argument('-u', '--use',
+        help="Don't compile and find struct sizes, instead use this CSV file.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff struct size against.")
+    parser.add_argument('-m', '--merge',
+        help="Merge with an existing CSV file when writing to output.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all functions, not just the ones that changed.")
+    parser.add_argument('-A', '--everything', action='store_true',
+        help="Include builtin and libc specific symbols.")
+    parser.add_argument('-s', '--size-sort', action='store_true',
+        help="Sort by size.")
+    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
+        help="Sort by size, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
+        help="Show file-level struct sizes.")
+    parser.add_argument('-Y', '--summary', action='store_true',
+        help="Only show the total struct size.")
+    parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
+        help="Path to the objdump tool to use.")
+    parser.add_argument('--build-dir',
+        help="Specify the relative build directory. Used to map object files \
+            to the correct source files.")
+    sys.exit(main(**vars(parser.parse_args())))
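
structs.py drives everything from objdump's DWARF output: one pass over --dwarf=rawline maps file numbers to file names, and the state machine above then watches --dwarf=info for DW_TAG_structure_type entries and their name/decl-file/byte-size attributes. Feeding the same struct_pattern a few hand-written lines (approximating objdump output; values invented) shows what each branch captures:

import re

struct_pattern = re.compile(
    '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        '|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

sample = [
    ' <1><2d>: Abbrev Number: 2 (DW_TAG_structure_type)',
    '    <2e>   DW_AT_name        : lfs_config',
    '    <2f>   DW_AT_decl_file   : 3',
    '    <32>   DW_AT_byte_size   : 92',
]
for line in sample:
    m = struct_pattern.match(line)
    print({k: v for k, v in m.groupdict().items() if v})
# {'tag': 'structure_type'}
# {'name': 'lfs_config'}
# {'decl': '3'}
# {'size': '92'}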

+ 279 - 0
scripts/summary.py

@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+#
+# Script to summarize the outputs of other scripts. Operates on CSV files.
+#
+
+import functools as ft
+import collections as co
+import os
+import csv
+import re
+import math as m
+
+# displayable fields
+Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
+FIELDS = [
+    # name, parse, accumulate, key, fmt, repr, null, ratio
+    Field('code',
+        lambda r: int(r['code_size']),
+        sum,
+        lambda r: r,
+        '%7s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('data',
+        lambda r: int(r['data_size']),
+        sum,
+        lambda r: r,
+        '%7s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('stack',
+        lambda r: float(r['stack_limit']),
+        max,
+        lambda r: r,
+        '%7s',
+        lambda r: '∞' if m.isinf(r) else int(r),
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('structs',
+        lambda r: int(r['struct_size']),
+        sum,
+        lambda r: r,
+        '%8s',
+        lambda r: r,
+        '-',
+        lambda old, new: (new-old)/old),
+    Field('coverage',
+        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
+        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
+        lambda r: r[0]/r[1],
+        '%19s',
+        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
+        '%11s %7s' % ('-', '-'),
+        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
+]
+
+
+def main(**args):
+    def openio(path, mode='r'):
+        if path == '-':
+            if 'r' in mode:
+                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
+            else:
+                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
+        else:
+            return open(path, mode)
+
+    # find results
+    results = co.defaultdict(lambda: {})
+    for path in args.get('csv_paths', '-'):
+        try:
+            with openio(path) as f:
+                r = csv.DictReader(f)
+                for result in r:
+                    file = result.pop('file', '')
+                    name = result.pop('name', '')
+                    prev = results[(file, name)]
+                    for field in FIELDS:
+                        try:
+                            r = field.parse(result)
+                            if field.name in prev:
+                                results[(file, name)][field.name] = field.acc(
+                                    [prev[field.name], r])
+                            else:
+                                results[(file, name)][field.name] = r
+                        except (KeyError, ValueError):
+                            pass
+        except FileNotFoundError:
+            pass
+
+    # find fields
+    if args.get('all_fields'):
+        fields = FIELDS
+    elif args.get('fields') is not None:
+        fields_dict = {field.name: field for field in FIELDS}
+        fields = [fields_dict[f] for f in args['fields']]
+    else:
+        fields = []
+        for field in FIELDS:
+            if any(field.name in result for result in results.values()):
+                fields.append(field)
+
+    # find total for every field
+    total = {}
+    for result in results.values():
+        for field in fields:
+            if field.name in result and field.name in total:
+                total[field.name] = field.acc(
+                    [total[field.name], result[field.name]])
+            elif field.name in result:
+                total[field.name] = result[field.name]
+
+    # find previous results?
+    if args.get('diff'):
+        prev_results = co.defaultdict(lambda: {})
+        try:
+            with openio(args['diff']) as f:
+                r = csv.DictReader(f)
+                for result in r:
+                    file = result.pop('file', '')
+                    name = result.pop('name', '')
+                    prev = prev_results[(file, name)]
+                    for field in FIELDS:
+                        try:
+                            r = field.parse(result)
+                            if field.name in prev:
+                                prev_results[(file, name)][field.name] = field.acc(
+                                    [prev[field.name], r])
+                            else:
+                                prev_results[(file, name)][field.name] = r
+                        except (KeyError, ValueError):
+                            pass
+        except FileNotFoundError:
+            pass
+
+        prev_total = {}
+        for result in prev_results.values():
+            for field in fields:
+                if field.name in result and field.name in prev_total:
+                    prev_total[field.name] = field.acc(
+                        [prev_total[field.name], result[field.name]])
+                elif field.name in result:
+                    prev_total[field.name] = result[field.name]
+
+    # print results
+    def dedup_entries(results, by='name'):
+        entries = co.defaultdict(lambda: {})
+        for (file, func), result in results.items():
+            entry = (file if by == 'file' else func)
+            prev = entries[entry]
+            for field in fields:
+                if field.name in result and field.name in prev:
+                    entries[entry][field.name] = field.acc(
+                        [prev[field.name], result[field.name]])
+                elif field.name in result:
+                    entries[entry][field.name] = result[field.name]
+        return entries
+
+    def sorted_entries(entries):
+        if args.get('sort') is not None:
+            field = {field.name: field for field in FIELDS}[args['sort']]
+            return sorted(entries, key=lambda x: (
+                -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
+        elif args.get('reverse_sort') is not None:
+            field = {field.name: field for field in FIELDS}[args['reverse_sort']]
+            return sorted(entries, key=lambda x: (
+                +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
+        else:
+            return sorted(entries)
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s' % by, end='')
+            for field in fields:
+                print((' '+field.fmt) % field.name, end='')
+            print()
+        else:
+            print('%-36s' % by, end='')
+            for field in fields:
+                print((' '+field.fmt) % field.name, end='')
+                print(' %-9s' % '', end='')
+            print()
+
+    def print_entry(name, result):
+        print('%-36s' % name, end='')
+        for field in fields:
+            r = result.get(field.name)
+            if r is not None:
+                print((' '+field.fmt) % field.repr(r), end='')
+            else:
+                print((' '+field.fmt) % '-', end='')
+        print()
+
+    def print_diff_entry(name, old, new):
+        print('%-36s' % name, end='')
+        for field in fields:
+            n = new.get(field.name)
+            if n is not None:
+                print((' '+field.fmt) % field.repr(n), end='')
+            else:
+                print((' '+field.fmt) % '-', end='')
+            o = old.get(field.name)
+            ratio = (
+                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
+                    else +float('inf') if m.isinf(n or 0)
+                    else -float('inf') if m.isinf(o or 0)
+                    else 0.0 if not o and not n
+                    else +1.0 if not o
+                    else -1.0 if not n
+                    else field.ratio(o, n))
+            print(' %-9s' % (
+                '' if not ratio
+                    else '(+∞%)' if ratio > 0 and m.isinf(ratio)
+                    else '(-∞%)' if ratio < 0 and m.isinf(ratio)
+                    else '(%+.1f%%)' % (100*ratio)), end='')
+        print()
+
+    def print_entries(by='name'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, result in sorted_entries(entries.items()):
+                print_entry(name, result)
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for name in entries if name not in prev_entries),
+                sum(1 for name in prev_entries if name not in entries)))
+            for name, result in sorted_entries(entries.items()):
+                if args.get('all') or result != prev_entries.get(name, {}):
+                    print_diff_entry(name, prev_entries.get(name, {}), result)
+
+    def print_totals():
+        if not args.get('diff'):
+            print_entry('TOTAL', total)
+        else:
+            print_diff_entry('TOTAL', prev_total, total)
+
+    if args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='name')
+        print_totals()
+
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Summarize measurements")
+    parser.add_argument('csv_paths', nargs='*', default='-',
+        help="Input *.csv files. May be a directory or a list of paths. \
+            Results from all files are merged into a single summary.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff against.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all objects, not just the ones that changed.")
+    parser.add_argument('-e', '--all-fields', action='store_true',
+        help="Show all fields, even those with no results.")
+    parser.add_argument('-f', '--fields', type=lambda x: re.split(r'\s*,\s*', x),
+        help="Comma separated list of fields to print. By default all fields \
+            found in the CSV files are printed.")
+    parser.add_argument('-s', '--sort',
+        help="Sort by this field.")
+    parser.add_argument('-S', '--reverse-sort',
+        help="Sort by this field, but backwards.")
+    parser.add_argument('-F', '--files', action='store_true',
+        help="Show file-level totals instead of per-function results.")
+    parser.add_argument('-Y', '--summary', action='store_true',
+        help="Only show the totals.")
+    sys.exit(main(**vars(parser.parse_args())))
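
Taken together, the Field hooks above define a small merge algebra: parse turns a CSV row into a value, acc folds values that share a (file, name) key, key orders entries for sorting, repr formats a value for display, and ratio computes the relative change shown in diff mode. Below is a minimal standalone sketch of that flow using the coverage field; the Field namedtuple itself is defined earlier in the script, so its layout here is inferred from the call sites above (the name of the placeholder member, 'null', is a guess, since the visible code never reads it by attribute), and the CSV rows are hypothetical.

import collections as co
import functools as ft

# layout inferred from the call sites in summary.py
Field = co.namedtuple('Field', 'name parse acc key fmt repr null ratio')

coverage = Field('coverage',
    lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),  # parse
    lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),  # acc
    lambda r: r[0]/r[1],                                            # key
    '%19s',                                                         # fmt
    lambda r: '%d/%d (%.1f%%)' % (r[0], r[1], 100*r[0]/r[1]),       # repr
    '-',                                                            # null
    lambda old, new: (new[0]/new[1]) - (old[0]/old[1]))             # ratio

# two hypothetical CSV rows that share the same (file, name) key
rows = [{'coverage_hits': '4', 'coverage_count': '8'},
        {'coverage_hits': '7', 'coverage_count': '8'}]

# parse each row, then fold with acc, exactly like the merge loop in main
merged = coverage.acc([coverage.parse(r) for r in rows])
print(coverage.repr(merged))                               # 11/16 (68.8%)

# diff mode annotates each entry with ratio, e.g. old run vs new run
print('(%+.1f%%)' % (100*coverage.ratio((4, 8), merged)))  # (+18.8%)

With the parser shown above, the same merge runs from the command line via something like ./scripts/summary.py lfs.code.csv lfs.stack.csv -d prev.csv, where the file names are only illustrative.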

+ 6 - 3
scripts/test.py

@@ -784,10 +784,13 @@ def main(**args):
             stdout=sp.PIPE if not args.get('verbose') else None,
             stderr=sp.STDOUT if not args.get('verbose') else None,
             universal_newlines=True)
+        stdout = []
+        for line in proc.stdout:
+            stdout.append(line)
         proc.wait()
         if proc.returncode != 0:
             if not args.get('verbose'):
-                for line in proc.stdout:
+                for line in stdout:
                     sys.stdout.write(line)
             sys.exit(-1)
 
@@ -803,9 +806,9 @@ def main(**args):
             failure.case.test(failure=failure, **args)
             sys.exit(0)
 
-    print('tests passed %d/%d (%.2f%%)' % (passed, total,
+    print('tests passed %d/%d (%.1f%%)' % (passed, total,
         100*(passed/total if total else 1.0)))
-    print('tests failed %d/%d (%.2f%%)' % (failed, total,
+    print('tests failed %d/%d (%.1f%%)' % (failed, total,
         100*(failed/total if total else 1.0)))
     return 1 if failed > 0 else 0
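
The first hunk fixes a classic subprocess pitfall: with stdout=sp.PIPE, the child blocks as soon as the OS pipe buffer fills, so the parent must drain the pipe while the process runs, before calling wait(); buffering the lines also keeps them available for replay after the process has exited. The second hunk only tightens the pass/fail percentages from two decimal places to one. A standalone sketch of the same draining pattern, where the seq command is just a stand-in for a chatty test binary and not part of test.py:

import subprocess as sp
import sys

proc = sp.Popen(['seq', '1', '100000'],    # any verbose command works
    stdout=sp.PIPE,
    stderr=sp.STDOUT,
    universal_newlines=True)
stdout = []
for line in proc.stdout:                   # drain while the child runs
    stdout.append(line)
proc.wait()                                # safe: pipe is already empty
if proc.returncode != 0:
    sys.stdout.write(''.join(stdout))      # replay output only on failure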
 
 

Some files were not shown because too many files changed in this diff