Переглянути джерело

Merge pull request #516 from littlefs-project/ci-revamp

Adopt GitHub Actions, bring in a number of script/Makefile improvements
Christopher Haster 4 роки тому
батько
коміт
a2c744c8f8
11 змінених файлів з 1418 додано та 553 видалено
  1. 26 0
      .github/workflows/post-release.yml
  2. 215 0
      .github/workflows/release.yml
  3. 55 0
      .github/workflows/status.yml
  4. 446 0
      .github/workflows/test.yml
  5. 0 477
      .travis.yml
  6. 65 18
      Makefile
  7. 2 2
      lfs.c
  8. 10 0
      lfs_util.h
  9. 214 0
      scripts/code.py
  10. 254 0
      scripts/coverage.py
  11. 131 56
      scripts/test.py

+ 26 - 0
.github/workflows/post-release.yml

@@ -0,0 +1,26 @@
+name: post-release
+on:
+  release:
+    branches: [master]
+    types: [released]
+
+jobs:
+  post-release:
+    runs-on: ubuntu-18.04
+    steps:
+      # trigger post-release in dependency repo, this indirection allows the
+      # dependency repo to be updated often without affecting this repo. At
+      # the time of this comment, the dependency repo is responsible for
+      # creating PRs for other dependent repos post-release.
+      - name: trigger-post-release
+        continue-on-error: true
+        run: |
+          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
+            -d "$(jq -n '{
+              event_type: "post-release",
+              client_payload: {
+                repo: env.GITHUB_REPOSITORY,
+                version: "${{github.event.release.tag_name}}"}}' \
+              | tee /dev/stderr)"
+

+ 215 - 0
.github/workflows/release.yml

@@ -0,0 +1,215 @@
+name: release
+on:
+  workflow_run:
+    workflows: [test]
+    branches: [master]
+    types: [completed]
+
+jobs:
+  release:
+    runs-on: ubuntu-18.04
+
+    # need to manually check for a couple things
+    # - tests passed?
+    # - we are the most recent commit on master?
+    if: ${{github.event.workflow_run.conclusion == 'success' &&
+      github.event.workflow_run.head_sha == github.sha}}
+
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          ref: ${{github.event.workflow_run.head_sha}}
+          # need workflow access since we push branches
+          # containing workflows
+          token: ${{secrets.BOT_TOKEN}}
+          # need all tags
+          fetch-depth: 0
+
+      # try to get results from tests
+      - uses: dawidd6/action-download-artifact@v2
+        continue-on-error: true
+        with:
+          workflow: ${{github.event.workflow_run.name}}
+          run_id: ${{github.event.workflow_run.id}}
+          name: results
+          path: results
+
+      - name: find-version
+        run: |
+          # rip version from lfs.h
+          LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \
+            | awk '{print $3}')"
+          LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))"
+          LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >>  0)))"
+
+          # find a new patch version based on what we find in our tags
+          LFS_VERSION_PATCH="$( \
+            ( git describe --tags --abbrev=0 \
+                --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \
+              || echo 'v0.0.-1' ) \
+            | awk -F '.' '{print $3+1}')"
+
+          # found new version
+          LFS_VERSION="v$LFS_VERSION_MAJOR`
+            `.$LFS_VERSION_MINOR`
+            `.$LFS_VERSION_PATCH"
+          echo "LFS_VERSION=$LFS_VERSION"
+          echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV
+          echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV
+          echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV
+          echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV
+
+      # try to find previous version?
+      - name: find-prev-version
+        continue-on-error: true
+        run: |
+          LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')"
+          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION"
+          echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV
+
+      # try to find results from tests
+      - name: collect-results
+        run: |
+          # previous results to compare against?
+          [ -n "$LFS_PREV_VERSION" ] && curl -sS \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/`
+              `status/$LFS_PREV_VERSION" \
+            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
+            >> prev-results.json \
+            || true
+
+          # unfortunately these each have their own format
+          [ -e results/code-thumb.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / code").description
+                  | capture("Code size is (?<result>[0-9]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb.csv -s | awk '
+              NR==2 {printf "Code size,%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-readonly.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / code (readonly)").description
+                  | capture("Code size is (?<result>[0-9]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk '
+              NR==2 {printf "Code size<br/>(readonly),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-threadsafe.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / code (threadsafe)").description
+                  | capture("Code size is (?<result>[0-9]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk '
+              NR==2 {printf "Code size<br/>(threadsafe),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-migrate.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / code (migrate)").description
+                  | capture("Code size is (?<result>[0-9]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk '
+              NR==2 {printf "Code size<br/>(migrate),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/code-thumb-error-asserts.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / code (error-asserts)").description
+                  | capture("Code size is (?<result>[0-9]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/code.py -u results/code-thumb-error-asserts.csv -s | awk '
+              NR==2 {printf "Code size<br/>(error-asserts),%d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+          [ -e results/coverage.csv ] && ( \
+            export PREV="$(jq -re '
+                  select(.context == "results / coverage").description
+                  | capture("Coverage is (?<result>[0-9\\.]+)").result' \
+                prev-results.json || echo 0)"
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
+              NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}
+              NR==2 {printf "\n"}' \
+            >> results.csv)
+
+          # transpose to GitHub table
+          [ -e results.csv ] || exit 0
+          awk -F ',' '
+            {label[NR]=$1; value[NR]=$2}
+            END {
+              for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
+              for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n";
+              for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
+            results.csv > results.txt
+          echo "RESULTS:"
+          cat results.txt
+
+      # find changes from history
+      - name: collect-changes
+        run: |
+          [ -n "$LFS_PREV_VERSION" ] || exit 0
+          # use explicit link to github commit so that release notes can
+          # be copied elsewhere
+          git log "$LFS_PREV_VERSION.." \
+            --grep='^Merge' --invert-grep \
+            --format="format:[\`%h\`](`
+              `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \
+            > changes.txt
+          echo "CHANGES:"
+          cat changes.txt
+
+      # create and update major branches (vN and vN-prefix)
+      - name: create-major-branches
+        run: |
+          # create major branch
+          git branch "v$LFS_VERSION_MAJOR" HEAD
+
+          # create major prefix branch
+          git config user.name ${{secrets.BOT_USER}}
+          git config user.email ${{secrets.BOT_EMAIL}}
+          git fetch "https://github.com/$GITHUB_REPOSITORY.git" \
+            "v$LFS_VERSION_MAJOR-prefix" || true
+          ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR"
+          git branch "v$LFS_VERSION_MAJOR-prefix" $( \
+            git commit-tree $(git write-tree) \
+              $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
+              -p HEAD \
+              -m "Generated v$LFS_VERSION_MAJOR prefixes")
+          git reset --hard
+
+          # push!
+          git push --atomic origin \
+            "v$LFS_VERSION_MAJOR" \
+            "v$LFS_VERSION_MAJOR-prefix"
+
+      # build release notes
+      - name: create-release
+        run: |
+          # create release and patch version tag (vN.N.N)
+          # only draft if not a patch release
+          [ -e results.txt ] && export RESULTS="$(cat results.txt)"
+          [ -e changes.txt ] && export CHANGES="$(cat changes.txt)"
+          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \
+            -d "$(jq -n '{
+              tag_name: env.LFS_VERSION,
+              name: env.LFS_VERSION | rtrimstr(".0"),
+              target_commitish: "${{github.event.workflow_run.head_sha}}",
+              draft: env.LFS_VERSION | endswith(".0"),
+              body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \
+              | tee /dev/stderr)"
+

+ 55 - 0
.github/workflows/status.yml

@@ -0,0 +1,55 @@
+name: status
+on:
+  workflow_run:
+    workflows: [test]
+    types: [completed]
+
+jobs:
+  status:
+    runs-on: ubuntu-18.04
+    steps:
+      # custom statuses?
+      - uses: dawidd6/action-download-artifact@v2
+        continue-on-error: true
+        with:
+          workflow: ${{github.event.workflow_run.name}}
+          run_id: ${{github.event.workflow_run.id}}
+          name: status
+          path: status
+      - name: update-status
+        continue-on-error: true
+        run: |
+          ls status
+          for s in $(shopt -s nullglob ; echo status/*.json)
+          do
+            # parse requested status
+            export STATE="$(jq -er '.state' $s)"
+            export CONTEXT="$(jq -er '.context' $s)"
+            export DESCRIPTION="$(jq -er '.description' $s)"
+            # help lookup URL for job/steps because GitHub makes
+            # it VERY HARD to link to specific jobs
+            export TARGET_URL="$(
+              jq -er '.target_url // empty' $s || (
+                export TARGET_JOB="$(jq -er '.target_job' $s)"
+                export TARGET_STEP="$(jq -er '.target_step // ""' $s)"
+                curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+                  "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/`
+                    `${{github.event.workflow_run.id}}/jobs" \
+                  | jq -er '.jobs[]
+                    | select(.name == env.TARGET_JOB)
+                    | .html_url
+                      + "?check_suite_focus=true"
+                      + ((.steps[]
+                        | select(.name == env.TARGET_STEP)
+                        | "#step:\(.number):0") // "")'))"
+            # update status
+            curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/`
+                `${{github.event.workflow_run.head_sha}}" \
+              -d "$(jq -n '{
+                state: env.STATE,
+                context: env.CONTEXT,
+                description: env.DESCRIPTION,
+                target_url: env.TARGET_URL}' \
+                | tee /dev/stderr)"
+          done

+ 446 - 0
.github/workflows/test.yml

@@ -0,0 +1,446 @@
+name: test
+on: [push, pull_request]
+
+env:
+  CFLAGS: -Werror
+  MAKEFLAGS: -j
+
+jobs:
+  # run tests
+  test:
+    runs-on: ubuntu-18.04
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [x86_64, thumb, mips, powerpc]
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default?
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip lcov
+          sudo pip3 install toml
+          gcc --version
+
+          # setup a ram-backed disk to speed up reentrant tests
+          mkdir disks
+          sudo mount -t tmpfs -o size=100m tmpfs disks
+          TESTFLAGS="$TESTFLAGS --disk=disks/disk"
+
+          # collect coverage
+          mkdir -p coverage
+          TESTFLAGS="$TESTFLAGS --coverage=`
+            `coverage/${{github.job}}-${{matrix.arch}}.info"
+
+          echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
+
+      # cross-compile with ARM Thumb (32-bit, little-endian)
+      - name: install-thumb
+        if: ${{matrix.arch == 'thumb'}}
+        run: |
+          sudo apt-get install -qq \
+            gcc-arm-linux-gnueabi \
+            libc6-dev-armel-cross \
+            qemu-user
+          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
+          echo "EXEC=qemu-arm" >> $GITHUB_ENV
+          arm-linux-gnueabi-gcc --version
+          qemu-arm -version
+      # cross-compile with MIPS (32-bit, big-endian)
+      - name: install-mips
+        if: ${{matrix.arch == 'mips'}}
+        run: |
+          sudo apt-get install -qq \
+            gcc-mips-linux-gnu \
+            libc6-dev-mips-cross \
+            qemu-user
+          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
+          echo "EXEC=qemu-mips" >> $GITHUB_ENV
+          mips-linux-gnu-gcc --version
+          qemu-mips -version
+      # cross-compile with PowerPC (32-bit, big-endian)
+      - name: install-powerpc
+        if: ${{matrix.arch == 'powerpc'}}
+        run: |
+          sudo apt-get install -qq \
+            gcc-powerpc-linux-gnu \
+            libc6-dev-powerpc-cross \
+            qemu-user
+          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
+          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
+          powerpc-linux-gnu-gcc --version
+          qemu-ppc -version
+
+      # make sure example can at least compile
+      - name: test-example
+        run: |
+          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
+          make all CFLAGS+=" \
+            -Duser_provided_block_device_read=NULL \
+            -Duser_provided_block_device_prog=NULL \
+            -Duser_provided_block_device_erase=NULL \
+            -Duser_provided_block_device_sync=NULL \
+            -include stdio.h"
+          rm test.c
+
+      # test configurations
+      # normal+reentrant tests
+      - name: test-default
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk"
+      # NOR flash: read/prog = 1 block = 4KiB
+      - name: test-nor
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
+      # SD/eMMC: read/prog = 512 block = 512
+      - name: test-emmc
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
+      # NAND flash: read/prog = 4KiB block = 32KiB
+      - name: test-nand
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
+      # other extreme geometries that are useful for various corner cases
+      - name: test-no-intrinsics
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_NO_INTRINSICS"
+      - name: test-byte-writes
+        # it just takes too long to test byte-level writes when in qemu,
+        # should be plenty covered by the other configurations
+        if: ${{matrix.arch == 'x86_64'}}
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
+      - name: test-block-cycles
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_BLOCK_CYCLES=1"
+      - name: test-odd-block-count
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
+      - name: test-odd-block-size
+        run: |
+          make clean
+          make test TESTFLAGS+="-nrk \
+            -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
+
+      # upload coverage for later coverage
+      - name: upload-coverage
+        uses: actions/upload-artifact@v2
+        with:
+          name: coverage
+          path: coverage
+          retention-days: 1
+
+      # update results
+      - name: results-code
+        run: |
+          mkdir -p results
+          make clean
+          make code \
+            CFLAGS+=" \
+              -DLFS_NO_ASSERT \
+              -DLFS_NO_DEBUG \
+              -DLFS_NO_WARN \
+              -DLFS_NO_ERROR" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}.csv"
+      - name: results-code-readonly
+        run: |
+          mkdir -p results
+          make clean
+          make code \
+            CFLAGS+=" \
+              -DLFS_NO_ASSERT \
+              -DLFS_NO_DEBUG \
+              -DLFS_NO_WARN \
+              -DLFS_NO_ERROR \
+              -DLFS_READONLY" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
+      - name: results-code-threadsafe
+        run: |
+          mkdir -p results
+          make clean
+          make code \
+            CFLAGS+=" \
+              -DLFS_NO_ASSERT \
+              -DLFS_NO_DEBUG \
+              -DLFS_NO_WARN \
+              -DLFS_NO_ERROR \
+              -DLFS_THREADSAFE" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
+      - name: results-code-migrate
+        run: |
+          mkdir -p results
+          make clean
+          make code \
+            CFLAGS+=" \
+              -DLFS_NO_ASSERT \
+              -DLFS_NO_DEBUG \
+              -DLFS_NO_WARN \
+              -DLFS_NO_ERROR \
+              -DLFS_MIGRATE" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
+      - name: results-code-error-asserts
+        run: |
+          mkdir -p results
+          make clean
+          make code \
+            CFLAGS+=" \
+              -DLFS_NO_DEBUG \
+              -DLFS_NO_WARN \
+              -DLFS_NO_ERROR \
+              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
+            CODEFLAGS+="-o results/code-${{matrix.arch}}-error-asserts.csv"
+      - name: upload-results
+        uses: actions/upload-artifact@v2
+        with:
+          name: results
+          path: results
+      # limit reporting to Thumb, otherwise there would be too many numbers
+      # flying around for the results to be easily readable
+      - name: collect-status
+        if: ${{matrix.arch == 'thumb'}}
+        run: |
+          mkdir -p status
+          for f in $(shopt -s nullglob ; echo results/code*.csv)
+          do
+            export STEP="results-code$(
+              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
+            export CONTEXT="results / code$(
+              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
+            export PREV="$(curl -sS \
+              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+                | select(.context == env.CONTEXT).description
+                | capture("Code size is (?<result>[0-9]+)").result' \
+              || echo 0)"
+            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
+              NR==2 {printf "Code size is %d B",$2}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
+            jq -n '{
+              state: "success",
+              context: env.CONTEXT,
+              description: env.DESCRIPTION,
+              target_job: "${{github.job}} (${{matrix.arch}})",
+              target_step: env.STEP}' \
+              | tee status/code$(
+                echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
+          done
+      - name: upload-status
+        if: ${{matrix.arch == 'thumb'}}
+        uses: actions/upload-artifact@v2
+        with:
+          name: status
+          path: status
+          retention-days: 1
+
+  # run under Valgrind to check for memory errors
+  valgrind:
+    runs-on: ubuntu-18.04
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default?
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip
+          sudo pip3 install toml
+      - name: install-valgrind
+        run: |
+          sudo apt-get update -qq
+          sudo apt-get install -qq valgrind
+          valgrind --version
+      # normal tests, we don't need to test all geometries
+      - name: test-valgrind
+        run: make test TESTFLAGS+="-k --valgrind"
+
+  # self-host with littlefs-fuse for a fuzz-like test
+  fuse:
+    runs-on: ubuntu-18.04
+    if: ${{!endsWith(github.ref, '-prefix')}}
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default?
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip libfuse-dev
+          sudo pip3 install toml
+          fusermount -V
+          gcc --version
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v2
+          path: littlefs-fuse
+      - name: setup
+        run: |
+          # copy our new version into littlefs-fuse
+          rm -rf littlefs-fuse/littlefs/*
+          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
+
+          # setup disk for littlefs-fuse
+          mkdir mount
+          sudo chmod a+rw /dev/loop0
+          dd if=/dev/zero bs=512 count=128K of=disk
+          losetup /dev/loop0 disk
+      - name: test
+        run: |
+          # self-host test
+          make -C littlefs-fuse
+
+          littlefs-fuse/lfs --format /dev/loop0
+          littlefs-fuse/lfs /dev/loop0 mount
+
+          ls mount
+          mkdir mount/littlefs
+          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+  # test migration using littlefs-fuse
+  migrate:
+    runs-on: ubuntu-18.04
+    if: ${{!endsWith(github.ref, '-prefix')}}
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default?
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip libfuse-dev
+          sudo pip3 install toml
+          fusermount -V
+          gcc --version
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v2
+          path: v2
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v1
+          path: v1
+      - name: setup
+        run: |
+          # copy our new version into littlefs-fuse
+          rm -rf v2/littlefs/*
+          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
+
+          # setup disk for littlefs-fuse
+          mkdir mount
+          sudo chmod a+rw /dev/loop0
+          dd if=/dev/zero bs=512 count=128K of=disk
+          losetup /dev/loop0 disk
+      - name: test
+        run: |
+          # compile v1 and v2
+          make -C v1
+          make -C v2
+
+          # run self-host test with v1
+          v1/lfs --format /dev/loop0
+          v1/lfs /dev/loop0 mount
+
+          ls mount
+          mkdir mount/littlefs
+          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+          # attempt to migrate
+          cd ../..
+          fusermount -u mount
+
+          v2/lfs --migrate /dev/loop0
+          v2/lfs /dev/loop0 mount
+
+          # run self-host test with v2 right where we left off
+          ls mount
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+  # collect coverage info
+  coverage:
+    runs-on: ubuntu-18.04
+    needs: [test]
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip lcov
+          sudo pip3 install toml
+      # yes we continue-on-error nearly every step, continue-on-error
+      # at job level apparently still marks a job as failed, which isn't
+      # what we want
+      - uses: actions/download-artifact@v2
+        continue-on-error: true
+        with:
+          name: coverage
+          path: coverage
+      - name: results-coverage
+        continue-on-error: true
+        run: |
+          mkdir -p results
+          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
+            -o results/coverage.info
+          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
+      - name: upload-results
+        uses: actions/upload-artifact@v2
+        with:
+          name: results
+          path: results
+      - name: collect-status
+        run: |
+          mkdir -p status
+          [ -e results/coverage.csv ] || exit 0
+          export STEP="results-coverage"
+          export CONTEXT="results / coverage"
+          export PREV="$(curl -sS \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
+              | select(.context == env.CONTEXT).description
+              | capture("Coverage is (?<result>[0-9\\.]+)").result' \
+            || echo 0)"
+          export DESCRIPTION="$(
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
+              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
+          jq -n '{
+            state: "success",
+            context: env.CONTEXT,
+            description: env.DESCRIPTION,
+            target_job: "${{github.job}}",
+            target_step: env.STEP}' \
+            | tee status/coverage.json
+      - name: upload-status
+        uses: actions/upload-artifact@v2
+        with:
+          name: status
+          path: status
+          retention-days: 1

+ 0 - 477
.travis.yml

@@ -1,477 +0,0 @@
-# environment variables
-env:
-  global:
-    - CFLAGS=-Werror
-    - MAKEFLAGS=-j
-
-# cache installation dirs
-cache:
-  pip: true
-  directories:
-    - $HOME/.cache/apt
-
-# common installation
-_: &install-common
-  # need toml, also pip3 isn't installed by default?
-  - sudo apt-get install python3 python3-pip
-  - sudo pip3 install toml
-  # setup a ram-backed disk to speed up reentrant tests
-  - mkdir disks
-  - sudo mount -t tmpfs -o size=100m tmpfs disks
-  - export TFLAGS="$TFLAGS --disk=disks/disk"
-
-# test cases
-_: &test-example
-  # make sure example can at least compile
-  - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
-    make all CFLAGS+="
-        -Duser_provided_block_device_read=NULL
-        -Duser_provided_block_device_prog=NULL
-        -Duser_provided_block_device_erase=NULL
-        -Duser_provided_block_device_sync=NULL
-        -include stdio.h"
-# default tests
-_: &test-default
-  # normal+reentrant tests
-  - make test TFLAGS+="-nrk"
-# common real-life geometries
-_: &test-nor
-  # NOR flash: read/prog = 1 block = 4KiB
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
-_: &test-emmc
-  # eMMC: read/prog = 512 block = 512
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
-_: &test-nand
-  # NAND flash: read/prog = 4KiB block = 32KiB
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
-# other extreme geometries that are useful for testing various corner cases
-_: &test-no-intrinsics
-  - make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
-_: &test-no-inline
-  - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
-_: &test-byte-writes
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
-_: &test-block-cycles
-  - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
-_: &test-odd-block-count
-  - make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
-_: &test-odd-block-size
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
-
-# report size 
-_: &report-size
-  # compile and find the code size with the smallest configuration
-  - make -j1 clean size
-        OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
-        CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
-        | tee sizes
-  # update status if we succeeded, compare with master if possible
-  - |
-    if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
-    then
-        CURR=$(tail -n1 sizes | awk '{print $1}')
-        PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
-            | jq -re "select(.sha != \"$TRAVIS_COMMIT\")
-                | .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
-                | capture(\"code size is (?<size>[0-9]+)\").size" \
-            || echo 0)
-  
-        STATUS="Passed, code size is ${CURR}B"
-        if [ "$PREV" -ne 0 ]
-        then
-            STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
-        fi
-    fi
-
-# stage control
-stages:
-  - name: test
-  - name: deploy
-    if: branch = master AND type = push
-
-# job control
-jobs:
-  # native testing
-  - &x86
-    stage: test
-    env:
-      - NAME=littlefs-x86
-    install: *install-common
-    script: [*test-example, *report-size]
-  - {<<: *x86, script: [*test-default,          *report-size]}
-  - {<<: *x86, script: [*test-nor,              *report-size]}
-  - {<<: *x86, script: [*test-emmc,             *report-size]}
-  - {<<: *x86, script: [*test-nand,             *report-size]}
-  - {<<: *x86, script: [*test-no-intrinsics,    *report-size]}
-  - {<<: *x86, script: [*test-no-inline,        *report-size]}
-  - {<<: *x86, script: [*test-byte-writes,      *report-size]}
-  - {<<: *x86, script: [*test-block-cycles,     *report-size]}
-  - {<<: *x86, script: [*test-odd-block-count,  *report-size]}
-  - {<<: *x86, script: [*test-odd-block-size,   *report-size]}
-
-  # cross-compile with ARM (thumb mode)
-  - &arm
-    stage: test
-    env:
-      - NAME=littlefs-arm
-      - CC="arm-linux-gnueabi-gcc --static -mthumb"
-      - TFLAGS="$TFLAGS --exec=qemu-arm"
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-arm-linux-gnueabi
-            libc6-dev-armel-cross
-            qemu-user
-      - arm-linux-gnueabi-gcc --version
-      - qemu-arm -version
-    script: [*test-example, *report-size]
-  - {<<: *arm, script: [*test-default,          *report-size]}
-  - {<<: *arm, script: [*test-nor,              *report-size]}
-  - {<<: *arm, script: [*test-emmc,             *report-size]}
-  - {<<: *arm, script: [*test-nand,             *report-size]}
-  - {<<: *arm, script: [*test-no-intrinsics,    *report-size]}
-  - {<<: *arm, script: [*test-no-inline,        *report-size]}
-  # it just takes way to long to run byte-level writes in qemu,
-  # note this is still tested in the native tests
-  #- {<<: *arm, script: [*test-byte-writes,      *report-size]}
-  - {<<: *arm, script: [*test-block-cycles,     *report-size]}
-  - {<<: *arm, script: [*test-odd-block-count,  *report-size]}
-  - {<<: *arm, script: [*test-odd-block-size,   *report-size]}
-
-  # cross-compile with MIPS
-  - &mips
-    stage: test
-    env:
-      - NAME=littlefs-mips
-      - CC="mips-linux-gnu-gcc --static"
-      - TFLAGS="$TFLAGS --exec=qemu-mips"
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-mips-linux-gnu
-            libc6-dev-mips-cross
-            qemu-user
-      - mips-linux-gnu-gcc --version
-      - qemu-mips -version
-    script: [*test-example, *report-size]
-  - {<<: *mips, script: [*test-default,          *report-size]}
-  - {<<: *mips, script: [*test-nor,              *report-size]}
-  - {<<: *mips, script: [*test-emmc,             *report-size]}
-  - {<<: *mips, script: [*test-nand,             *report-size]}
-  - {<<: *mips, script: [*test-no-intrinsics,    *report-size]}
-  - {<<: *mips, script: [*test-no-inline,        *report-size]}
-  # it just takes way to long to run byte-level writes in qemu,
-  # note this is still tested in the native tests
-  #- {<<: *mips, script: [*test-byte-writes,      *report-size]}
-  - {<<: *mips, script: [*test-block-cycles,     *report-size]}
-  - {<<: *mips, script: [*test-odd-block-count,  *report-size]}
-  - {<<: *mips, script: [*test-odd-block-size,   *report-size]}
-
-  # cross-compile with PowerPC
-  - &powerpc
-    stage: test
-    env:
-      - NAME=littlefs-powerpc
-      - CC="powerpc-linux-gnu-gcc --static"
-      - TFLAGS="$TFLAGS --exec=qemu-ppc"
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-powerpc-linux-gnu
-            libc6-dev-powerpc-cross
-            qemu-user
-      - powerpc-linux-gnu-gcc --version
-      - qemu-ppc -version
-    script: [*test-example, *report-size]
-  - {<<: *powerpc, script: [*test-default,          *report-size]}
-  - {<<: *powerpc, script: [*test-nor,              *report-size]}
-  - {<<: *powerpc, script: [*test-emmc,             *report-size]}
-  - {<<: *powerpc, script: [*test-nand,             *report-size]}
-  - {<<: *powerpc, script: [*test-no-intrinsics,    *report-size]}
-  - {<<: *powerpc, script: [*test-no-inline,        *report-size]}
-  # it just takes way to long to run byte-level writes in qemu,
-  # note this is still tested in the native tests
-  #- {<<: *powerpc, script: [*test-byte-writes,      *report-size]}
-  - {<<: *powerpc, script: [*test-block-cycles,     *report-size]}
-  - {<<: *powerpc, script: [*test-odd-block-count,  *report-size]}
-  - {<<: *powerpc, script: [*test-odd-block-size,   *report-size]}
-
-  # test under valgrind, checking for memory errors
-  - &valgrind
-    stage: test
-    env:
-      - NAME=littlefs-valgrind
-    install:
-      - *install-common
-      - sudo apt-get install valgrind
-      - valgrind --version
-    script:
-      - make test TFLAGS+="-k --valgrind"
-
-  # test compilation in read-only mode
-  - stage: test
-    env:
-      - NAME=littlefs-readonly
-      - CC="arm-linux-gnueabi-gcc --static -mthumb"
-      - CFLAGS="-Werror -DLFS_READONLY"
-    if: branch !~ -prefix$
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-arm-linux-gnueabi
-            libc6-dev-armel-cross
-      - arm-linux-gnueabi-gcc --version
-    # report-size will compile littlefs and report the size
-    script: [*report-size]
-
-  # test compilation with asserts that return -1
-  - stage: test
-    env:
-      - NAME=littlefs-assert-return
-      - CC="arm-linux-gnueabi-gcc --static -mthumb"
-      - CFLAGS="-Werror -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
-    if: branch !~ -prefix$
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-arm-linux-gnueabi
-            libc6-dev-armel-cross
-      - arm-linux-gnueabi-gcc --version
-    # report-size will compile littlefs and report the size
-    script: [*report-size]
-
-  # test compilation in thread-safe mode
-  - stage: test
-    env:
-      - NAME=littlefs-threadsafe
-      - CC="arm-linux-gnueabi-gcc --static -mthumb"
-      - CFLAGS="-Werror -DLFS_THREADSAFE"
-    if: branch !~ -prefix$
-    install:
-      - *install-common
-      - sudo apt-get install
-            gcc-arm-linux-gnueabi
-            libc6-dev-armel-cross
-      - arm-linux-gnueabi-gcc --version
-    # report-size will compile littlefs and report the size
-    script: [*report-size]
-
-  # self-host with littlefs-fuse for fuzz test
-  - stage: test
-    env:
-      - NAME=littlefs-fuse
-    if: branch !~ -prefix$
-    install:
-      - *install-common
-      - sudo apt-get install libfuse-dev
-      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2
-      - fusermount -V
-      - gcc --version
-
-      # setup disk for littlefs-fuse
-      - rm -rf littlefs-fuse/littlefs/*
-      - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
-
-      - mkdir mount
-      - sudo chmod a+rw /dev/loop0
-      - dd if=/dev/zero bs=512 count=128K of=disk
-      - losetup /dev/loop0 disk
-    script:
-      # self-host test
-      - make -C littlefs-fuse
-
-      - littlefs-fuse/lfs --format /dev/loop0
-      - littlefs-fuse/lfs /dev/loop0 mount
-
-      - ls mount
-      - mkdir mount/littlefs
-      - cp -r $(git ls-tree --name-only HEAD) mount/littlefs
-      - cd mount/littlefs
-      - stat .
-      - ls -flh
-      - make -B test
-
-  # test migration using littlefs-fuse
-  - stage: test
-    env:
-      - NAME=littlefs-migration
-    if: branch !~ -prefix$
-    install:
-      - *install-common
-      - sudo apt-get install libfuse-dev
-      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2
-      - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1
-      - fusermount -V
-      - gcc --version
-
-      # setup disk for littlefs-fuse
-      - rm -rf v2/littlefs/*
-      - cp -r $(git ls-tree --name-only HEAD) v2/littlefs
-
-      - mkdir mount
-      - sudo chmod a+rw /dev/loop0
-      - dd if=/dev/zero bs=512 count=128K of=disk
-      - losetup /dev/loop0 disk
-    script:
-      # compile v1 and v2
-      - make -C v1
-      - make -C v2
-
-      # run self-host test with v1
-      - v1/lfs --format /dev/loop0
-      - v1/lfs /dev/loop0 mount
-
-      - ls mount
-      - mkdir mount/littlefs
-      - cp -r $(git ls-tree --name-only HEAD) mount/littlefs
-      - cd mount/littlefs
-      - stat .
-      - ls -flh
-      - make -B test
-
-      # attempt to migrate
-      - cd ../..
-      - fusermount -u mount
-
-      - v2/lfs --migrate /dev/loop0
-      - v2/lfs /dev/loop0 mount
-
-      # run self-host test with v2 right where we left off
-      - ls mount
-      - cd mount/littlefs
-      - stat .
-      - ls -flh
-      - make -B test
-
-  # automatically create releases
-  - stage: deploy
-    env:
-      - NAME=deploy
-    script:
-      - |
-        bash << 'SCRIPT'
-        set -ev
-        # Find version defined in lfs.h
-        LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3)
-        LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16)))
-        LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >>  0)))
-        # Grab latests patch from repo tags, default to 0, needs finagling
-        # to get past github's pagination api
-        PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.
-        PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \
-            | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \
-            || echo $PREV_URL)
-        LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \
-            | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g")
-                .captures[].string | tonumber) | max + 1' \
-            || echo 0)
-        # We have our new version
-        LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH"
-        echo "VERSION $LFS_VERSION"
-        # Check that we're the most recent commit
-        CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \
-            | jq -re '.sha')
-        [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0
-        # Create major branch
-        git branch v$LFS_VERSION_MAJOR HEAD
-        # Create major prefix branch
-        git config user.name "geky bot"
-        git config user.email "bot@geky.net"
-        git fetch https://github.com/$TRAVIS_REPO_SLUG.git \
-            --depth=50 v$LFS_VERSION_MAJOR-prefix || true
-        ./scripts/prefix.py lfs$LFS_VERSION_MAJOR
-        git branch v$LFS_VERSION_MAJOR-prefix $( \
-            git commit-tree $(git write-tree) \
-                $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \
-                -p HEAD \
-                -m "Generated v$LFS_VERSION_MAJOR prefixes")
-        git reset --hard
-        # Update major version branches (vN and vN-prefix)
-        git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \
-            v$LFS_VERSION_MAJOR \
-            v$LFS_VERSION_MAJOR-prefix
-        # Build release notes
-        PREV=$(git tag --sort=-v:refname -l "v*" | head -1)
-        if [ ! -z "$PREV" ]
-        then
-            echo "PREV $PREV"
-            CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep)
-            printf "CHANGES\n%s\n\n" "$CHANGES"
-        fi
-        case ${GEKY_BOT_DRAFT:-minor} in
-            true)  DRAFT=true ;;
-            minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;;
-            false) DRAFT=false ;;
-        esac
-        # Create the release and patch version tag (vN.N.N)
-        curl -f -u "$GEKY_BOT_RELEASES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \
-            -d "{
-                \"tag_name\": \"$LFS_VERSION\",
-                \"name\": \"${LFS_VERSION%.0}\",
-                \"target_commitish\": \"$TRAVIS_COMMIT\",
-                \"draft\": $DRAFT,
-                \"body\": $(jq -sR '.' <<< "$CHANGES")
-            }" #"
-        SCRIPT
-
-# manage statuses
-before_install:
-  - |
-    # don't clobber other (not us) failures
-    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-        | jq -e ".statuses[] | select(
-            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
-            .state == \"failure\" and
-            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
-    then
-        curl -u "$GEKY_BOT_STATUSES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-            -d "{
-                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
-                \"state\": \"pending\",
-                \"description\": \"${STATUS:-In progress}\",
-                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
-            }"
-    fi
-
-after_failure:
-  - |
-    # don't clobber other (not us) failures
-    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-        | jq -e ".statuses[] | select(
-            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
-            .state == \"failure\" and
-            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
-    then
-        curl -u "$GEKY_BOT_STATUSES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-            -d "{
-                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
-                \"state\": \"failure\",
-                \"description\": \"${STATUS:-Failed}\",
-                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
-            }"
-    fi
-
-after_success:
-  - |
-    # don't clobber other (not us) failures
-    # only update if we were last job to mark in progress,
-    # this isn't perfect but is probably good enough
-    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-        | jq -e ".statuses[] | select(
-            .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
-            (.state == \"failure\" or .state == \"pending\") and
-            (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
-    then
-        curl -u "$GEKY_BOT_STATUSES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-            -d "{
-                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
-                \"state\": \"success\",
-                \"description\": \"${STATUS:-Passed}\",
-                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
-            }"
-    fi

+ 65 - 18
Makefile

@@ -1,25 +1,39 @@
-TARGET = lfs.a
+ifdef BUILDDIR
+# make sure BUILDDIR ends with a slash
+override BUILDDIR := $(BUILDDIR)/
+# bit of a hack, but we want to make sure BUILDDIR directory structure
+# is correct before any commands
+$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
+	$(BUILDDIR) \
+	$(BUILDDIR)bd \
+	$(BUILDDIR)tests))
+endif
+
+# overridable target/src/tools/flags/etc
 ifneq ($(wildcard test.c main.c),)
-override TARGET = lfs
+TARGET ?= $(BUILDDIR)lfs
+else
+TARGET ?= $(BUILDDIR)lfs.a
 endif
 
+
 CC ?= gcc
 AR ?= ar
 SIZE ?= size
+CTAGS ?= ctags
+NM ?= nm
+LCOV ?= lcov
 
-SRC += $(wildcard *.c bd/*.c)
-OBJ := $(SRC:.c=.o)
-DEP := $(SRC:.c=.d)
-ASM := $(SRC:.c=.s)
+SRC ?= $(wildcard *.c)
+OBJ := $(SRC:%.c=$(BUILDDIR)%.o)
+DEP := $(SRC:%.c=$(BUILDDIR)%.d)
+ASM := $(SRC:%.c=$(BUILDDIR)%.s)
 
 ifdef DEBUG
 override CFLAGS += -O0 -g3
 else
 override CFLAGS += -Os
 endif
-ifdef WORD
-override CFLAGS += -m$(WORD)
-endif
 ifdef TRACE
 override CFLAGS += -DLFS_YES_TRACE
 endif
@@ -28,40 +42,73 @@ override CFLAGS += -std=c99 -Wall -pedantic
 override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef
 
 ifdef VERBOSE
-override TFLAGS += -v
+override TESTFLAGS += -v
+override CODEFLAGS += -v
+override COVERAGEFLAGS += -v
+endif
+ifdef EXEC
+override TESTFLAGS += --exec="$(EXEC)"
+endif
+ifdef BUILDDIR
+override TESTFLAGS += --build-dir="$(BUILDDIR:/=)"
+override CODEFLAGS += --build-dir="$(BUILDDIR:/=)"
+endif
+ifneq ($(NM),nm)
+override CODEFLAGS += --nm-tool="$(NM)"
 endif
 
 
-all: $(TARGET)
+# commands
+.PHONY: all build
+all build: $(TARGET)
 
+.PHONY: asm
 asm: $(ASM)
 
+.PHONY: size
 size: $(OBJ)
 	$(SIZE) -t $^
 
+.PHONY: tags
+tags:
+	$(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC)
+
+.PHONY: code
+code: $(OBJ)
+	./scripts/code.py $^ $(CODEFLAGS)
+
+.PHONY: test
 test:
-	./scripts/test.py $(TFLAGS)
+	./scripts/test.py $(TESTFLAGS)
 .SECONDEXPANSION:
 test%: tests/test$$(firstword $$(subst \#, ,%)).toml
-	./scripts/test.py $@ $(TFLAGS)
+	./scripts/test.py $@ $(TESTFLAGS)
+
+.PHONY: coverage
+coverage:
+	./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS)
 
+# rules
 -include $(DEP)
+.SUFFIXES:
 
-lfs: $(OBJ)
+$(BUILDDIR)lfs: $(OBJ)
 	$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
 
-%.a: $(OBJ)
+$(BUILDDIR)%.a: $(OBJ)
 	$(AR) rcs $@ $^
 
-%.o: %.c
+$(BUILDDIR)%.o: %.c
 	$(CC) -c -MMD $(CFLAGS) $< -o $@
 
-%.s: %.c
+$(BUILDDIR)%.s: %.c
 	$(CC) -S $(CFLAGS) $< -o $@
 
+# clean everything
+.PHONY: clean
 clean:
 	rm -f $(TARGET)
 	rm -f $(OBJ)
 	rm -f $(DEP)
 	rm -f $(ASM)
-	rm -f tests/*.toml.*
+	rm -f $(BUILDDIR)tests/*.toml.*

+ 2 - 2
lfs.c

@@ -4763,7 +4763,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
 
                 lfs1_entry_tole32(&entry1.d);
                 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
-                        {LFS_MKTAG(LFS_TYPE_CREATE, id, 0)},
+                        {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
                         {LFS_MKTAG_IF_ELSE(isdir,
                             LFS_TYPE_DIR, id, entry1.d.nlen,
                             LFS_TYPE_REG, id, entry1.d.nlen),
@@ -4868,7 +4868,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
 
         lfs_superblock_tole32(&superblock);
         err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
-                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)},
+                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                     &superblock}));

+ 10 - 0
lfs_util.h

@@ -49,6 +49,7 @@ extern "C"
 // code footprint
 
 // Logging functions
+#ifndef LFS_TRACE
 #ifdef LFS_YES_TRACE
 #define LFS_TRACE_(fmt, ...) \
     printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -56,7 +57,9 @@ extern "C"
 #else
 #define LFS_TRACE(...)
 #endif
+#endif
 
+#ifndef LFS_DEBUG
 #ifndef LFS_NO_DEBUG
 #define LFS_DEBUG_(fmt, ...) \
     printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -64,7 +67,9 @@ extern "C"
 #else
 #define LFS_DEBUG(...)
 #endif
+#endif
 
+#ifndef LFS_WARN
 #ifndef LFS_NO_WARN
 #define LFS_WARN_(fmt, ...) \
     printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -72,7 +77,9 @@ extern "C"
 #else
 #define LFS_WARN(...)
 #endif
+#endif
 
+#ifndef LFS_ERROR
 #ifndef LFS_NO_ERROR
 #define LFS_ERROR_(fmt, ...) \
     printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__)
@@ -80,13 +87,16 @@ extern "C"
 #else
 #define LFS_ERROR(...)
 #endif
+#endif
 
 // Runtime assertions
+#ifndef LFS_ASSERT
 #ifndef LFS_NO_ASSERT
 #define LFS_ASSERT(test) assert(test)
 #else
 #define LFS_ASSERT(test)
 #endif
+#endif
 
 
 // Builtin functions, these may be replaced by more efficient

+ 214 - 0
scripts/code.py

@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+#
+# Script to find code size at the function level. Basically just a bit wrapper
+# around nm with some extra conveniences for comparing builds. Heavily inspired
+# by Linux's Bloat-O-Meter.
+#
+
+import os
+import glob
+import itertools as it
+import subprocess as sp
+import shlex
+import re
+import csv
+import collections as co
+
+
+OBJ_PATHS = ['*.o', 'bd/*.o']
+
+def collect(paths, **args):
+    results = co.defaultdict(lambda: 0)
+    pattern = re.compile(
+        '^(?P<size>[0-9a-fA-F]+)' +
+        ' (?P<type>[%s])' % re.escape(args['type']) +
+        ' (?P<func>.+?)$')
+    for path in paths:
+        # note nm-tool may contain extra args
+        cmd = args['nm_tool'] + ['--size-sort', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE,
+            stderr=sp.PIPE if not args.get('verbose') else None,
+            universal_newlines=True)
+        for line in proc.stdout:
+            m = pattern.match(line)
+            if m:
+                results[(path, m.group('func'))] += int(m.group('size'), 16)
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stderr:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    flat_results = []
+    for (file, func), size in results.items():
+        # map to source files
+        if args.get('build_dir'):
+            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # discard internal functions
+        if func.startswith('__'):
+            continue
+        # discard .8449 suffixes created by optimizer
+        func = re.sub('\.[0-9]+', '', func)
+        flat_results.append((file, func, size))
+
+    return flat_results
+
+def main(**args):
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .obj files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with open(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['function'],
+                    int(result['size']))
+                for result in r]
+
+    total = 0
+    for _, _, size in results:
+        total += size
+
+    # find previous results?
+    if args.get('diff'):
+        with open(args['diff']) as f:
+            r = csv.DictReader(f)
+            prev_results = [
+                (   result['file'],
+                    result['function'],
+                    int(result['size']))
+                for result in r]
+
+        prev_total = 0
+        for _, _, size in prev_results:
+            prev_total += size
+
+    # write results to CSV
+    if args.get('output'):
+        with open(args['output'], 'w') as f:
+            w = csv.writer(f)
+            w.writerow(['file', 'function', 'size'])
+            for file, func, size in sorted(results):
+                w.writerow((file, func, size))
+
+    # print results
+    def dedup_entries(results, by='function'):
+        entries = co.defaultdict(lambda: 0)
+        for file, func, size in results:
+            entry = (file if by == 'file' else func)
+            entries[entry] += size
+        return entries
+
+    def diff_entries(olds, news):
+        diff = co.defaultdict(lambda: (0, 0, 0, 0))
+        for name, new in news.items():
+            diff[name] = (0, new, new, 1.0)
+        for name, old in olds.items():
+            _, new, _, _ = diff[name]
+            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
+        return diff
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %7s' % (by, 'size'))
+        else:
+            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
+
+    def print_entries(by='function'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, size in sorted(entries.items()):
+                print("%-36s %7d" % (name, size))
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for old, _, _, _ in diff.values() if not old),
+                sum(1 for _, new, _, _ in diff.values() if not new)))
+            for name, (old, new, diff, ratio) in sorted(diff.items(),
+                    key=lambda x: (-x[1][3], x)):
+                if ratio or args.get('all'):
+                    print("%-36s %7s %7s %+7d%s" % (name,
+                        old or "-",
+                        new or "-",
+                        diff,
+                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_totals():
+        if not args.get('diff'):
+            print("%-36s %7d" % ('TOTAL', total))
+        else:
+            ratio = (total-prev_total)/prev_total if prev_total else 1.0
+            print("%-36s %7s %7s %+7d%s" % (
+                'TOTAL',
+                prev_total if prev_total else '-',
+                total if total else '-',
+                total-prev_total,
+                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='function')
+        print_totals()
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Find code size at the function level.")
+    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
+        help="Description of where to find *.o files. May be a directory \
+            or a list of paths. Defaults to %r." % OBJ_PATHS)
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output commands that run behind the scenes.")
+    parser.add_argument('-o', '--output',
+        help="Specify CSV file to store results.")
+    parser.add_argument('-u', '--use',
+        help="Don't compile and find code sizes, instead use this CSV file.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff code size against.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all functions, not just the ones that changed.")
+    parser.add_argument('--files', action='store_true',
+        help="Show file-level code sizes. Note this does not include padding! "
+            "So sizes may differ from other tools.")
+    parser.add_argument('-s', '--summary', action='store_true',
+        help="Only show the total code size.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    parser.add_argument('--type', default='tTrRdDbB',
+        help="Type of symbols to report, this uses the same single-character "
+            "type-names emitted by nm. Defaults to %(default)r.")
+    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
+        help="Path to the nm tool to use.")
+    parser.add_argument('--build-dir',
+        help="Specify the relative build directory. Used to map object files \
+            to the correct source files.")
+    sys.exit(main(**vars(parser.parse_args())))

+ 254 - 0
scripts/coverage.py

@@ -0,0 +1,254 @@
+#!/usr/bin/env python3
+#
+# Parse and report coverage info from .info files generated by lcov
+#
+import os
+import glob
+import csv
+import re
+import collections as co
+import bisect as b
+
+
+INFO_PATHS = ['tests/*.toml.info']
+
+def collect(paths, **args):
+    file = None
+    funcs = []
+    lines = co.defaultdict(lambda: 0)
+    pattern = re.compile(
+        '^(?P<file>SF:/?(?P<file_name>.*))$'
+        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
+        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
+    for path in paths:
+        with open(path) as f:
+            for line in f:
+                m = pattern.match(line)
+                if m and m.group('file'):
+                    file = m.group('file_name')
+                elif m and file and m.group('func'):
+                    funcs.append((file, int(m.group('func_lineno')),
+                        m.group('func_name')))
+                elif m and file and m.group('line'):
+                    lines[(file, int(m.group('line_lineno')))] += (
+                        int(m.group('line_hits')))
+
+    # map line numbers to functions
+    funcs.sort()
+    def func_from_lineno(file, lineno):
+        i = b.bisect(funcs, (file, lineno))
+        if i and funcs[i-1][0] == file:
+            return funcs[i-1][2]
+        else:
+            return None
+
+    # reduce to function info
+    reduced_funcs = co.defaultdict(lambda: (0, 0))
+    for (file, line_lineno), line_hits in lines.items():
+        func = func_from_lineno(file, line_lineno)
+        if not func:
+            continue
+        hits, count = reduced_funcs[(file, func)]
+        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)
+
+    results = []
+    for (file, func), (hits, count) in reduced_funcs.items():
+        # discard internal/testing functions (test_* injected with
+        # internal testing)
+        if func.startswith('__') or func.startswith('test_'):
+            continue
+        # discard .8449 suffixes created by optimizer
+        func = re.sub('\.[0-9]+', '', func)
+        results.append((file, func, hits, count))
+
+    return results
+
+
+def main(**args):
+    # find coverage
+    if not args.get('use'):
+        # find *.info files
+        paths = []
+        for path in args['info_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.gcov'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .info files found in %r?' % args['info_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
+    else:
+        with open(args['use']) as f:
+            r = csv.DictReader(f)
+            results = [
+                (   result['file'],
+                    result['function'],
+                    int(result['hits']),
+                    int(result['count']))
+                for result in r]
+
+    total_hits, total_count = 0, 0
+    for _, _, hits, count in results:
+        total_hits += hits
+        total_count += count
+
+    # find previous results?
+    if args.get('diff'):
+        with open(args['diff']) as f:
+            r = csv.DictReader(f)
+            prev_results = [
+                (   result['file'],
+                    result['function'],
+                    int(result['hits']),
+                    int(result['count']))
+                for result in r]
+
+        prev_total_hits, prev_total_count = 0, 0
+        for _, _, hits, count in prev_results:
+            prev_total_hits += hits
+            prev_total_count += count
+
+    # write results to CSV
+    if args.get('output'):
+        with open(args['output'], 'w') as f:
+            w = csv.writer(f)
+            w.writerow(['file', 'function', 'hits', 'count'])
+            for file, func, hits, count in sorted(results):
+                w.writerow((file, func, hits, count))
+
+    # print results
+    def dedup_entries(results, by='function'):
+        entries = co.defaultdict(lambda: (0, 0))
+        for file, func, hits, count in results:
+            entry = (file if by == 'file' else func)
+            entry_hits, entry_count = entries[entry]
+            entries[entry] = (entry_hits + hits, entry_count + count)
+        return entries
+
+    def diff_entries(olds, news):
+        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
+        for name, (new_hits, new_count) in news.items():
+            diff[name] = (
+                0, 0,
+                new_hits, new_count,
+                new_hits, new_count,
+                (new_hits/new_count if new_count else 1.0) - 1.0)
+        for name, (old_hits, old_count) in olds.items():
+            _, _, new_hits, new_count, _, _, _ = diff[name]
+            diff[name] = (
+                old_hits, old_count,
+                new_hits, new_count,
+                new_hits-old_hits, new_count-old_count,
+                ((new_hits/new_count if new_count else 1.0)
+                    - (old_hits/old_count if old_count else 1.0)))
+        return diff
+
+    def print_header(by=''):
+        if not args.get('diff'):
+            print('%-36s %19s' % (by, 'hits/line'))
+        else:
+            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))
+
+    def print_entries(by='function'):
+        entries = dedup_entries(results, by=by)
+
+        if not args.get('diff'):
+            print_header(by=by)
+            for name, (hits, count) in sorted(entries.items()):
+                print("%-36s %11s %7s" % (name,
+                    '%d/%d' % (hits, count)
+                        if count else '-',
+                    '%.1f%%' % (100*hits/count)
+                        if count else '-'))
+        else:
+            prev_entries = dedup_entries(prev_results, by=by)
+            diff = diff_entries(prev_entries, entries)
+            print_header(by='%s (%d added, %d removed)' % (by,
+                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
+                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
+            for name, (
+                    old_hits, old_count,
+                    new_hits, new_count,
+                    diff_hits, diff_count, ratio) in sorted(diff.items(),
+                        key=lambda x: (-x[1][6], x)):
+                if ratio or args.get('all'):
+                    print("%-36s %11s %7s %11s %7s %11s%s" % (name,
+                        '%d/%d' % (old_hits, old_count)
+                            if old_count else '-',
+                        '%.1f%%' % (100*old_hits/old_count)
+                            if old_count else '-',
+                        '%d/%d' % (new_hits, new_count)
+                            if new_count else '-',
+                        '%.1f%%' % (100*new_hits/new_count)
+                            if new_count else '-',
+                        '%+d/%+d' % (diff_hits, diff_count),
+                        ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    def print_totals():
+        if not args.get('diff'):
+            print("%-36s %11s %7s" % ('TOTAL',
+                '%d/%d' % (total_hits, total_count)
+                    if total_count else '-',
+                '%.1f%%' % (100*total_hits/total_count)
+                    if total_count else '-'))
+        else:
+            ratio = ((total_hits/total_count
+                    if total_count else 1.0)
+                - (prev_total_hits/prev_total_count
+                    if prev_total_count else 1.0))
+            print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL',
+                '%d/%d' % (prev_total_hits, prev_total_count)
+                    if prev_total_count else '-',
+                '%.1f%%' % (100*prev_total_hits/prev_total_count)
+                    if prev_total_count else '-',
+                '%d/%d' % (total_hits, total_count)
+                    if total_count else '-',
+                '%.1f%%' % (100*total_hits/total_count)
+                    if total_count else '-',
+                '%+d/%+d' % (total_hits-prev_total_hits,
+                    total_count-prev_total_count),
+                ' (%+.1f%%)' % (100*ratio) if ratio else ''))
+
+    if args.get('quiet'):
+        pass
+    elif args.get('summary'):
+        print_header()
+        print_totals()
+    elif args.get('files'):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='function')
+        print_totals()
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Parse and report coverage info from .info files \
+            generated by lcov")
+    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
+        help="Description of where to find *.info files. May be a directory \
+            or list of paths. *.info files will be merged to show the total \
+            coverage. Defaults to %r." % INFO_PATHS)
+    parser.add_argument('-v', '--verbose', action='store_true',
+        help="Output commands that run behind the scenes.")
+    parser.add_argument('-o', '--output',
+        help="Specify CSV file to store results.")
+    parser.add_argument('-u', '--use',
+        help="Don't do any work, instead use this CSV file.")
+    parser.add_argument('-d', '--diff',
+        help="Specify CSV file to diff coverage against.")
+    parser.add_argument('-a', '--all', action='store_true',
+        help="Show all functions, not just the ones that changed.")
+    parser.add_argument('--files', action='store_true',
+        help="Show file-level coverage.")
+    parser.add_argument('-s', '--summary', action='store_true',
+        help="Only show the total coverage.")
+    parser.add_argument('-q', '--quiet', action='store_true',
+        help="Don't show anything, useful with -o.")
+    sys.exit(main(**vars(parser.parse_args())))

+ 131 - 56
scripts/test.py

@@ -20,19 +20,50 @@ import pty
 import errno
 import signal
 
-TESTDIR = 'tests'
+TEST_PATHS = 'tests'
 RULES = """
+# add block devices to sources
+TESTSRC ?= $(SRC) $(wildcard bd/*.c)
+
 define FLATTEN
-tests/%$(subst /,.,$(target)): $(target)
+%(path)s%%$(subst /,.,$(target)): $(target)
     ./scripts/explode_asserts.py $$< -o $$@
 endef
-$(foreach target,$(SRC),$(eval $(FLATTEN)))
-
--include tests/*.d
+$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
 
+-include %(path)s*.d
 .SECONDARY:
-%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f)
+
+%(path)s.test: %(path)s.test.o \\
+        $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
     $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
+
+# needed in case builddir is different
+%(path)s%%.o: %(path)s%%.c
+    $(CC) -c -MMD $(CFLAGS) $< -o $@
+"""
+COVERAGE_RULES = """
+%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
+
+# delete lingering coverage
+%(path)s.test: | %(path)s.info.clean
+.PHONY: %(path)s.info.clean
+%(path)s.info.clean:
+    rm -f %(path)s*.gcda
+
+# accumulate coverage info
+.PHONY: %(path)s.info
+%(path)s.info:
+    $(strip $(LCOV) -c \\
+        $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
+        --rc 'geninfo_adjust_src_path=$(shell pwd)' \\
+        -o $@)
+    $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
+ifdef COVERAGETARGET
+    $(strip $(LCOV) -a $@ \\
+        $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
+        -o $(COVERAGETARGET))
+endif
 """
 GLOBALS = """
 //////////////// AUTOGENERATED TEST ////////////////
@@ -119,6 +150,8 @@ class TestCase:
         self.if_ = config.get('if', None)
         self.in_ = config.get('in', None)
 
+        self.result = None
+
     def __str__(self):
         if hasattr(self, 'permno'):
             if any(k not in self.case.defines for k in self.defines):
@@ -179,7 +212,7 @@ class TestCase:
                 len(self.filter) >= 2 and
                 self.filter[1] != self.permno):
             return False
-        elif args.get('no_internal', False) and self.in_ is not None:
+        elif args.get('no_internal') and self.in_ is not None:
             return False
         elif self.if_ is not None:
             if_ = self.if_
@@ -213,7 +246,7 @@ class TestCase:
                 try:
                     with open(disk, 'w') as f:
                         f.truncate(0)
-                    if args.get('verbose', False):
+                    if args.get('verbose'):
                         print('truncate --size=0', disk)
                 except FileNotFoundError:
                     pass
@@ -237,14 +270,14 @@ class TestCase:
                     '-ex', 'r'])
             ncmd.extend(['--args'] + cmd)
 
-            if args.get('verbose', False):
+            if args.get('verbose'):
                 print(' '.join(shlex.quote(c) for c in ncmd))
             signal.signal(signal.SIGINT, signal.SIG_IGN)
             sys.exit(sp.call(ncmd))
 
         # run test case!
         mpty, spty = pty.openpty()
-        if args.get('verbose', False):
+        if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd, stdout=spty, stderr=spty)
         os.close(spty)
@@ -260,7 +293,7 @@ class TestCase:
                         break
                     raise
                 stdout.append(line)
-                if args.get('verbose', False):
+                if args.get('verbose'):
                     sys.stdout.write(line)
                 # intercept asserts
                 m = re.match(
@@ -299,7 +332,7 @@ class ValgrindTestCase(TestCase):
         return not self.leaky and super().shouldtest(**args)
 
     def test(self, exec=[], **args):
-        verbose = args.get('verbose', False)
+        verbose = args.get('verbose')
         uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
         exec = [
             'valgrind',
@@ -351,12 +384,17 @@ class TestSuite:
         self.name = os.path.basename(path)
         if self.name.endswith('.toml'):
             self.name = self.name[:-len('.toml')]
-        self.path = path
+        if args.get('build_dir'):
+            self.toml = path
+            self.path = args['build_dir'] + '/' + path
+        else:
+            self.toml = path
+            self.path = path
         self.classes = classes
         self.defines = defines.copy()
         self.filter = filter
 
-        with open(path) as f:
+        with open(self.toml) as f:
             # load tests
             config = toml.load(f)
 
@@ -467,7 +505,7 @@ class TestSuite:
 
     def build(self, **args):
         # build test files
-        tf = open(self.path + '.test.c.t', 'w')
+        tf = open(self.path + '.test.tc', 'w')
         tf.write(GLOBALS)
         if self.code is not None:
             tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
@@ -477,7 +515,7 @@ class TestSuite:
         for case in self.cases:
             if case.in_ not in tfs:
                 tfs[case.in_] = open(self.path+'.'+
-                    case.in_.replace('/', '.')+'.t', 'w')
+                    re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
                 tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
                 with open(case.in_) as f:
                     for line in f:
@@ -516,25 +554,33 @@ class TestSuite:
 
         # write makefiles
         with open(self.path + '.mk', 'w') as mk:
-            mk.write(RULES.replace(4*' ', '\t'))
+            mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
             mk.write('\n')
 
+            # add coverage hooks?
+            if args.get('coverage'):
+                mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
+                    path=self.path))
+                mk.write('\n')
+
             # add truly global defines globally
             for k, v in sorted(self.defines.items()):
-                mk.write('%s: override CFLAGS += -D%s=%r\n' % (
-                    self.path+'.test', k, v))
+                mk.write('%s.test: override CFLAGS += -D%s=%r\n'
+                    % (self.path, k, v))
 
             for path in tfs:
                 if path is None:
                     mk.write('%s: %s | %s\n' % (
                         self.path+'.test.c',
-                        self.path,
-                        self.path+'.test.c.t'))
+                        self.toml,
+                        self.path+'.test.tc'))
                 else:
                     mk.write('%s: %s %s | %s\n' % (
                         self.path+'.'+path.replace('/', '.'),
-                        self.path, path,
-                        self.path+'.'+path.replace('/', '.')+'.t'))
+                        self.toml,
+                        path,
+                        self.path+'.'+re.sub('(\.c)?$', '.tc',
+                            path.replace('/', '.'))))
                 mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
 
         self.makefile = self.path + '.mk'
@@ -557,7 +603,7 @@ class TestSuite:
                 if not args.get('verbose', True):
                     sys.stdout.write(FAIL)
                     sys.stdout.flush()
-                if not args.get('keep_going', False):
+                if not args.get('keep_going'):
                     if not args.get('verbose', True):
                         sys.stdout.write('\n')
                     raise
@@ -579,30 +625,30 @@ def main(**args):
 
     # and what class of TestCase to run
     classes = []
-    if args.get('normal', False):
+    if args.get('normal'):
         classes.append(TestCase)
-    if args.get('reentrant', False):
+    if args.get('reentrant'):
         classes.append(ReentrantTestCase)
-    if args.get('valgrind', False):
+    if args.get('valgrind'):
         classes.append(ValgrindTestCase)
     if not classes:
         classes = [TestCase]
 
     suites = []
-    for testpath in args['testpaths']:
+    for testpath in args['test_paths']:
         # optionally specified test case/perm
         testpath, *filter = testpath.split('#')
         filter = [int(f) for f in filter]
 
         # figure out the suite's toml file
         if os.path.isdir(testpath):
-            testpath = testpath + '/test_*.toml'
+            testpath = testpath + '/*.toml'
         elif os.path.isfile(testpath):
             testpath = testpath
         elif testpath.endswith('.toml'):
-            testpath = TESTDIR + '/' + testpath
+            testpath = TEST_PATHS + '/' + testpath
         else:
-            testpath = TESTDIR + '/' + testpath + '.toml'
+            testpath = TEST_PATHS + '/' + testpath + '.toml'
 
         # find tests
         for path in glob.glob(testpath):
@@ -628,7 +674,7 @@ def main(**args):
         list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
         [target for target in targets])
     mpty, spty = pty.openpty()
-    if args.get('verbose', False):
+    if args.get('verbose'):
         print(' '.join(shlex.quote(c) for c in cmd))
     proc = sp.Popen(cmd, stdout=spty, stderr=spty)
     os.close(spty)
@@ -642,14 +688,14 @@ def main(**args):
                 break
             raise
         stdout.append(line)
-        if args.get('verbose', False):
+        if args.get('verbose'):
             sys.stdout.write(line)
         # intercept warnings
         m = re.match(
             '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
             .format('(?:\033\[[\d;]*.| )*', 'warning'),
             line)
-        if m and not args.get('verbose', False):
+        if m and not args.get('verbose'):
             try:
                 with open(m.group(1)) as f:
                     lineno = int(m.group(2))
@@ -662,27 +708,26 @@ def main(**args):
             except:
                 pass
     proc.wait()
-
     if proc.returncode != 0:
-        if not args.get('verbose', False):
+        if not args.get('verbose'):
             for line in stdout:
                 sys.stdout.write(line)
-        sys.exit(-3)
+        sys.exit(-1)
 
     print('built %d test suites, %d test cases, %d permutations' % (
         len(suites),
         sum(len(suite.cases) for suite in suites),
         sum(len(suite.perms) for suite in suites)))
 
-    filtered = 0
+    total = 0
     for suite in suites:
         for perm in suite.perms:
-            filtered += perm.shouldtest(**args)
-    if filtered != sum(len(suite.perms) for suite in suites):
-        print('filtered down to %d permutations' % filtered)
+            total += perm.shouldtest(**args)
+    if total != sum(len(suite.perms) for suite in suites):
+        print('filtered down to %d permutations' % total)
 
     # only requested to build?
-    if args.get('build', False):
+    if args.get('build'):
         return 0
 
     print('====== testing ======')
@@ -697,15 +742,12 @@ def main(**args):
     failed = 0
     for suite in suites:
         for perm in suite.perms:
-            if not hasattr(perm, 'result'):
-                continue
-
             if perm.result == PASS:
                 passed += 1
-            else:
+            elif isinstance(perm.result, TestFailure):
                 sys.stdout.write(
                     "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
-                    "{perm} failed with {returncode}\n".format(
+                    "{perm} failed\n".format(
                         perm=perm, path=perm.suite.path, lineno=perm.lineno,
                         returncode=perm.result.returncode or 0))
                 if perm.result.stdout:
@@ -723,11 +765,33 @@ def main(**args):
                 sys.stdout.write('\n')
                 failed += 1
 
-    if args.get('gdb', False):
+    if args.get('coverage'):
+        # collect coverage info
+        # why -j1? lcov doesn't work in parallel because of gcov limitations
+        cmd = (['make', '-j1', '-f', 'Makefile'] +
+            list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
+            (['COVERAGETARGET=%s' % args['coverage']]
+                if isinstance(args['coverage'], str) else []) +
+            [suite.path + '.info' for suite in suites
+                if any(perm.result == PASS for perm in suite.perms)])
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd,
+            stdout=sp.PIPE if not args.get('verbose') else None,
+            stderr=sp.STDOUT if not args.get('verbose') else None,
+            universal_newlines=True)
+        proc.wait()
+        if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stdout:
+                    sys.stdout.write(line)
+            sys.exit(-1)
+
+    if args.get('gdb'):
         failure = None
         for suite in suites:
             for perm in suite.perms:
-                if getattr(perm, 'result', PASS) != PASS:
+                if isinstance(perm.result, TestFailure):
                     failure = perm.result
         if failure is not None:
             print('======= gdb ======')
@@ -735,20 +799,22 @@ def main(**args):
             failure.case.test(failure=failure, **args)
             sys.exit(0)
 
-    print('tests passed: %d' % passed)
-    print('tests failed: %d' % failed)
+    print('tests passed %d/%d (%.2f%%)' % (passed, total,
+        100*(passed/total if total else 1.0)))
+    print('tests failed %d/%d (%.2f%%)' % (failed, total,
+        100*(failed/total if total else 1.0)))
     return 1 if failed > 0 else 0
 
 if __name__ == "__main__":
     import argparse
     parser = argparse.ArgumentParser(
         description="Run parameterized tests in various configurations.")
-    parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
+    parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
         help="Description of test(s) to run. By default, this is all tests \
             found in the \"{0}\" directory. Here, you can specify a different \
             directory of tests, a specific file, a suite by name, and even \
             specific test cases and permutations. For example \
-            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TESTDIR))
+            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
     parser.add_argument('-D', action='append', default=[],
         help="Overriding parameter definitions.")
     parser.add_argument('-v', '--verbose', action='store_true',
@@ -769,10 +835,19 @@ if __name__ == "__main__":
         help="Run tests normally.")
     parser.add_argument('-r', '--reentrant', action='store_true',
         help="Run reentrant tests with simulated power-loss.")
-    parser.add_argument('-V', '--valgrind', action='store_true',
+    parser.add_argument('--valgrind', action='store_true',
         help="Run non-leaky tests under valgrind to check for memory leaks.")
-    parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
+    parser.add_argument('--exec', default=[], type=lambda e: e.split(),
         help="Run tests with another executable prefixed on the command line.")
-    parser.add_argument('-d', '--disk',
+    parser.add_argument('--disk',
         help="Specify a file to use for persistent/reentrant tests.")
+    parser.add_argument('--coverage', type=lambda x: x if x else True,
+        nargs='?', const='',
+        help="Collect coverage information during testing. This uses lcov/gcov \
+            to accumulate coverage information into *.info files. May also be \
+            a path to a *.info file to accumulate coverage info into.")
+    parser.add_argument('--build-dir',
+        help="Build relative to the specified directory instead of the \
+            current directory.")
+
     sys.exit(main(**vars(parser.parse_args())))