name: test
on: [push, pull_request]

defaults:
  run:
    shell: bash -euv -o pipefail {0}

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j
  TESTFLAGS: -k

jobs:
  # run tests
  test:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]

    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version

      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
            gcc-arm-linux-gnueabi \
            libc6-dev-armel-cross \
            qemu-user
          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc --version
          qemu-arm -version
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
            gcc-mips-linux-gnu \
            libc6-dev-mips-cross \
            qemu-user
          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
            gcc-powerpc-linux-gnu \
            libc6-dev-powerpc-cross \
            qemu-user
          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc --version
          qemu-ppc -version

      # does littlefs compile?
      - name: test-build
        run: |
          make clean
          make build
      # make sure example can at least compile
      - name: test-example
        run: |
          make clean
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          make all CFLAGS+=" \
            -Duser_provided_block_device_read=NULL \
            -Duser_provided_block_device_prog=NULL \
            -Duser_provided_block_device_erase=NULL \
            -Duser_provided_block_device_sync=NULL \
            -include stdio.h"
          rm test.c

      # run the tests!
      - name: test
        run: |
          make clean
          # TODO include this by default?
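          # note: -Pnone,linear presumably limits this run to the "none" and
          # "linear" powerloss scenarios so the default job stays fast; the
          # heavier powerloss testing happens in the test-pls job below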
          make test TESTFLAGS+='-Pnone,linear'

      # collect coverage info
      #
      # Note the goal is to maximize coverage in the small, easy-to-run
      # tests, so we intentionally exclude more aggressive powerloss testing
      # from coverage results
      - name: cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make lfs.cov.csv
          ./scripts/cov.py -u lfs.cov.csv
          mkdir -p cov
          cp lfs.cov.csv cov/cov.csv

      # find compile-time measurements
      - name: sizes
        run: |
          make clean
          make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR"
          ./scripts/summary.py lfs.struct.csv \
            -bstruct \
            -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
            -bfunction \
            -fcode=code_size \
            -fdata=data_size \
            -fstack=stack_limit \
            --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}.struct.csv
      - name: sizes-readonly
        run: |
          make clean
          make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY"
          ./scripts/summary.py lfs.struct.csv \
            -bstruct \
            -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
            -bfunction \
            -fcode=code_size \
            -fdata=data_size \
            -fstack=stack_limit \
            --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-readonly.struct.csv
      - name: sizes-threadsafe
        run: |
          make clean
          make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE"
          ./scripts/summary.py lfs.struct.csv \
            -bstruct \
            -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
            -bfunction \
            -fcode=code_size \
            -fdata=data_size \
            -fstack=stack_limit \
            --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-threadsafe.struct.csv
      - name: sizes-migrate
        run: |
          make clean
          make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE"
          ./scripts/summary.py lfs.struct.csv \
            -bstruct \
            -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
            -bfunction \
            -fcode=code_size \
            -fdata=data_size \
            -fstack=stack_limit \
            --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-migrate.struct.csv
      - name: sizes-error-asserts
        run: |
          make clean
          make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
            CFLAGS+=" \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
          ./scripts/summary.py lfs.struct.csv \
            -bstruct \
            -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
            -bfunction \
            -fcode=code_size \
            -fdata=data_size \
            -fstack=stack_limit \
            --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-error-asserts.struct.csv

      # create size statuses
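      #
      # note: these status/*.json files are presumably collected by a
      # separate workflow and posted as commit statuses; PREV is scraped
      # from master's existing statuses so each description can show a
      # delta against the last master build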
      - name: upload-sizes
        uses: actions/upload-artifact@v2
        with:
          name: sizes
          path: sizes
      - name: status-sizes
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo sizes/*.csv)
          do
            # skip .data.csv as it should always be zero
            [[ $f == *.data.csv ]] && continue
            export STEP="sizes$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
            export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
              | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
                `master?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9∞]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP}' \
              | tee status/$(basename $f .csv).json
          done
      - name: upload-status-sizes
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

      # create cov statuses
      - name: upload-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: cov
          path: cov
      - name: status-cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          mkdir -p status
          f=cov/cov.csv
          for s in lines branches
          do
            export STEP="cov"
            export CONTEXT="cov / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
                `master?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9\\.]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
              | awk -F '[ /%]+' -v s=$s '
                NR==2 {$1=0; printf "%.1f%% of %d %s",$4,$3,s}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP}' \
              | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run as many exhaustive tests as fit in GitHub's time limits
  #
  # this grows exponentially, so it doesn't turn out to be that many
  test-pls:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        pls: [1, 2]

    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-pls
        if: ${{matrix.pls <= 1}}
        run: |
          make test TESTFLAGS+="-P${{matrix.pls}}"
      # >=2pls takes multiple days to run fully, so we can only
      # run a subset of tests; these are the most important
      - name: test-limited-pls
        if: ${{matrix.pls > 1}}
        run: |
          make test TESTFLAGS+="-P${{matrix.pls}} test_dirs test_relocations"

  # run with LFS_NO_INTRINSICS to make sure that works
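  #
  # note: LFS_NO_INTRINSICS presumably disables the compiler-builtin
  # helpers in lfs_util.h in favor of the portable C fallbacks, so those
  # code paths also get exercised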
  test-no-intrinsics:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-no-intrinsics
        run: |
          make test CFLAGS+="-DLFS_NO_INTRINSICS"

  # run under Valgrind to check for memory errors
  test-valgrind:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      # Valgrind takes a while with diminishing value, so only test
      # on one geometry
      - name: test-valgrind
        run: |
          make test TESTFLAGS+="-Gdefault --valgrind"

  # run benchmarks
  #
  # note there's no real benefit to running these on multiple archs
  bench:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      - name: bench
        run: |
          make bench BENCHFLAGS+=-olfs.bench.csv

          # find bench results
          ./scripts/summary.py lfs.bench.csv \
            -bsuite \
            -freaded=bench_readed \
            -fproged=bench_proged \
            -ferased=bench_erased
          mkdir -p bench
          cp lfs.bench.csv bench/bench.csv

          # find perfbd results
          make lfs.perfbd.csv
          ./scripts/perfbd.py -u lfs.perfbd.csv
          mkdir -p bench
          cp lfs.perfbd.csv bench/perfbd.csv
      # create bench statuses
      - name: upload-bench
        uses: actions/upload-artifact@v2
        with:
          name: bench
          path: bench
      - name: status-bench
        run: |
          mkdir -p status
          f=bench/bench.csv
          for s in readed proged erased
          do
            export STEP="bench"
            export CONTEXT="bench / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
                `master?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}}",
              target_step: env.STEP}' \
              | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-bench
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse

          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
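          # note: -B (--always-make) forces a full rebuild from inside the
          # mounted littlefs image, so the compile itself exercises the
          # filesystem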
          make -B test-runner
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2

          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount

          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

          # attempt to migrate
          cd ../..
          fusermount -u mount

          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount

          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test