# test.yml
name: test
on: [push, pull_request]

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j

jobs:
  # run tests
  test:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]

    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few additional tools
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
          python3 --version

          # setup a ram-backed disk to speed up reentrant tests
          mkdir disks
          sudo mount -t tmpfs -o size=100m tmpfs disks
          TESTFLAGS="$TESTFLAGS --disk=disks/disk"

          # collect coverage
          mkdir -p coverage
          TESTFLAGS="$TESTFLAGS --coverage=`
            `coverage/${{github.job}}-${{matrix.arch}}.info"
          echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV
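          # note: the empty backtick pair above is just a line-continuation
          # trick (a command substitution containing only whitespace), so
          # TESTFLAGS ends up holding a single
          # --coverage=coverage/<job>-<arch>.info flag; appending it to
          # $GITHUB_ENV makes TESTFLAGS visible to every later step in this job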
      # we're not cross-compiling with x86, but we do need the newest
      # version of gcc for the -fcallgraph-info=su flag
      - name: install-x86_64
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          sudo apt-get install -qq gcc-10
          echo "CC=gcc-10" >> $GITHUB_ENV
          gcc-10 --version
      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-arm-linux-gnueabi \
            libc6-dev-armel-cross \
            qemu-user
          echo "CC=arm-linux-gnueabi-gcc-10 -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc-10 --version
          qemu-arm -version
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-mips-linux-gnu \
            libc6-dev-mips-cross \
            qemu-user
          echo "CC=mips-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc-10 --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
            gcc-10-powerpc-linux-gnu \
            libc6-dev-powerpc-cross \
            qemu-user
          echo "CC=powerpc-linux-gnu-gcc-10 --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc-10 --version
          qemu-ppc -version
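          # note: all three cross-compile steps follow the same pattern:
          # build --static binaries so no target sysroot is needed at
          # runtime, and set EXEC to the matching qemu-user binary, which
          # the test scripts presumably use to wrap each test executable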
      # make sure example can at least compile
      - name: test-example
        run: |
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          make all CFLAGS+=" \
            -Duser_provided_block_device_read=NULL \
            -Duser_provided_block_device_prog=NULL \
            -Duser_provided_block_device_erase=NULL \
            -Duser_provided_block_device_sync=NULL \
            -include stdio.h"
          rm test.c
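          # (the sed above pulls the C example out of README.md's ``` c code
          # fence into test.c, which make then presumably picks up alongside
          # the other sources; the -D defines stub out the user-provided
          # block device hooks so the example can compile and link)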
      # test configurations
      # normal+reentrant tests
      - name: test-default
        run: |
          make clean
          make test TESTFLAGS+="-nrk"
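          # (the -nrk flags roughly select normal and reentrant tests and
          # keep-going behavior in the test runner; the steps below re-run
          # the same suite with different geometry defines)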
      # NOR flash: read/prog = 1, block = 4KiB
      - name: test-nor
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
      # SD/eMMC: read/prog = 512, block = 512
      - name: test-emmc
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
      # NAND flash: read/prog = 4KiB, block = 32KiB
      - name: test-nand
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
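          # (the \(32*1024\) escaping keeps the parentheses from being eaten
          # by the shell, presumably so the compiler ends up seeing
          # LFS_BLOCK_SIZE defined as (32*1024))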
      # other extreme geometries that are useful for various corner cases
      - name: test-no-intrinsics
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_NO_INTRINSICS"
      - name: test-byte-writes
        # it just takes too long to test byte-level writes under qemu,
        # and it should be plenty covered by the other configurations
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
      - name: test-block-cycles
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_BLOCK_CYCLES=1"
      - name: test-odd-block-count
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
      - name: test-odd-block-size
        run: |
          make clean
          make test TESTFLAGS+="-nrk \
            -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
      # upload coverage for later collection
      - name: upload-coverage
        uses: actions/upload-artifact@v2
        with:
          name: coverage
          path: coverage
          retention-days: 1
      # update results
      - name: results
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR"
          cp lfs.csv results/${{matrix.arch}}.csv
          ./scripts/summary.py results/${{matrix.arch}}.csv
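          # (lfs.csv holds the code/stack/struct size measurements that
          # summary.py reports; the LFS_NO_* defines strip asserts and
          # logging so the numbers reflect a minimal build)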
      - name: results-readonly
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY"
          cp lfs.csv results/${{matrix.arch}}-readonly.csv
          ./scripts/summary.py results/${{matrix.arch}}-readonly.csv
      - name: results-threadsafe
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE"
          cp lfs.csv results/${{matrix.arch}}-threadsafe.csv
          ./scripts/summary.py results/${{matrix.arch}}-threadsafe.csv
      - name: results-migrate
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE"
          cp lfs.csv results/${{matrix.arch}}-migrate.csv
          ./scripts/summary.py results/${{matrix.arch}}-migrate.csv
      - name: results-error-asserts
        run: |
          mkdir -p results
          make clean
          make lfs.csv \
            CFLAGS+=" \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
          cp lfs.csv results/${{matrix.arch}}-error-asserts.csv
          ./scripts/summary.py results/${{matrix.arch}}-error-asserts.csv
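          # (this configuration redefines LFS_ASSERT to return an error
          # instead of aborting, so the size cost of error-returning asserts
          # gets measured as well)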
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results

      # create statuses with results
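      # for each results csv, look up the previous value on master via the
      # GitHub status API, compute a relative change with awk, and emit one
      # JSON payload per measurement (code, stack, structs); the payloads are
      # uploaded as artifacts, presumably for a follow-up workflow to turn
      # into commit statuses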
      - name: collect-status
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo results/*.csv)
          do
            export STEP="results$(
              echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p')"
            for r in code stack structs
            do
              export CONTEXT="results (${{matrix.arch}}$(
                echo $f | sed -n 's/[^-]*-\(.*\).csv/, \1/p')) / $r"
              export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                  | select(.context == env.CONTEXT).description
                  | capture("(?<result>[0-9∞]+)").result' \
                || echo 0)"
              export DESCRIPTION="$(./scripts/summary.py $f -f $r -Y | awk '
                NR==2 {printf "%s B",$2}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
              jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}} (${{matrix.arch}})",
                target_step: env.STEP}' \
                | tee status/$r-${{matrix.arch}}$(
                  echo $f | sed -n 's/[^-]*-\(.*\).csv/-\1/p').json
            done
          done
      - name: upload-status
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run under Valgrind to check for memory errors
  valgrind:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip
          sudo pip3 install toml
      - name: install-valgrind
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq valgrind
          valgrind --version
      # normal tests, we don't need to test all geometries
      - name: test-valgrind
        run: make test TESTFLAGS+="-k --valgrind"

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
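          # (losetup -f picks a free loop device, dd creates a 64MiB image,
          # and LOOP is exported so the test step below can format and
          # mount it)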
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-20.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need toml, also pip3 isn't installed by default?
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip libfuse-dev
          sudo pip3 install toml
          fusermount -V
          gcc --version
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs

          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2

          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

          # attempt to migrate
          cd ../..
          fusermount -u mount
          v2/lfs --migrate $LOOP
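          # (--migrate converts the image from the v1 to the v2 on-disk
          # format in place; mounting with v2 below should find the data
          # written by v1 intact)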
          v2/lfs $LOOP mount

          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test

  # collect coverage info
  coverage:
    runs-on: ubuntu-20.04
    needs: [test]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
      # yes, we continue-on-error on nearly every step; continue-on-error
      # at the job level apparently still marks the job as failed, which
      # isn't what we want
      - uses: actions/download-artifact@v2
        continue-on-error: true
        with:
          name: coverage
          path: coverage
      - name: results-coverage
        continue-on-error: true
        run: |
          mkdir -p results
          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
            -o results/coverage.info
          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
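          # (lcov -a merges the per-arch .info files downloaded above into a
          # single tracefile, which coverage.py then reduces to a csv)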
      - name: upload-results
        uses: actions/upload-artifact@v2
        with:
          name: results
          path: results
      - name: collect-status
        run: |
          mkdir -p status
          [ -e results/coverage.csv ] || exit 0
          export STEP="results-coverage"
          export CONTEXT="results / coverage"
          export PREV="$(curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
              | select(.context == env.CONTEXT).description
              | capture("(?<result>[0-9\\.]+)").result' \
            || echo 0)"
          export DESCRIPTION="$(
            ./scripts/coverage.py -u results/coverage.csv -Y | awk -F '[ /%]+' '
              NR==2 {printf "%.1f%% of %d lines",$4,$3}
              NR==2 && ENVIRON["PREV"]+0 != 0 {
                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
          jq -n '{
            state: "success",
            context: env.CONTEXT,
            description: env.DESCRIPTION,
            target_job: "${{github.job}}",
            target_step: env.STEP}' \
            | tee status/coverage.json
      - name: upload-status
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1