name: test
on: [push, pull_request]

defaults:
  run:
    shell: bash -euv -o pipefail {0}
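    # -e exits on the first failing command, -u errors on unset variables,
    # -v echoes lines as they are read, and pipefail propagates failures
    # through pipes; {0} is the placeholder GitHub Actions fills with the
    # path of the generated script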

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j
  TESTFLAGS: -k
  BENCHFLAGS:
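  # note: these are picked up by the Makefile; -Werror promotes warnings to
  # errors, -j lets make build in parallel, and -k presumably tells the test
  # runner to keep going past failures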

jobs:
  # run tests
  test:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
              gcc-arm-linux-gnueabi \
              libc6-dev-armel-cross \
              qemu-user
          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc --version
          qemu-arm -version
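      # note: CC/EXEC written to $GITHUB_ENV persist into later steps, so
      # builds use the cross-compiler and test binaries can run under QEMU
      # user-mode emulation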
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
              gcc-mips-linux-gnu \
              libc6-dev-mips-cross \
              qemu-user
          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
              gcc-powerpc-linux-gnu \
              libc6-dev-powerpc-cross \
              qemu-user
          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc --version
          qemu-ppc -version
      # does littlefs compile?
      - name: test-build
        run: |
          make clean
          make build
      # make sure example can at least compile
      - name: test-example
        run: |
          make clean
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          CFLAGS="$CFLAGS \
              -Duser_provided_block_device_read=NULL \
              -Duser_provided_block_device_prog=NULL \
              -Duser_provided_block_device_erase=NULL \
              -Duser_provided_block_device_sync=NULL \
              -include stdio.h" \
              make all
          rm test.c
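      # note: test-example scrapes the ``` c fences out of README.md with sed
      # and compiles the result, stubbing the block-device callbacks with NULL
      # so it only needs to compile, not run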
      # run the tests!
      - name: test
        run: |
          make clean
          make test
      # collect coverage info
      #
      # Note the goal is to maximize coverage in the small, easy-to-run
      # tests, so we intentionally exclude more aggressive powerloss testing
      # from coverage results
      - name: cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make lfs.cov.csv
          ./scripts/cov.py -u lfs.cov.csv
          mkdir -p cov
          cp lfs.cov.csv cov/cov.csv
      # find compile-time measurements
      - name: sizes
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}.structs.csv
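      # the sizes-* variants below repeat the same measurement under different
      # CFLAGS (readonly, threadsafe, migrate, error-asserts), each saving its
      # own set of per-arch csv files for the status steps to pick up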
      - name: sizes-readonly
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-readonly.structs.csv
      - name: sizes-threadsafe
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-threadsafe.structs.csv
      - name: sizes-migrate
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-migrate.structs.csv
      - name: sizes-error-asserts
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-error-asserts.structs.csv
      # create size statuses
      - name: upload-sizes
        uses: actions/upload-artifact@v2
        with:
          name: sizes
          path: sizes
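      # status-sizes fetches the previous statuses on master through the
      # GitHub API, computes a size delta against them, and writes one json
      # blob per csv; these presumably get turned into commit statuses by a
      # separate workflow with the necessary permissions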
      - name: status-sizes
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo sizes/*.csv)
          do
            # skip .data.csv as it should always be zero
            [[ $f == *.data.csv ]] && continue
            export STEP="sizes$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
            export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
              | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
              `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9∞]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP,
            }' | tee status/$(basename $f .csv).json
          done
      - name: upload-status-sizes
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
      # create cov statuses
      - name: upload-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: cov
          path: cov
      - name: status-cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          mkdir -p status
          f=cov/cov.csv
          for s in lines branches
          do
            export STEP="cov"
            export CONTEXT="cov / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
              `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
              | awk -F '[ /%]+' -v s=$s '
                NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP,
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run as many exhaustive tests as fit in GitHub's time limits
  #
  # this grows exponentially, so it doesn't turn out to be that many
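  # (-P<n> presumably reruns each test under every combination of n nested
  # power losses, which is why the number of runs explodes)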
  test-pls:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        pls: [1, 2]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-pls
        if: ${{matrix.pls <= 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}}" make test
      # >=2pls takes multiple days to run fully, so we only run a subset of
      # tests; these are the most important ones
      - name: test-limited-pls
        if: ${{matrix.pls > 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}} test_dirs test_relocations" \
              make test

  # run with LFS_NO_INTRINSICS to make sure that works
  test-no-intrinsics:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-no-intrinsics
        run: |
          CFLAGS="$CFLAGS -DLFS_NO_INTRINSICS" make test

  # run under Valgrind to check for memory errors
  test-valgrind:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      # Valgrind takes a while with diminishing value, so only test
      # on one geometry
      - name: test-valgrind
        run: |
          TESTFLAGS="$TESTFLAGS --valgrind -Gdefault -Pnone" make test

  # run with Clang, mostly to check for Clang-specific warnings and to make
  # sure compilation stays warning-free
  test-clang:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get install -qq clang python3 python3-pip
          pip3 install toml
          clang --version
          python3 --version
      - name: test-clang
        run: |
          # override CFLAGS since Clang does not support -fcallgraph-info
          # and -ftrack-macro-expansions
          make \
              CC=clang \
              CFLAGS="$CFLAGS -MMD -g3 -I. -std=c99 -Wall -Wextra -pedantic" \
              test

  # run benchmarks
  #
  # note there's no real benefit to running these on multiple archs
  bench:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      - name: bench
        run: |
          make bench
          # find bench results
          make lfs.bench.csv
          ./scripts/summary.py lfs.bench.csv \
              -bsuite \
              -freaded=bench_readed \
              -fproged=bench_proged \
              -ferased=bench_erased
          mkdir -p bench
          cp lfs.bench.csv bench/bench.csv
          # find perfbd results
          make lfs.perfbd.csv
          ./scripts/perfbd.py -u lfs.perfbd.csv
          mkdir -p bench
          cp lfs.perfbd.csv bench/perfbd.csv
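      # note: readed/proged/erased are presumably bytes read, programmed, and
      # erased on the simulated block device, and perfbd.py aggregates the
      # per-block-device performance samples collected during the run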
      # create bench statuses
      - name: upload-bench
        uses: actions/upload-artifact@v2
        with:
          name: bench
          path: bench
      - name: status-bench
        run: |
          mkdir -p status
          f=bench/bench.csv
          for s in readed proged erased
          do
            export STEP="bench"
            export CONTEXT="bench / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
              `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}}",
              target_step: env.STEP,
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-bench
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run compatibility tests using the pull-request base (usually master) as
  # the previous version
  test-compat:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
        if: ${{github.event_name == 'pull_request'}}
      # checkout the current pr target into lfsp
      - uses: actions/checkout@v2
        if: ${{github.event_name == 'pull_request'}}
        with:
          ref: ${{github.event.pull_request.base.ref}}
          path: lfsp
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      # adjust prefix of lfsp
      - name: changeprefix
        if: ${{github.event_name == 'pull_request'}}
        run: |
          ./scripts/changeprefix.py lfs lfsp lfsp/*.h lfsp/*.c
      - name: test-compat
        if: ${{github.event_name == 'pull_request'}}
        run: |
          TESTS=tests/test_compat.toml \
          SRC="$(find . lfsp -name '*.c' -maxdepth 1 \
              -and -not -name '*.t.*' \
              -and -not -name '*.b.*')" \
          CFLAGS="-DLFSP=lfsp/lfsp.h" \
              make test
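      # note: changeprefix.py renames the base version's lfs_* symbols to
      # lfsp_*, so both versions can be linked into the same test binary and
      # exercised against each other by test_compat.toml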

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
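      # note: losetup -f picks the first free /dev/loopN, and the dd above
      # creates a 64 MiB image (512 B x 128K blocks) that gets attached to it,
      # so littlefs-fuse has a loop device to format and mount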
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2
          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
          # attempt to migrate
          cd ../..
          fusermount -u mount
          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount
          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # status related tasks that run after tests
  status:
    runs-on: ubuntu-22.04
    needs: [test, bench]
    steps:
      - uses: actions/checkout@v2
        if: ${{github.event_name == 'pull_request'}}
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: sizes
          path: sizes
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: cov
          path: cov
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: bench
          path: bench
      # try to find results from tests
      - name: create-table
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # compare against pull-request target
          curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
            `${{github.event.pull_request.base.ref}}`
            `?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
            >> prev-status.json \
            || true
          # build table for GitHub
          declare -A table
          # sizes table
          i=0
          j=0
          for c in "" readonly threadsafe migrate error-asserts
          do
            # per-config results
            c_or_default=${c:-default}
            c_camel=${c_or_default^}
            table[$i,$j]=$c_camel
            ((j+=1))
            for s in code stack structs
            do
              f=sizes/thumb${c:+-$c}.$s.csv
              [ -e $f ] && table[$i,$j]=$( \
                export PREV="$(jq -re '
                  select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
                  | capture("(?<prev>[0-9∞]+)").prev' \
                  prev-status.json || echo 0)"
                ./scripts/summary.py $f --max=stack_limit -Y \
                  | awk '
                    NR==2 {$1=0; printf "%s B",$NF}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                      printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                    }' \
                  | sed -e 's/ /\&nbsp;/g')
              ((j+=1))
            done
            ((j=0, i+=1))
          done
          # coverage table
          i=0
          j=4
          for s in lines branches
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=cov/cov.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                select(.context == "'"cov / $s"'").description
                | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
                prev-status.json || echo 0)"
              ./scripts/cov.py -u $f -f$s -Y \
                | awk -F '[ /%]+' -v s=$s '
                  NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                  }' \
                | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # benchmark table
          i=3
          j=4
          for s in readed proged erased
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=bench/bench.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                select(.context == "'"bench / $s"'").description
                | capture("(?<prev>[0-9]+)").prev' \
                prev-status.json || echo 0)"
              ./scripts/summary.py $f -f$s=bench_$s -Y \
                | awk '
                  NR==2 {$1=0; printf "%s B",$NF}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                  }' \
                | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # build the actual table
          echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
          echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
          for ((i=0; i<6; i++))
          do
            echo -n "|" >> table.txt
            for ((j=0; j<6; j++))
            do
              echo -n " " >> table.txt
              [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
              echo -n "${table[$i,$j]:-}" >> table.txt
              echo -n " |" >> table.txt
            done
            echo >> table.txt
          done
          cat table.txt
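      # note: the table is built in a bash associative array keyed by "$i,$j"
      # and rendered row by row into markdown; the deltas come from the
      # prev-status.json statuses fetched above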
      # create a bot comment for successful runs on pull requests
      - name: create-comment
        if: ${{github.event_name == 'pull_request'}}
        run: |
          touch comment.txt
          echo "<details>" >> comment.txt
          echo "<summary>" >> comment.txt
          echo "Tests passed ✓, `
            `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
            `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
            `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
            >> comment.txt
          echo "</summary>" >> comment.txt
          echo >> comment.txt
          [ -e table.txt ] && cat table.txt >> comment.txt
          echo >> comment.txt
          echo "</details>" >> comment.txt
          cat comment.txt
          mkdir -p comment
          jq -n --rawfile comment comment.txt '{
            number: ${{github.event.number}},
            body: $comment,
          }' | tee comment/comment.json
      - name: upload-comment
        uses: actions/upload-artifact@v2
        with:
          name: comment
          path: comment
          retention-days: 1
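      # note: the comment artifact just holds the PR number and body; it is
      # presumably posted by a separate workflow with write permissions, since
      # pull_request runs from forks can't comment on the PR directly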