test.yml

name: test
on: [push, pull_request]

defaults:
  run:
    shell: bash -euv -o pipefail {0}

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j
  TESTFLAGS: -k
  BENCHFLAGS:
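# global build/test flags: -Werror promotes warnings to errors, MAKEFLAGS=-j
# lets make run in parallel, and TESTFLAGS=-k presumably tells the test runner
# to keep going past failures; BENCHFLAGS is left empty so steps can extend it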

jobs:
  # run tests
  test:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      # cross-compile with ARM Thumb (32-bit, little-endian)
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
              gcc-arm-linux-gnueabi \
              libc6-dev-armel-cross \
              qemu-user
          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc --version
          qemu-arm -version
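      # note: anything written to $GITHUB_ENV becomes an environment variable
      # for all later steps, so CC/EXEC here are how the cross-compiler and
      # the qemu wrapper reach the Makefile in the build and test steps below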
      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
              gcc-mips-linux-gnu \
              libc6-dev-mips-cross \
              qemu-user
          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc --version
          qemu-mips -version
      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
              gcc-powerpc-linux-gnu \
              libc6-dev-powerpc-cross \
              qemu-user
          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc --version
          qemu-ppc -version
      # does littlefs compile?
      - name: test-build
        run: |
          make clean
          make build
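      # the sed below pulls the usage example out of README.md's ``` c code
      # block into test.c, and the -D defines stub out the user-provided
      # block-device hooks so the example can compile without a real device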
      # make sure example can at least compile
      - name: test-example
        run: |
          make clean
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          CFLAGS="$CFLAGS \
              -Duser_provided_block_device_read=NULL \
              -Duser_provided_block_device_prog=NULL \
              -Duser_provided_block_device_erase=NULL \
              -Duser_provided_block_device_sync=NULL \
              -include stdio.h" \
              make all
          rm test.c
      # run the tests!
      - name: test
        run: |
          make clean
          # TODO include this by default?
          TESTFLAGS="$TESTFLAGS -Pnone,linear" make test
      # collect coverage info
      #
      # Note the goal is to maximize coverage in the small, easy-to-run
      # tests, so we intentionally exclude more aggressive powerloss testing
      # from coverage results
      - name: cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make lfs.cov.csv
          ./scripts/cov.py -u lfs.cov.csv
          mkdir -p cov
          cp lfs.cov.csv cov/cov.csv
      # find compile-time measurements
      - name: sizes
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv
          ./scripts/summary.py lfs.struct.csv \
              -bstruct \
              -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit \
              --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}.struct.csv
      - name: sizes-readonly
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv
          ./scripts/summary.py lfs.struct.csv \
              -bstruct \
              -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit \
              --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-readonly.struct.csv
      - name: sizes-threadsafe
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv
          ./scripts/summary.py lfs.struct.csv \
              -bstruct \
              -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit \
              --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-threadsafe.struct.csv
      - name: sizes-migrate
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv
          ./scripts/summary.py lfs.struct.csv \
              -bstruct \
              -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit \
              --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-migrate.struct.csv
      - name: sizes-error-asserts
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv
          ./scripts/summary.py lfs.struct.csv \
              -bstruct \
              -fsize=struct_size
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit \
              --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
          cp lfs.struct.csv sizes/${{matrix.arch}}-error-asserts.struct.csv
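      # note: each sizes-* step above repeats the same measurement under a
      # different LFS_* configuration, leaving per-arch/per-config CSVs in
      # sizes/ for the status steps below and the pull-request table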
      # create size statuses
      - name: upload-sizes
        uses: actions/upload-artifact@v2
        with:
          name: sizes
          path: sizes
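      # the stray backtick pairs in the curl URLs below are empty command
      # substitutions, a small trick for continuing a quoted string across
      # lines; PREV scrapes the last commit status on master so the new
      # description can show a relative delta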
      - name: status-sizes
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo sizes/*.csv)
          do
            # skip .data.csv as it should always be zero
            [[ $f == *.data.csv ]] && continue
            export STEP="sizes$(echo $f \
                | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
            export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
                | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
                | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
            export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                    | select(.context == env.CONTEXT).description
                    | capture("(?<prev>[0-9∞]+)").prev' \
                || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
                | awk '
                    NR==2 {$1=0; printf "%s B",$NF}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                        printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                    }')"
            jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}} (${{matrix.arch}})",
                target_step: env.STEP,
            }' | tee status/$(basename $f .csv).json
          done
      - name: upload-status-sizes
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
      # create cov statuses
      - name: upload-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: cov
          path: cov
      - name: status-cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          mkdir -p status
          f=cov/cov.csv
          for s in lines branches
          do
            export STEP="cov"
            export CONTEXT="cov / $s"
            export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                    | select(.context == env.CONTEXT).description
                    | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                    | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
                || echo 0)"
            export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
                | awk -F '[ /%]+' -v s=$s '
                    NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                        printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                    }')"
            jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}} (${{matrix.arch}})",
                target_step: env.STEP,
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # run as many exhaustive tests as fits in GitHub's time limits
  #
  # this grows exponentially, so it doesn't turn out to be that many
  test-pls:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        pls: [1, 2]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-pls
        if: ${{matrix.pls <= 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}}" make test
      # >=2pls takes multiple days to run fully, so we can only
      # run a subset of tests, these are the most important
      - name: test-limited-pls
        if: ${{matrix.pls > 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}} test_dirs test_relocations" \
              make test

  # run with LFS_NO_INTRINSICS to make sure that works
  test-no-intrinsics:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-no-intrinsics
        run: |
          CFLAGS="$CFLAGS -DLFS_NO_INTRINSICS" make test

  # run under Valgrind to check for memory errors
  test-valgrind:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      # Valgrind takes a while with diminishing value, so only test
      # on one geometry
      - name: test-valgrind
        run: |
          TESTFLAGS="$TESTFLAGS -Gdefault --valgrind" make test

  # run with Clang, mostly to check for Clang-specific warnings and to make
  # sure compilation stays warning-free under clang
  test-clang:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get install -qq clang python3 python3-pip
          pip3 install toml
          clang --version
          python3 --version
      - name: test-clang
        run: |
          # override CFLAGS since Clang does not support -fcallgraph-info
          # and -ftrack-macro-expansion
          make \
              CC=clang \
              CFLAGS="$CFLAGS -MMD -g3 -I. -std=c99 -Wall -Wextra -pedantic" \
              test

  # run benchmarks
  #
  # note there's no real benefit to running these on multiple archs
  bench:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      - name: bench
        run: |
          BENCHFLAGS="$BENCHFLAGS -o lfs.bench.csv" make bench
          # find bench results
          ./scripts/summary.py lfs.bench.csv \
              -bsuite \
              -freaded=bench_readed \
              -fproged=bench_proged \
              -ferased=bench_erased
          mkdir -p bench
          cp lfs.bench.csv bench/bench.csv
          # find perfbd results
          make lfs.perfbd.csv
          ./scripts/perfbd.py -u lfs.perfbd.csv
          mkdir -p bench
          cp lfs.perfbd.csv bench/perfbd.csv
      # create bench statuses
      - name: upload-bench
        uses: actions/upload-artifact@v2
        with:
          name: bench
          path: bench
      - name: status-bench
        run: |
          mkdir -p status
          f=bench/bench.csv
          for s in readed proged erased
          do
            export STEP="bench"
            export CONTEXT="bench / $s"
            export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                    | select(.context == env.CONTEXT).description
                    | capture("(?<prev>[0-9]+)").prev' \
                || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
                | awk '
                    NR==2 {$1=0; printf "%s B",$NF}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                        printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                    }')"
            jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}}",
                target_step: env.STEP,
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-bench
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1

  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2
          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
          # attempt to migrate
          cd ../..
          fusermount -u mount
          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount
          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # status related tasks that run after tests
  status:
    runs-on: ubuntu-22.04
    needs: [test, bench]
    steps:
      - uses: actions/checkout@v2
        if: ${{github.event_name == 'pull_request'}}
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: sizes
          path: sizes
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: cov
          path: cov
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: bench
          path: bench
      # try to find results from tests
      - name: create-table
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # compare against pull-request target
          curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
              `${{github.event.pull_request.base.ref}}`
              `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
              >> prev-status.json \
              || true
          # build table for GitHub
          declare -A table
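          # the table is a sparse 6x6 grid: rows 0-4, columns 0-3 hold the
          # per-config size results, while columns 4-5 hold coverage (rows
          # 0-1) and benchmark results (rows 3-5)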
          # sizes table
          i=0
          j=0
          for c in "" readonly threadsafe migrate error-asserts
          do
            # per-config results
            c_or_default=${c:-default}
            c_camel=${c_or_default^}
            table[$i,$j]=$c_camel
            ((j+=1))
            for s in code stack struct
            do
              f=sizes/thumb${c:+-$c}.$s.csv
              [ -e $f ] && table[$i,$j]=$( \
                export PREV="$(jq -re '
                    select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
                    | capture("(?<prev>[0-9∞]+)").prev' \
                    prev-status.json || echo 0)"
                ./scripts/summary.py $f --max=stack_limit -Y \
                    | awk '
                        NR==2 {$1=0; printf "%s B",$NF}
                        NR==2 && ENVIRON["PREV"]+0 != 0 {
                            printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                        }' \
                    | sed -e 's/ /\&nbsp;/g')
              ((j+=1))
            done
            ((j=0, i+=1))
          done
          # coverage table
          i=0
          j=4
          for s in lines branches
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=cov/cov.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                  select(.context == "'"cov / $s"'").description
                  | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                  | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
                  prev-status.json || echo 0)"
              ./scripts/cov.py -u $f -f$s -Y \
                  | awk -F '[ /%]+' -v s=$s '
                      NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                      NR==2 && ENVIRON["PREV"]+0 != 0 {
                          printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                      }' \
                  | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # benchmark table
          i=3
          j=4
          for s in readed proged erased
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=bench/bench.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                  select(.context == "'"bench / $s"'").description
                  | capture("(?<prev>[0-9]+)").prev' \
                  prev-status.json || echo 0)"
              ./scripts/summary.py $f -f$s=bench_$s -Y \
                  | awk '
                      NR==2 {$1=0; printf "%s B",$NF}
                      NR==2 && ENVIRON["PREV"]+0 != 0 {
                          printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                      }' \
                  | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # build the actual table
          echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
          echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
          for ((i=0; i<6; i++))
          do
            echo -n "|" >> table.txt
            for ((j=0; j<6; j++))
            do
              echo -n " " >> table.txt
              [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
              echo -n "${table[$i,$j]:-}" >> table.txt
              echo -n " |" >> table.txt
            done
            echo >> table.txt
          done
          cat table.txt
      # create a bot comment for successful runs on pull requests
      - name: create-comment
        if: ${{github.event_name == 'pull_request'}}
        run: |
          touch comment.txt
          echo "<details>" >> comment.txt
          echo "<summary>" >> comment.txt
          echo "Tests passed ✓, `
              `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
              `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
              `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
              >> comment.txt
          echo "</summary>" >> comment.txt
          echo >> comment.txt
          [ -e table.txt ] && cat table.txt >> comment.txt
          echo >> comment.txt
          echo "</details>" >> comment.txt
          cat comment.txt
          mkdir -p comment
          jq -n --rawfile comment comment.txt '{
              number: ${{github.event.number}},
              body: $comment,
          }' | tee comment/comment.json
      - name: upload-comment
        uses: actions/upload-artifact@v2
        with:
          name: comment
          path: comment
          retention-days: 1
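      # note: the status/ and comment/ artifacts are only built and uploaded
      # here; presumably a separate workflow with the necessary permissions
      # picks them up and pushes the commit statuses and the bot comment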