# test.yml — littlefs CI workflow (GitHub Actions)

---
  1. name: test
  2. on: [push, pull_request]
  3. defaults:
  4. run:
  5. shell: bash -euv -o pipefail {0}
  6. env:
  7. CFLAGS: -Werror
  8. MAKEFLAGS: -j
  9. TESTFLAGS: -k
  10. jobs:
  11. # run tests
  12. test:
  13. runs-on: ubuntu-22.04
  14. strategy:
  15. fail-fast: false
  16. matrix:
  17. arch: [x86_64, thumb, mips, powerpc]
  18. steps:
  19. - uses: actions/checkout@v2
  20. - name: install
  21. run: |
  22. # need a few things
  23. sudo apt-get update -qq
  24. sudo apt-get install -qq gcc python3 python3-pip
  25. pip3 install toml
  26. gcc --version
  27. python3 --version
  28. # cross-compile with ARM Thumb (32-bit, little-endian)
  29. - name: install-thumb
  30. if: ${{matrix.arch == 'thumb'}}
  31. run: |
  32. sudo apt-get install -qq \
  33. gcc-arm-linux-gnueabi \
  34. libc6-dev-armel-cross \
  35. qemu-user
  36. echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
  37. echo "EXEC=qemu-arm" >> $GITHUB_ENV
  38. arm-linux-gnueabi-gcc --version
  39. qemu-arm -version
  40. # cross-compile with MIPS (32-bit, big-endian)
  41. - name: install-mips
  42. if: ${{matrix.arch == 'mips'}}
  43. run: |
  44. sudo apt-get install -qq \
  45. gcc-mips-linux-gnu \
  46. libc6-dev-mips-cross \
  47. qemu-user
  48. echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
  49. echo "EXEC=qemu-mips" >> $GITHUB_ENV
  50. mips-linux-gnu-gcc --version
  51. qemu-mips -version
  52. # cross-compile with PowerPC (32-bit, big-endian)
  53. - name: install-powerpc
  54. if: ${{matrix.arch == 'powerpc'}}
  55. run: |
  56. sudo apt-get install -qq \
  57. gcc-powerpc-linux-gnu \
  58. libc6-dev-powerpc-cross \
  59. qemu-user
  60. echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
  61. echo "EXEC=qemu-ppc" >> $GITHUB_ENV
  62. powerpc-linux-gnu-gcc --version
  63. qemu-ppc -version
  64. # does littlefs compile?
  65. - name: test-build
  66. run: |
  67. make clean
  68. make build
  69. # make sure example can at least compile
  70. - name: test-example
  71. run: |
  72. make clean
  73. sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
  74. make all CFLAGS+=" \
  75. -Duser_provided_block_device_read=NULL \
  76. -Duser_provided_block_device_prog=NULL \
  77. -Duser_provided_block_device_erase=NULL \
  78. -Duser_provided_block_device_sync=NULL \
  79. -include stdio.h"
  80. rm test.c
  81. # run the tests!
  82. - name: test
  83. run: |
  84. make clean
  85. # TODO include this by default?
  86. make test TESTFLAGS+='-Pnone,linear'
  87. # collect coverage info
  88. #
  89. # Note the goal is to maximize coverage in the small, easy-to-run
  90. # tests, so we intentionally exclude more aggressive powerloss testing
  91. # from coverage results
  92. - name: cov
  93. if: ${{matrix.arch == 'x86_64'}}
  94. run: |
  95. make lfs.cov.csv
  96. ./scripts/cov.py -u lfs.cov.csv
  97. mkdir -p cov
  98. cp lfs.cov.csv cov/cov.csv
  99. # find compile-time measurements
  100. - name: sizes
  101. run: |
  102. make clean
  103. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
  104. CFLAGS+=" \
  105. -DLFS_NO_ASSERT \
  106. -DLFS_NO_DEBUG \
  107. -DLFS_NO_WARN \
  108. -DLFS_NO_ERROR"
  109. ./scripts/summary.py lfs.struct.csv \
  110. -bstruct \
  111. -fsize=struct_size
  112. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  113. -bfunction \
  114. -fcode=code_size \
  115. -fdata=data_size \
  116. -fstack=stack_limit \
  117. --max=stack_limit
  118. mkdir -p sizes
  119. cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
  120. cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
  121. cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
  122. cp lfs.struct.csv sizes/${{matrix.arch}}.struct.csv
  123. - name: sizes-readonly
  124. run: |
  125. make clean
  126. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
  127. CFLAGS+=" \
  128. -DLFS_NO_ASSERT \
  129. -DLFS_NO_DEBUG \
  130. -DLFS_NO_WARN \
  131. -DLFS_NO_ERROR \
  132. -DLFS_READONLY"
  133. ./scripts/summary.py lfs.struct.csv \
  134. -bstruct \
  135. -fsize=struct_size
  136. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  137. -bfunction \
  138. -fcode=code_size \
  139. -fdata=data_size \
  140. -fstack=stack_limit \
  141. --max=stack_limit
  142. mkdir -p sizes
  143. cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
  144. cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
  145. cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
  146. cp lfs.struct.csv sizes/${{matrix.arch}}-readonly.struct.csv
  147. - name: sizes-threadsafe
  148. run: |
  149. make clean
  150. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
  151. CFLAGS+=" \
  152. -DLFS_NO_ASSERT \
  153. -DLFS_NO_DEBUG \
  154. -DLFS_NO_WARN \
  155. -DLFS_NO_ERROR \
  156. -DLFS_THREADSAFE"
  157. ./scripts/summary.py lfs.struct.csv \
  158. -bstruct \
  159. -fsize=struct_size
  160. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  161. -bfunction \
  162. -fcode=code_size \
  163. -fdata=data_size \
  164. -fstack=stack_limit \
  165. --max=stack_limit
  166. mkdir -p sizes
  167. cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
  168. cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
  169. cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
  170. cp lfs.struct.csv sizes/${{matrix.arch}}-threadsafe.struct.csv
  171. - name: sizes-migrate
  172. run: |
  173. make clean
  174. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
  175. CFLAGS+=" \
  176. -DLFS_NO_ASSERT \
  177. -DLFS_NO_DEBUG \
  178. -DLFS_NO_WARN \
  179. -DLFS_NO_ERROR \
  180. -DLFS_MIGRATE"
  181. ./scripts/summary.py lfs.struct.csv \
  182. -bstruct \
  183. -fsize=struct_size
  184. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  185. -bfunction \
  186. -fcode=code_size \
  187. -fdata=data_size \
  188. -fstack=stack_limit \
  189. --max=stack_limit
  190. mkdir -p sizes
  191. cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
  192. cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
  193. cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
  194. cp lfs.struct.csv sizes/${{matrix.arch}}-migrate.struct.csv
  195. - name: sizes-error-asserts
  196. run: |
  197. make clean
  198. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.struct.csv \
  199. CFLAGS+=" \
  200. -DLFS_NO_DEBUG \
  201. -DLFS_NO_WARN \
  202. -DLFS_NO_ERROR \
  203. -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
  204. ./scripts/summary.py lfs.struct.csv \
  205. -bstruct \
  206. -fsize=struct_size
  207. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  208. -bfunction \
  209. -fcode=code_size \
  210. -fdata=data_size \
  211. -fstack=stack_limit \
  212. --max=stack_limit
  213. mkdir -p sizes
  214. cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
  215. cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
  216. cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
  217. cp lfs.struct.csv sizes/${{matrix.arch}}-error-asserts.struct.csv
  218. # create size statuses
  219. - name: upload-sizes
  220. uses: actions/upload-artifact@v2
  221. with:
  222. name: sizes
  223. path: sizes
  224. - name: status-sizes
  225. run: |
  226. mkdir -p status
  227. for f in $(shopt -s nullglob ; echo sizes/*.csv)
  228. do
  229. # skip .data.csv as it should always be zero
  230. [[ $f == *.data.csv ]] && continue
  231. export STEP="sizes$(echo $f \
  232. | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
  233. export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
  234. | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
  235. | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
  236. export PREV="$(curl -sS \
  237. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
  238. `?per_page=100" \
  239. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
  240. | select(.context == env.CONTEXT).description
  241. | capture("(?<prev>[0-9∞]+)").prev' \
  242. || echo 0)"
  243. export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
  244. | awk '
  245. NR==2 {$1=0; printf "%s B",$NF}
  246. NR==2 && ENVIRON["PREV"]+0 != 0 {
  247. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  248. }')"
  249. jq -n '{
  250. state: "success",
  251. context: env.CONTEXT,
  252. description: env.DESCRIPTION,
  253. target_job: "${{github.job}} (${{matrix.arch}})",
  254. target_step: env.STEP,
  255. }' | tee status/$(basename $f .csv).json
  256. done
  257. - name: upload-status-sizes
  258. uses: actions/upload-artifact@v2
  259. with:
  260. name: status
  261. path: status
  262. retention-days: 1
  263. # create cov statuses
  264. - name: upload-cov
  265. if: ${{matrix.arch == 'x86_64'}}
  266. uses: actions/upload-artifact@v2
  267. with:
  268. name: cov
  269. path: cov
  270. - name: status-cov
  271. if: ${{matrix.arch == 'x86_64'}}
  272. run: |
  273. mkdir -p status
  274. f=cov/cov.csv
  275. for s in lines branches
  276. do
  277. export STEP="cov"
  278. export CONTEXT="cov / $s"
  279. export PREV="$(curl -sS \
  280. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
  281. `?per_page=100" \
  282. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
  283. | select(.context == env.CONTEXT).description
  284. | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
  285. | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
  286. || echo 0)"
  287. export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
  288. | awk -F '[ /%]+' -v s=$s '
  289. NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
  290. NR==2 && ENVIRON["PREV"]+0 != 0 {
  291. printf " (%+.1f%%)",$4-ENVIRON["PREV"]
  292. }')"
  293. jq -n '{
  294. state: "success",
  295. context: env.CONTEXT,
  296. description: env.DESCRIPTION,
  297. target_job: "${{github.job}} (${{matrix.arch}})",
  298. target_step: env.STEP,
  299. }' | tee status/$(basename $f .csv)-$s.json
  300. done
  301. - name: upload-status-sizes
  302. if: ${{matrix.arch == 'x86_64'}}
  303. uses: actions/upload-artifact@v2
  304. with:
  305. name: status
  306. path: status
  307. retention-days: 1
  308. # run as many exhaustive tests as fits in GitHub's time limits
  309. #
  310. # this grows exponentially, so it doesn't turn out to be that many
  311. test-pls:
  312. runs-on: ubuntu-22.04
  313. strategy:
  314. fail-fast: false
  315. matrix:
  316. pls: [1, 2]
  317. steps:
  318. - uses: actions/checkout@v2
  319. - name: install
  320. run: |
  321. # need a few things
  322. sudo apt-get update -qq
  323. sudo apt-get install -qq gcc python3 python3-pip
  324. pip3 install toml
  325. gcc --version
  326. python3 --version
  327. - name: test-pls
  328. if: ${{matrix.pls <= 1}}
  329. run: |
  330. make test TESTFLAGS+="-P${{matrix.pls}}"
  331. # >=2pls takes multiple days to run fully, so we can only
  332. # run a subset of tests, these are the most important
  333. - name: test-limited-pls
  334. if: ${{matrix.pls > 1}}
  335. run: |
  336. make test TESTFLAGS+="-P${{matrix.pls}} test_dirs test_relocations"
  337. # run with LFS_NO_INTRINSICS to make sure that works
  338. test-no-intrinsics:
  339. runs-on: ubuntu-22.04
  340. steps:
  341. - uses: actions/checkout@v2
  342. - name: install
  343. run: |
  344. # need a few things
  345. sudo apt-get update -qq
  346. sudo apt-get install -qq gcc python3 python3-pip
  347. pip3 install toml
  348. gcc --version
  349. python3 --version
  350. - name: test-no-intrinsics
  351. run: |
  352. make test CFLAGS+="-DLFS_NO_INTRINSICS"
  353. # run under Valgrind to check for memory errors
  354. test-valgrind:
  355. runs-on: ubuntu-22.04
  356. steps:
  357. - uses: actions/checkout@v2
  358. - name: install
  359. run: |
  360. # need a few things
  361. sudo apt-get update -qq
  362. sudo apt-get install -qq gcc python3 python3-pip valgrind
  363. pip3 install toml
  364. gcc --version
  365. python3 --version
  366. valgrind --version
  367. # Valgrind takes a while with diminishing value, so only test
  368. # on one geometry
  369. - name: test-valgrind
  370. run: |
  371. make test TESTFLAGS+="-Gdefault --valgrind"
  372. # run benchmarks
  373. #
  374. # note there's no real benefit to running these on multiple archs
  375. bench:
  376. runs-on: ubuntu-22.04
  377. steps:
  378. - uses: actions/checkout@v2
  379. - name: install
  380. run: |
  381. # need a few things
  382. sudo apt-get update -qq
  383. sudo apt-get install -qq gcc python3 python3-pip valgrind
  384. pip3 install toml
  385. gcc --version
  386. python3 --version
  387. valgrind --version
  388. - name: bench
  389. run: |
  390. make bench BENCHFLAGS+="-o lfs.bench.csv"
  391. # find bench results
  392. ./scripts/summary.py lfs.bench.csv \
  393. -bsuite \
  394. -freaded=bench_readed \
  395. -fproged=bench_proged \
  396. -ferased=bench_erased
  397. mkdir -p bench
  398. cp lfs.bench.csv bench/bench.csv
  399. # find perfbd results
  400. make lfs.perfbd.csv
  401. ./scripts/perfbd.py -u lfs.perfbd.csv
  402. mkdir -p bench
  403. cp lfs.perfbd.csv bench/perfbd.csv
  404. # create bench statuses
  405. - name: upload-bench
  406. uses: actions/upload-artifact@v2
  407. with:
  408. name: bench
  409. path: bench
  410. - name: status-bench
  411. run: |
  412. mkdir -p status
  413. f=bench/bench.csv
  414. for s in readed proged erased
  415. do
  416. export STEP="bench"
  417. export CONTEXT="bench / $s"
  418. export PREV="$(curl -sS \
  419. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
  420. `?per_page=100" \
  421. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
  422. | select(.context == env.CONTEXT).description
  423. | capture("(?<prev>[0-9]+)").prev' \
  424. || echo 0)"
  425. export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
  426. | awk '
  427. NR==2 {$1=0; printf "%s B",$NF}
  428. NR==2 && ENVIRON["PREV"]+0 != 0 {
  429. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  430. }')"
  431. jq -n '{
  432. state: "success",
  433. context: env.CONTEXT,
  434. description: env.DESCRIPTION,
  435. target_job: "${{github.job}}",
  436. target_step: env.STEP,
  437. }' | tee status/$(basename $f .csv)-$s.json
  438. done
  439. - name: upload-status-bench
  440. uses: actions/upload-artifact@v2
  441. with:
  442. name: status
  443. path: status
  444. retention-days: 1
  445. # test that compilation is warning free under clang
  446. clang:
  447. runs-on: ubuntu-20.04
  448. steps:
  449. - uses: actions/checkout@v2
  450. - name: install
  451. run: |
  452. # need toml, also pip3 isn't installed by default?
  453. sudo apt-get update -qq
  454. sudo apt-get install -qq python3 python3-pip
  455. sudo pip3 install toml
  456. - name: install-clang
  457. run: |
  458. sudo apt-get update -qq
  459. sudo apt-get install -qq clang
  460. echo "CC=clang" >> $GITHUB_ENV
  461. clang --version
  462. # no reason to not test again
  463. - name: test-clang
  464. run: make test TESTFLAGS+="-k"
  465. # self-host with littlefs-fuse for a fuzz-like test
  466. fuse:
  467. runs-on: ubuntu-22.04
  468. if: ${{!endsWith(github.ref, '-prefix')}}
  469. steps:
  470. - uses: actions/checkout@v2
  471. - name: install
  472. run: |
  473. # need a few things
  474. sudo apt-get update -qq
  475. sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
  476. sudo pip3 install toml
  477. gcc --version
  478. python3 --version
  479. fusermount -V
  480. - uses: actions/checkout@v2
  481. with:
  482. repository: littlefs-project/littlefs-fuse
  483. ref: v2
  484. path: littlefs-fuse
  485. - name: setup
  486. run: |
  487. # copy our new version into littlefs-fuse
  488. rm -rf littlefs-fuse/littlefs/*
  489. cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
  490. # setup disk for littlefs-fuse
  491. mkdir mount
  492. LOOP=$(sudo losetup -f)
  493. sudo chmod a+rw $LOOP
  494. dd if=/dev/zero bs=512 count=128K of=disk
  495. losetup $LOOP disk
  496. echo "LOOP=$LOOP" >> $GITHUB_ENV
  497. - name: test
  498. run: |
  499. # self-host test
  500. make -C littlefs-fuse
  501. littlefs-fuse/lfs --format $LOOP
  502. littlefs-fuse/lfs $LOOP mount
  503. ls mount
  504. mkdir mount/littlefs
  505. cp -r $(git ls-tree --name-only HEAD) mount/littlefs
  506. cd mount/littlefs
  507. stat .
  508. ls -flh
  509. make -B test-runner
  510. make -B test
  511. # test migration using littlefs-fuse
  512. migrate:
  513. runs-on: ubuntu-22.04
  514. if: ${{!endsWith(github.ref, '-prefix')}}
  515. steps:
  516. - uses: actions/checkout@v2
  517. - name: install
  518. run: |
  519. # need a few things
  520. sudo apt-get update -qq
  521. sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
  522. sudo pip3 install toml
  523. gcc --version
  524. python3 --version
  525. fusermount -V
  526. - uses: actions/checkout@v2
  527. with:
  528. repository: littlefs-project/littlefs-fuse
  529. ref: v2
  530. path: v2
  531. - uses: actions/checkout@v2
  532. with:
  533. repository: littlefs-project/littlefs-fuse
  534. ref: v1
  535. path: v1
  536. - name: setup
  537. run: |
  538. # copy our new version into littlefs-fuse
  539. rm -rf v2/littlefs/*
  540. cp -r $(git ls-tree --name-only HEAD) v2/littlefs
  541. # setup disk for littlefs-fuse
  542. mkdir mount
  543. LOOP=$(sudo losetup -f)
  544. sudo chmod a+rw $LOOP
  545. dd if=/dev/zero bs=512 count=128K of=disk
  546. losetup $LOOP disk
  547. echo "LOOP=$LOOP" >> $GITHUB_ENV
  548. - name: test
  549. run: |
  550. # compile v1 and v2
  551. make -C v1
  552. make -C v2
  553. # run self-host test with v1
  554. v1/lfs --format $LOOP
  555. v1/lfs $LOOP mount
  556. ls mount
  557. mkdir mount/littlefs
  558. cp -r $(git ls-tree --name-only HEAD) mount/littlefs
  559. cd mount/littlefs
  560. stat .
  561. ls -flh
  562. make -B test-runner
  563. make -B test
  564. # attempt to migrate
  565. cd ../..
  566. fusermount -u mount
  567. v2/lfs --migrate $LOOP
  568. v2/lfs $LOOP mount
  569. # run self-host test with v2 right where we left off
  570. ls mount
  571. cd mount/littlefs
  572. stat .
  573. ls -flh
  574. make -B test-runner
  575. make -B test
  576. # status related tasks that run after tests
  577. status:
  578. runs-on: ubuntu-22.04
  579. needs: [test, bench]
  580. steps:
  581. - uses: actions/checkout@v2
  582. if: ${{github.event_name == 'pull_request'}}
  583. - name: install
  584. if: ${{github.event_name == 'pull_request'}}
  585. run: |
  586. # need a few things
  587. sudo apt-get install -qq gcc python3 python3-pip
  588. pip3 install toml
  589. gcc --version
  590. python3 --version
  591. - uses: actions/download-artifact@v2
  592. if: ${{github.event_name == 'pull_request'}}
  593. continue-on-error: true
  594. with:
  595. name: sizes
  596. path: sizes
  597. - uses: actions/download-artifact@v2
  598. if: ${{github.event_name == 'pull_request'}}
  599. continue-on-error: true
  600. with:
  601. name: cov
  602. path: cov
  603. - uses: actions/download-artifact@v2
  604. if: ${{github.event_name == 'pull_request'}}
  605. continue-on-error: true
  606. with:
  607. name: bench
  608. path: bench
  609. # try to find results from tests
  610. - name: create-table
  611. if: ${{github.event_name == 'pull_request'}}
  612. run: |
  613. # compare against pull-request target
  614. curl -sS \
  615. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
  616. `${{github.event.pull_request.base.ref}}`
  617. `?per_page=100" \
  618. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
  619. >> prev-status.json \
  620. || true
  621. # build table for GitHub
  622. declare -A table
  623. # sizes table
  624. i=0
  625. j=0
  626. for c in "" readonly threadsafe migrate error-asserts
  627. do
  628. # per-config results
  629. c_or_default=${c:-default}
  630. c_camel=${c_or_default^}
  631. table[$i,$j]=$c_camel
  632. ((j+=1))
  633. for s in code stack struct
  634. do
  635. f=sizes/thumb${c:+-$c}.$s.csv
  636. [ -e $f ] && table[$i,$j]=$( \
  637. export PREV="$(jq -re '
  638. select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
  639. | capture("(?<prev>[0-9∞]+)").prev' \
  640. prev-status.json || echo 0)"
  641. ./scripts/summary.py $f --max=stack_limit -Y \
  642. | awk '
  643. NR==2 {$1=0; printf "%s B",$NF}
  644. NR==2 && ENVIRON["PREV"]+0 != 0 {
  645. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  646. }' \
  647. | sed -e 's/ /\&nbsp;/g')
  648. ((j+=1))
  649. done
  650. ((j=0, i+=1))
  651. done
  652. # coverage table
  653. i=0
  654. j=4
  655. for s in lines branches
  656. do
  657. table[$i,$j]=${s^}
  658. ((j+=1))
  659. f=cov/cov.csv
  660. [ -e $f ] && table[$i,$j]=$( \
  661. export PREV="$(jq -re '
  662. select(.context == "'"cov / $s"'").description
  663. | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
  664. | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
  665. prev-status.json || echo 0)"
  666. ./scripts/cov.py -u $f -f$s -Y \
  667. | awk -F '[ /%]+' -v s=$s '
  668. NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
  669. NR==2 && ENVIRON["PREV"]+0 != 0 {
  670. printf " (%+.1f%%)",$4-ENVIRON["PREV"]
  671. }' \
  672. | sed -e 's/ /\&nbsp;/g')
  673. ((j=4, i+=1))
  674. done
  675. # benchmark table
  676. i=3
  677. j=4
  678. for s in readed proged erased
  679. do
  680. table[$i,$j]=${s^}
  681. ((j+=1))
  682. f=bench/bench.csv
  683. [ -e $f ] && table[$i,$j]=$( \
  684. export PREV="$(jq -re '
  685. select(.context == "'"bench / $s"'").description
  686. | capture("(?<prev>[0-9]+)").prev' \
  687. prev-status.json || echo 0)"
  688. ./scripts/summary.py $f -f$s=bench_$s -Y \
  689. | awk '
  690. NR==2 {$1=0; printf "%s B",$NF}
  691. NR==2 && ENVIRON["PREV"]+0 != 0 {
  692. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  693. }' \
  694. | sed -e 's/ /\&nbsp;/g')
  695. ((j=4, i+=1))
  696. done
  697. # build the actual table
  698. echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
  699. echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
  700. for ((i=0; i<6; i++))
  701. do
  702. echo -n "|" >> table.txt
  703. for ((j=0; j<6; j++))
  704. do
  705. echo -n " " >> table.txt
  706. [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
  707. echo -n "${table[$i,$j]:-}" >> table.txt
  708. echo -n " |" >> table.txt
  709. done
  710. echo >> table.txt
  711. done
  712. cat table.txt
  713. # create a bot comment for successful runs on pull requests
  714. - name: create-comment
  715. if: ${{github.event_name == 'pull_request'}}
  716. run: |
  717. touch comment.txt
  718. echo "<details>" >> comment.txt
  719. echo "<summary>" >> comment.txt
  720. echo "Tests passed ✓, `
  721. `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
  722. `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
  723. `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
  724. >> comment.txt
  725. echo "</summary>" >> comment.txt
  726. echo >> comment.txt
  727. [ -e table.txt ] && cat table.txt >> comment.txt
  728. echo >> comment.txt
  729. echo "</details>" >> comment.txt
  730. cat comment.txt
  731. mkdir -p comment
  732. jq -n --rawfile comment comment.txt '{
  733. number: ${{github.event.number}},
  734. body: $comment,
  735. }' | tee comment/comment.json
  736. - name: upload-comment
  737. uses: actions/upload-artifact@v2
  738. with:
  739. name: comment
  740. path: comment
  741. retention-days: 1