# test.yml — GitHub Actions CI workflow for littlefs
  1. name: test
  2. on: [push, pull_request]
  3. defaults:
  4. run:
  5. shell: bash -euv -o pipefail {0}
  6. env:
  7. CFLAGS: -Werror
  8. MAKEFLAGS: -j
  9. TESTFLAGS: -k
  10. BENCHFLAGS:
  11. jobs:
  # run tests
  test:
    runs-on: ubuntu-22.04
    strategy:
      # let every arch finish even if one fails
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]
    steps:
  20. - uses: actions/checkout@v2
  21. - name: install
  22. run: |
  23. # need a few things
  24. sudo apt-get update -qq
  25. sudo apt-get install -qq gcc python3 python3-pip
  26. pip3 install toml
  27. gcc --version
  28. python3 --version
  29. # cross-compile with ARM Thumb (32-bit, little-endian)
  30. - name: install-thumb
  31. if: ${{matrix.arch == 'thumb'}}
  32. run: |
  33. sudo apt-get install -qq \
  34. gcc-arm-linux-gnueabi \
  35. libc6-dev-armel-cross \
  36. qemu-user
  37. echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
  38. echo "EXEC=qemu-arm" >> $GITHUB_ENV
  39. arm-linux-gnueabi-gcc --version
  40. qemu-arm -version
  41. # cross-compile with MIPS (32-bit, big-endian)
  42. - name: install-mips
  43. if: ${{matrix.arch == 'mips'}}
  44. run: |
  45. sudo apt-get install -qq \
  46. gcc-mips-linux-gnu \
  47. libc6-dev-mips-cross \
  48. qemu-user
  49. echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
  50. echo "EXEC=qemu-mips" >> $GITHUB_ENV
  51. mips-linux-gnu-gcc --version
  52. qemu-mips -version
  53. # cross-compile with PowerPC (32-bit, big-endian)
  54. - name: install-powerpc
  55. if: ${{matrix.arch == 'powerpc'}}
  56. run: |
  57. sudo apt-get install -qq \
  58. gcc-powerpc-linux-gnu \
  59. libc6-dev-powerpc-cross \
  60. qemu-user
  61. echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
  62. echo "EXEC=qemu-ppc" >> $GITHUB_ENV
  63. powerpc-linux-gnu-gcc --version
  64. qemu-ppc -version
  65. # does littlefs compile?
  66. - name: test-build
  67. run: |
  68. make clean
  69. make build
  70. # make sure example can at least compile
  71. - name: test-example
  72. run: |
  73. make clean
  74. sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
  75. CFLAGS="$CFLAGS \
  76. -Duser_provided_block_device_read=NULL \
  77. -Duser_provided_block_device_prog=NULL \
  78. -Duser_provided_block_device_erase=NULL \
  79. -Duser_provided_block_device_sync=NULL \
  80. -include stdio.h" \
  81. make all
  82. rm test.c
  83. # run the tests!
  84. - name: test
  85. run: |
  86. make clean
  87. # TODO include this by default?
  88. TESTFLAGS="$TESTFLAGS -Pnone,linear" make test
  89. # collect coverage info
  90. #
  91. # Note the goal is to maximize coverage in the small, easy-to-run
  92. # tests, so we intentionally exclude more aggressive powerloss testing
  93. # from coverage results
  94. - name: cov
  95. if: ${{matrix.arch == 'x86_64'}}
  96. run: |
  97. make lfs.cov.csv
  98. ./scripts/cov.py -u lfs.cov.csv
  99. mkdir -p cov
  100. cp lfs.cov.csv cov/cov.csv
  101. # find compile-time measurements
  102. - name: sizes
  103. run: |
  104. make clean
  105. CFLAGS="$CFLAGS \
  106. -DLFS_NO_ASSERT \
  107. -DLFS_NO_DEBUG \
  108. -DLFS_NO_WARN \
  109. -DLFS_NO_ERROR" \
  110. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
  111. ./scripts/structs.py -u lfs.structs.csv
  112. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  113. -bfunction \
  114. -fcode=code_size \
  115. -fdata=data_size \
  116. -fstack=stack_limit --max=stack_limit
  117. mkdir -p sizes
  118. cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
  119. cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
  120. cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
  121. cp lfs.structs.csv sizes/${{matrix.arch}}.structs.csv
  122. - name: sizes-readonly
  123. run: |
  124. make clean
  125. CFLAGS="$CFLAGS \
  126. -DLFS_NO_ASSERT \
  127. -DLFS_NO_DEBUG \
  128. -DLFS_NO_WARN \
  129. -DLFS_NO_ERROR \
  130. -DLFS_READONLY" \
  131. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
  132. ./scripts/structs.py -u lfs.structs.csv
  133. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  134. -bfunction \
  135. -fcode=code_size \
  136. -fdata=data_size \
  137. -fstack=stack_limit --max=stack_limit
  138. mkdir -p sizes
  139. cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
  140. cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
  141. cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
  142. cp lfs.structs.csv sizes/${{matrix.arch}}-readonly.structs.csv
  143. - name: sizes-threadsafe
  144. run: |
  145. make clean
  146. CFLAGS="$CFLAGS \
  147. -DLFS_NO_ASSERT \
  148. -DLFS_NO_DEBUG \
  149. -DLFS_NO_WARN \
  150. -DLFS_NO_ERROR \
  151. -DLFS_THREADSAFE" \
  152. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
  153. ./scripts/structs.py -u lfs.structs.csv
  154. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  155. -bfunction \
  156. -fcode=code_size \
  157. -fdata=data_size \
  158. -fstack=stack_limit --max=stack_limit
  159. mkdir -p sizes
  160. cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
  161. cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
  162. cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
  163. cp lfs.structs.csv sizes/${{matrix.arch}}-threadsafe.structs.csv
  164. - name: sizes-migrate
  165. run: |
  166. make clean
  167. CFLAGS="$CFLAGS \
  168. -DLFS_NO_ASSERT \
  169. -DLFS_NO_DEBUG \
  170. -DLFS_NO_WARN \
  171. -DLFS_NO_ERROR \
  172. -DLFS_MIGRATE" \
  173. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
  174. ./scripts/structs.py -u lfs.structs.csv
  175. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  176. -bfunction \
  177. -fcode=code_size \
  178. -fdata=data_size \
  179. -fstack=stack_limit --max=stack_limit
  180. mkdir -p sizes
  181. cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
  182. cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
  183. cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
  184. cp lfs.structs.csv sizes/${{matrix.arch}}-migrate.structs.csv
  185. - name: sizes-error-asserts
  186. run: |
  187. make clean
  188. CFLAGS="$CFLAGS \
  189. -DLFS_NO_DEBUG \
  190. -DLFS_NO_WARN \
  191. -DLFS_NO_ERROR \
  192. -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
  193. make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
  194. ./scripts/structs.py -u lfs.structs.csv
  195. ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
  196. -bfunction \
  197. -fcode=code_size \
  198. -fdata=data_size \
  199. -fstack=stack_limit --max=stack_limit
  200. mkdir -p sizes
  201. cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
  202. cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
  203. cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
  204. cp lfs.structs.csv sizes/${{matrix.arch}}-error-asserts.structs.csv
  205. # create size statuses
  206. - name: upload-sizes
  207. uses: actions/upload-artifact@v2
  208. with:
  209. name: sizes
  210. path: sizes
  211. - name: status-sizes
  212. run: |
  213. mkdir -p status
  214. for f in $(shopt -s nullglob ; echo sizes/*.csv)
  215. do
  216. # skip .data.csv as it should always be zero
  217. [[ $f == *.data.csv ]] && continue
  218. export STEP="sizes$(echo $f \
  219. | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
  220. export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
  221. | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
  222. | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
  223. export PREV="$(curl -sS \
  224. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
  225. `?per_page=100" \
  226. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
  227. | select(.context == env.CONTEXT).description
  228. | capture("(?<prev>[0-9∞]+)").prev' \
  229. || echo 0)"
  230. export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
  231. | awk '
  232. NR==2 {$1=0; printf "%s B",$NF}
  233. NR==2 && ENVIRON["PREV"]+0 != 0 {
  234. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  235. }')"
  236. jq -n '{
  237. state: "success",
  238. context: env.CONTEXT,
  239. description: env.DESCRIPTION,
  240. target_job: "${{github.job}} (${{matrix.arch}})",
  241. target_step: env.STEP,
  242. }' | tee status/$(basename $f .csv).json
  243. done
  244. - name: upload-status-sizes
  245. uses: actions/upload-artifact@v2
  246. with:
  247. name: status
  248. path: status
  249. retention-days: 1
  250. # create cov statuses
  251. - name: upload-cov
  252. if: ${{matrix.arch == 'x86_64'}}
  253. uses: actions/upload-artifact@v2
  254. with:
  255. name: cov
  256. path: cov
  257. - name: status-cov
  258. if: ${{matrix.arch == 'x86_64'}}
  259. run: |
  260. mkdir -p status
  261. f=cov/cov.csv
  262. for s in lines branches
  263. do
  264. export STEP="cov"
  265. export CONTEXT="cov / $s"
  266. export PREV="$(curl -sS \
  267. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
  268. `?per_page=100" \
  269. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
  270. | select(.context == env.CONTEXT).description
  271. | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
  272. | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
  273. || echo 0)"
  274. export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
  275. | awk -F '[ /%]+' -v s=$s '
  276. NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
  277. NR==2 && ENVIRON["PREV"]+0 != 0 {
  278. printf " (%+.1f%%)",$4-ENVIRON["PREV"]
  279. }')"
  280. jq -n '{
  281. state: "success",
  282. context: env.CONTEXT,
  283. description: env.DESCRIPTION,
  284. target_job: "${{github.job}} (${{matrix.arch}})",
  285. target_step: env.STEP,
  286. }' | tee status/$(basename $f .csv)-$s.json
  287. done
  288. - name: upload-status-sizes
  289. if: ${{matrix.arch == 'x86_64'}}
  290. uses: actions/upload-artifact@v2
  291. with:
  292. name: status
  293. path: status
  294. retention-days: 1
  # run as many exhaustive tests as fits in GitHub's time limits
  #
  # this grows exponentially, so it doesn't turn out to be that many
  test-pls:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        pls: [1, 2]
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-pls
        if: ${{matrix.pls <= 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}}" make test
      # >=2pls takes multiple days to run fully, so we can only
      # run a subset of tests, these are the most important
      - name: test-limited-pls
        if: ${{matrix.pls > 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}} test_dirs test_relocations" \
              make test
  # run with LFS_NO_INTRINSICS to make sure that works
  test-no-intrinsics:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-no-intrinsics
        run: |
          CFLAGS="$CFLAGS -DLFS_NO_INTRINSICS" make test
  # run under Valgrind to check for memory errors
  test-valgrind:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      # Valgrind takes a while with diminishing value, so only test
      # on one geometry
      - name: test-valgrind
        run: |
          TESTFLAGS="$TESTFLAGS -Gdefault --valgrind" make test
  # test that compilation is warning free under clang
  # run with Clang, mostly to check for Clang-specific warnings
  test-clang:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          # update first so install doesn't hit stale package indexes
          # (matches the other jobs, which all update before installing)
          sudo apt-get update -qq
          sudo apt-get install -qq clang python3 python3-pip
          pip3 install toml
          clang --version
          python3 --version
      - name: test-clang
        run: |
          # override CFLAGS since Clang does not support -fcallgraph-info
          # and -ftrack-macro-expansions
          make \
              CC=clang \
              CFLAGS="$CFLAGS -MMD -g3 -I. -std=c99 -Wall -Wextra -pedantic" \
              test
  # run benchmarks
  #
  # note there's no real benefit to running these on multiple archs
  bench:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      - name: bench
        run: |
          make bench
          # find bench results
          make lfs.bench.csv
          ./scripts/summary.py lfs.bench.csv \
              -bsuite \
              -freaded=bench_readed \
              -fproged=bench_proged \
              -ferased=bench_erased
          mkdir -p bench
          cp lfs.bench.csv bench/bench.csv
          # find perfbd results
          make lfs.perfbd.csv
          ./scripts/perfbd.py -u lfs.perfbd.csv
          mkdir -p bench
          cp lfs.perfbd.csv bench/perfbd.csv
      # create bench statuses
      - name: upload-bench
        uses: actions/upload-artifact@v2
        with:
          name: bench
          path: bench
      - name: status-bench
        run: |
          mkdir -p status
          f=bench/bench.csv
          for s in readed proged erased
          do
            export STEP="bench"
            export CONTEXT="bench / $s"
            export PREV="$(curl -sS \
                "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
                | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                    | select(.context == env.CONTEXT).description
                    | capture("(?<prev>[0-9]+)").prev' \
                || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
                | awk '
                    NR==2 {$1=0; printf "%s B",$NF}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                        printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                    }')"
            jq -n '{
                state: "success",
                context: env.CONTEXT,
                description: env.DESCRIPTION,
                target_job: "${{github.job}}",
                target_step: env.STEP,
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-bench
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
          retention-days: 1
  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-22.04
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v2
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2
          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
          # attempt to migrate
          cd ../..
          fusermount -u mount
          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount
          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
  # status related tasks that run after tests
  status:
    runs-on: ubuntu-22.04
    needs: [test, bench]
    steps:
      - uses: actions/checkout@v2
        if: ${{github.event_name == 'pull_request'}}
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          # update first so install doesn't hit stale package indexes
          # (matches the other jobs, which all update before installing)
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: sizes
          path: sizes
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: cov
          path: cov
      - uses: actions/download-artifact@v2
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          name: bench
          path: bench
  599. # try to find results from tests
  600. - name: create-table
  601. if: ${{github.event_name == 'pull_request'}}
  602. run: |
  603. # compare against pull-request target
  604. curl -sS \
  605. "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
  606. `${{github.event.pull_request.base.ref}}`
  607. `?per_page=100" \
  608. | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
  609. >> prev-status.json \
  610. || true
  611. # build table for GitHub
  612. declare -A table
  613. # sizes table
  614. i=0
  615. j=0
  616. for c in "" readonly threadsafe migrate error-asserts
  617. do
  618. # per-config results
  619. c_or_default=${c:-default}
  620. c_camel=${c_or_default^}
  621. table[$i,$j]=$c_camel
  622. ((j+=1))
  623. for s in code stack structs
  624. do
  625. f=sizes/thumb${c:+-$c}.$s.csv
  626. [ -e $f ] && table[$i,$j]=$( \
  627. export PREV="$(jq -re '
  628. select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
  629. | capture("(?<prev>[0-9∞]+)").prev' \
  630. prev-status.json || echo 0)"
  631. ./scripts/summary.py $f --max=stack_limit -Y \
  632. | awk '
  633. NR==2 {$1=0; printf "%s B",$NF}
  634. NR==2 && ENVIRON["PREV"]+0 != 0 {
  635. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  636. }' \
  637. | sed -e 's/ /\&nbsp;/g')
  638. ((j+=1))
  639. done
  640. ((j=0, i+=1))
  641. done
  642. # coverage table
  643. i=0
  644. j=4
  645. for s in lines branches
  646. do
  647. table[$i,$j]=${s^}
  648. ((j+=1))
  649. f=cov/cov.csv
  650. [ -e $f ] && table[$i,$j]=$( \
  651. export PREV="$(jq -re '
  652. select(.context == "'"cov / $s"'").description
  653. | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
  654. | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
  655. prev-status.json || echo 0)"
  656. ./scripts/cov.py -u $f -f$s -Y \
  657. | awk -F '[ /%]+' -v s=$s '
  658. NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
  659. NR==2 && ENVIRON["PREV"]+0 != 0 {
  660. printf " (%+.1f%%)",$4-ENVIRON["PREV"]
  661. }' \
  662. | sed -e 's/ /\&nbsp;/g')
  663. ((j=4, i+=1))
  664. done
  665. # benchmark table
  666. i=3
  667. j=4
  668. for s in readed proged erased
  669. do
  670. table[$i,$j]=${s^}
  671. ((j+=1))
  672. f=bench/bench.csv
  673. [ -e $f ] && table[$i,$j]=$( \
  674. export PREV="$(jq -re '
  675. select(.context == "'"bench / $s"'").description
  676. | capture("(?<prev>[0-9]+)").prev' \
  677. prev-status.json || echo 0)"
  678. ./scripts/summary.py $f -f$s=bench_$s -Y \
  679. | awk '
  680. NR==2 {$1=0; printf "%s B",$NF}
  681. NR==2 && ENVIRON["PREV"]+0 != 0 {
  682. printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
  683. }' \
  684. | sed -e 's/ /\&nbsp;/g')
  685. ((j=4, i+=1))
  686. done
  687. # build the actual table
  688. echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
  689. echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
  690. for ((i=0; i<6; i++))
  691. do
  692. echo -n "|" >> table.txt
  693. for ((j=0; j<6; j++))
  694. do
  695. echo -n " " >> table.txt
  696. [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
  697. echo -n "${table[$i,$j]:-}" >> table.txt
  698. echo -n " |" >> table.txt
  699. done
  700. echo >> table.txt
  701. done
  702. cat table.txt
  703. # create a bot comment for successful runs on pull requests
  704. - name: create-comment
  705. if: ${{github.event_name == 'pull_request'}}
  706. run: |
  707. touch comment.txt
  708. echo "<details>" >> comment.txt
  709. echo "<summary>" >> comment.txt
  710. echo "Tests passed ✓, `
  711. `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
  712. `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
  713. `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
  714. >> comment.txt
  715. echo "</summary>" >> comment.txt
  716. echo >> comment.txt
  717. [ -e table.txt ] && cat table.txt >> comment.txt
  718. echo >> comment.txt
  719. echo "</details>" >> comment.txt
  720. cat comment.txt
  721. mkdir -p comment
  722. jq -n --rawfile comment comment.txt '{
  723. number: ${{github.event.number}},
  724. body: $comment,
  725. }' | tee comment/comment.json
  726. - name: upload-comment
  727. uses: actions/upload-artifact@v2
  728. with:
  729. name: comment
  730. path: comment
  731. retention-days: 1