name: test
on: [push, pull_request]

defaults:
  run:
    shell: bash -euv -o pipefail {0}
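
# note: the custom shell above runs each step's script with -e/-u/-o pipefail
# (abort on errors, unset variables, and failed pipelines) and -v (echo script
# lines as they are read); {0} is GitHub's placeholder for the generated
# script path
#
# the env below keeps builds warning-clean (-Werror), builds in parallel
# (MAKEFLAGS=-j), and passes -k to the test runner (presumably its keep-going
# flag, mirroring make -k); BENCHFLAGS is left empty as a hook for the bench
# steps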

env:
  CFLAGS: -Werror
  MAKEFLAGS: -j
  TESTFLAGS: -k
  BENCHFLAGS:

jobs:
  # run tests
  test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        arch: [x86_64, thumb, mips, powerpc]

    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version

      # cross-compile with ARM Thumb (32-bit, little-endian)
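      #
      # each cross-compile step below exports CC/EXEC via $GITHUB_ENV so the
      # remaining steps pick them up; presumably the Makefile runs test
      # binaries through $EXEC (QEMU user-mode emulation), and --static
      # avoids needing the target libc at runtime. roughly equivalent to
      # running, for illustration only:
      #
      #   CC="arm-linux-gnueabi-gcc -mthumb --static" EXEC=qemu-arm make test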
      - name: install-thumb
        if: ${{matrix.arch == 'thumb'}}
        run: |
          sudo apt-get install -qq \
              gcc-arm-linux-gnueabi \
              libc6-dev-armel-cross \
              qemu-user
          echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV
          echo "EXEC=qemu-arm" >> $GITHUB_ENV
          arm-linux-gnueabi-gcc --version
          qemu-arm -version

      # cross-compile with MIPS (32-bit, big-endian)
      - name: install-mips
        if: ${{matrix.arch == 'mips'}}
        run: |
          sudo apt-get install -qq \
              gcc-mips-linux-gnu \
              libc6-dev-mips-cross \
              qemu-user
          echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-mips" >> $GITHUB_ENV
          mips-linux-gnu-gcc --version
          qemu-mips -version

      # cross-compile with PowerPC (32-bit, big-endian)
      - name: install-powerpc
        if: ${{matrix.arch == 'powerpc'}}
        run: |
          sudo apt-get install -qq \
              gcc-powerpc-linux-gnu \
              libc6-dev-powerpc-cross \
              qemu-user
          echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV
          echo "EXEC=qemu-ppc" >> $GITHUB_ENV
          powerpc-linux-gnu-gcc --version
          qemu-ppc -version

      # does littlefs compile?
      - name: test-build
        run: |
          make clean
          make build

      # make sure example can at least compile
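      #
      # the sed below slices the bodies of the ``` c fenced blocks out of
      # README.md; roughly equivalent, for illustration only, to:
      #
      #   awk '/^``` c/{f=1; next} /^```/{f=0; next} f' README.md > test.c
      #
      # the -Duser_provided_block_device_*=NULL defines stub out the block
      # device hooks the README example references, and -include stdio.h
      # supplies the stdio the example uses; it only needs to compile, not
      # run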
      - name: test-example
        run: |
          make clean
          sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c
          CFLAGS="$CFLAGS \
              -Duser_provided_block_device_read=NULL \
              -Duser_provided_block_device_prog=NULL \
              -Duser_provided_block_device_erase=NULL \
              -Duser_provided_block_device_sync=NULL \
              -include stdio.h" \
              make all
          rm test.c

      # run the tests!
      - name: test
        run: |
          make clean
          make test

      # collect coverage info
      #
      # Note the goal is to maximize coverage in the small, easy-to-run
      # tests, so we intentionally exclude more aggressive powerloss testing
      # from coverage results
      - name: cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          make lfs.cov.csv
          ./scripts/cov.py -u lfs.cov.csv
          mkdir -p cov
          cp lfs.cov.csv cov/cov.csv

      # find compile-time measurements
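      #
      # each sizes* step below rebuilds with asserts and logging compiled out
      # so the numbers reflect a minimal configuration, has the Makefile emit
      # per-function code/data/stack/structs CSVs, prints a summary table,
      # and stashes the CSVs in sizes/ named by arch and config; the
      # following sizes-* variants repeat the measurement with one extra
      # feature toggled on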
      - name: sizes
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}.structs.csv
      - name: sizes-readonly
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_READONLY" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-readonly.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-readonly.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-readonly.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-readonly.structs.csv
      - name: sizes-threadsafe
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_THREADSAFE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-threadsafe.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-threadsafe.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-threadsafe.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-threadsafe.structs.csv
      - name: sizes-multiversion
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MULTIVERSION" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-multiversion.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-multiversion.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-multiversion.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-multiversion.structs.csv
      - name: sizes-migrate
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_ASSERT \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -DLFS_MIGRATE" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-migrate.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-migrate.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-migrate.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-migrate.structs.csv
      - name: sizes-error-asserts
        run: |
          make clean
          CFLAGS="$CFLAGS \
              -DLFS_NO_DEBUG \
              -DLFS_NO_WARN \
              -DLFS_NO_ERROR \
              -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'" \
              make lfs.code.csv lfs.data.csv lfs.stack.csv lfs.structs.csv
          ./scripts/structs.py -u lfs.structs.csv
          ./scripts/summary.py lfs.code.csv lfs.data.csv lfs.stack.csv \
              -bfunction \
              -fcode=code_size \
              -fdata=data_size \
              -fstack=stack_limit --max=stack_limit
          mkdir -p sizes
          cp lfs.code.csv sizes/${{matrix.arch}}-error-asserts.code.csv
          cp lfs.data.csv sizes/${{matrix.arch}}-error-asserts.data.csv
          cp lfs.stack.csv sizes/${{matrix.arch}}-error-asserts.stack.csv
          cp lfs.structs.csv sizes/${{matrix.arch}}-error-asserts.structs.csv

      # create size statuses
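      #
      # the upload-* steps publish the raw CSVs, while the status-* steps
      # query the statuses already posted on master to recover the previous
      # value from its description, compute a delta, and emit one JSON
      # fragment per measurement; a companion workflow with write access
      # presumably downloads these artifacts and turns them into commit
      # statuses. an illustrative fragment (values made up):
      #
      #   {"state": "success", "context": "sizes (thumb) / code",
      #    "description": "17012 B (+0.1%)", "target_job": "test (thumb)",
      #    "target_step": "sizes"}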
      - name: upload-sizes
        uses: actions/upload-artifact@v4
        with:
          name: sizes-${{matrix.arch}}
          path: sizes
      - name: status-sizes
        run: |
          mkdir -p status
          for f in $(shopt -s nullglob ; echo sizes/*.csv)
          do
            # skip .data.csv as it should always be zero
            [[ $f == *.data.csv ]] && continue
            export STEP="sizes$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/-\1/p')"
            export CONTEXT="sizes (${{matrix.arch}}$(echo $f \
              | sed -n 's/[^-.]*-\([^.]*\)\..*csv/, \1/p')) / $(echo $f \
              | sed -n 's/[^.]*\.\(.*\)\.csv/\1/p')"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9∞]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f --max=stack_limit -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP
            }' | tee status/$(basename $f .csv).json
          done
      - name: upload-status-sizes
        uses: actions/upload-artifact@v4
        with:
          name: status-sizes-${{matrix.arch}}
          path: status
          retention-days: 1

      # create cov statuses
      - name: upload-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v4
        with:
          name: cov
          path: cov
      - name: status-cov
        if: ${{matrix.arch == 'x86_64'}}
        run: |
          mkdir -p status
          f=cov/cov.csv
          for s in lines branches
          do
            export STEP="cov"
            export CONTEXT="cov / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/cov.py -u $f -f$s -Y \
              | awk -F '[ /%]+' -v s=$s '
                NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}} (${{matrix.arch}})",
              target_step: env.STEP
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-cov
        if: ${{matrix.arch == 'x86_64'}}
        uses: actions/upload-artifact@v4
        with:
          name: status-cov
          path: status
          retention-days: 1

  # run as many exhaustive tests as fit in GitHub's time limits
  #
  # this grows exponentially, so it doesn't turn out to be that many
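  #
  # (-P is presumably the test runner's powerloss flag: -P1 injects a single
  # powerloss at each opportunity, -P2 every nested pair of powerlosses, and
  # so on, which is why the number of scenarios explodes with depth)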
  test-pls:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        pls: [1, 2]

    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-pls
        if: ${{matrix.pls <= 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}}" make test
      # >=2pls takes multiple days to run fully, so we only run a subset of
      # tests; these are the most important
      - name: test-limited-pls
        if: ${{matrix.pls > 1}}
        run: |
          TESTFLAGS="$TESTFLAGS -P${{matrix.pls}} test_dirs test_relocations" \
              make test

  # run with LFS_NO_INTRINSICS to make sure that works
  test-no-intrinsics:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-no-intrinsics
        run: |
          CFLAGS="$CFLAGS -DLFS_NO_INTRINSICS" make test

  # run with LFS_SHRINKNONRELOCATING to make sure that works
  test-shrink:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-shrink
        run: |
          CFLAGS="$CFLAGS -DLFS_SHRINKNONRELOCATING" make test

  # run with all trace options enabled to at least make sure these
  # all compile
  test-yes-trace:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-yes-trace
        run: |
          CFLAGS="$CFLAGS \
              -DLFS_YES_TRACE \
              -DLFS_RAMBD_YES_TRACE \
              -DLFS_FILEBD_YES_TRACE" \
              make test

  # run LFS_MULTIVERSION tests
  test-multiversion:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-multiversion
        run: |
          CFLAGS="$CFLAGS -DLFS_MULTIVERSION" make test

  # run tests on the older version lfs2.0
  test-lfs2_0:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - name: test-lfs2_0
        run: |
          CFLAGS="$CFLAGS -DLFS_MULTIVERSION" \
              TESTFLAGS="$TESTFLAGS -DDISK_VERSION=0x00020000" \
              make test

  # run under Valgrind to check for memory errors
  test-valgrind:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      # Valgrind takes a while with diminishing returns, so only test
      # on one geometry
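      #
      # (in TESTFLAGS below, --valgrind presumably wraps each test case in
      # valgrind, while -Gdefault restricts runs to the default disk geometry
      # and -Pnone disables powerloss injection to keep runtime manageable;
      # --context=1024 is assumed to bound how much failure context the
      # runner keeps)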
      - name: test-valgrind
        run: |
          TESTFLAGS="$TESTFLAGS --valgrind --context=1024 -Gdefault -Pnone" \
              make test

  # run with Clang, mostly to check for Clang-specific warnings and to make
  # sure compilation stays warning-free
  test-clang:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get install -qq clang python3 python3-pip
          pip3 install toml
          clang --version
          python3 --version
      - name: test-clang
        run: |
          # override CFLAGS since Clang does not support -fcallgraph-info
          # and -ftrack-macro-expansion
          make \
              CC=clang \
              CFLAGS="$CFLAGS -MMD -g3 -I. -std=c99 -Wall -Wextra -pedantic" \
              test

  # run benchmarks
  #
  # note there's no real benefit to running these on multiple archs
  bench:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip valgrind
          pip3 install toml
          gcc --version
          python3 --version
          valgrind --version
      - name: bench
        run: |
          make bench
          # find bench results
          make lfs.bench.csv
          ./scripts/summary.py lfs.bench.csv \
              -bsuite \
              -freaded=bench_readed \
              -fproged=bench_proged \
              -ferased=bench_erased
          mkdir -p bench
          cp lfs.bench.csv bench/bench.csv
          # find perfbd results
          make lfs.perfbd.csv
          ./scripts/perfbd.py -u lfs.perfbd.csv
          mkdir -p bench
          cp lfs.perfbd.csv bench/perfbd.csv

      # create bench statuses
      - name: upload-bench
        uses: actions/upload-artifact@v4
        with:
          name: bench
          path: bench
      - name: status-bench
        run: |
          mkdir -p status
          f=bench/bench.csv
          for s in readed proged erased
          do
            export STEP="bench"
            export CONTEXT="bench / $s"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master`
                `?per_page=100" \
              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
                | select(.context == env.CONTEXT).description
                | capture("(?<prev>[0-9]+)").prev' \
              || echo 0)"
            export DESCRIPTION="$(./scripts/summary.py $f -f$s=bench_$s -Y \
              | awk '
                NR==2 {$1=0; printf "%s B",$NF}
                NR==2 && ENVIRON["PREV"]+0 != 0 {
                  printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                }')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
              target_job: "${{github.job}}",
              target_step: env.STEP
            }' | tee status/$(basename $f .csv)-$s.json
          done
      - name: upload-status-bench
        uses: actions/upload-artifact@v4
        with:
          name: status-bench
          path: status
          retention-days: 1

  # run compatibility tests using the pull-request target as the previous
  # version
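  #
  # (changeprefix.py renames the base version's lfs_*/LFS_* symbols to
  # lfsp_*/LFSP_* so both versions can be compiled into one test binary,
  # and tests/test_compat.toml presumably exercises images written by one
  # version and read by the other; CFLAGS=-DLFSP=lfsp/lfsp.h tells the tests
  # where to find the renamed headers)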
  test-compat:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        if: ${{github.event_name == 'pull_request'}}
      # checkout the current pr target into lfsp
      - uses: actions/checkout@v4
        if: ${{github.event_name == 'pull_request'}}
        with:
          ref: ${{github.event.pull_request.base.ref}}
          path: lfsp
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      # adjust prefix of lfsp
      - name: changeprefix
        if: ${{github.event_name == 'pull_request'}}
        run: |
          ./scripts/changeprefix.py lfs lfsp lfsp/*.h lfsp/*.c
      - name: test-compat
        if: ${{github.event_name == 'pull_request'}}
        run: |
          TESTS=tests/test_compat.toml \
              SRC="$(find . lfsp -name '*.c' -maxdepth 1 \
                -and -not -name '*.t.*' \
                -and -not -name '*.b.*')" \
              CFLAGS="-DLFSP=lfsp/lfsp.h" \
              make test

  # self-host with littlefs-fuse for a fuzz-like test
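  #
  # (this builds littlefs-fuse against the current sources, formats a
  # loopback-backed 64 MiB disk (128K x 512 B blocks), mounts it with FUSE,
  # copies the littlefs source tree onto the mounted filesystem, and then
  # builds and runs the test suite from inside it)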
  fuse:
    runs-on: ubuntu-latest
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v4
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: littlefs-fuse
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf littlefs-fuse/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # self-host test
          make -C littlefs-fuse
          littlefs-fuse/lfs --format $LOOP
          littlefs-fuse/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # test migration using littlefs-fuse
  migrate:
    runs-on: ubuntu-latest
    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v4
      - name: install
        run: |
          # need a few things
          sudo apt-get update -qq
          sudo apt-get install -qq gcc python3 python3-pip libfuse-dev
          sudo pip3 install toml
          gcc --version
          python3 --version
          fusermount -V
      - uses: actions/checkout@v4
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v2
          path: v2
      - uses: actions/checkout@v4
        with:
          repository: littlefs-project/littlefs-fuse
          ref: v1
          path: v1
      - name: setup
        run: |
          # copy our new version into littlefs-fuse
          rm -rf v2/littlefs/*
          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
          # setup disk for littlefs-fuse
          mkdir mount
          LOOP=$(sudo losetup -f)
          sudo chmod a+rw $LOOP
          dd if=/dev/zero bs=512 count=128K of=disk
          losetup $LOOP disk
          echo "LOOP=$LOOP" >> $GITHUB_ENV
      - name: test
        run: |
          # compile v1 and v2
          make -C v1
          make -C v2
          # run self-host test with v1
          v1/lfs --format $LOOP
          v1/lfs $LOOP mount
          ls mount
          mkdir mount/littlefs
          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test
          # attempt to migrate
          cd ../..
          fusermount -u mount
          v2/lfs --migrate $LOOP
          v2/lfs $LOOP mount
          # run self-host test with v2 right where we left off
          ls mount
          cd mount/littlefs
          stat .
          ls -flh
          make -B test-runner
          make -B test

  # status related tasks that run after tests
  status:
    runs-on: ubuntu-latest
    needs: [test, bench]
    steps:
      - uses: actions/checkout@v4
        if: ${{github.event_name == 'pull_request'}}
      - name: install
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # need a few things
          sudo apt-get install -qq gcc python3 python3-pip
          pip3 install toml
          gcc --version
          python3 --version
      - uses: actions/download-artifact@v4
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          pattern: '{sizes,sizes-*}'
          merge-multiple: true
          path: sizes
      - uses: actions/download-artifact@v4
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          pattern: '{cov,cov-*}'
          merge-multiple: true
          path: cov
      - uses: actions/download-artifact@v4
        if: ${{github.event_name == 'pull_request'}}
        continue-on-error: true
        with:
          pattern: '{bench,bench-*}'
          merge-multiple: true
          path: bench

      # try to find results from tests
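      #
      # (create-table below builds a bash associative array table[row,col]:
      # rows 0-5 hold the size configs (Default, Readonly, Threadsafe,
      # Multiversion, Migrate, Error-asserts) with their thumb code/stack/
      # structs numbers in columns 1-3, while columns 4-5 hold the coverage
      # rows at the top and the benchmark rows below; deltas come from
      # prev-status.json, the statuses previously posted on the PR target,
      # and the grid is then rendered as a GitHub-flavored markdown table in
      # table.txt)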
      - name: create-table
        if: ${{github.event_name == 'pull_request'}}
        run: |
          # compare against pull-request target
          curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/`
              `${{github.event.pull_request.base.ref}}`
              `?per_page=100" \
            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \
            >> prev-status.json \
            || true
          # build table for GitHub
          declare -A table
          # sizes table
          i=0
          j=0
          for c in "" readonly threadsafe multiversion migrate error-asserts
          do
            # per-config results
            c_or_default=${c:-default}
            c_camel=${c_or_default^}
            table[$i,$j]=$c_camel
            ((j+=1))
            for s in code stack structs
            do
              f=sizes/thumb${c:+-$c}.$s.csv
              [ -e $f ] && table[$i,$j]=$( \
                export PREV="$(jq -re '
                  select(.context == "'"sizes (thumb${c:+, $c}) / $s"'").description
                  | capture("(?<prev>[0-9∞]+)").prev' \
                  prev-status.json || echo 0)"
                ./scripts/summary.py $f --max=stack_limit -Y \
                  | awk '
                    NR==2 {$1=0; printf "%s B",$NF}
                    NR==2 && ENVIRON["PREV"]+0 != 0 {
                      printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                    }' \
                  | sed -e 's/ /\&nbsp;/g')
              ((j+=1))
            done
            ((j=0, i+=1))
          done
          # coverage table
          i=0
          j=4
          for s in lines branches
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=cov/cov.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                select(.context == "'"cov / $s"'").description
                | capture("(?<prev_a>[0-9]+)/(?<prev_b>[0-9]+)")
                | 100*((.prev_a|tonumber) / (.prev_b|tonumber))' \
                prev-status.json || echo 0)"
              ./scripts/cov.py -u $f -f$s -Y \
                | awk -F '[ /%]+' -v s=$s '
                  NR==2 {$1=0; printf "%d/%d %s",$2,$3,s}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",$4-ENVIRON["PREV"]
                  }' \
                | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # benchmark table
          i=3
          j=4
          for s in readed proged erased
          do
            table[$i,$j]=${s^}
            ((j+=1))
            f=bench/bench.csv
            [ -e $f ] && table[$i,$j]=$( \
              export PREV="$(jq -re '
                select(.context == "'"bench / $s"'").description
                | capture("(?<prev>[0-9]+)").prev' \
                prev-status.json || echo 0)"
              ./scripts/summary.py $f -f$s=bench_$s -Y \
                | awk '
                  NR==2 {$1=0; printf "%s B",$NF}
                  NR==2 && ENVIRON["PREV"]+0 != 0 {
                    printf " (%+.1f%%)",100*($NF-ENVIRON["PREV"])/ENVIRON["PREV"]
                  }' \
                | sed -e 's/ /\&nbsp;/g')
            ((j=4, i+=1))
          done
          # build the actual table
          echo "| | Code | Stack | Structs | | Coverage |" >> table.txt
          echo "|:--|-----:|------:|--------:|:--|---------:|" >> table.txt
          for ((i=0; i<6; i++))
          do
            echo -n "|" >> table.txt
            for ((j=0; j<6; j++))
            do
              echo -n " " >> table.txt
              [[ i -eq 2 && j -eq 5 ]] && echo -n "**Benchmarks**" >> table.txt
              echo -n "${table[$i,$j]:-}" >> table.txt
              echo -n " |" >> table.txt
            done
            echo >> table.txt
          done
          cat table.txt

      # create a bot comment for successful runs on pull requests
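      #
      # (the comment is a collapsible <details> block whose summary pulls its
      # headline numbers from the first data row of table.txt, i.e. the
      # Default config picked out by the awk 'NR==3' calls; the result is
      # wrapped in JSON with the PR number and uploaded as an artifact, and a
      # companion workflow with write permissions presumably posts it as the
      # actual bot comment)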
      - name: create-comment
        if: ${{github.event_name == 'pull_request'}}
        run: |
          touch comment.txt
          echo "<details>" >> comment.txt
          echo "<summary>" >> comment.txt
          echo "Tests passed ✓, `
            `Code: $(awk 'NR==3 {print $4}' table.txt || true), `
            `Stack: $(awk 'NR==3 {print $6}' table.txt || true), `
            `Structs: $(awk 'NR==3 {print $8}' table.txt || true)" \
            >> comment.txt
          echo "</summary>" >> comment.txt
          echo >> comment.txt
          [ -e table.txt ] && cat table.txt >> comment.txt
          echo >> comment.txt
          echo "</details>" >> comment.txt
          cat comment.txt
          mkdir -p comment
          jq -n --rawfile comment comment.txt '{
            number: ${{github.event.number}},
            body: $comment
          }' | tee comment/comment.json
      - name: upload-comment
        uses: actions/upload-artifact@v4
        with:
          name: comment
          path: comment
          retention-days: 1