/*
 * The little filesystem
 *
 * Copyright (c) 2017 ARM Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lfs.h"
#include "lfs_util.h"


/// Caching block device operations ///
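// Two caches come into play below: rcache buffers reads, while pcache
// buffers program (write) data until it can be flushed. Reads are given a
// pointer to the pcache so they can observe data that has been queued for
// programming but not yet written to disk.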
static int lfs_cache_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, bool store,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    LFS_ASSERT(block != 0xffffffff);

    while (size > 0) {
        if (pcache && block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + pcache->size) {
            // is already in pcache?
            lfs_size_t diff = lfs_min(size, pcache->size - (off-pcache->off));
            memcpy(data, &pcache->buffer[off-pcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (block == rcache->block &&
                off >= rcache->off &&
                off < rcache->off + rcache->size) {
            // is already in rcache?
            lfs_size_t diff = lfs_min(size, rcache->size - (off-rcache->off));
            if (pcache && block == pcache->block) {
                diff = lfs_min(diff, pcache->off - off);
            }
            memcpy(data, &rcache->buffer[off-rcache->off], diff);

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        if (!store && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache?
            lfs_size_t diff = size - (size % lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(block < lfs->cfg->block_count);
        lfs_size_t nsize = store ? lfs->cfg->cache_size : lfs->cfg->prog_size;
        rcache->block = block;
        rcache->off = lfs_aligndown(off, nsize);
        rcache->size = nsize;
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, nsize);
        if (err) {
            return err;
        }
    }

    return 0;
}

static int lfs_cache_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;

    for (lfs_off_t i = 0; i < size; i++) {
        uint8_t c;
        int err = lfs_cache_read(lfs, pcache, rcache, true,
                block, off+i, &c, 1);
        if (err) {
            return err;
        }

        if (c != data[i]) {
            return false;
        }
    }

    return true;
}

static int lfs_cache_crc(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    for (lfs_off_t i = 0; i < size; i++) {
        uint8_t c;
        int err = lfs_cache_read(lfs, pcache, rcache, true,
                block, off+i, &c, 1);
        if (err) {
            return err;
        }

        *crc = lfs_crc(*crc, &c, 1);
    }

    return 0;
}

static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
    // do not zero, cheaper if cache is readonly or only going to be
    // written with identical data (during relocates)
    (void)lfs;
    rcache->block = 0xffffffff;
}

static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    // zero to avoid information leak
    memset(pcache->buffer, 0xff, lfs->cfg->prog_size);
    pcache->block = 0xffffffff;
}

static int lfs_cache_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    if (pcache->block != 0xffffffff && pcache->block != 0xfffffffe) {
        LFS_ASSERT(pcache->block < lfs->cfg->block_count);
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk
            lfs_cache_drop(lfs, rcache);
            int res = lfs_cache_cmp(lfs, NULL, rcache, pcache->block,
                    pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (!res) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}

static int lfs_cache_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block != 0xffffffff);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = off - pcache->off;
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_cache_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming an
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == 0xffffffff);

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}

/// General lfs block device operations ///
static int lfs_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    return lfs_cache_read(lfs, &lfs->pcache, &lfs->rcache, true,
            block, off, buffer, size);
}

static int lfs_bd_prog(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    return lfs_cache_prog(lfs, &lfs->pcache, &lfs->rcache, false,
            block, off, buffer, size);
}

static int lfs_bd_cmp(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, const void *buffer, lfs_size_t size) {
    return lfs_cache_cmp(lfs, NULL, &lfs->rcache, block, off, buffer, size);
}

static int lfs_bd_crc32(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    return lfs_cache_crc(lfs, NULL, &lfs->rcache, block, off, size, crc);
}

static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
    LFS_ASSERT(block < lfs->cfg->block_count);
    return lfs->cfg->erase(lfs->cfg, block);
}

static int lfs_bd_sync(lfs_t *lfs) {
    lfs_cache_drop(lfs, &lfs->rcache);

    int err = lfs_cache_flush(lfs, &lfs->pcache, &lfs->rcache, false);
    if (err) {
        return err;
    }

    return lfs->cfg->sync(lfs->cfg);
}

/// Internal operations predeclared here ///
static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *pdir);
static int32_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *parent);
static int lfs_fs_relocate(lfs_t *lfs,
        const lfs_block_t oldpair[2], lfs_block_t newpair[2]);
static int lfs_fs_deorphan(lfs_t *lfs);
static int lfs_fs_demove(lfs_t *lfs);
static int lfs_fs_forceconsistency(lfs_t *lfs);
static int lfs_deinit(lfs_t *lfs);

/// Block allocator ///
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
    lfs_t *lfs = (lfs_t*)p;
    lfs_block_t off = ((block - lfs->free.off)
            + lfs->cfg->block_count) % lfs->cfg->block_count;

    if (off < lfs->free.size) {
        lfs->free.buffer[off / 32] |= 1U << (off % 32);
    }

    return 0;
}

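// The allocator works over a sliding lookahead window of the block map:
// lfs_fs_traverse calls lfs_alloc_lookahead to set a bit for every block
// currently in use, and lfs_alloc hands out blocks whose bits remain clear,
// sliding the window forward once it has been exhausted.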
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->cfg->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_WARN("No more free space %"PRIu32,
                    lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        lfs->free.off = (lfs->free.off + lfs->free.size)
                % lfs->cfg->block_count;
        lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->free.ack);
        lfs->free.i = 0;

        // find mask of free blocks from tree
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
        int err = lfs_fs_traverse(lfs, lfs_alloc_lookahead, lfs);
        if (err) {
            return err;
        }
    }
}

static void lfs_alloc_ack(lfs_t *lfs) {
    lfs->free.ack = lfs->cfg->block_count;
}

/// Metadata pair and directory operations ///
static inline void lfs_pair_swap(lfs_block_t pair[2]) {
    lfs_block_t t = pair[0];
    pair[0] = pair[1];
    pair[1] = t;
}

static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
    return pair[0] == 0xffffffff || pair[1] == 0xffffffff;
}

static inline int lfs_pair_cmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
             paira[0] == pairb[1] || paira[1] == pairb[0]);
}

static inline bool lfs_pair_sync(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
           (paira[0] == pairb[1] && paira[1] == pairb[0]);
}

static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
    pair[0] = lfs_fromle32(pair[0]);
    pair[1] = lfs_fromle32(pair[1]);
}

static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
    pair[0] = lfs_tole32(pair[0]);
    pair[1] = lfs_tole32(pair[1]);
}

static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
    ctz->head = lfs_fromle32(ctz->head);
    ctz->size = lfs_fromle32(ctz->size);
}

static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
    ctz->head = lfs_tole32(ctz->head);
    ctz->size = lfs_tole32(ctz->size);
}

static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
    superblock->version = lfs_fromle32(superblock->version);
    superblock->block_size = lfs_fromle32(superblock->block_size);
    superblock->block_count = lfs_fromle32(superblock->block_count);
    superblock->inline_max = lfs_fromle32(superblock->inline_max);
    superblock->attr_max = lfs_fromle32(superblock->attr_max);
    superblock->name_max = lfs_fromle32(superblock->name_max);
}

static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
    superblock->version = lfs_tole32(superblock->version);
    superblock->block_size = lfs_tole32(superblock->block_size);
    superblock->block_count = lfs_tole32(superblock->block_count);
    superblock->inline_max = lfs_tole32(superblock->inline_max);
    superblock->attr_max = lfs_tole32(superblock->attr_max);
    superblock->name_max = lfs_tole32(superblock->name_max);
}

/// Entry tag operations ///
#define LFS_MKTAG(type, id, size) \
    (((uint32_t)(type) << 22) | ((uint32_t)(id) << 12) | (uint32_t)(size))

#define LFS_MKATTR(type, id, buffer, size, next) \
    &(const lfs_mattr_t){LFS_MKTAG(type, id, size), (buffer), (next)}

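// As encoded by LFS_MKTAG and the masks below, a 32-bit tag packs four
// fields: bit 31 is the valid bit (0 = valid), bits 30-22 hold the 9-bit
// type (bit 30 doubling as the user-attribute flag), bits 21-12 hold the
// 10-bit id (0x3ff meaning "no id"), and bits 11-0 hold the 12-bit size.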
static inline bool lfs_tag_isvalid(uint32_t tag) {
    return !(tag & 0x80000000);
}

static inline bool lfs_tag_isuser(uint32_t tag) {
    return (tag & 0x40000000);
}

static inline uint16_t lfs_tag_type(uint32_t tag) {
    return (tag & 0x7fc00000) >> 22;
}

static inline uint16_t lfs_tag_subtype(uint32_t tag) {
    return (tag & 0x7c000000) >> 22;
}

static inline uint16_t lfs_tag_id(uint32_t tag) {
    return (tag & 0x003ff000) >> 12;
}

static inline lfs_size_t lfs_tag_size(uint32_t tag) {
    return tag & 0x00000fff;
}

// operations on set of globals
static inline void lfs_global_xor(lfs_global_t *a, const lfs_global_t *b) {
    for (int i = 0; i < sizeof(lfs_global_t)/4; i++) {
        a->u32[i] ^= b->u32[i];
    }
}

static inline bool lfs_global_iszero(const lfs_global_t *a) {
    for (int i = 0; i < sizeof(lfs_global_t)/4; i++) {
        if (a->u32[i] != 0) {
            return false;
        }
    }
    return true;
}

static inline void lfs_global_zero(lfs_global_t *a) {
    memset(a, 0, sizeof(lfs_global_t));
}

static inline void lfs_global_fromle32(lfs_global_t *a) {
    lfs_pair_fromle32(a->l.movepair);
    a->l.moveid = lfs_fromle16(a->l.moveid);
}

static inline void lfs_global_tole32(lfs_global_t *a) {
    lfs_pair_tole32(a->l.movepair);
    a->l.moveid = lfs_tole16(a->l.moveid);
}

static inline void lfs_global_move(lfs_t *lfs,
        const lfs_block_t pair[2], uint16_t id) {
    lfs_global_t diff;
    lfs_global_zero(&diff);
    diff.l.movepair[0] ^= lfs->globals.g.movepair[0] ^ pair[0];
    diff.l.movepair[1] ^= lfs->globals.g.movepair[1] ^ pair[1];
    diff.l.moveid ^= lfs->globals.g.moveid ^ id;

    lfs_global_fromle32(&lfs->locals);
    lfs_global_xor(&lfs->locals, &diff);
    lfs_global_tole32(&lfs->locals);

    lfs_global_xor(&lfs->globals, &diff);
}

static inline void lfs_global_orphans(lfs_t *lfs, int8_t orphans) {
    lfs->locals.l.deorphaned ^= (lfs->globals.g.orphans == 0);
    lfs->locals.l.deorphaned ^= (lfs->globals.g.orphans + orphans == 0);
    lfs->globals.g.orphans += orphans;
}

// commit logic
struct lfs_commit {
    lfs_block_t block;
    lfs_off_t off;
    uint32_t ptag;
    uint32_t crc;

    lfs_off_t begin;
    lfs_off_t end;
};

struct lfs_diskoff {
    lfs_block_t block;
    lfs_off_t off;
};

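// Note that tags are stored on disk xor'd with the tag that precedes them,
// so a scan in either direction can reconstruct each tag from the last one
// seen; lfs_commit_get uses this to walk a metadata block backwards from
// the most recent commit.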
static int32_t lfs_commit_get(lfs_t *lfs, lfs_block_t block, lfs_off_t off,
        uint32_t tag, uint32_t getmask, uint32_t gettag, int32_t getdiff,
        void *buffer, bool stopatcommit) {
    // iterate over dir block backwards (for faster lookups)
    while (off >= 2*sizeof(tag)+lfs_tag_size(tag)) {
        off -= sizeof(tag)+lfs_tag_size(tag);

        if (lfs_tag_type(tag) == LFS_TYPE_CRC && stopatcommit) {
            break;
        } else if (lfs_tag_type(tag) == LFS_TYPE_DELETE) {
            if (lfs_tag_id(tag) <= lfs_tag_id(gettag + getdiff)) {
                getdiff += LFS_MKTAG(0, 1, 0);
            }
        } else if ((tag & getmask) == ((gettag + getdiff) & getmask)) {
            if (buffer) {
                lfs_size_t diff = lfs_min(
                        lfs_tag_size(gettag), lfs_tag_size(tag));
                int err = lfs_bd_read(lfs, block,
                        off+sizeof(tag), buffer, diff);
                if (err) {
                    return err;
                }

                memset((uint8_t*)buffer + diff, 0,
                        lfs_tag_size(gettag) - diff);
            }

            return tag - getdiff;
        }

        uint32_t ntag;
        int err = lfs_bd_read(lfs, block, off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        tag ^= lfs_fromle32(ntag);
    }

    return LFS_ERR_NOENT;
}

static int lfs_commit_attrs(lfs_t *lfs, struct lfs_commit *commit,
        uint16_t id, const struct lfs_attr *attrs);

static int lfs_commit_move(lfs_t *lfs, struct lfs_commit *commit,
        uint32_t frommask, uint32_t fromtag, uint32_t tomask, uint32_t totag,
        const lfs_mdir_t *dir, const lfs_mattr_t *attrs);

static int lfs_commit_attr(lfs_t *lfs, struct lfs_commit *commit,
        uint32_t tag, const void *buffer) {
    if (lfs_tag_subtype(tag) == LFS_FROM_MOVE) {
        // special case for moves
        return lfs_commit_move(lfs, commit,
                0x003ff000, LFS_MKTAG(0, lfs_tag_size(tag), 0),
                0x003ff000, LFS_MKTAG(0, lfs_tag_id(tag), 0),
                buffer, NULL);
    } else if (lfs_tag_subtype(tag) == LFS_FROM_ATTRS) {
        // special case for custom attributes
        return lfs_commit_attrs(lfs, commit,
                lfs_tag_id(tag), buffer);
    }

    // check if we fit
    lfs_size_t size = lfs_tag_size(tag);
    if (commit->off + sizeof(tag)+size > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // write out tag
    uint32_t ntag = lfs_tole32((tag & 0x7fffffff) ^ commit->ptag);
    commit->crc = lfs_crc(commit->crc, &ntag, sizeof(ntag));
    int err = lfs_bd_prog(lfs, commit->block, commit->off,
            &ntag, sizeof(ntag));
    if (err) {
        return err;
    }
    commit->off += sizeof(ntag);

    if (!(tag & 0x80000000)) {
        // from memory
        commit->crc = lfs_crc(commit->crc, buffer, size);
        err = lfs_bd_prog(lfs, commit->block, commit->off, buffer, size);
        if (err) {
            return err;
        }
    } else {
        // from disk
        const struct lfs_diskoff *disk = buffer;
        for (lfs_off_t i = 0; i < size; i++) {
            // rely on caching to make this efficient
            uint8_t dat;
            err = lfs_bd_read(lfs, disk->block, disk->off+i, &dat, 1);
            if (err) {
                return err;
            }

            commit->crc = lfs_crc(commit->crc, &dat, 1);
            err = lfs_bd_prog(lfs, commit->block, commit->off+i, &dat, 1);
            if (err) {
                return err;
            }
        }
    }

    commit->off += size;
    commit->ptag = tag & 0x7fffffff;
    return 0;
}

static int lfs_commit_attrs(lfs_t *lfs, struct lfs_commit *commit,
        uint16_t id, const struct lfs_attr *attrs) {
    for (const struct lfs_attr *a = attrs; a; a = a->next) {
        int err = lfs_commit_attr(lfs, commit,
                LFS_MKTAG(0x100 | a->type, id, a->size), a->buffer);
        if (err) {
            return err;
        }
    }

    return 0;
}

static int lfs_commit_move(lfs_t *lfs, struct lfs_commit *commit,
        uint32_t frommask, uint32_t fromtag, uint32_t tomask, uint32_t totag,
        const lfs_mdir_t *dir, const lfs_mattr_t *attrs) {
    // iterate through list and commits, only committing unique entries
    lfs_off_t off = dir->off;
    uint32_t ntag = dir->etag;
    while (attrs || off > sizeof(uint32_t)) {
        struct lfs_diskoff disk;
        uint32_t tag;
        const void *buffer;
        if (attrs) {
            tag = attrs->tag;
            buffer = attrs->buffer;
            attrs = attrs->next;
        } else {
            LFS_ASSERT(off > sizeof(ntag)+lfs_tag_size(ntag));
            off -= sizeof(ntag)+lfs_tag_size(ntag);

            tag = ntag;
            buffer = &disk;
            disk.block = dir->pair[0];
            disk.off = off + sizeof(tag);

            int err = lfs_bd_read(lfs, dir->pair[0], off, &ntag, sizeof(ntag));
            if (err) {
                return err;
            }

            ntag = lfs_fromle32(ntag);
            ntag ^= tag;
            tag |= 0x80000000;
        }

        if (lfs_tag_type(tag) == LFS_TYPE_DELETE &&
                lfs_tag_id(tag) <= lfs_tag_id(fromtag)) {
            // something was deleted, we need to move around it
            fromtag += LFS_MKTAG(0, 1, 0);
        } else if ((tag & frommask) == (fromtag & frommask)) {
            // check if type has already been committed
            int32_t res = lfs_commit_get(lfs, commit->block,
                    commit->off, commit->ptag,
                    lfs_tag_isuser(tag) ? 0x7ffff000 : 0x7c3ff000,
                    (tag & ~tomask) | totag,
                    0, NULL, true);
            if (res < 0 && res != LFS_ERR_NOENT) {
                return res;
            }

            if (res == LFS_ERR_NOENT) {
                // update id and commit, as we are currently unique
                int err = lfs_commit_attr(lfs, commit,
                        (tag & ~tomask) | totag,
                        buffer);
                if (err) {
                    return err;
                }
            }
        }
    }

    return 0;
}

static int lfs_commit_globals(lfs_t *lfs, struct lfs_commit *commit,
        lfs_global_t *locals) {
    if (lfs_global_iszero(&lfs->locals)) {
        return 0;
    }

    lfs_global_xor(locals, &lfs->locals);
    int err = lfs_commit_attr(lfs, commit,
            LFS_MKTAG(LFS_TYPE_GLOBALS + locals->l.deorphaned, 0x3ff, 10),
            locals);
    lfs_global_xor(locals, &lfs->locals);
    return err;
}

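// A commit is sealed with a CRC tag: lfs_commit_crc pads the commit out to
// the next program unit, peeks at the erased contents of that unit so the
// not-yet-programmed tag that follows will decode as invalid, writes the
// tag/CRC footer, then reads the commit back to verify the checksum.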
static int lfs_commit_crc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    lfs_off_t off = lfs_alignup(commit->off + 2*sizeof(uint32_t),
            lfs->cfg->prog_size);

    // read erased state from next program unit
    uint32_t tag;
    int err = lfs_bd_read(lfs, commit->block, off, &tag, sizeof(tag));
    if (err) {
        return err;
    }

    // build crc tag
    tag = lfs_fromle32(tag);
    tag = (0x80000000 & ~tag) |
            LFS_MKTAG(LFS_TYPE_CRC, 0x3ff,
                off - (commit->off+sizeof(uint32_t)));

    // write out crc
    uint32_t footer[2];
    footer[0] = lfs_tole32(tag ^ commit->ptag);
    commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
    footer[1] = lfs_tole32(commit->crc);
    err = lfs_bd_prog(lfs, commit->block, commit->off, footer, sizeof(footer));
    if (err) {
        return err;
    }
    commit->off += sizeof(tag)+lfs_tag_size(tag);
    commit->ptag = tag;

    // flush buffers
    err = lfs_bd_sync(lfs);
    if (err) {
        return err;
    }

    // successful commit, check checksum to make sure
    uint32_t crc = 0xffffffff;
    err = lfs_bd_crc32(lfs, commit->block, commit->begin,
            commit->off-lfs_tag_size(tag)-commit->begin, &crc);
    if (err) {
        return err;
    }

    if (crc != commit->crc) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}

// internal dir operations
static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
    // allocate pair of dir blocks (backwards, so we write block 1 first)
    for (int i = 0; i < 2; i++) {
        int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
        if (err) {
            return err;
        }
    }

    // rather than clobbering one of the blocks we just pretend
    // the revision may be valid
    int err = lfs_bd_read(lfs, dir->pair[0], 0, &dir->rev, 4);
    dir->rev = lfs_fromle32(dir->rev);
    if (err && err != LFS_ERR_CORRUPT) {
        return err;
    }

    // set defaults
    dir->off = sizeof(dir->rev);
    dir->etag = 0;
    dir->count = 0;
    dir->tail[0] = 0xffffffff;
    dir->tail[1] = 0xffffffff;
    dir->erased = false;
    dir->split = false;
    lfs_global_zero(&dir->locals);

    // don't write out yet, let caller take care of that
    return 0;
}

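// lfs_dir_fetchmatch picks the block of a metadata pair with the newer
// revision, replays its commits while checking each commit's CRC, and, if a
// find tag is given, reports the tag that matches it in the same pass.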
static int32_t lfs_dir_fetchmatch(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_block_t pair[2],
        uint32_t findmask, uint32_t findtag, const void *findbuffer) {
    dir->pair[0] = pair[0];
    dir->pair[1] = pair[1];
    int32_t foundtag = LFS_ERR_NOENT;

    // find the block with the most recent revision
    uint32_t rev[2];
    for (int i = 0; i < 2; i++) {
        int err = lfs_cache_read(lfs, &lfs->pcache, &lfs->rcache, false,
                dir->pair[i], 0, &rev[i], sizeof(rev[i]));
        rev[i] = lfs_fromle32(rev[i]);
        if (err && err != LFS_ERR_CORRUPT) {
            return err;
        }

        if (err == LFS_ERR_CORRUPT) {
            rev[i] = rev[(i+1)%2] - 1;
        }
    }

    if (lfs_scmp(rev[1], rev[0]) > 0) {
        lfs_pair_swap(dir->pair);
        lfs_pair_swap(rev);
    }

    // load blocks and check crc
    for (int i = 0; i < 2; i++) {
        lfs_off_t off = sizeof(dir->rev);
        uint32_t ptag = 0;
        uint32_t crc = 0xffffffff;

        dir->rev = lfs_tole32(rev[0]);
        crc = lfs_crc(crc, &dir->rev, sizeof(dir->rev));
        dir->rev = lfs_fromle32(dir->rev);
        dir->off = 0;

        uint32_t tempfoundtag = foundtag;
        uint16_t tempcount = 0;
        lfs_block_t temptail[2] = {0xffffffff, 0xffffffff};
        bool tempsplit = false;
        lfs_global_t templocals;
        lfs_global_zero(&templocals);

        while (true) {
            // extract next tag
            uint32_t tag;
            int err = lfs_bd_read(lfs, dir->pair[0],
                    off, &tag, sizeof(tag));
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    // can't continue?
                    dir->erased = false;
                    break;
                }
                return err;
            }

            crc = lfs_crc(crc, &tag, sizeof(tag));
            tag = lfs_fromle32(tag) ^ ptag;

            // next commit not yet programmed
            if (lfs_tag_type(ptag) == LFS_TYPE_CRC && !lfs_tag_isvalid(tag)) {
                dir->erased = true;
                break;
            }

            // check we're in valid range
            if (off + sizeof(tag)+lfs_tag_size(tag) > lfs->cfg->block_size) {
                dir->erased = false;
                break;
            }

            if (lfs_tag_type(tag) == LFS_TYPE_CRC) {
                // check the crc attr
                uint32_t dcrc;
                err = lfs_bd_read(lfs, dir->pair[0],
                        off+sizeof(tag), &dcrc, sizeof(dcrc));
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        dir->erased = false;
                        break;
                    }
                    return err;
                }
                dcrc = lfs_fromle32(dcrc);

                if (crc != dcrc) {
                    dir->erased = false;
                    break;
                }

                foundtag = tempfoundtag;
                dir->off = off + sizeof(tag)+lfs_tag_size(tag);
                dir->etag = tag;
                dir->count = tempcount;
                dir->tail[0] = temptail[0];
                dir->tail[1] = temptail[1];
                dir->split = tempsplit;
                dir->locals = templocals;
                lfs->seed ^= crc;
                crc = 0xffffffff;
            } else {
                err = lfs_bd_crc32(lfs, dir->pair[0],
                        off+sizeof(tag), lfs_tag_size(tag), &crc);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        dir->erased = false;
                        break;
                    }
                }

                if (lfs_tag_id(tag) < 0x3ff && lfs_tag_id(tag) >= tempcount) {
                    tempcount = lfs_tag_id(tag)+1;
                }

                if (lfs_tag_subtype(tag) == LFS_TYPE_TAIL) {
                    tempsplit = (lfs_tag_type(tag) & 1);
                    err = lfs_bd_read(lfs, dir->pair[0], off+sizeof(tag),
                            temptail, sizeof(temptail));
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            dir->erased = false;
                            break;
                        }
                    }
                    lfs_pair_fromle32(temptail);
                } else if (lfs_tag_subtype(tag) == LFS_TYPE_GLOBALS) {
                    templocals.l.deorphaned = (lfs_tag_type(tag) & 1);
                    err = lfs_bd_read(lfs, dir->pair[0], off+sizeof(tag),
                            &templocals, 10);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            dir->erased = false;
                            break;
                        }
                    }
                } else if (lfs_tag_subtype(tag) == LFS_TYPE_DELETE) {
                    LFS_ASSERT(tempcount > 0);
                    tempcount -= 1;

                    if (lfs_tag_id(tag) == lfs_tag_id(tempfoundtag)) {
                        tempfoundtag = LFS_ERR_NOENT;
                    } else if (lfs_tag_isvalid(tempfoundtag) &&
                            lfs_tag_id(tag) < lfs_tag_id(tempfoundtag)) {
                        tempfoundtag -= LFS_MKTAG(0, 1, 0);
                    }
                } else if ((tag & findmask) == (findtag & findmask)) {
                    // found a match?
                    if (lfs_tag_type(findtag) == LFS_TYPE_DIRSTRUCT) {
                        lfs_block_t child[2];
                        err = lfs_bd_read(lfs, dir->pair[0], off+sizeof(tag),
                                child, sizeof(child));
                        if (err < 0) {
                            if (err == LFS_ERR_CORRUPT) {
                                dir->erased = false;
                                break;
                            }
                            return err;
                        }

                        lfs_pair_fromle32(child);
                        if (lfs_pair_cmp(child,
                                (const lfs_block_t *)findbuffer) == 0) {
                            tempfoundtag = tag;
                        }
                    } else if (lfs_tag_type(findtag) == LFS_TYPE_NAME) {
                        int res = lfs_bd_cmp(lfs,
                                dir->pair[0], off+sizeof(tag),
                                findbuffer, lfs_tag_size(findtag));
                        if (res < 0) {
                            if (res == LFS_ERR_CORRUPT) {
                                dir->erased = false;
                                break;
                            }
                            return res;
                        }

                        if (res) {
                            tempfoundtag = tag;
                        }
                    } else {
                        tempfoundtag = tag;
                    }
                }
            }

            ptag = tag;
            off += sizeof(tag)+lfs_tag_size(tag);
        }

        // consider what we have good enough
        if (dir->off > 0) {
            // synthetic move
            if (lfs_pair_cmp(dir->pair, lfs->globals.g.movepair) == 0) {
                if (lfs->globals.g.moveid == lfs_tag_id(foundtag)) {
                    foundtag = LFS_ERR_NOENT;
                } else if (lfs_tag_isvalid(foundtag) &&
                        lfs->globals.g.moveid < lfs_tag_id(foundtag)) {
                    foundtag -= LFS_MKTAG(0, 1, 0);
                }
            }

            return foundtag;
        }

        // failed, try the other crc?
        lfs_pair_swap(dir->pair);
        lfs_pair_swap(rev);
    }

    LFS_ERROR("Corrupted dir pair at %"PRIu32" %"PRIu32,
            dir->pair[0], dir->pair[1]);
    return LFS_ERR_CORRUPT;
}

static int lfs_dir_fetch(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_block_t pair[2]) {
    int32_t res = lfs_dir_fetchmatch(lfs, dir, pair,
            0xffffffff, 0xffffffff, NULL);
    if (res < 0 && res != LFS_ERR_NOENT) {
        return res;
    }

    return 0;
}

static int32_t lfs_dir_find(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_block_t pair[2], bool fs,
        uint32_t findmask, uint32_t findtag, const void *findbuffer) {
    dir->split = true;
    dir->tail[0] = pair[0];
    dir->tail[1] = pair[1];
    while ((dir->split || fs) && !lfs_pair_isnull(dir->tail)) {
        int32_t tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
                findmask, findtag, findbuffer);
        if (tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}

static int32_t lfs_dir_get(lfs_t *lfs, lfs_mdir_t *dir,
        uint32_t getmask, uint32_t gettag, void *buffer) {
    int32_t getdiff = 0;
    if (lfs_pair_cmp(dir->pair, lfs->globals.g.movepair) == 0 &&
            lfs_tag_id(gettag) <= lfs->globals.g.moveid) {
        // synthetic moves
        getdiff = LFS_MKTAG(0, 1, 0);
    }

    return lfs_commit_get(lfs, dir->pair[0], dir->off, dir->etag,
            getmask, gettag, getdiff, buffer, false);
}

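// lfs_dir_compact rewrites the live entries of a metadata pair into the
// pair's unused block, splitting off a new tail block when the entries no
// longer fit and relocating the pair if a block proves unwritable.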
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_mattr_t *attrs,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    const lfs_block_t oldpair[2] = {dir->pair[1], dir->pair[0]};
    bool relocated = false;

    // There's nothing special about our global delta, so feed it back
    // into the global global delta
    lfs_global_xor(&lfs->locals, &dir->locals);
    lfs_global_zero(&dir->locals);

    while (true) {
        // last complete id
        dir->count = end - begin;
        int16_t ack = -1;
        bool exhausted = false;

        // increment revision count
        dir->rev += 1;
        if (lfs->cfg->block_cycles && dir->rev % lfs->cfg->block_cycles == 0) {
            if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
                // we're writing too much to the superblock, should we expand?
                lfs_ssize_t res = lfs_fs_size(lfs);
                if (res < 0) {
                    return res;
                }

                // do we have enough space to expand?
                if (res < lfs->cfg->block_count/2) {
                    LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
                    exhausted = true;
                    goto split;
                }
            } else {
                // we're writing too much, time to relocate
                exhausted = true;
                goto relocate;
            }
        }

        // erase block to write to
        int err = lfs_bd_erase(lfs, dir->pair[1]);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // write out header
        uint32_t crc = 0xffffffff;
        uint32_t rev = lfs_tole32(dir->rev);
        crc = lfs_crc(crc, &rev, sizeof(rev));
        err = lfs_bd_prog(lfs, dir->pair[1], 0, &rev, sizeof(rev));
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // setup compaction
        struct lfs_commit commit = {
            .block = dir->pair[1],
            .off = sizeof(dir->rev),
            .crc = crc,
            .ptag = 0,

            // space is complicated, we need room for tail, crc, globals,
            // and we cap at half a block to give room for metadata updates
            .begin = 0,
            .end = lfs_min(
                    lfs_alignup(lfs->cfg->block_size/2, lfs->cfg->prog_size),
                    lfs->cfg->block_size - 34),
        };

        // commit with a move
        for (uint16_t id = begin; id < end; id++) {
            err = lfs_commit_move(lfs, &commit,
                    0x003ff000, LFS_MKTAG(0, id, 0),
                    0x003ff000, LFS_MKTAG(0, id - begin, 0),
                    source, attrs);
            if (err) {
                if (err == LFS_ERR_NOSPC) {
                    goto split;
                } else if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            ack = id;
        }

        // reopen reserved space at the end
        commit.end = lfs->cfg->block_size - 8;

        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            // move over (duplicate) superblock if we are root
            err = lfs_commit_move(lfs, &commit,
                    0x7c000000, LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 0),
                    0x7ffff000, LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 0),
                    source, attrs);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        if (!relocated) {
            // commit any globals, unless we're relocating, in which case our
            // parent will steal our globals
            err = lfs_commit_globals(lfs, &commit, &dir->locals);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        if (!lfs_pair_isnull(dir->tail)) {
            // commit tail, which may be new after last size check
            lfs_pair_tole32(dir->tail);
            err = lfs_commit_attr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_TAIL + dir->split,
                        0x3ff, sizeof(dir->tail)), dir->tail);
            lfs_pair_fromle32(dir->tail);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        err = lfs_commit_crc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // successful compaction, swap dir pair to indicate most recent
        lfs_pair_swap(dir->pair);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        dir->erased = true;
        break;

split:
        // commit no longer fits, need to split dir,
        // drop caches and create tail
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!exhausted && ack < 0) {
            // If we can't fit in this block, we won't fit in next block
            return LFS_ERR_NOSPC;
        }

        lfs_mdir_t tail;
        err = lfs_dir_alloc(lfs, &tail);
        if (err) {
            return err;
        }

        if (exhausted) {
            lfs->root[0] = tail.pair[0];
            lfs->root[1] = tail.pair[1];
        }

        tail.split = dir->split;
        tail.tail[0] = dir->tail[0];
        tail.tail[1] = dir->tail[1];

        err = lfs_dir_compact(lfs, &tail, attrs, source, ack+1, end);
        if (err) {
            return err;
        }

        end = ack+1;
        dir->tail[0] = tail.pair[0];
        dir->tail[1] = tail.pair[1];
        dir->split = true;
        continue;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!exhausted) {
            LFS_DEBUG("Bad block at %"PRIu32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(oldpair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock %"PRIu32" has become unwritable", oldpair[1]);
            return LFS_ERR_CORRUPT;
        }

        // relocate half of pair
        err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC && !exhausted)) {
            return err;
        }

        continue;
    }

    if (!relocated) {
        // successful commit, update globals
        lfs_global_xor(&dir->locals, &lfs->locals);
        lfs_global_zero(&lfs->locals);
    } else {
        // update references if we relocated
        LFS_DEBUG("Relocating %"PRIu32" %"PRIu32" to %"PRIu32" %"PRIu32,
                oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
        int err = lfs_fs_relocate(lfs, oldpair, dir->pair);
        if (err) {
            return err;
        }
    }

    return 0;
}

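// lfs_dir_commit appends attributes to the erased space of the active
// metadata block, falling back to lfs_dir_compact when the append no longer
// fits or the block reports corruption.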
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_mattr_t *attrs) {
    lfs_mattr_t cancelattr;
    lfs_global_t canceldiff;
    lfs_global_zero(&canceldiff);
    if (lfs_pair_cmp(dir->pair, lfs->globals.g.movepair) == 0) {
        // Wait, we have the move? Just cancel this out here
        // We need to, or else the move can become outdated
        canceldiff.l.movepair[0] ^= lfs->globals.g.movepair[0] ^ 0xffffffff;
        canceldiff.l.movepair[1] ^= lfs->globals.g.movepair[1] ^ 0xffffffff;
        canceldiff.l.moveid ^= lfs->globals.g.moveid ^ 0x3ff;
        lfs_global_fromle32(&lfs->locals);
        lfs_global_xor(&lfs->locals, &canceldiff);
        lfs_global_tole32(&lfs->locals);

        cancelattr.tag = LFS_MKTAG(LFS_TYPE_DELETE, lfs->globals.l.moveid, 0);
        cancelattr.next = attrs;
        attrs = &cancelattr;
    }

    // calculate new directory size
    uint32_t deletetag = 0xffffffff;
    for (const lfs_mattr_t *a = attrs; a; a = a->next) {
        if (lfs_tag_id(a->tag) < 0x3ff && lfs_tag_id(a->tag) >= dir->count) {
            dir->count = lfs_tag_id(a->tag)+1;
        }

        if (lfs_tag_type(a->tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            deletetag = a->tag;

            if (dir->count == 0) {
                // should we actually drop the directory block?
                lfs_mdir_t pdir;
                int err = lfs_fs_pred(lfs, dir->pair, &pdir);
                if (err && err != LFS_ERR_NOENT) {
                    return err;
                }

                if (err != LFS_ERR_NOENT && pdir.split) {
                    // steal tail and global state
                    pdir.split = dir->split;
                    pdir.tail[0] = dir->tail[0];
                    pdir.tail[1] = dir->tail[1];
                    lfs_global_xor(&lfs->locals, &dir->locals);
                    return lfs_dir_commit(lfs, &pdir,
                            LFS_MKATTR(LFS_TYPE_TAIL + pdir.split, 0x3ff,
                                pdir.tail, sizeof(pdir.tail),
                            NULL));
                }
            }
        }
    }

    while (true) {
        if (!dir->erased) {
            goto compact;
        }

        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .crc = 0xffffffff,
            .ptag = dir->etag,
            .begin = dir->off,
            .end = lfs->cfg->block_size - 8,
        };

        for (const lfs_mattr_t *a = attrs; a; a = a->next) {
            if (lfs_tag_type(a->tag) != LFS_TYPE_DELETE) {
                lfs_pair_tole32(dir->tail);
                int err = lfs_commit_attr(lfs, &commit, a->tag, a->buffer);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                        goto compact;
                    }
                    return err;
                }
            }
        }

        if (lfs_tag_isvalid(deletetag)) {
            // special case for deletes, since order matters
            int err = lfs_commit_attr(lfs, &commit, deletetag, NULL);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        int err = lfs_commit_globals(lfs, &commit, &dir->locals);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        err = lfs_commit_crc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        dir->off = commit.off;
        dir->etag = commit.ptag;

        // successful commit, update globals
        lfs_global_xor(&dir->locals, &lfs->locals);
        lfs_global_zero(&lfs->locals);
        break;

compact:
        // fall back to compaction
        lfs_cache_drop(lfs, &lfs->pcache);
        err = lfs_dir_compact(lfs, dir, attrs, dir, 0, dir->count);
        if (err) {
            return err;
        }
        break;
    }

    // update globals that are affected
    lfs_global_xor(&lfs->globals, &canceldiff);

    // update any directories that are affected
    lfs_mdir_t copy = *dir;

    // two passes, once for things that aren't us, and one
    // for things that are
    for (lfs_mlist_t *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, copy.pair) == 0) {
            d->m = *dir;
            if (d->id == lfs_tag_id(deletetag)) {
                d->m.pair[0] = 0xffffffff;
                d->m.pair[1] = 0xffffffff;
            } else if (d->id > lfs_tag_id(deletetag)) {
                d->id -= 1;
                if (d->type == LFS_TYPE_DIR) {
                    ((lfs_dir_t*)d)->pos -= 1;
                }
            }

            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return 0;
}

static int32_t lfs_dir_lookup(lfs_t *lfs, lfs_mdir_t *dir, const char **path) {
    // we reduce path to a single name if we can find it
    const char *name = *path;
    *path = NULL;

    // default to root dir
    int32_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
    lfs_block_t pair[2] = {lfs->root[0], lfs->root[1]};

    while (true) {
nextname:
        // skip slashes
        name += strspn(name, "/");
        lfs_size_t namelen = strcspn(name, "/");

        // skip '.' and root '..'
        if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
            (namelen == 2 && memcmp(name, "..", 2) == 0)) {
            name += namelen;
            goto nextname;
        }

        // skip if matched by '..' in name
        const char *suffix = name + namelen;
        lfs_size_t sufflen;
        int depth = 1;
        while (true) {
            suffix += strspn(suffix, "/");
            sufflen = strcspn(suffix, "/");
            if (sufflen == 0) {
                break;
            }

            if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
                depth -= 1;
                if (depth == 0) {
                    name = suffix + sufflen;
                    goto nextname;
                }
            } else {
                depth += 1;
            }

            suffix += sufflen;
        }

        // found path
        if (name[0] == '\0') {
            return tag;
        }

        // update what we've found if path is only a name
        if (strchr(name, '/') == NULL) {
            *path = name;
        }

        // only continue if we hit a directory
        if (lfs_tag_type(tag) != LFS_TYPE_DIR) {
            return LFS_ERR_NOTDIR;
        }

        // grab the entry data
        if (lfs_tag_id(tag) != 0x3ff) {
            int32_t res = lfs_dir_get(lfs, dir, 0x7c3ff000,
                    LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
            if (res < 0) {
                return res;
            }
            lfs_pair_fromle32(pair);
        }

        // find entry matching name
        tag = lfs_dir_find(lfs, dir, pair, false, 0x7c000fff,
                LFS_MKTAG(LFS_TYPE_NAME, 0, namelen), name);
        if (tag < 0) {
            return tag;
        }

        // to next name
        name += namelen;
    }
}


static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
        uint16_t id, struct lfs_info *info) {
    if (id == 0x3ff) {
        // special case for root
        strcpy(info->name, "/");
        info->type = LFS_TYPE_DIR;
        return 0;
    }

    int32_t tag = lfs_dir_get(lfs, dir, 0x7c3ff000,
            LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
    if (tag < 0) {
        return tag;
    }

    info->type = lfs_tag_type(tag);

    struct lfs_ctz ctz;
    tag = lfs_dir_get(lfs, dir, 0x7c3ff000,
            LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
    if (tag < 0) {
        return tag;
    }
    lfs_ctz_fromle32(&ctz);

    if (lfs_tag_type(tag) == LFS_TYPE_CTZSTRUCT) {
        info->size = ctz.size;
    } else if (lfs_tag_type(tag) == LFS_TYPE_INLINESTRUCT) {
        info->size = lfs_tag_size(tag);
    }

    return 0;
}

/// Top level directory operations ///
int lfs_mkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    int32_t res = lfs_dir_lookup(lfs, &cwd, &path);
    if (!(res == LFS_ERR_NOENT && path)) {
        return (res < 0) ? res : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);

    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    dir.tail[0] = cwd.tail[0];
    dir.tail[1] = cwd.tail[1];
    err = lfs_dir_commit(lfs, &dir, NULL);
    if (err) {
        return err;
    }

    // get next slot and commit
    uint16_t id = cwd.count;
    cwd.tail[0] = dir.pair[0];
    cwd.tail[1] = dir.pair[1];
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd,
            LFS_MKATTR(LFS_TYPE_DIR, id, path, nlen,
            LFS_MKATTR(LFS_TYPE_DIRSTRUCT, id, dir.pair, sizeof(dir.pair),
            LFS_MKATTR(LFS_TYPE_SOFTTAIL, 0x3ff, cwd.tail, sizeof(cwd.tail),
            NULL))));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
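
// A minimal usage sketch (not compiled): creating a directory and checking it
// with lfs_stat(). The already-mounted lfs_t named `lfs` and the assert-based
// error handling are illustrative assumptions, not part of this file.
#if 0
#include "lfs.h"
#include <assert.h>

void example_mkdir(lfs_t *lfs) {
    int err = lfs_mkdir(lfs, "logs");
    // LFS_ERR_EXIST simply means the directory is already there
    assert(err == 0 || err == LFS_ERR_EXIST);

    struct lfs_info info;
    err = lfs_stat(lfs, "logs", &info);
    assert(err == 0 && info.type == LFS_TYPE_DIR);
}
#endif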

int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    int32_t tag = lfs_dir_lookup(lfs, &dir->m, &path);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) == 0x3ff) {
        // handle root dir separately
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    } else {
        // get dir pair from parent
        int32_t res = lfs_dir_get(lfs, &dir->m, 0x7c3ff000,
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);
    }

    // fetch first pair
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // setup entry
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // add to list of mdirs
    dir->type = LFS_TYPE_DIR;
    dir->next = (lfs_dir_t*)lfs->mlist;
    lfs->mlist = (lfs_mlist_t*)dir;

    return 0;
}

int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
    // remove from list of mdirs
    for (lfs_mlist_t **p = &lfs->mlist; *p; p = &(*p)->next) {
        if (*p == (lfs_mlist_t*)dir) {
            *p = (*p)->next;
            break;
        }
    }

    return 0;
}

int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // special offset for '.' and '..'
    if (dir->pos == 0) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, ".");
        dir->pos += 1;
        return 1;
    } else if (dir->pos == 1) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, "..");
        dir->pos += 1;
        return 1;
    }

    while (true) {
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                return false;
            }

            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        dir->id += 1;
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}

int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    while (off != 0) {
        dir->id = lfs_min(dir->m.count, off);
        dir->pos += dir->id;
        off -= dir->id;

        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }
        }
    }

    return 0;
}

lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
    (void)lfs;
    return dir->pos;
}

int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
    // reload the head dir
    int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (err) {
        return err;
    }

    dir->m.pair[0] = dir->head[0];
    dir->m.pair[1] = dir->head[1];
    dir->id = 0;
    dir->pos = 0;
    return 0;
}
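
// A minimal usage sketch (not compiled) of the directory iteration API above:
// lfs_dir_read() returns a positive value for each entry (including the
// synthetic "." and ".." entries), 0 at the end of the directory, and a
// negative error code on failure. The mounted lfs_t named `lfs` and the
// printf-based reporting are illustrative assumptions only.
#if 0
#include "lfs.h"
#include <stdio.h>

int example_list(lfs_t *lfs, const char *path) {
    lfs_dir_t dir;
    int err = lfs_dir_open(lfs, &dir, path);
    if (err) {
        return err;
    }

    struct lfs_info info;
    while ((err = lfs_dir_read(lfs, &dir, &info)) > 0) {
        printf("%c %8lu %s\n",
                (info.type == LFS_TYPE_DIR) ? 'd' : 'f',
                (unsigned long)info.size, info.name);
    }

    lfs_dir_close(lfs, &dir);
    return err; // 0 on a clean end-of-directory
}
#endif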

/// File index list operations ///
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t size = *off;
    lfs_off_t b = lfs->cfg->block_size - 2*4;
    lfs_off_t i = size / b;
    if (i == 0) {
        return 0;
    }

    i = (size - 4*(lfs_popc(i-1)+2)) / b;
    *off = size - b*i - 4*lfs_popc(i);
    return i;
}
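
// Worked example of the index math above, assuming (illustratively) a
// 512-byte block_size, so b = 512 - 2*4 = 504:
//
//   file position 1000:
//     i   = 1000/504                    = 1
//     i   = (1000 - 4*(popc(0)+2))/504  = 992/504 = 1
//     off = 1000 - 504*1 - 4*popc(1)    = 492
//
// which matches the raw layout: block 0 carries bytes 0..511 and holds no
// pointers, block 1 begins with one 4-byte pointer, so byte 1000 lands in
// block index 1 at offset 4 + (1000 - 512) = 492.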

static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        *block = 0xffffffff;
        *off = 0;
        return 0;
    }

    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    while (current > target) {
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        int err = lfs_cache_read(lfs, pcache, rcache, false,
                head, 4*skip, &head, 4);
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
        current -= 1 << skip;
    }

    *block = head;
    *off = pos;
    return 0;
}

static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }
        LFS_ASSERT(nblock >= 2 && nblock <= lfs->cfg->block_count);

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        if (size == 0) {
            *block = nblock;
            *off = 0;
            return 0;
        }

        size -= 1;
        lfs_off_t index = lfs_ctz_index(lfs, &size);
        size += 1;

        // just copy out the last block if it is incomplete
        if (size != lfs->cfg->block_size) {
            for (lfs_off_t i = 0; i < size; i++) {
                uint8_t data;
                err = lfs_cache_read(lfs, NULL, rcache, true,
                        head, i, &data, 1);
                if (err) {
                    return err;
                }

                err = lfs_cache_prog(lfs, pcache, rcache, true,
                        nblock, i, &data, 1);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            *block = nblock;
            *off = size;
            return 0;
        }

        // append block
        index += 1;
        lfs_size_t skips = lfs_ctz(index) + 1;

        for (lfs_off_t i = 0; i < skips; i++) {
            head = lfs_tole32(head);
            err = lfs_cache_prog(lfs, pcache, rcache, true,
                    nblock, 4*i, &head, 4);
            head = lfs_fromle32(head);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (i != skips-1) {
                err = lfs_cache_read(lfs, NULL, rcache, false,
                        head, 4*i, &head, 4);
                head = lfs_fromle32(head);
                if (err) {
                    return err;
                }
            }

            LFS_ASSERT(head >= 2 && head <= lfs->cfg->block_count);
        }

        *block = nblock;
        *off = 4*skips;
        return 0;

relocate:
        LFS_DEBUG("Bad block at %"PRIu32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}

static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        return 0;
    }

    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_cache_read(lfs, pcache, rcache, false,
                head, 0, &heads, count*4);
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}

/// Top level file operations ///
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & 3) != LFS_O_RDONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }

    // setup simple file details
    int err = 0;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    int32_t tag = lfs_dir_lookup(lfs, &file->m, &path);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && path)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->id = lfs_tag_id(tag);
    file->type = LFS_TYPE_REG;
    file->next = (lfs_file_t*)lfs->mlist;
    lfs->mlist = (lfs_mlist_t*)file;

    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        file->id = file->m.count;
        err = lfs_dir_commit(lfs, &file->m,
                LFS_MKATTR(LFS_TYPE_REG, file->id, path, nlen,
                LFS_MKATTR(LFS_TYPE_INLINESTRUCT, file->id, NULL, 0,
                NULL)));
        if (err) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
    } else if (lfs_tag_type(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, 0x7c3ff000,
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (const struct lfs_attr *a = file->cfg->attrs; a; a = a->next) {
        if ((file->flags & 3) != LFS_O_WRONLY) {
            int32_t res = lfs_dir_get(lfs, &file->m, 0x7ffff000,
                    LFS_MKTAG(0x100 | a->type, file->id, a->size), a->buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

        if ((file->flags & 3) != LFS_O_RDONLY) {
            if (a->size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            file->flags |= LFS_F_DIRTY;
        }
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = 0xfffffffe;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            int32_t res = lfs_dir_get(lfs, &file->m, 0x7c3ff000,
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id, file->ctz.size),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
    file->flags |= LFS_F_ERRED;
    lfs_file_close(lfs, file);
    return err;
}

int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags) {
    static const struct lfs_file_config defaults = {0};
    return lfs_file_opencfg(lfs, file, path, flags, &defaults);
}

int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
    int err = lfs_file_sync(lfs, file);

    // remove from list of mdirs
    for (lfs_mlist_t **p = &lfs->mlist; *p; p = &(*p)->next) {
        if (*p == (lfs_mlist_t*)file) {
            *p = (*p)->next;
            break;
        }
    }

    // clean up memory
    if (!file->cfg->buffer) {
        lfs_free(file->cache.buffer);
    }

    return err;
}
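
// A minimal usage sketch (not compiled) of the file API above: open-or-create,
// update a small record, then read it back on the next boot. The mounted
// lfs_t named `lfs`, the "boot_count" file name, and the assert-based error
// handling are illustrative assumptions, not part of this file.
#if 0
#include "lfs.h"
#include <assert.h>

void example_boot_count(lfs_t *lfs) {
    lfs_file_t file;
    uint32_t count = 0;

    int err = lfs_file_open(lfs, &file, "boot_count",
            LFS_O_RDWR | LFS_O_CREAT);
    assert(err == 0);

    // read the previous value (0 bytes on the first boot), bump it,
    // and write it back from the start of the file
    lfs_file_read(lfs, &file, &count, sizeof(count));
    count += 1;
    lfs_file_seek(lfs, &file, 0, LFS_SEEK_SET);
    lfs_file_write(lfs, &file, &count, sizeof(count));

    // the update only reaches storage on sync/close
    err = lfs_file_close(lfs, &file);
    assert(err == 0);
}
#endif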

static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            err = lfs_cache_read(lfs, &file->cache, &lfs->rcache, true,
                    file->block, i, &data, 1);
            if (err) {
                return err;
            }

            err = lfs_cache_prog(lfs, &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        return 0;

relocate:
        LFS_DEBUG("Bad block at %"PRIu32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}

static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        file->flags &= ~LFS_F_READING;
    }

    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_read(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_write(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != 0xffffffff) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_cache_flush(lfs,
                        &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at %"PRIu32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            file->ctz.size = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }

    return 0;
}

int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        if ((file->flags & LFS_F_DIRTY) &&
                !(file->flags & LFS_F_ERRED) &&
                !lfs_pair_isnull(file->m.pair)) {
            // update dir entry
            uint16_t type;
            const void *buffer;
            lfs_size_t size;
            struct lfs_ctz ctz;
            if (file->flags & LFS_F_INLINE) {
                // inline the whole file
                type = LFS_TYPE_INLINESTRUCT;
                buffer = file->cache.buffer;
                size = file->ctz.size;
            } else {
                // update the ctz reference
                type = LFS_TYPE_CTZSTRUCT;
                // copy ctz so alloc will work during a relocate
                ctz = file->ctz;
                lfs_ctz_tole32(&ctz);
                buffer = &ctz;
                size = sizeof(ctz);
            }

            // commit file data and attributes
            err = lfs_dir_commit(lfs, &file->m,
                    LFS_MKATTR(type, file->id, buffer, size,
                    LFS_MKATTR(LFS_FROM_ATTRS, file->id, file->cfg->attrs, 0,
                    NULL)));
            if (err) {
                if (err == LFS_ERR_NOSPC && (file->flags & LFS_F_INLINE)) {
                    goto relocate;
                }
                return err;
            }

            file->flags &= ~LFS_F_DIRTY;
        }

        return 0;

relocate:
        // inline file doesn't fit anymore
        file->block = 0xfffffffe;
        file->off = file->pos;

        lfs_alloc_ack(lfs);
        err = lfs_file_relocate(lfs, file);
        if (err) {
            return err;
        }

        file->flags &= ~LFS_F_INLINE;
        file->flags |= LFS_F_WRITING;
    }
}

lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_WRONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_WRITING) {
        // flush out any writes
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        int err = lfs_cache_read(lfs, NULL, &file->cache, true,
                file->block, file->off, data, diff);
        if (err) {
            return err;
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}

lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
        file->pos = file->ctz.size;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->ctz.size;

        while (file->pos < pos) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    if ((file->flags & LFS_F_INLINE) &&
            file->pos + nsize > lfs->inline_max) {
        // inline file doesn't fit anymore
        file->block = 0xfffffffe;
        file->off = file->pos;

        lfs_alloc_ack(lfs);
        int err = lfs_file_relocate(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_INLINE;
        file->flags |= LFS_F_WRITING;
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &file->off);
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = 0xfffffffe;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_cache_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;

relocate:
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    file->flags &= ~LFS_F_ERRED;
    return size;
}

lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    if (whence == LFS_SEEK_SET) {
        file->pos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if (off < 0 && (lfs_off_t)-off > file->pos) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->pos + off;
    } else if (whence == LFS_SEEK_END) {
        if (off < 0 && (lfs_off_t)-off > file->ctz.size) {
            return LFS_ERR_INVAL;
        }

        file->pos = file->ctz.size + off;
    }

    return file->pos;
}

int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    if ((file->flags & 3) == LFS_O_RDONLY) {
        return LFS_ERR_BADF;
    }

    lfs_off_t oldsize = lfs_file_size(lfs, file);
    if (size < oldsize) {
        // need to flush since directly changing metadata
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }

        // lookup new head in ctz skip list
        err = lfs_ctz_find(lfs, NULL, &file->cache,
                file->ctz.head, file->ctz.size,
                size, &file->ctz.head, &(lfs_off_t){0});
        if (err) {
            return err;
        }

        file->ctz.size = size;
        file->flags |= LFS_F_DIRTY;
    } else if (size > oldsize) {
        lfs_off_t pos = file->pos;

        // flush+seek if not already at end
        if (file->pos != oldsize) {
            int err = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
            if (err < 0) {
                return err;
            }
        }

        // fill with zeros
        while (file->pos < size) {
            lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }

        // restore pos
        int err = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}

lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    return file->pos;
}

int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
    lfs_soff_t res = lfs_file_seek(lfs, file, 0, LFS_SEEK_SET);
    if (res < 0) {
        return res;
    }

    return 0;
}

lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
    (void)lfs;
    if (file->flags & LFS_F_WRITING) {
        return lfs_max(file->pos, file->ctz.size);
    } else {
        return file->ctz.size;
    }
}
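
// A minimal sketch (not compiled) of the seek/size helpers above: seeking
// past the end is allowed, and the gap is zero-filled by the next write.
// The mounted lfs_t named `lfs` and the open file handle `file` are
// illustrative assumptions.
#if 0
#include "lfs.h"

void example_seek(lfs_t *lfs, lfs_file_t *file) {
    // lfs_file_seek returns the new position (or a negative error code)
    lfs_soff_t end = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
    (void)end;

    // rewind and shrink the file back to 16 bytes
    lfs_file_rewind(lfs, file);
    lfs_file_truncate(lfs, file, 16);

    // the logical size reflects unflushed writes as well
    lfs_soff_t size = lfs_file_size(lfs, file);
    (void)size;
}
#endif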

/// General fs operations ///
int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
    lfs_mdir_t cwd;
    int32_t tag = lfs_dir_lookup(lfs, &cwd, &path);
    if (tag < 0) {
        return tag;
    }

    return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
}

int lfs_remove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    err = lfs_dir_fetch(lfs, &cwd, lfs->root);
    if (err) {
        return err;
    }

    int32_t tag = lfs_dir_lookup(lfs, &cwd, &path);
    if (tag < 0) {
        return tag;
    }

    lfs_mdir_t dir;
    if (lfs_tag_type(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        int32_t res = lfs_dir_get(lfs, &cwd, 0x7c3ff000,
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir, pair);
        if (err) {
            return err;
        }

        if (dir.count > 0 || dir.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        lfs_global_orphans(lfs, +1);
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd,
            LFS_MKATTR(LFS_TYPE_DELETE, lfs_tag_id(tag), NULL, 0,
            NULL));
    if (err) {
        return err;
    }

    if (lfs_tag_type(tag) == LFS_TYPE_DIR) {
        // fix orphan
        lfs_global_orphans(lfs, -1);

        err = lfs_fs_pred(lfs, dir.pair, &cwd);
        if (err) {
            return err;
        }

        // steal state
        cwd.tail[0] = dir.tail[0];
        cwd.tail[1] = dir.tail[1];
        lfs_global_xor(&lfs->locals, &dir.locals);
        err = lfs_dir_commit(lfs, &cwd,
                LFS_MKATTR(LFS_TYPE_SOFTTAIL, 0x3ff,
                    cwd.tail, sizeof(cwd.tail),
                NULL));
        if (err) {
            return err;
        }
    }

    return 0;
}

int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    int32_t oldtag = lfs_dir_lookup(lfs, &oldcwd, &oldpath);
    if (oldtag < 0) {
        return oldtag;
    }

    // find new entry
    lfs_mdir_t newcwd;
    int32_t prevtag = lfs_dir_lookup(lfs, &newcwd, &newpath);
    if (prevtag < 0 && prevtag != LFS_ERR_NOENT) {
        return prevtag;
    }

    uint16_t newid = lfs_tag_id(prevtag);
    lfs_mdir_t prevdir;
    if (prevtag == LFS_ERR_NOENT) {
        // check that name fits
        lfs_size_t nlen = strlen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // get next id
        newid = newcwd.count;
    } else if (lfs_tag_type(prevtag) != lfs_tag_type(oldtag)) {
        return LFS_ERR_ISDIR;
    } else if (lfs_tag_type(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        int32_t res = lfs_dir_get(lfs, &newcwd, 0x7c3ff000,
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.count > 0 || prevdir.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        lfs_global_orphans(lfs, +1);
    }

    // create move to fix later
    lfs_global_move(lfs, oldcwd.pair, lfs_tag_id(oldtag));

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd,
            LFS_MKATTR(lfs_tag_type(oldtag), newid, newpath, strlen(newpath),
            LFS_MKATTR(LFS_FROM_MOVE, newid, &oldcwd, lfs_tag_id(oldtag),
            NULL)));
    if (err) {
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (lfs_pair_cmp(oldcwd.pair, newcwd.pair) != 0) {
        err = lfs_dir_commit(lfs, &oldcwd, NULL);
        if (err) {
            return err;
        }
    }

    if (prevtag != LFS_ERR_NOENT && lfs_tag_type(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        lfs_global_orphans(lfs, -1);

        err = lfs_fs_pred(lfs, prevdir.pair, &newcwd);
        if (err) {
            return err;
        }

        // steal state
        newcwd.tail[0] = prevdir.tail[0];
        newcwd.tail[1] = prevdir.tail[1];
        lfs_global_xor(&lfs->locals, &prevdir.locals);
        err = lfs_dir_commit(lfs, &newcwd,
                LFS_MKATTR(LFS_TYPE_SOFTTAIL, 0x3ff,
                    newcwd.tail, sizeof(newcwd.tail),
                NULL));
        if (err) {
            return err;
        }
    }

    return 0;
}

lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
        uint8_t type, void *buffer, lfs_size_t size) {
    lfs_mdir_t cwd;
    int32_t res = lfs_dir_lookup(lfs, &cwd, &path);
    if (res < 0) {
        return res;
    }

    uint16_t id = lfs_tag_id(res);
    if (id == 0x3ff) {
        // special case for root
        id = 0;
        int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
        if (err) {
            return err;
        }
    }

    res = lfs_dir_get(lfs, &cwd, 0x7ffff000,
            LFS_MKTAG(0x100 | type, id, lfs_min(size, lfs->attr_max)),
            buffer);
    if (res < 0 && res != LFS_ERR_NOENT) {
        return res;
    }

    return (res == LFS_ERR_NOENT) ? 0 : lfs_tag_size(res);
}

int lfs_setattr(lfs_t *lfs, const char *path,
        uint8_t type, const void *buffer, lfs_size_t size) {
    if (size > lfs->attr_max) {
        return LFS_ERR_NOSPC;
    }

    lfs_mdir_t cwd;
    int32_t res = lfs_dir_lookup(lfs, &cwd, &path);
    if (res < 0) {
        return res;
    }

    uint16_t id = lfs_tag_id(res);
    if (id == 0x3ff) {
        // special case for root
        id = 0;
        int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
        if (err) {
            return err;
        }
    }

    return lfs_dir_commit(lfs, &cwd,
            LFS_MKATTR(0x100 | type, id, buffer, size,
            NULL));
}
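
// A minimal sketch (not compiled) of the custom-attribute API above: store a
// small value under a user-chosen attribute type and read it back later.
// The mounted lfs_t named `lfs`, the attribute type 0x74 ('t'), and the
// timestamp value are illustrative assumptions.
#if 0
#include "lfs.h"

int example_attrs(lfs_t *lfs) {
    uint32_t timestamp = 1234;
    int err = lfs_setattr(lfs, "boot_count", 0x74,
            &timestamp, sizeof(timestamp));
    if (err) {
        return err;
    }

    // lfs_getattr returns the stored size, 0 if the attribute is missing,
    // or a negative error code
    uint32_t readback = 0;
    lfs_ssize_t res = lfs_getattr(lfs, "boot_count", 0x74,
            &readback, sizeof(readback));
    return (res < 0) ? (int)res : 0;
}
#endif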

/// Filesystem operations ///
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    int err = 0;

    // check that the cache size is a multiple of the prog and read sizes,
    // and that the block size is a multiple of the cache size
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit ctz pointers
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // setup read cache
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead, must be multiple of 32-bits
    LFS_ASSERT(lfs->cfg->lookahead % 32 == 0);
    LFS_ASSERT(lfs->cfg->lookahead > 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead/8);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane
    LFS_ASSERT(lfs->cfg->inline_max <= LFS_INLINE_MAX);
    LFS_ASSERT(lfs->cfg->inline_max <= lfs->cfg->cache_size);
    lfs->inline_max = lfs->cfg->inline_max;
    if (!lfs->inline_max) {
        lfs->inline_max = lfs_min(LFS_INLINE_MAX, lfs->cfg->cache_size);
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    // setup default state
    lfs->root[0] = 0xffffffff;
    lfs->root[1] = 0xffffffff;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->globals.g.movepair[0] = 0xffffffff;
    lfs->globals.g.movepair[1] = 0xffffffff;
    lfs->globals.g.moveid = 0x3ff;
    lfs->globals.g.orphans = 0;
    lfs_global_zero(&lfs->locals);

    return 0;

cleanup:
    lfs_deinit(lfs);
    return err;
}

static int lfs_deinit(lfs_t *lfs) {
    // free allocated memory
    if (!lfs->cfg->read_buffer) {
        lfs_free(lfs->rcache.buffer);
    }

    if (!lfs->cfg->prog_buffer) {
        lfs_free(lfs->pcache.buffer);
    }

    if (!lfs->cfg->lookahead_buffer) {
        lfs_free(lfs->free.buffer);
    }

    return 0;
}

int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // create free lookahead
    memset(lfs->free.buffer, 0, lfs->cfg->lookahead/8);
    lfs->free.off = 0;
    lfs->free.size = lfs_min(lfs->cfg->lookahead, lfs->cfg->block_count);
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);

    // create root dir
    lfs_mdir_t root;
    err = lfs_dir_alloc(lfs, &root);
    if (err) {
        goto cleanup;
    }

    // write one superblock
    lfs_superblock_t superblock = {
        .magic = {"littlefs"},
        .version = LFS_DISK_VERSION,

        .block_size = lfs->cfg->block_size,
        .block_count = lfs->cfg->block_count,
        .attr_max = lfs->attr_max,
        .name_max = lfs->name_max,
        .inline_max = lfs->inline_max,
    };

    lfs_superblock_tole32(&superblock);
    err = lfs_dir_commit(lfs, &root,
            LFS_MKATTR(LFS_TYPE_ROOT, 0, &superblock, sizeof(superblock),
            NULL));
    if (err) {
        goto cleanup;
    }

    // sanity check that fetch works
    err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
    if (err) {
        goto cleanup;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}

int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // find root/superblock
    lfs_mdir_t root;
    lfs_superblock_t superblock;
    int32_t tag = lfs_dir_find(lfs,
            &root, (const lfs_block_t[2]){0, 1}, false, 0x7fc00000,
            LFS_MKTAG(LFS_TYPE_ROOT, 0, 8), "littlefs");
    if (tag < 0) {
        err = tag;
        goto cleanup;
    }

    int32_t res = lfs_dir_get(lfs, &root, 0x7c000000,
            LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, sizeof(superblock)),
            &superblock);
    if (res < 0) {
        err = res;
        goto cleanup;
    }
    lfs_superblock_fromle32(&superblock);

    lfs->root[0] = root.pair[0];
    lfs->root[1] = root.pair[1];

    // check version
    uint16_t major_version = (0xffff & (superblock.version >> 16));
    uint16_t minor_version = (0xffff & (superblock.version >> 0));
    if ((major_version != LFS_DISK_VERSION_MAJOR ||
            minor_version > LFS_DISK_VERSION_MINOR)) {
        LFS_ERROR("Invalid version %"PRIu32".%"PRIu32,
                major_version, minor_version);
        err = LFS_ERR_INVAL;
        goto cleanup;
    }

    // check superblock configuration
    if (superblock.attr_max) {
        if (superblock.attr_max > lfs->attr_max) {
            LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
                    superblock.attr_max, lfs->attr_max);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        lfs->attr_max = superblock.attr_max;
    }

    if (superblock.name_max) {
        if (superblock.name_max > lfs->name_max) {
            LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
                    superblock.name_max, lfs->name_max);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        lfs->name_max = superblock.name_max;
    }

    if (superblock.inline_max) {
        if (superblock.inline_max > lfs->inline_max) {
            LFS_ERROR("Unsupported inline_max (%"PRIu32" > %"PRIu32")",
                    superblock.inline_max, lfs->inline_max);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        lfs->inline_max = superblock.inline_max;
    }

    // scan for any global updates
    lfs_mdir_t dir = {.tail = {0, 1}};
    while (!lfs_pair_isnull(dir.tail)) {
        err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        // xor together indirect deletes
        lfs_global_xor(&lfs->locals, &dir.locals);
    }

    // update littlefs with globals
    lfs_global_fromle32(&lfs->locals);
    lfs_global_xor(&lfs->globals, &lfs->locals);
    lfs_global_zero(&lfs->locals);
    if (!lfs_pair_isnull(lfs->globals.g.movepair)) {
        LFS_DEBUG("Found move %"PRIu32" %"PRIu32" %"PRIu32,
                lfs->globals.g.movepair[0],
                lfs->globals.g.movepair[1],
                lfs->globals.g.moveid);
    }

    // setup free lookahead
    lfs->free.off = lfs->seed % lfs->cfg->block_size;
    lfs->free.size = 0;
    lfs->free.i = 0;
    lfs_alloc_ack(lfs);

    return 0;

cleanup:
    lfs_unmount(lfs);
    return err;
}

int lfs_unmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
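
// A minimal lifecycle sketch (not compiled): mount, fall back to format on a
// brand-new or unreadable device, use the filesystem, then unmount. The
// block-device callbacks behind `cfg` (and the `lfs`/`cfg` identifiers
// themselves) are assumptions supplied by the user's port, not part of this
// file.
#if 0
#include "lfs.h"

int example_lifecycle(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err) {
        // only reformat when mounting fails, otherwise data would be lost
        lfs_format(lfs, cfg);
        err = lfs_mount(lfs, cfg);
        if (err) {
            return err;
        }
    }

    // ... filesystem is usable here ...

    // release any buffers lfs_init() allocated
    return lfs_unmount(lfs);
}
#endif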

/// Filesystem filesystem operations ///
int lfs_fs_traverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data) {
    if (lfs_pair_isnull(lfs->root)) {
        return 0;
    }

    // iterate over metadata pairs
    lfs_mdir_t dir = {.tail = {0, 1}};
    while (!lfs_pair_isnull(dir.tail)) {
        for (int i = 0; i < 2; i++) {
            int err = cb(data, dir.tail[i]);
            if (err) {
                return err;
            }
        }

        // iterate through ids in directory
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        for (uint16_t id = 0; id < dir.count; id++) {
            struct lfs_ctz ctz;
            int32_t tag = lfs_dir_get(lfs, &dir, 0x7c3ff000,
                    LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
            if (tag < 0) {
                if (tag == LFS_ERR_NOENT) {
                    continue;
                }
                return tag;
            }
            lfs_ctz_fromle32(&ctz);

            if (lfs_tag_type(tag) == LFS_TYPE_CTZSTRUCT) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        ctz.head, ctz.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }
    }

    // iterate over any open files
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (f->type != LFS_TYPE_REG) {
            continue;
        }

        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->ctz.head, f->ctz.size, cb, data);
            if (err) {
                return err;
            }
        }

        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->block, f->pos, cb, data);
            if (err) {
                return err;
            }
        }
    }

    return 0;
}

static int lfs_fs_pred(lfs_t *lfs,
        const lfs_block_t pair[2], lfs_mdir_t *pdir) {
    if (lfs_pair_isnull(lfs->root)) {
        return LFS_ERR_NOENT;
    }

    // iterate over all directory entries
    pdir->tail[0] = 0;
    pdir->tail[1] = 1;
    while (!lfs_pair_isnull(pdir->tail)) {
        if (lfs_pair_cmp(pdir->tail, pair) == 0) {
            return 0;
        }

        int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
        if (err) {
            return err;
        }
    }

    return LFS_ERR_NOENT;
}

static int32_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
        lfs_mdir_t *parent) {
    if (lfs_pair_isnull(lfs->root)) {
        return LFS_ERR_NOENT;
    }

    // search for both orderings so we can reuse the find function
    for (int i = 0; i < 2; i++) {
        int32_t tag = lfs_dir_find(lfs, parent,
                (const lfs_block_t[2]){0, 1}, true, 0x7fc00fff,
                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8), pair);
        if (tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}

static int lfs_fs_relocate(lfs_t *lfs,
        const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
    // update internal root
    if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
        LFS_DEBUG("Relocating root %"PRIu32" %"PRIu32,
                newpair[0], newpair[1]);
        lfs->root[0] = newpair[0];
        lfs->root[1] = newpair[1];
    }

    // update internally tracked dirs
    for (lfs_mlist_t *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
            d->m.pair[0] = newpair[0];
            d->m.pair[1] = newpair[1];
        }
    }

    // find parent
    lfs_mdir_t parent;
    int32_t tag = lfs_fs_parent(lfs, oldpair, &parent);
    if (tag < 0 && tag != LFS_ERR_NOENT) {
        return tag;
    }

    if (tag != LFS_ERR_NOENT) {
        // update disk, this creates a desync
        lfs_global_orphans(lfs, +1);

        lfs_pair_tole32(newpair);
        int err = lfs_dir_commit(lfs, &parent,
                &(lfs_mattr_t){.tag=tag, .buffer=newpair});
        lfs_pair_fromle32(newpair);
        if (err) {
            return err;
        }

        // next step, clean up orphans
        lfs_global_orphans(lfs, -1);
    }

    // find pred
    int err = lfs_fs_pred(lfs, oldpair, &parent);
    if (err && err != LFS_ERR_NOENT) {
        return err;
    }

    // if we can't find dir, it must be new
    if (err != LFS_ERR_NOENT) {
        // replace bad pair, either we clean up desync, or no desync occurred
        parent.tail[0] = newpair[0];
        parent.tail[1] = newpair[1];
        err = lfs_dir_commit(lfs, &parent,
                LFS_MKATTR(LFS_TYPE_TAIL + parent.split, 0x3ff,
                    parent.tail, sizeof(parent.tail),
                NULL));
        if (err) {
            return err;
        }
    }

    return 0;
}

static int lfs_fs_deorphan(lfs_t *lfs) {
    // Fix any orphans
    lfs_mdir_t pdir = {.split = true};
    lfs_mdir_t dir = {.tail = {0, 1}};

    // iterate over all directory entries
    while (!lfs_pair_isnull(dir.tail)) {
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        // check head blocks for orphans
        if (!pdir.split) {
            // check if we have a parent
            lfs_mdir_t parent;
            int32_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
            if (tag < 0 && tag != LFS_ERR_NOENT) {
                return tag;
            }

            if (tag == LFS_ERR_NOENT) {
                // we are an orphan
                LFS_DEBUG("Fixing orphan %"PRIu32" %"PRIu32,
                        pdir.tail[0], pdir.tail[1]);

                pdir.tail[0] = dir.tail[0];
                pdir.tail[1] = dir.tail[1];
                err = lfs_dir_commit(lfs, &pdir,
                        LFS_MKATTR(LFS_TYPE_SOFTTAIL, 0x3ff,
                            pdir.tail, sizeof(pdir.tail),
                        NULL));
                if (err) {
                    return err;
                }

                break;
            }

            lfs_block_t pair[2];
            int32_t res = lfs_dir_get(lfs, &parent, 0x7ffff000, tag, pair);
            if (res < 0) {
                return res;
            }
            lfs_pair_fromle32(pair);

            if (!lfs_pair_sync(pair, pdir.tail)) {
                // we have desynced
                LFS_DEBUG("Fixing half-orphan %"PRIu32" %"PRIu32,
                        pair[0], pair[1]);

                pdir.tail[0] = pair[0];
                pdir.tail[1] = pair[1];
                err = lfs_dir_commit(lfs, &pdir,
                        LFS_MKATTR(LFS_TYPE_SOFTTAIL, 0x3ff,
                            pdir.tail, sizeof(pdir.tail),
                        NULL));
                if (err) {
                    return err;
                }

                break;
            }
        }

        memcpy(&pdir, &dir, sizeof(pdir));
    }

    // mark orphans as fixed
    lfs_global_orphans(lfs, -lfs->globals.g.orphans);
    return 0;
}

static int lfs_fs_demove(lfs_t *lfs) {
    // Fix bad moves
    LFS_DEBUG("Fixing move %"PRIu32" %"PRIu32" %"PRIu32,
            lfs->globals.g.movepair[0],
            lfs->globals.g.movepair[1],
            lfs->globals.g.moveid);

    // fetch and delete the moved entry
    lfs_mdir_t movedir;
    int err = lfs_dir_fetch(lfs, &movedir, lfs->globals.g.movepair);
    if (err) {
        return err;
    }

    // rely on cancel logic inside commit
    err = lfs_dir_commit(lfs, &movedir, NULL);
    if (err) {
        return err;
    }

    return 0;
}

static int lfs_fs_forceconsistency(lfs_t *lfs) {
    if (lfs->globals.g.orphans) {
        int err = lfs_fs_deorphan(lfs);
        if (err) {
            return err;
        }
    }

    if (lfs->globals.g.moveid != 0x3ff) {
        int err = lfs_fs_demove(lfs);
        if (err) {
            return err;
        }
    }

    return 0;
}

static int lfs_fs_size_count(void *p, lfs_block_t block) {
    (void)block;
    lfs_size_t *size = p;
    *size += 1;
    return 0;
}

lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
    lfs_size_t size = 0;
    int err = lfs_fs_traverse(lfs, lfs_fs_size_count, &size);
    if (err) {
        return err;
    }

    return size;
}
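
// A minimal sketch (not compiled) of building on lfs_fs_traverse(), in the
// same style as lfs_fs_size() above: collect every in-use block into a
// caller-provided bitmap. The bitmap layout and the mounted lfs_t named
// `lfs` are illustrative assumptions.
#if 0
#include "lfs.h"

struct example_bitmap {
    uint8_t *bits; // at least block_count/8 bytes, zeroed by the caller
};

static int example_mark(void *p, lfs_block_t block) {
    struct example_bitmap *bm = p;
    bm->bits[block / 8] |= 1 << (block % 8);
    return 0; // a nonzero return would abort the traversal
}

int example_used_blocks(lfs_t *lfs, struct example_bitmap *bm) {
    return lfs_fs_traverse(lfs, example_mark, bm);
}
#endif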