# readtree.py
#!/usr/bin/env python3
# stdlib imports
import struct
import sys
import json
import io
import itertools as it
# project-local: littlefs metadata-pair parsing helpers
from readmdir import Tag, MetadataPair
  8. def popc(x):
  9. return bin(x).count('1')
  10. def ctz(x):
  11. return len(bin(x)) - len(bin(x).rstrip('0'))
def dumpentries(args, mdir, f):
    """Render each id in a metadata dir as a one-line entry written to f.

    args -- parsed CLI namespace (uses args.disk, args.block_size,
            args.no_truncate)
    mdir -- MetadataPair to dump, indexed by Tag
    f    -- text stream the report is written to
    """
    for k, id_ in enumerate(mdir.ids):
        name = mdir[Tag('name', id_, 0)]
        struct_ = mdir[Tag('struct', id_, 0)]

        # entry description: id, type, and json-escaped name
        desc = "id %d %s %s" % (
            id_, name.typerepr(),
            json.dumps(name.data.decode('utf8')))
        if struct_.is_('dirstruct'):
            # dir entries store the block pair of the child directory
            desc += " dir {%#x, %#x}" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
        if struct_.is_('ctzstruct'):
            # ctz entries store head block + file size
            desc += " ctz {%#x} size %d" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
        if struct_.is_('inlinestruct'):
            desc += " inline size %d" % struct_.size

        # reconstruct the file contents where possible
        data = None
        if struct_.is_('inlinestruct'):
            data = struct_.data
        elif struct_.is_('ctzstruct'):
            block, size = struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
            data = []
            # index of the last block in the ctz skip-list
            i = 0 if size == 0 else (size-1) // (args.block_size - 8)
            if i != 0:
                # NOTE(review): the grouping `4*popc(i-1)+2` looks odd vs the
                # ctz skip-list index formula -- confirm against littlefs
                # DESIGN.md before touching
                i = ((size-1) - 4*popc(i-1)+2) // (args.block_size - 8)
            # walk the skip-list backwards, following each block's first
            # pointer back toward index 0
            with open(args.disk, 'rb') as f2:
                while i >= 0:
                    f2.seek(block * args.block_size)
                    dat = f2.read(args.block_size)
                    # block i begins with 4*(ctz(i)+1) bytes of pointers;
                    # index 0 carries no pointers
                    data.append(dat[4*(ctz(i)+1) if i != 0 else 0:])
                    block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff'))
                    i -= 1
            # blocks were collected last-to-first; reverse and clamp to size
            data = bytes(it.islice(
                it.chain.from_iterable(reversed(data)), size))

        # entry line, with a truncated hex+ascii preview when it fits
        f.write("%-45s%s\n" % (desc,
            "%-23s %-8s" % (
                ' '.join('%02x' % c for c in data[:8]),
                ''.join(c if c >= ' ' and c <= '~' else '.'
                    for c in map(chr, data[:8])))
            if not args.no_truncate and len(desc) < 45
                and data is not None else ""))

        # superblock entries carry filesystem geometry/limits inline
        if name.is_('superblock') and struct_.is_('inlinestruct'):
            f.write(
                " block_size %d\n"
                " block_count %d\n"
                " name_max %d\n"
                " file_max %d\n"
                " attr_max %d\n" % struct.unpack(
                    '<IIIII', struct_.data[4:4+20].ljust(20, b'\xff')))

        # user attributes attached to this id
        for tag in mdir.tags:
            if tag.id==id_ and tag.is_('userattr'):
                desc = "%s size %d" % (tag.typerepr(), tag.size)
                f.write(" %-43s%s\n" % (desc,
                    "%-23s %-8s" % (
                        ' '.join('%02x' % c for c in tag.data[:8]),
                        ''.join(c if c >= ' ' and c <= '~' else '.'
                            for c in map(chr, tag.data[:8])))
                    if not args.no_truncate and len(desc) < 43 else ""))

                if args.no_truncate:
                    # full hexdump of the attribute, 16 bytes per row
                    for i in range(0, len(tag.data), 16):
                        f.write(" %08x: %-47s %-16s\n" % (
                            i, ' '.join('%02x' % c for c in tag.data[i:i+16]),
                            ''.join(c if c >= ' ' and c <= '~' else '.'
                                for c in map(chr, tag.data[i:i+16]))))

        if args.no_truncate and data is not None:
            # full hexdump of the reconstructed file, 16 bytes per row
            for i in range(0, len(data), 16):
                f.write(" %08x: %-47s %-16s\n" % (
                    i, ' '.join('%02x' % c for c in data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, data[i:i+16]))))
def main(args):
    """Walk a littlefs disk image and print its metadata tree.

    Returns 0 on success, 1 if any mdir is corrupted, 2 if a
    metadata-pair cycle is detected.
    """
    with open(args.disk, 'rb') as f:
        dirs = []           # completed directories (lists of mdirs)
        superblock = None   # (superblock tag, inlinestruct tag) once found
        gstate = b''        # xor-accumulated global state
        mdirs = []          # mdirs of the directory currently being walked
        cycle = False
        tail = (args.block1, args.block2)
        hard = False
        while True:
            # have we already visited the pair we're about to load?
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            # map each half of the pair back to its block address
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                # an all-0xff or wrong-sized tail is treated as no tail
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                ngstate = mdir[Tag('movestate', 0, 0)]
                # gstate is the xor of every mdir's movestate
                gstate = bytes((a or 0) ^ (b or 0)
                    for a,b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # add to directories
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                # a soft tail (or no tail) ends the current directory
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[frozenset(dir[0].blocks)] = dir

    # breadth-first walk from the root, naming every reachable directory
    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    try:
                        npath = tag.data.decode('utf8')
                        dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                        nblocks = struct.unpack('<II', dirstruct.data)
                        nmdir = dirtable[frozenset(nblocks)]
                        pending.append(((path + '/' + npath), nmdir))
                    except KeyError:
                        pass

        dir[0].path = path.replace('//', '/')

    # dump tree
    version = ('?', '?')
    if superblock:
        # version halves are stored minor-first, hence the reversed()
        version = tuple(reversed(
            struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
    print("%-47s%s" % ("littlefs v%s.%s" % version,
        "data (truncated, if it fits)"
        if not any([args.no_truncate, args.tags, args.log, args.all]) else ""))

    if gstate:
        print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
        tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
        blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
        if tag.size or not tag.isvalid:
            print(" orphans >=%d" % max(tag.size, 1))
        if tag.type:
            print(" move dir {%#x, %#x} id %d" % (
                blocks[0], blocks[1], tag.id))

    for i, dir in enumerate(dirs):
        print("dir %s" % (json.dumps(dir[0].path)
            if hasattr(dir[0], 'path') else '(orphan)'))

        for j, mdir in enumerate(dir):
            print("mdir {%#x, %#x} rev %d%s" % (
                mdir.blocks[0], mdir.blocks[1], mdir.rev,
                ' (corrupted)' if not mdir else ''))

            # render the mdir body with whichever dumper was requested
            f = io.StringIO()
            if args.tags:
                mdir.dump_tags(f, truncate=not args.no_truncate)
            elif args.log:
                mdir.dump_log(f, truncate=not args.no_truncate)
            elif args.all:
                mdir.dump_all(f, truncate=not args.no_truncate)
            else:
                dumpentries(args, mdir, f)

            # prefix each rendered line with a tree-drawing character
            lines = list(filter(None, f.getvalue().split('\n')))
            for k, line in enumerate(lines):
                print("%s %s" % (
                    ' ' if i == len(dirs)-1 and j == len(dir)-1 else
                    'v' if k == len(lines)-1 else
                    '.' if j == len(dir)-1 else
                    '|',
                    line))

    if cycle:
        print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))

    # exit status: 2 = cycle, 1 = corruption, 0 = clean
    if cycle:
        return 2
    elif not all(mdir for dir in dirs for mdir in dir):
        return 1
    else:
        return 0;
  206. if __name__ == "__main__":
  207. import argparse
  208. import sys
  209. parser = argparse.ArgumentParser(
  210. description="Dump semantic info about the metadata tree in littlefs")
  211. parser.add_argument('disk',
  212. help="File representing the block device.")
  213. parser.add_argument('block_size', type=lambda x: int(x, 0),
  214. help="Size of a block in bytes.")
  215. parser.add_argument('block1', nargs='?', default=0,
  216. type=lambda x: int(x, 0),
  217. help="Optional first block address for finding the root.")
  218. parser.add_argument('block2', nargs='?', default=1,
  219. type=lambda x: int(x, 0),
  220. help="Optional second block address for finding the root.")
  221. parser.add_argument('-t', '--tags', action='store_true',
  222. help="Show metadata tags instead of reconstructing entries.")
  223. parser.add_argument('-l', '--log', action='store_true',
  224. help="Show tags in log.")
  225. parser.add_argument('-a', '--all', action='store_true',
  226. help="Show all tags in log, included tags in corrupted commits.")
  227. parser.add_argument('-T', '--no-truncate', action='store_true',
  228. help="Show the full contents of files/attrs/tags.")
  229. sys.exit(main(parser.parse_args()))