readtree.py

#!/usr/bin/env python3

import struct
import sys
import json
import io
import itertools as it
from readmdir import Tag, MetadataPair

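# number of set bits in x (popcount); used by the CTZ skip-list math below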
def popc(x):
    return bin(x).count('1')

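# number of trailing zero bits in x; block i of a CTZ skip-list is prefixed
# with ctz(i)+1 pointers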
def ctz(x):
    return len(bin(x)) - len(bin(x).rstrip('0'))

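# reconstruct and print the file/directory entries stored in a metadata pair,
# optionally hexdumping inline data, CTZ file contents, and user attributes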
def dumpentries(args, mdir, f):
    for k, id_ in enumerate(mdir.ids):
        name = mdir[Tag('name', id_, 0)]
        struct_ = mdir[Tag('struct', id_, 0)]

        f.write("id %d %s %s" % (
            id_, name.typerepr(),
            json.dumps(name.data.decode('utf8'))))
        if struct_.is_('dirstruct'):
            f.write(" dir {%#x, %#x}" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('ctzstruct'):
            f.write(" ctz {%#x} size %d" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('inlinestruct'):
            f.write(" inline size %d" % struct_.size)
        f.write("\n")

        if args.data and struct_.is_('inlinestruct'):
            for i in range(0, len(struct_.data), 16):
                f.write("  %08x: %-47s  %-16s\n" % (
                    i, ' '.join('%02x' % c for c in struct_.data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, struct_.data[i:i+16]))))
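        # for CTZ files, walk the skip-list backwards from the last block:
        # block i > 0 begins with 4*(ctz(i)+1) bytes of pointers, the first of
        # which always points to the previous block (i-1)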
        elif args.data and struct_.is_('ctzstruct'):
            block, size = struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
            data = []
            i = 0 if size == 0 else (size-1) // (args.block_size - 8)
            if i != 0:
                i = ((size-1) - 4*popc(i-1)+2) // (args.block_size - 8)

            with open(args.disk, 'rb') as f2:
                while i >= 0:
                    f2.seek(block * args.block_size)
                    dat = f2.read(args.block_size)
                    data.append(dat[4*(ctz(i)+1) if i != 0 else 0:])
                    block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff'))
                    i -= 1

            data = bytes(it.islice(
                it.chain.from_iterable(reversed(data)), size))

            for i in range(0, min(len(data), 256)
                    if not args.no_truncate else len(data), 16):
                f.write("  %08x: %-47s  %-16s\n" % (
                    i, ' '.join('%02x' % c for c in data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, data[i:i+16]))))

        for tag in mdir.tags:
            if tag.id == id_ and tag.is_('userattr'):
                f.write("id %d %s size %d\n" % (
                    id_, tag.typerepr(), tag.size))

                if args.data:
                    for i in range(0, len(tag.data), 16):
                        f.write("  %-47s  %-16s\n" % (
                            ' '.join('%02x' % c for c in tag.data[i:i+16]),
                            ''.join(c if c >= ' ' and c <= '~' else '.'
                                for c in map(chr, tag.data[i:i+16]))))

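# scan the filesystem: follow the metadata-pair tail pointers starting from the
# root pair (block1, block2), grouping mdirs into directories, picking up the
# superblock and xored global-state along the way, and bailing out on cycles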
def main(args):
    with open(args.disk, 'rb') as f:
        dirs = []
        superblock = None
        gstate = b''
        mdirs = []
        cycle = False
        tail = (args.block1, args.block2)
        hard = False
        while True:
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a, b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # add to directories
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[frozenset(dir[0].blocks)] = dir

    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    try:
                        npath = tag.data.decode('utf8')
                        dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                        nblocks = struct.unpack('<II', dirstruct.data)
                        nmdir = dirtable[frozenset(nblocks)]
                        pending.append(((path + '/' + npath), nmdir))
                    except KeyError:
                        pass

        dir[0].path = path.replace('//', '/')

    # dump tree
    if not args.superblock and not args.gstate and not args.mdirs:
        args.superblock = True
        args.gstate = True
        args.mdirs = True

    if args.superblock and superblock:
        print("superblock %s v%d.%d" % (
            json.dumps(superblock[0].data.decode('utf8')),
            struct.unpack('<H', superblock[1].data[2:2+2])[0],
            struct.unpack('<H', superblock[1].data[0:0+2])[0]))
        print(
            "  block_size %d\n"
            "  block_count %d\n"
            "  name_max %d\n"
            "  file_max %d\n"
            "  attr_max %d" % struct.unpack(
                '<IIIII', superblock[1].data[4:4+20].ljust(20, b'\xff')))

    if args.gstate and gstate:
        print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
        tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
        blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
        if tag.size:
            print("  orphans %d" % tag.size)
        if tag.type:
            print("  move dir {%#x, %#x} id %d" % (
                blocks[0], blocks[1], tag.id))

    if args.mdirs:
        for i, dir in enumerate(dirs):
            print("dir %s" % (json.dumps(dir[0].path)
                if hasattr(dir[0], 'path') else '(orphan)'))

            for j, mdir in enumerate(dir):
                print("mdir {%#x, %#x} rev %d%s" % (
                    mdir.blocks[0], mdir.blocks[1], mdir.rev,
                    ' (corrupted)' if not mdir else ''))

                f = io.StringIO()
                if args.tags:
                    mdir.dump_tags(f, truncate=not args.no_truncate)
                elif args.log:
                    mdir.dump_log(f, truncate=not args.no_truncate)
                elif args.all:
                    mdir.dump_all(f, truncate=not args.no_truncate)
                else:
                    dumpentries(args, mdir, f)

                lines = list(filter(None, f.getvalue().split('\n')))
                for k, line in enumerate(lines):
                    print("%s %s" % (
                        ' ' if j == len(dir)-1 else
                        'v' if k == len(lines)-1 else
                        '|',
                        line))

    if cycle:
        print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))

    if cycle:
        return 2
    elif not all(mdir for dir in dirs for mdir in dir):
        return 1
    else:
        return 0

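# command-line entry point; for example (disk.img is a hypothetical image file):
#   ./readtree.py disk.img 4096 -d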
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump semantic info about the metadata tree in littlefs")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', nargs='?', default=0,
        type=lambda x: int(x, 0),
        help="Optional first block address for finding the root.")
    parser.add_argument('block2', nargs='?', default=1,
        type=lambda x: int(x, 0),
        help="Optional second block address for finding the root.")
    parser.add_argument('-s', '--superblock', action='store_true',
        help="Show contents of the superblock.")
    parser.add_argument('-g', '--gstate', action='store_true',
        help="Show contents of global-state.")
    parser.add_argument('-m', '--mdirs', action='store_true',
        help="Show contents of metadata-pairs/directories.")
    parser.add_argument('-t', '--tags', action='store_true',
        help="Show metadata tags instead of reconstructing entries.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, including tags in corrupted commits.")
    parser.add_argument('-d', '--data', action='store_true',
        help="Also show the raw contents of files/attrs/tags.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Don't truncate large amounts of data.")
    sys.exit(main(parser.parse_args()))