#!/usr/bin/env python3
import io
import itertools as it
import json
import struct
import sys

from readmdir import Tag, MetadataPair
  8. def popc(x):
  9. return bin(x).count('1')
  10. def ctz(x):
  11. return len(bin(x)) - len(bin(x).rstrip('0'))
  12. def dumptags(args, mdir, f):
  13. if args.all:
  14. tags = mdir.all_
  15. elif args.log:
  16. tags = mdir.log
  17. else:
  18. tags = mdir.tags
  19. for k, tag in enumerate(tags):
  20. f.write("tag %08x %s" % (tag, tag.typerepr()))
  21. if tag.id != 0x3ff:
  22. f.write(" id %d" % tag.id)
  23. if tag.size != 0x3ff:
  24. f.write(" size %d" % tag.size)
  25. if tag.is_('name'):
  26. f.write(" name %s" %
  27. json.dumps(tag.data.decode('utf8')))
  28. if tag.is_('dirstruct'):
  29. f.write(" dir {%#x, %#x}" % struct.unpack(
  30. '<II', tag.data[:8].ljust(8, b'\xff')))
  31. if tag.is_('ctzstruct'):
  32. f.write(" ctz {%#x} size %d" % struct.unpack(
  33. '<II', tag.data[:8].ljust(8, b'\xff')))
  34. if tag.is_('inlinestruct'):
  35. f.write(" inline size %d" % tag.size)
  36. if tag.is_('gstate'):
  37. f.write(" 0x%s" % ''.join('%02x' % c for c in tag.data))
  38. if tag.is_('tail'):
  39. f.write(" tail {%#x, %#x}" % struct.unpack(
  40. '<II', tag.data[:8].ljust(8, b'\xff')))
  41. f.write("\n")
  42. if args.data:
  43. for i in range(0, len(tag.data), 16):
  44. f.write(" %-47s %-16s\n" % (
  45. ' '.join('%02x' % c for c in tag.data[i:i+16]),
  46. ''.join(c if c >= ' ' and c <= '~' else '.'
  47. for c in map(chr, tag.data[i:i+16]))))
def dumpentries(args, mdir, f):
    """Reconstruct and dump the logical entries (files/dirs/attrs) of an mdir.

    Writes one "id ..." line per entry to the file-like f, optionally
    followed by a hexdump of the entry's contents when args.data is set.
    For CTZ (non-inline) files this re-reads the file's block chain from
    args.disk.
    """
    for k, id_ in enumerate(mdir.ids):
        name = mdir[Tag('name', id_, 0)]
        struct_ = mdir[Tag('struct', id_, 0)]

        f.write("id %d %s %s" % (
            id_, name.typerepr(),
            json.dumps(name.data.decode('utf8'))))
        if struct_.is_('dirstruct'):
            # directory entry: struct payload is a metadata-pair {block, block}
            f.write(" dir {%#x, %#x}" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('ctzstruct'):
            # ctz file: struct payload is {head block, file size}
            f.write(" ctz {%#x} size %d" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('inlinestruct'):
            f.write(" inline size %d" % struct_.size)
        f.write("\n")

        if args.data and struct_.is_('inlinestruct'):
            # inline file: contents live directly in the tag payload
            for i in range(0, len(struct_.data), 16):
                f.write(" %-47s %-16s\n" % (
                    ' '.join('%02x' % c for c in struct_.data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, struct_.data[i:i+16]))))
        elif args.data and struct_.is_('ctzstruct'):
            block, size = struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
            data = []
            # i = index of the last block in the CTZ skip-list; each block
            # holds (block_size - 8) payload bytes plus skip-list pointers
            i = 0 if size == 0 else (size-1) // (args.block_size - 8)
            if i != 0:
                # adjust for the pointer bytes consumed in earlier blocks
                i = ((size-1) - 4*popc(i-1)+2) // (args.block_size - 8)
            # walk the chain backwards from the head; the first 4 bytes of
            # each block point at the previous block
            with open(args.disk, 'rb') as f2:
                while i >= 0:
                    f2.seek(block * args.block_size)
                    dat = f2.read(args.block_size)
                    # block i carries ctz(i)+1 skip-list pointers before data
                    data.append(dat[4*(ctz(i)+1) if i != 0 else 0:])
                    block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff'))
                    i -= 1
            # blocks were collected last-to-first; reverse and clamp to size
            data = bytes(it.islice(
                it.chain.from_iterable(reversed(data)), size))

            # show at most 256 bytes unless --no-truncate was given
            for i in range(0, min(len(data), 256)
                    if not args.no_truncate else len(data), 16):
                f.write(" %-47s %-16s\n" % (
                    ' '.join('%02x' % c for c in data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, data[i:i+16]))))

        # also dump any custom user attributes attached to this id
        for tag in mdir.tags:
            if tag.id==id_ and tag.is_('userattr'):
                f.write("id %d %s size %d\n" % (
                    id_, tag.typerepr(), tag.size))

                if args.data:
                    for i in range(0, len(tag.data), 16):
                        f.write(" %-47s %-16s\n" % (
                            ' '.join('%02x' % c for c in tag.data[i:i+16]),
                            ''.join(c if c >= ' ' and c <= '~' else '.'
                                for c in map(chr, tag.data[i:i+16]))))
def main(args):
    """Walk the littlefs metadata tree on args.disk and print it.

    Follows the tail-pointer chain of metadata pairs starting at
    (args.block1, args.block2), collecting directories, the superblock,
    and global state, then prints the selected views.

    Returns 0 on success, 1 if any mdir was corrupted, 2 if a tail-chain
    cycle was detected.
    """
    with open(args.disk, 'rb') as f:
        dirs = []
        superblock = None
        gstate = b''
        mdirs = []
        cycle = False
        tail = (args.block1, args.block2)
        hard = False
        while True:
            # if we've already seen this pair of blocks, the tail chain loops
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir: read both blocks of the pair, padding short reads
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                # map buffer identity back to the block address it came from
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                # an all-0xff or malformed tail means "no tail"
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate? deltas are xor-folded across all mdirs
            try:
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a,b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # add to directories; a hardtail continues the same directory,
            # anything else closes it
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            # NOTE(review): 'hard' is assigned but never read afterwards
            hard = mdir.tail.is_('hardtail')

        # find paths: resolve each directory's path by following dir entries
        dirtable = {}
        for dir in dirs:
            dirtable[frozenset(dir[0].blocks)] = dir

        pending = [("/", dirs[0])]
        while pending:
            path, dir = pending.pop(0)
            for mdir in dir:
                for tag in mdir.tags:
                    if tag.is_('dir'):
                        try:
                            npath = tag.data.decode('utf8')
                            dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                            nblocks = struct.unpack('<II', dirstruct.data)
                            nmdir = dirtable[frozenset(nblocks)]
                            pending.append(((path + '/' + npath), nmdir))
                        except KeyError:
                            pass

            dir[0].path = path.replace('//', '/')

        # dump tree; with no view flags given, show everything
        if not args.superblock and not args.gstate and not args.mdirs:
            args.superblock = True
            args.gstate = True
            args.mdirs = True

        if args.superblock and superblock:
            # version halfword order: major at offset 2, minor at offset 0
            print("superblock %s v%d.%d" % (
                json.dumps(superblock[0].data.decode('utf8')),
                struct.unpack('<H', superblock[1].data[2:2+2])[0],
                struct.unpack('<H', superblock[1].data[0:0+2])[0]))
            print(
                " block_size %d\n"
                " block_count %d\n"
                " name_max %d\n"
                " file_max %d\n"
                " attr_max %d" % struct.unpack(
                    '<IIIII', superblock[1].data[4:4+20].ljust(20, b'\xff')))

        if args.gstate and gstate:
            print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
            # gstate layout: 4-byte tag, then a pair of block addresses
            tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
            blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
            if tag.size:
                print(" orphans %d" % tag.size)
            if tag.type:
                print(" move dir {%#x, %#x} id %d" % (
                    blocks[0], blocks[1], tag.id))

        if args.mdirs:
            for i, dir in enumerate(dirs):
                print("dir %s" % (json.dumps(dir[0].path)
                    if hasattr(dir[0], 'path') else '(orphan)'))

                for j, mdir in enumerate(dir):
                    print("mdir {%#x, %#x} rev %d%s" % (
                        mdir.blocks[0], mdir.blocks[1], mdir.rev,
                        ' (corrupted)' if not mdir else ''))
                    # capture dump output so each line can get a tree gutter
                    f = io.StringIO()
                    if args.tags or args.all or args.log:
                        dumptags(args, mdir, f)
                    else:
                        dumpentries(args, mdir, f)

                    lines = list(filter(None, f.getvalue().split('\n')))
                    for k, line in enumerate(lines):
                        # gutter: ' ' after last mdir, 'v' on last line, '|' between
                        print("%s %s" % (
                            ' ' if j == len(dir)-1 else
                            'v' if k == len(lines)-1 else
                            '|',
                            line))

        if cycle:
            print("*** cycle detected! -> {%#x, %#x} ***" % (cycle[0], cycle[1]))

        if cycle:
            return 2
        elif not all(mdir for dir in dirs for mdir in dir):
            # MetadataPair is falsy when both copies failed to validate
            return 1
        else:
            return 0;
  231. if __name__ == "__main__":
  232. import argparse
  233. import sys
  234. parser = argparse.ArgumentParser(
  235. description="Dump semantic info about the metadata tree in littlefs")
  236. parser.add_argument('disk',
  237. help="File representing the block device.")
  238. parser.add_argument('block_size', type=lambda x: int(x, 0),
  239. help="Size of a block in bytes.")
  240. parser.add_argument('block1', nargs='?', default=0,
  241. type=lambda x: int(x, 0),
  242. help="Optional first block address for finding the root.")
  243. parser.add_argument('block2', nargs='?', default=1,
  244. type=lambda x: int(x, 0),
  245. help="Optional second block address for finding the root.")
  246. parser.add_argument('-s', '--superblock', action='store_true',
  247. help="Show contents of the superblock.")
  248. parser.add_argument('-g', '--gstate', action='store_true',
  249. help="Show contents of global-state.")
  250. parser.add_argument('-m', '--mdirs', action='store_true',
  251. help="Show contents of metadata-pairs/directories.")
  252. parser.add_argument('-t', '--tags', action='store_true',
  253. help="Show metadata tags instead of reconstructing entries.")
  254. parser.add_argument('-a', '--all', action='store_true',
  255. help="Show all tags in log, included tags in corrupted commits.")
  256. parser.add_argument('-l', '--log', action='store_true',
  257. help="Show tags in log.")
  258. parser.add_argument('-d', '--data', action='store_true',
  259. help="Also show the raw contents of files/attrs/tags.")
  260. parser.add_argument('-T', '--no-truncate', action='store_true',
  261. help="Don't truncate large amounts of data in files.")
  262. sys.exit(main(parser.parse_args()))