readtree.py

#!/usr/bin/env python3
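#
# Dump semantic info about the metadata tree in littlefs.
#
# Illustrative usage (the image name and block size below are examples, not
# requirements; block1/block2 default to the {0, 1} root pair, and with none
# of -s/-g/-m given all three views are printed):
#
#   ./readtree.py disk.img 4096
#   ./readtree.py disk.img 4096 -t -d
#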
import struct
import sys
import json
import io
import itertools as it
from readmdir import Tag, MetadataPair

def popc(x):
    return bin(x).count('1')

def ctz(x):
    return len(bin(x)) - len(bin(x).rstrip('0'))
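
# dumptags prints the raw tag stream of one metadata pair: each tag's type,
# id, and size, decoded fields for name/dirstruct/ctzstruct/inline/tail tags,
# and an optional hexdump of the tag data when --data is given. popc and ctz
# above (population count and count-trailing-zeros) back the CTZ skip-list
# index math used in dumpentries.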
def dumptags(args, mdir, f):
    if args.all:
        tags = mdir.all_
    elif args.log:
        tags = mdir.log
    else:
        tags = mdir.tags

    for k, tag in enumerate(tags):
        f.write("tag %08x %s" % (tag, tag.typerepr()))
        if tag.id != 0x3ff:
            f.write(" id %d" % tag.id)
        if tag.size != 0x3ff:
            f.write(" size %d" % tag.size)
        if tag.is_('name'):
            f.write(" name %s" %
                json.dumps(tag.data.decode('utf8')))
        if tag.is_('dirstruct'):
            f.write(" dir {%#x, %#x}" % struct.unpack(
                '<II', tag.data[:8].ljust(8, b'\xff')))
        if tag.is_('ctzstruct'):
            f.write(" ctz {%#x} size %d" % struct.unpack(
                '<II', tag.data[:8].ljust(8, b'\xff')))
        if tag.is_('inlinestruct'):
            f.write(" inline size %d" % tag.size)
        if tag.is_('tail'):
            f.write(" tail {%#x, %#x}" % struct.unpack(
                '<II', tag.data[:8].ljust(8, b'\xff')))
        f.write("\n")

        if args.data:
            for i in range(0, len(tag.data), 16):
                f.write(" %-47s %-16s\n" % (
                    ' '.join('%02x' % c for c in tag.data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, tag.data[i:i+16]))))
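
# dumpentries reconstructs directory entries from a metadata pair's name and
# struct tags, and with --data also dumps file contents: inline files straight
# from the tag, CTZ files by walking their skip-list on disk (truncated to 256
# bytes unless --no-truncate is given), plus any user attributes.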
def dumpentries(args, mdir, f):
    for k, id_ in enumerate(mdir.ids):
        name = mdir[Tag('name', id_, 0)]
        struct_ = mdir[Tag('struct', id_, 0)]

        f.write("id %d %s %s" % (
            name.id, name.typerepr(),
            json.dumps(name.data.decode('utf8'))))
        if struct_.is_('dirstruct'):
            f.write(" dir {%#x, %#x}" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('ctzstruct'):
            f.write(" ctz {%#x} size %d" % struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff')))
        if struct_.is_('inlinestruct'):
            f.write(" inline size %d" % struct_.size)
        f.write("\n")

        if args.data and struct_.is_('inlinestruct'):
            for i in range(0, len(struct_.data), 16):
                f.write(" %-47s %-16s\n" % (
                    ' '.join('%02x' % c for c in struct_.data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, struct_.data[i:i+16]))))
        elif args.data and struct_.is_('ctzstruct'):
            block, size = struct.unpack(
                '<II', struct_.data[:8].ljust(8, b'\xff'))
            data = []
            i = 0 if size == 0 else (size-1) // (args.block_size - 8)
            if i != 0:
                i = ((size-1) - 4*(popc(i-1)+2)) // (args.block_size - 8)
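            # Walk the CTZ skip-list from the head block back to block 0:
            # for i > 0, block i starts with ctz(i)+1 little-endian 32-bit
            # pointers (the first points to block i-1), so skip those words,
            # keep the payload, and follow the first pointer; block 0 is
            # pure data. The payloads are reversed below to put the file
            # back in order.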
            with open(args.disk, 'rb') as f2:
                while i >= 0:
                    f2.seek(block * args.block_size)
                    dat = f2.read(args.block_size)
                    data.append(dat[4*(ctz(i)+1) if i != 0 else 0:])
                    block, = struct.unpack('<I', dat[:4].ljust(4, b'\xff'))
                    i -= 1
            data = bytes(it.islice(
                it.chain.from_iterable(reversed(data)), size))

            for i in range(0, min(len(data), 256)
                    if not args.no_truncate else len(data), 16):
                f.write(" %-47s %-16s\n" % (
                    ' '.join('%02x' % c for c in data[i:i+16]),
                    ''.join(c if c >= ' ' and c <= '~' else '.'
                        for c in map(chr, data[i:i+16]))))

        for tag in mdir.tags:
            if tag.id == id_ and tag.is_('userattr'):
                f.write("id %d %s size %d\n" % (
                    id_, tag.typerepr(), tag.size))

                if args.data:
                    for i in range(0, len(tag.data), 16):
                        f.write(" %-47s %-16s\n" % (
                            ' '.join('%02x' % c for c in tag.data[i:i+16]),
                            ''.join(c if c >= ' ' and c <= '~' else '.'
                                for c in map(chr, tag.data[i:i+16]))))
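
# main walks the linked list of metadata pairs starting from the root pair,
# grouping consecutive hardtail-linked mdirs into directories, picking up the
# superblock and xoring together the global move-state along the way, then
# resolving directory paths and printing the requested views.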
def main(args):
    with open(args.disk, 'rb') as f:
        dirs = []
        superblock = None
        gstate = b''
        mdirs = []
        tail = (args.block1, args.block2)
        hard = False
        while True:
            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a, b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # add to directories
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[tuple(sorted(dir[0].blocks))] = dir

    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    npath = tag.data.decode('utf8')
                    dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                    nblocks = struct.unpack('<II', dirstruct.data)
                    nmdir = dirtable[tuple(sorted(nblocks))]
                    pending.append(((path + '/' + npath), nmdir))

        dir[0].path = path.replace('//', '/')

    # dump tree
    if not args.superblock and not args.gstate and not args.mdirs:
        args.superblock = True
        args.gstate = True
        args.mdirs = True

    if args.superblock and superblock:
        print("superblock %s" % json.dumps(superblock[0].data.decode('utf8')))
        print(
            " version v{1}.{0}\n"
            " block_size {2}\n"
            " block_count {3}\n"
            " name_max {4}\n"
            " file_max {5}\n"
            " attr_max {6}"
            .format(*struct.unpack(
                '<HHIIIII', superblock[1].data[:24].ljust(24, b'\xff'))))

    if args.gstate and gstate:
        print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
        tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
        blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
        if tag.size:
            print(" orphans %d" % tag.size)
        if not tag.isvalid:
            print(" move dir {%#x, %#x} id %d" % (
                blocks[0], blocks[1], tag.id))

    if args.mdirs:
        for i, dir in enumerate(dirs):
            print("dir %s" % (json.dumps(dir[0].path)
                if hasattr(dir[0], 'path') else '(orphan)'))

            for j, mdir in enumerate(dir):
                print("mdir {%#x, %#x} rev %d%s" % (
                    mdir.blocks[0], mdir.blocks[1], mdir.rev,
                    ' (corrupted)' if not mdir else ''))

                f = io.StringIO()
                if args.tags or args.all or args.log:
                    dumptags(args, mdir, f)
                else:
                    dumpentries(args, mdir, f)

                lines = list(filter(None, f.getvalue().split('\n')))
                for k, line in enumerate(lines):
                    print("%s %s" % (
                        ' ' if j == len(dir)-1 else
                        'v' if k == len(lines)-1 else
                        '|',
                        line))

    return 0 if all(mdir for dir in dirs for mdir in dir) else 1

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump semantic info about the metadata tree in littlefs")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', nargs='?', default=0,
        type=lambda x: int(x, 0),
        help="Optional first block address for finding the root.")
    parser.add_argument('block2', nargs='?', default=1,
        type=lambda x: int(x, 0),
        help="Optional second block address for finding the root.")
    parser.add_argument('-s', '--superblock', action='store_true',
        help="Show contents of the superblock.")
    parser.add_argument('-g', '--gstate', action='store_true',
        help="Show contents of global-state.")
    parser.add_argument('-m', '--mdirs', action='store_true',
        help="Show contents of metadata-pairs/directories.")
    parser.add_argument('-t', '--tags', action='store_true',
        help="Show metadata tags instead of reconstructing entries.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, including tags in corrupted commits.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-d', '--data', action='store_true',
        help="Also show the raw contents of files/attrs/tags.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Don't truncate large amounts of data in files.")
    sys.exit(main(parser.parse_args()))