#!/usr/bin/env python3
#
# Script to find struct sizes.
#
import collections as co
import csv
import glob
import itertools as it
import os
import re
import shlex
import subprocess as sp
import sys
# default glob for finding object files when no paths are given
OBJ_PATHS = ['*.o']
  14. def collect(paths, **args):
  15. results = co.defaultdict(lambda: 0)
  16. pattern = re.compile(
  17. '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
  18. '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
  19. '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')
  20. for path in paths:
  21. # collect structs as we parse dwarf info
  22. found = False
  23. name = None
  24. size = None
  25. # note objdump-tool may contain extra args
  26. cmd = args['objdump_tool'] + ['--dwarf=info', path]
  27. if args.get('verbose'):
  28. print(' '.join(shlex.quote(c) for c in cmd))
  29. proc = sp.Popen(cmd,
  30. stdout=sp.PIPE,
  31. stderr=sp.PIPE if not args.get('verbose') else None,
  32. universal_newlines=True)
  33. for line in proc.stdout:
  34. # state machine here to find structs
  35. m = pattern.match(line)
  36. if m:
  37. if m.group('tag'):
  38. if name is not None and size is not None:
  39. results[(path, name)] = size
  40. found = (m.group('tag') == 'structure_type')
  41. name = None
  42. size = None
  43. elif found and m.group('name'):
  44. name = m.group('name')
  45. elif found and name and m.group('size'):
  46. size = int(m.group('size'))
  47. proc.wait()
  48. if proc.returncode != 0:
  49. if not args.get('verbose'):
  50. for line in proc.stderr:
  51. sys.stdout.write(line)
  52. sys.exit(-1)
  53. flat_results = []
  54. for (file, struct), size in results.items():
  55. # map to source files
  56. if args.get('build_dir'):
  57. file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
  58. # replace .o with .c, different scripts report .o/.c, we need to
  59. # choose one if we want to deduplicate csv files
  60. file = re.sub('\.o$', '.c', file)
  61. flat_results.append((file, struct, size))
  62. return flat_results
  63. def main(**args):
  64. def openio(path, mode='r'):
  65. if path == '-':
  66. if 'r' in mode:
  67. return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
  68. else:
  69. return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
  70. else:
  71. return open(path, mode)
  72. # find sizes
  73. if not args.get('use', None):
  74. # find .o files
  75. paths = []
  76. for path in args['obj_paths']:
  77. if os.path.isdir(path):
  78. path = path + '/*.o'
  79. for path in glob.glob(path):
  80. paths.append(path)
  81. if not paths:
  82. print('no .obj files found in %r?' % args['obj_paths'])
  83. sys.exit(-1)
  84. results = collect(paths, **args)
  85. else:
  86. with openio(args['use']) as f:
  87. r = csv.DictReader(f)
  88. results = [
  89. ( result['file'],
  90. result['name'],
  91. int(result['struct_size']))
  92. for result in r
  93. if result.get('struct_size') not in {None, ''}]
  94. total = 0
  95. for _, _, size in results:
  96. total += size
  97. # find previous results?
  98. if args.get('diff'):
  99. try:
  100. with openio(args['diff']) as f:
  101. r = csv.DictReader(f)
  102. prev_results = [
  103. ( result['file'],
  104. result['name'],
  105. int(result['struct_size']))
  106. for result in r
  107. if result.get('struct_size') not in {None, ''}]
  108. except FileNotFoundError:
  109. prev_results = []
  110. prev_total = 0
  111. for _, _, size in prev_results:
  112. prev_total += size
  113. # write results to CSV
  114. if args.get('output'):
  115. merged_results = co.defaultdict(lambda: {})
  116. other_fields = []
  117. # merge?
  118. if args.get('merge'):
  119. try:
  120. with openio(args['merge']) as f:
  121. r = csv.DictReader(f)
  122. for result in r:
  123. file = result.pop('file', '')
  124. struct = result.pop('name', '')
  125. result.pop('struct_size', None)
  126. merged_results[(file, struct)] = result
  127. other_fields = result.keys()
  128. except FileNotFoundError:
  129. pass
  130. for file, struct, size in results:
  131. merged_results[(file, struct)]['struct_size'] = size
  132. with openio(args['output'], 'w') as f:
  133. w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
  134. w.writeheader()
  135. for (file, struct), result in sorted(merged_results.items()):
  136. w.writerow({'file': file, 'name': struct, **result})
  137. # print results
  138. def dedup_entries(results, by='name'):
  139. entries = co.defaultdict(lambda: 0)
  140. for file, struct, size in results:
  141. entry = (file if by == 'file' else struct)
  142. entries[entry] += size
  143. return entries
  144. def diff_entries(olds, news):
  145. diff = co.defaultdict(lambda: (0, 0, 0, 0))
  146. for name, new in news.items():
  147. diff[name] = (0, new, new, 1.0)
  148. for name, old in olds.items():
  149. _, new, _, _ = diff[name]
  150. diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
  151. return diff
  152. def sorted_entries(entries):
  153. if args.get('size_sort'):
  154. return sorted(entries, key=lambda x: (-x[1], x))
  155. elif args.get('reverse_size_sort'):
  156. return sorted(entries, key=lambda x: (+x[1], x))
  157. else:
  158. return sorted(entries)
  159. def sorted_diff_entries(entries):
  160. if args.get('size_sort'):
  161. return sorted(entries, key=lambda x: (-x[1][1], x))
  162. elif args.get('reverse_size_sort'):
  163. return sorted(entries, key=lambda x: (+x[1][1], x))
  164. else:
  165. return sorted(entries, key=lambda x: (-x[1][3], x))
  166. def print_header(by=''):
  167. if not args.get('diff'):
  168. print('%-36s %7s' % (by, 'size'))
  169. else:
  170. print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))
  171. def print_entry(name, size):
  172. print("%-36s %7d" % (name, size))
  173. def print_diff_entry(name, old, new, diff, ratio):
  174. print("%-36s %7s %7s %+7d%s" % (name,
  175. old or "-",
  176. new or "-",
  177. diff,
  178. ' (%+.1f%%)' % (100*ratio) if ratio else ''))
  179. def print_entries(by='name'):
  180. entries = dedup_entries(results, by=by)
  181. if not args.get('diff'):
  182. print_header(by=by)
  183. for name, size in sorted_entries(entries.items()):
  184. print_entry(name, size)
  185. else:
  186. prev_entries = dedup_entries(prev_results, by=by)
  187. diff = diff_entries(prev_entries, entries)
  188. print_header(by='%s (%d added, %d removed)' % (by,
  189. sum(1 for old, _, _, _ in diff.values() if not old),
  190. sum(1 for _, new, _, _ in diff.values() if not new)))
  191. for name, (old, new, diff, ratio) in sorted_diff_entries(
  192. diff.items()):
  193. if ratio or args.get('all'):
  194. print_diff_entry(name, old, new, diff, ratio)
  195. def print_totals():
  196. if not args.get('diff'):
  197. print_entry('TOTAL', total)
  198. else:
  199. ratio = (0.0 if not prev_total and not total
  200. else 1.0 if not prev_total
  201. else (total-prev_total)/prev_total)
  202. print_diff_entry('TOTAL',
  203. prev_total, total,
  204. total-prev_total,
  205. ratio)
  206. if args.get('quiet'):
  207. pass
  208. elif args.get('summary'):
  209. print_header()
  210. print_totals()
  211. elif args.get('files'):
  212. print_entries(by='file')
  213. print_totals()
  214. else:
  215. print_entries(by='name')
  216. print_totals()
  217. if __name__ == "__main__":
  218. import argparse
  219. import sys
  220. parser = argparse.ArgumentParser(
  221. description="Find struct sizes.")
  222. parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
  223. help="Description of where to find *.o files. May be a directory \
  224. or a list of paths. Defaults to %r." % OBJ_PATHS)
  225. parser.add_argument('-v', '--verbose', action='store_true',
  226. help="Output commands that run behind the scenes.")
  227. parser.add_argument('-q', '--quiet', action='store_true',
  228. help="Don't show anything, useful with -o.")
  229. parser.add_argument('-o', '--output',
  230. help="Specify CSV file to store results.")
  231. parser.add_argument('-u', '--use',
  232. help="Don't compile and find struct sizes, instead use this CSV file.")
  233. parser.add_argument('-d', '--diff',
  234. help="Specify CSV file to diff struct size against.")
  235. parser.add_argument('-m', '--merge',
  236. help="Merge with an existing CSV file when writing to output.")
  237. parser.add_argument('-a', '--all', action='store_true',
  238. help="Show all functions, not just the ones that changed.")
  239. parser.add_argument('-A', '--everything', action='store_true',
  240. help="Include builtin and libc specific symbols.")
  241. parser.add_argument('-s', '--size-sort', action='store_true',
  242. help="Sort by size.")
  243. parser.add_argument('-S', '--reverse-size-sort', action='store_true',
  244. help="Sort by size, but backwards.")
  245. parser.add_argument('-F', '--files', action='store_true',
  246. help="Show file-level struct sizes.")
  247. parser.add_argument('-Y', '--summary', action='store_true',
  248. help="Only show the total struct size.")
  249. parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
  250. help="Path to the objdump tool to use.")
  251. parser.add_argument('--build-dir',
  252. help="Specify the relative build directory. Used to map object files \
  253. to the correct source files.")
  254. sys.exit(main(**vars(parser.parse_args())))