#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a bit of a
# wrapper around nm with some extra conveniences for comparing builds.
# Heavily inspired by Linux's Bloat-O-Meter.
#
import collections as co
import csv
import glob
import itertools as it
import os
import re
import shlex
import subprocess as sp
import sys
  15. OBJ_PATHS = ['*.o']
  16. def collect(paths, **args):
  17. results = co.defaultdict(lambda: 0)
  18. pattern = re.compile(
  19. '^(?P<size>[0-9a-fA-F]+)' +
  20. ' (?P<type>[%s])' % re.escape(args['type']) +
  21. ' (?P<func>.+?)$')
  22. for path in paths:
  23. # note nm-tool may contain extra args
  24. cmd = args['nm_tool'] + ['--size-sort', path]
  25. if args.get('verbose'):
  26. print(' '.join(shlex.quote(c) for c in cmd))
  27. proc = sp.Popen(cmd,
  28. stdout=sp.PIPE,
  29. stderr=sp.PIPE if not args.get('verbose') else None,
  30. universal_newlines=True)
  31. for line in proc.stdout:
  32. m = pattern.match(line)
  33. if m:
  34. results[(path, m.group('func'))] += int(m.group('size'), 16)
  35. proc.wait()
  36. if proc.returncode != 0:
  37. if not args.get('verbose'):
  38. for line in proc.stderr:
  39. sys.stdout.write(line)
  40. sys.exit(-1)
  41. flat_results = []
  42. for (file, func), size in results.items():
  43. # map to source files
  44. if args.get('build_dir'):
  45. file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
  46. # replace .o with .c, different scripts report .o/.c, we need to
  47. # choose one if we want to deduplicate csv files
  48. file = re.sub('\.o$', '.c', file)
  49. # discard internal functions
  50. if not args.get('everything'):
  51. if func.startswith('__'):
  52. continue
  53. # discard .8449 suffixes created by optimizer
  54. func = re.sub('\.[0-9]+', '', func)
  55. flat_results.append((file, func, size))
  56. return flat_results
def main(**args):
    """Find data sizes and report them, optionally diffing/merging CSVs.

    args is the argparse option dict: obj_paths, use, diff, merge, output,
    plus the display flags (quiet, summary, files, size_sort, ...).
    Returns None, so the caller's sys.exit(main(...)) exits with status 0.
    """
    # open path for reading/writing, or dup stdin/stdout when path is '-'
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'
            for path in glob.glob(path):
                paths.append(path)
        if not paths:
            # NOTE(review): message says .obj but the globs above collect .o
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)
        results = collect(paths, **args)
    else:
        # reuse sizes from a previously generated CSV instead of running nm
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['data_size']))
                for result in r
                # skip rows that have no data_size column/value
                if result.get('data_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['data_size']))
                    for result in r
                    if result.get('data_size') not in {None, ''}]
        except FileNotFoundError:
            # a missing diff target diffs against nothing
            prev_results = []
        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []
        # merge?
        if args.get('merge'):
            # carry over any non-size columns from the existing CSV
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('data_size', None)
                        merged_results[(file, func)] = result
                        # NOTE(review): keeps the last row's keys — assumes
                        # every row has the same columns; confirm for ragged CSVs
                        other_fields = result.keys()
            except FileNotFoundError:
                pass
        for file, func, size in results:
            merged_results[(file, func)]['data_size'] = size
        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results

    # sum sizes per file or per function name
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    # map name -> (old, new, diff, ratio); missing-from-old entries get
    # ratio 1.0 (all new), missing-from-new entries keep new=0
    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    # sort (name, size) pairs by the requested order, name as tie-break
    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    # same, for (name, (old, new, diff, ratio)); default orders by ratio
    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    # zero old/new render as "-"; ratio of 0 suppresses the percentage
    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                # unchanged entries are hidden unless --all is given
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()
if __name__ == "__main__":
    # Command-line entry point: parse options and forward them to main() as
    # keyword arguments; main()'s return value becomes the exit status.
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find data sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff data size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level data sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total data size.")
    parser.add_argument('--type', default='dDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    # nm_tool is split into a list so it may carry extra arguments
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))