#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a big wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#
# Example:
# ./scripts/data.py lfs.o lfs_util.o -Ssize
#
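# More examples (data.csv is just a hypothetical output name, the flags are
# the ones defined at the bottom of this script):
# ./scripts/data.py lfs.o lfs_util.o -o data.csv   # write results to a CSV
# ./scripts/data.py lfs.o lfs_util.o -d data.csv   # diff against a saved CSV
# ./scripts/data.py lfs.o lfs_util.o -Y            # only print the TOTAL row
#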
# Copyright (c) 2022, The littlefs authors.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#

import collections as co
import csv
import difflib
import glob
import itertools as it
import math as m
import os
import re
import shlex
import subprocess as sp
import sys


OBJ_PATHS = ['*.o']
NM_TOOL = ['nm']
NM_TYPES = 'dDbB'
OBJDUMP_TOOL = ['objdump']


# integer fields
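# Int wraps an int (or ±∞) in a namedtuple so results can be parsed from CSV
# strings, summed, and diffed; table()/diff_table()/diff_diff()/ratio() are
# the formatting hooks the table printer below relies on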
class Int(co.namedtuple('Int', 'x')):
    __slots__ = ()
    def __new__(cls, x=0):
        if isinstance(x, Int):
            return x

        if isinstance(x, str):
            try:
                x = int(x, 0)
            except ValueError:
                # also accept +-∞ and +-inf
                if re.match(r'^\s*\+?\s*(?:∞|inf)\s*$', x):
                    x = m.inf
                elif re.match(r'^\s*-\s*(?:∞|inf)\s*$', x):
                    x = -m.inf
                else:
                    raise

        assert isinstance(x, int) or m.isinf(x), x
        return super().__new__(cls, x)

    def __str__(self):
        if self.x == m.inf:
            return '∞'
        elif self.x == -m.inf:
            return '-∞'
        else:
            return str(self.x)

    def __int__(self):
        assert not m.isinf(self.x)
        return self.x

    def __float__(self):
        return float(self.x)

    none = '%7s' % '-'
    def table(self):
        return '%7s' % (self,)

    diff_none = '%7s' % '-'
    diff_table = table

    def diff_diff(self, other):
        new = self.x if self else 0
        old = other.x if other else 0
        diff = new - old
        if diff == +m.inf:
            return '%7s' % '+∞'
        elif diff == -m.inf:
            return '%7s' % '-∞'
        else:
            return '%+7d' % diff

    def ratio(self, other):
        new = self.x if self else 0
        old = other.x if other else 0
        if m.isinf(new) and m.isinf(old):
            return 0.0
        elif m.isinf(new):
            return +m.inf
        elif m.isinf(old):
            return -m.inf
        elif not old and not new:
            return 0.0
        elif not old:
            return 1.0
        else:
            return (new-old) / old

    def __add__(self, other):
        return self.__class__(self.x + other.x)

    def __sub__(self, other):
        return self.__class__(self.x - other.x)

    def __mul__(self, other):
        return self.__class__(self.x * other.x)


# data size results
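# a single data-size measurement: one symbol ('function') in one source file;
# __add__ merges duplicates by summing sizes, which is what fold() relies on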
class DataResult(co.namedtuple('DataResult', [
        'file', 'function',
        'size'])):
    _by = ['file', 'function']
    _fields = ['size']
    _types = {'size': Int}

    __slots__ = ()
    def __new__(cls, file='', function='', size=0):
        return super().__new__(cls, file, function,
            Int(size))

    def __add__(self, other):
        return DataResult(self.file, self.function,
            self.size + other.size)
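
# open a file, or a duplicate of stdin/stdout when path is '-', so '-' works
# for the CSV options (-o/-u/-d)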
def openio(path, mode='r'):
    if path == '-':
        if mode == 'r':
            return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
        else:
            return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
    else:
        return open(path, mode)
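
# collect data-size results from a set of object files
#
# This makes up to three passes over each .o file:
# 1. nm --size-sort, to find the size of every symbol whose nm type is in
#    nm_types ('dDbB' by default, i.e. initialized-data and bss symbols)
# 2. objdump --dwarf=rawline, to recover the debug-info dir/file tables
# 3. objdump --dwarf=info, to map symbol names back to the files they were
#    declared in
# The objdump passes are best-effort; if they fail we fall back to guessing
# the source as the object path with .o replaced by .c.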
def collect(paths, *,
        nm_tool=NM_TOOL,
        nm_types=NM_TYPES,
        objdump_tool=OBJDUMP_TOOL,
        sources=None,
        everything=False,
        **args):
    size_pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(nm_types) +
        ' (?P<func>.+?)$')
    line_pattern = re.compile(
        r'^\s+(?P<no>[0-9]+)\s+'
        r'(?:(?P<dir>[0-9]+)\s+)?'
        r'.*\s+'
        r'(?P<path>[^\s]+)$')
    info_pattern = re.compile(
        r'^(?:.*(?P<tag>DW_TAG_[a-z_]+).*'
        r'|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        r'|^.*DW_AT_decl_file.*:\s*(?P<file>[0-9]+)\s*)$')

    results = []
    for path in paths:
        # guess the source, if we have debug-info we'll replace this later
        file = re.sub(r'(\.o)?$', '.c', path, 1)

        # find symbol sizes
        results_ = []
        # note nm-tool may contain extra args
        cmd = nm_tool + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            m = size_pattern.match(line)
            if m:
                func = m.group('func')
                # discard internal functions
                if not everything and func.startswith('__'):
                    continue
                results_.append(DataResult(
                    file, func,
                    int(m.group('size'), 16)))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # try to figure out the source file if we have debug-info
        dirs = {}
        files = {}
        # note objdump-tool may contain extra args
        cmd = objdump_tool + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # note that files contain references to dirs, which we
            # dereference as soon as we see them as each file table follows a
            # dir table
            m = line_pattern.match(line)
            if m:
                if not m.group('dir'):
                    # found a directory entry
                    dirs[int(m.group('no'))] = m.group('path')
                else:
                    # found a file entry
                    dir = int(m.group('dir'))
                    if dir in dirs:
                        files[int(m.group('no'))] = os.path.join(
                            dirs[dir],
                            m.group('path'))
                    else:
                        files[int(m.group('no'))] = m.group('path')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        defs = {}
        is_func = False
        f_name = None
        f_file = None
        # note objdump-tool may contain extra args
        cmd = objdump_tool + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace',
            close_fds=False)
        for line in proc.stdout:
            # state machine here to find definitions
            m = info_pattern.match(line)
            if m:
                if m.group('tag'):
                    if is_func:
                        defs[f_name] = files.get(f_file, '?')
                    is_func = (m.group('tag') == 'DW_TAG_subprogram')
                elif m.group('name'):
                    f_name = m.group('name')
                elif m.group('file'):
                    f_file = int(m.group('file'))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            # do nothing on error, we don't need objdump to work, source files
            # may just be inaccurate
            pass

        for r in results_:
            # find best matching debug symbol, this may be slightly different
            # due to optimizations
            if defs:
                # exact match? avoid difflib if we can for speed
                if r.function in defs:
                    file = defs[r.function]
                else:
                    _, file = max(
                        defs.items(),
                        key=lambda d: difflib.SequenceMatcher(None,
                            d[0],
                            r.function, False).ratio())
            else:
                file = r.file

            # ignore filtered sources
            if sources is not None:
                if not any(
                        os.path.abspath(file) == os.path.abspath(s)
                        for s in sources):
                    continue
            else:
                # default to only cwd
                if not everything and not os.path.commonpath([
                        os.getcwd(),
                        os.path.abspath(file)]) == os.getcwd():
                    continue

            # simplify path
            if os.path.commonpath([
                    os.getcwd(),
                    os.path.abspath(file)]) == os.getcwd():
                file = os.path.relpath(file)
            else:
                file = os.path.abspath(file)

            results.append(DataResult(file, r.function, r.size))

    return results
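
# fold results together by the fields named in by, after filtering by any
# -D/--define constraints; results that fold to the same name are merged by
# summing them (via Result.__add__)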
def fold(Result, results, *,
        by=None,
        defines=None,
        **_):
    if by is None:
        by = Result._by

    for k in it.chain(by or [], (k for k, _ in defines or [])):
        if k not in Result._by and k not in Result._fields:
            print("error: could not find field %r?" % k)
            sys.exit(-1)

    # filter by matching defines
    if defines is not None:
        results_ = []
        for r in results:
            if all(getattr(r, k) in vs for k, vs in defines):
                results_.append(r)
        results = results_

    # organize results into conflicts
    folding = co.OrderedDict()
    for r in results:
        name = tuple(getattr(r, k) for k in by)
        if name not in folding:
            folding[name] = []
        folding[name].append(r)

    # merge conflicts
    folded = []
    for name, rs in folding.items():
        folded.append(sum(rs[1:], start=rs[0]))

    return folded
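
# render results as a table
#
# When diff_results is given, each row shows old/new/diff columns plus a
# percent change, and unchanged rows are hidden unless -a/--all is set; with
# percent=True only the new values and percent change are shown; -Y/--summary
# collapses everything down to the single TOTAL row.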
def table(Result, results, diff_results=None, *,
        by=None,
        fields=None,
        sort=None,
        summary=False,
        all=False,
        percent=False,
        **_):
    all_, all = all, __builtins__.all

    if by is None:
        by = Result._by
    if fields is None:
        fields = Result._fields
    types = Result._types

    # fold again
    results = fold(Result, results, by=by)
    if diff_results is not None:
        diff_results = fold(Result, diff_results, by=by)

    # organize by name
    table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in results}
    diff_table = {
        ','.join(str(getattr(r, k) or '') for k in by): r
        for r in diff_results or []}
    names = list(table.keys() | diff_table.keys())

    # sort again, now with diff info, note that python's sort is stable
    names.sort()
    if diff_results is not None:
        names.sort(key=lambda n: tuple(
            types[k].ratio(
                getattr(table.get(n), k, None),
                getattr(diff_table.get(n), k, None))
            for k in fields),
            reverse=True)
    if sort:
        for k, reverse in reversed(sort):
            names.sort(key=lambda n: (getattr(table[n], k),)
                if getattr(table.get(n), k, None) is not None else (),
                reverse=reverse ^ (not k or k in Result._fields))

    # build up our lines
    lines = []

    # header
    line = []
    line.append('%s%s' % (
        ','.join(by),
        ' (%d added, %d removed)' % (
            sum(1 for n in table if n not in diff_table),
            sum(1 for n in diff_table if n not in table))
            if diff_results is not None and not percent else '')
        if not summary else '')
    if diff_results is None:
        for k in fields:
            line.append(k)
    elif percent:
        for k in fields:
            line.append(k)
    else:
        for k in fields:
            line.append('o'+k)
        for k in fields:
            line.append('n'+k)
        for k in fields:
            line.append('d'+k)
    line.append('')
    lines.append(line)

    # entries
    if not summary:
        for name in names:
            r = table.get(name)
            if diff_results is not None:
                diff_r = diff_table.get(name)
                ratios = [
                    types[k].ratio(
                        getattr(r, k, None),
                        getattr(diff_r, k, None))
                    for k in fields]
                if not any(ratios) and not all_:
                    continue

            line = []
            line.append(name)
            if diff_results is None:
                for k in fields:
                    line.append(getattr(r, k).table()
                        if getattr(r, k, None) is not None
                        else types[k].none)
            elif percent:
                for k in fields:
                    line.append(getattr(r, k).diff_table()
                        if getattr(r, k, None) is not None
                        else types[k].diff_none)
            else:
                for k in fields:
                    line.append(getattr(diff_r, k).diff_table()
                        if getattr(diff_r, k, None) is not None
                        else types[k].diff_none)
                for k in fields:
                    line.append(getattr(r, k).diff_table()
                        if getattr(r, k, None) is not None
                        else types[k].diff_none)
                for k in fields:
                    line.append(types[k].diff_diff(
                        getattr(r, k, None),
                        getattr(diff_r, k, None)))
            if diff_results is None:
                line.append('')
            elif percent:
                line.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios))
            else:
                line.append(' (%s)' % ', '.join(
                    '+∞%' if t == +m.inf
                    else '-∞%' if t == -m.inf
                    else '%+.1f%%' % (100*t)
                    for t in ratios
                    if t)
                    if any(ratios) else '')
            lines.append(line)

    # total
    r = next(iter(fold(Result, results, by=[])), None)
    if diff_results is not None:
        diff_r = next(iter(fold(Result, diff_results, by=[])), None)
        ratios = [
            types[k].ratio(
                getattr(r, k, None),
                getattr(diff_r, k, None))
            for k in fields]

    line = []
    line.append('TOTAL')
    if diff_results is None:
        for k in fields:
            line.append(getattr(r, k).table()
                if getattr(r, k, None) is not None
                else types[k].none)
    elif percent:
        for k in fields:
            line.append(getattr(r, k).diff_table()
                if getattr(r, k, None) is not None
                else types[k].diff_none)
    else:
        for k in fields:
            line.append(getattr(diff_r, k).diff_table()
                if getattr(diff_r, k, None) is not None
                else types[k].diff_none)
        for k in fields:
            line.append(getattr(r, k).diff_table()
                if getattr(r, k, None) is not None
                else types[k].diff_none)
        for k in fields:
            line.append(types[k].diff_diff(
                getattr(r, k, None),
                getattr(diff_r, k, None)))
    if diff_results is None:
        line.append('')
    elif percent:
        line.append(' (%s)' % ', '.join(
            '+∞%' if t == +m.inf
            else '-∞%' if t == -m.inf
            else '%+.1f%%' % (100*t)
            for t in ratios))
    else:
        line.append(' (%s)' % ', '.join(
            '+∞%' if t == +m.inf
            else '-∞%' if t == -m.inf
            else '%+.1f%%' % (100*t)
            for t in ratios
            if t)
            if any(ratios) else '')
    lines.append(line)

    # find the best widths, note that column 0 contains the names and column -1
    # the ratios, so those are handled a bit differently
    widths = [
        ((max(it.chain([w], (len(l[i]) for l in lines)))+1+4-1)//4)*4-1
        for w, i in zip(
            it.chain([23], it.repeat(7)),
            range(len(lines[0])-1))]

    # print our table
    for line in lines:
        print('%-*s %s%s' % (
            widths[0], line[0],
            ' '.join('%*s' % (w, x)
                for w, x in zip(widths[1:], line[1:-1])),
            line[-1]))
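
# main CLI entry point: collect results (or load them with -u/--use), fold,
# sort, optionally write them out as CSV (-o) with file,function,data_size
# columns by default, and print the table, diffing against -d/--diff if given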
def main(obj_paths, *,
        by=None,
        fields=None,
        defines=None,
        sort=None,
        **args):
    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in obj_paths:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print("error: no .o files found in %r?" % obj_paths)
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        results = []
        with openio(args['use']) as f:
            reader = csv.DictReader(f, restval='')
            for r in reader:
                try:
                    results.append(DataResult(
                        **{k: r[k] for k in DataResult._by
                            if k in r and r[k].strip()},
                        **{k: r['data_'+k] for k in DataResult._fields
                            if 'data_'+k in r and r['data_'+k].strip()}))
                except TypeError:
                    pass

    # fold
    results = fold(DataResult, results, by=by, defines=defines)

    # sort, note that python's sort is stable
    results.sort()
    if sort:
        for k, reverse in reversed(sort):
            results.sort(key=lambda r: (getattr(r, k),)
                if getattr(r, k) is not None else (),
                reverse=reverse ^ (not k or k in DataResult._fields))

    # write results to CSV
    if args.get('output'):
        with openio(args['output'], 'w') as f:
            writer = csv.DictWriter(f,
                (by if by is not None else DataResult._by)
                + ['data_'+k for k in DataResult._fields])
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {k: getattr(r, k)
                        for k in (by if by is not None else DataResult._by)}
                    | {'data_'+k: getattr(r, k)
                        for k in DataResult._fields})

    # find previous results?
    if args.get('diff'):
        diff_results = []
        try:
            with openio(args['diff']) as f:
                reader = csv.DictReader(f, restval='')
                for r in reader:
                    try:
                        diff_results.append(DataResult(
                            **{k: r[k] for k in DataResult._by
                                if k in r and r[k].strip()},
                            **{k: r['data_'+k] for k in DataResult._fields
                                if 'data_'+k in r and r['data_'+k].strip()}))
                    except TypeError:
                        pass
        except FileNotFoundError:
            pass

        # fold
        diff_results = fold(DataResult, diff_results, by=by, defines=defines)

    # print table
    if not args.get('quiet'):
        table(DataResult, results,
            diff_results if args.get('diff') else None,
            by=by if by is not None else ['function'],
            fields=fields,
            sort=sort,
            **args)
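
# command-line interface; each option maps directly onto a keyword argument
# of main(), and options left unset are dropped so main()'s defaults apply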
if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.",
        allow_abbrev=False)
    parser.add_argument(
        'obj_paths',
        nargs='*',
        default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory "
            "or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument(
        '-q', '--quiet',
        action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument(
        '-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument(
        '-u', '--use',
        help="Don't parse anything, use this CSV file.")
    parser.add_argument(
        '-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument(
        '-a', '--all',
        action='store_true',
        help="Show all, not just the ones that changed.")
    parser.add_argument(
        '-p', '--percent',
        action='store_true',
        help="Only show percentage change, not a full diff.")
    parser.add_argument(
        '-b', '--by',
        action='append',
        choices=DataResult._by,
        help="Group by this field.")
    parser.add_argument(
        '-f', '--field',
        dest='fields',
        action='append',
        choices=DataResult._fields,
        help="Show this field.")
    parser.add_argument(
        '-D', '--define',
        dest='defines',
        action='append',
        type=lambda x: (lambda k, v: (k, set(v.split(','))))(*x.split('=', 1)),
        help="Only include results where this field is this value.")
    class AppendSort(argparse.Action):
        def __call__(self, parser, namespace, value, option):
            if namespace.sort is None:
                namespace.sort = []
            namespace.sort.append((value, True if option == '-S' else False))
    parser.add_argument(
        '-s', '--sort',
        action=AppendSort,
        help="Sort by this field.")
    parser.add_argument(
        '-S', '--reverse-sort',
        action=AppendSort,
        help="Sort by this field, but backwards.")
    parser.add_argument(
        '-Y', '--summary',
        action='store_true',
        help="Only show the total.")
    parser.add_argument(
        '-F', '--source',
        dest='sources',
        action='append',
        help="Only consider definitions in this file. Defaults to anything "
            "in the current directory.")
    parser.add_argument(
        '--everything',
        action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument(
        '--nm-types',
        default=NM_TYPES,
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %r." % NM_TYPES)
    parser.add_argument(
        '--nm-tool',
        type=lambda x: x.split(),
        default=NM_TOOL,
        help="Path to the nm tool to use. Defaults to %r." % NM_TOOL)
    parser.add_argument(
        '--objdump-tool',
        type=lambda x: x.split(),
        default=OBJDUMP_TOOL,
        help="Path to the objdump tool to use. Defaults to %r." % OBJDUMP_TOOL)
    sys.exit(main(**{k: v
        for k, v in vars(parser.parse_intermixed_args()).items()
        if v is not None}))