#!/usr/bin/env python
#
# esp-idf alternative to "size" to print ELF file sizes, also analyzes
# the linker map file to dump higher resolution details.
#
# Includes information which is not shown in "xtensa-esp32-elf-size",
# or easy to parse from "xtensa-esp32-elf-objdump" or raw map files.
#
# Copyright 2017-2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
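# Example invocations (illustrative only, not from the original header; the flags are the
# ones defined in main() below and the paths/archive names are placeholders):
#
#   idf_size.py build/app.map                              # print the summary table
#   idf_size.py --archives build/app.map                   # add per-archive sizes
#   idf_size.py --files --json build/app.map               # per-object-file sizes as JSON
#   idf_size.py --archive_details libdriver.a build/app.map  # per-symbol detail for one archive
#   idf_size.py --diff other/app.map build/app.map         # compare against another MAP file
#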

from __future__ import division, print_function, unicode_literals

import argparse
import collections
import json
import os.path
import re
import sys

from future.utils import iteritems

try:
    from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, TextIO, Tuple, Union

    Section = Dict[str, Union[str, int]]
    SectionDict = Dict[str, Section]
except ImportError:
    pass

try:
    basestring
except NameError:
    basestring = str


GLOBAL_JSON_INDENT = 4
GLOBAL_JSON_SEPARATORS = (',', ': ')


class MemRegions(object):
    # Regions determined by the chip target.
    # DIRAM is not added here. The DIRAM is indicated by the `secondary_addr` of each MemRegDef
    (DRAM_ID, IRAM_ID, CACHE_D_ID, CACHE_I_ID, RTC_FAST_D_ID, RTC_FAST_I_ID, RTC_SLOW_D_ID) = range(7)

    # The order of variables in the tuple is the same as in the soc_memory_layout.c files
    MemRegDef = collections.namedtuple('MemRegDef', ['primary_addr', 'length', 'type', 'secondary_addr'])

    class Region(object):
        # Helper class to store region information
        def __init__(self, start, length, region, section=None):
            # type: (MemRegions.Region, int, int, MemRegions.MemRegDef, Optional[str]) -> None
            self.start = start
            self.len = length
            self.region = region
            self.section = section

    @staticmethod
    def get_mem_regions(target):  # type: (str) -> List
        # The target specific memory structure is deduced from soc_memory_types defined in
        # $IDF_PATH/components/soc/**/soc_memory_layout.c files.
        MemRegDef = MemRegions.MemRegDef

        # Consecutive MemRegDefs of the same type are joined into one MemRegDef
        if target == 'esp32':
            return sorted([
                MemRegDef(0x3FFAE000, 17 * 0x2000 + 4 * 0x8000 + 4 * 0x4000, MemRegions.DRAM_ID, 0),
                MemRegDef(0x40070000, 2 * 0x8000 + 16 * 0x2000, MemRegions.IRAM_ID, 0),
                MemRegDef(0x400C2000, 0xB3E000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3F400000, 0x400000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x3F800000, 0x400000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x3FF80000, 0x2000, MemRegions.RTC_FAST_D_ID, 0x400C0000),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        elif target == 'esp32s2':
            return sorted([
                MemRegDef(0x3FFB2000, 3 * 0x2000 + 18 * 0x4000, MemRegions.DRAM_ID, 0x40022000),
                MemRegDef(0x3F000000, 0x400000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3F500000, 0xA80000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x40080000, 0x780000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x40070000, 0x2000, MemRegions.RTC_FAST_D_ID, 0x3FF9E000),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        elif target == 'esp32s3':
            return sorted([
                # IRAM, usually used by Icache.
                #
                # The segment from the ld file lies across the boundary of the line below: it is
                # partly IRAM and partly D/IRAM. Here's a workaround for this kind of segment: we
                # only list the DIRAM part. If a segment from the ld file falls in any part of a
                # DIRAM def region, we treat the whole segment as D/IRAM.
                #
                # Uncomment the following line if sections of the same segment can be
                # distinguished, or the ld file can give a separate segment for the region.
                #
                MemRegDef(0x40370000, 0x8000, MemRegions.IRAM_ID, 0),
                MemRegDef(0x3FC88000, 0x8000 + 6 * 0x10000, MemRegions.DRAM_ID, 0x40378000),
                MemRegDef(0x3FCF0000, 0x10000, MemRegions.DRAM_ID, 0),
                MemRegDef(0x42000000, 0x2000000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3C000000, 0x2000000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x3ff80000, 0x2000, MemRegions.RTC_FAST_D_ID, 0x600FE000),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        elif target in ['esp32c3']:
            return sorted([
                MemRegDef(0x3FC80000, 0x60000, MemRegions.DRAM_ID, 0x40380000),
                MemRegDef(0x4037C000, 0x4000, MemRegions.IRAM_ID, 0),
                MemRegDef(0x42000000, 0x800000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3C000000, 0x800000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        else:
            raise RuntimeError('Target not detected.')

    def __init__(self, target):  # type: (MemRegions, str) -> None
        self.chip_mem_regions = self.get_mem_regions(target)
        if not self.chip_mem_regions:
            raise RuntimeError('Target {} is not implemented in idf_size'.format(target))

    def _get_first_region(self, start, length):
        # type: (int, int) -> Tuple[Union[MemRegions.MemRegDef, None], int]
        for region in self.chip_mem_regions:  # type: ignore
            if region.primary_addr <= start < region.primary_addr + region.length:
                # Clamp the returned length to the end of the region so that a segment which
                # spans several regions can be split up by _get_regions()
                return (region, min(length, region.primary_addr + region.length - start))
            if region.secondary_addr and region.secondary_addr <= start < region.secondary_addr + region.length:
                return (region, min(length, region.secondary_addr + region.length - start))
        print('WARNING: Given section not found in any memory region.')
        print('Check whether the LD file is compatible with the definitions in get_mem_regions in idf_size.py')
        return (None, length)

    def _get_regions(self, start, length, name=None):  # type: (int, int, Optional[str]) -> List
        ret = []
        while length > 0:
            (region, cur_len) = self._get_first_region(start, length)
            if region is None:
                # skip the part that does not belong to any known memory region
                length -= cur_len
                start += cur_len
                continue
            ret.append(MemRegions.Region(start, cur_len, region, name))
            length -= cur_len
            start += cur_len
        return ret

    def fit_segments_into_regions(self, segments):  # type: (MemRegions, Dict) -> List
        region_list = []
        for segment in segments.values():
            sorted_segments = self._get_regions(segment['origin'], segment['length'])
            region_list.extend(sorted_segments)
        return region_list

    def fit_sections_into_regions(self, sections):  # type: (MemRegions, Dict) -> List
        region_list = []
        for section in sections.values():
            sorted_sections = self._get_regions(section['address'], section['size'], section['name'])
            region_list.extend(sorted_sections)
        return region_list
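
# Illustrative sketch (not part of the original script): how MemRegions is meant to be used.
# A segment that spans more than one defined region is split by _get_regions(), so each
# resulting Region carries only the part inside a single MemRegDef. The segment dict below
# uses made-up numbers; real input comes from load_segments().
#
#   regions = MemRegions('esp32')
#   layout = regions.fit_segments_into_regions(
#       {'dram0_0_seg': {'name': 'dram0_0_seg', 'origin': 0x3FFB0000, 'length': 0x2C200}})
#   # -> list of MemRegions.Region objects, each with .start, .len, .region and .section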


class LinkingSections(object):

    _section_type_dict = {k: re.compile(v) for k, v in {
        'text': r'.*\.text',
        'data': r'.*\.data',
        'bss': r'.*\.bss',
        'rodata': r'.*\.rodata',
        'noinit': r'.*noinit',
        'vectors': r'.*\.vectors',
        'flash': r'.*flash.*',
    }.items()}

    @staticmethod
    def in_section(section, section_name_or_list):  # type: (str, Union[str, Iterable]) -> bool
        if isinstance(section_name_or_list, basestring):
            section_name_or_list = [section_name_or_list]

        for section_name in section_name_or_list:
            if LinkingSections._section_type_dict[section_name].match(section):
                return True
        return False

    @staticmethod
    def filter_sections(sections):  # type: (Dict) -> Dict
        return {k: v for k, v in sections.items()
                if LinkingSections.in_section(k, LinkingSections._section_type_dict.keys())}

    @staticmethod
    def get_display_name_order(section_name_list):  # type: (List[str]) -> Tuple[List[str], List[str]]
        '''
        Return two lists in the suggested display order.
        The first list is the reordered section_name_list; the second list holds the suggested
        display names corresponding to the first list.
        '''
        def get_name_score(name):  # type: (str) -> int
            score_dict = {
                '.dram': 30,
                '.iram': 20,
                '.flash': 10,
                'ram_st_total': -10,
                'flash_total': -20,
                '.data': 6,
                '.bss': 5,
                '.text': 4,
                '.rodata': 3,
                '.vectors': 2,
                '.noinit': 1,
                '.other': -1,
            }
            return sum([score if section in name else 0
                        for section, score in score_dict.items()])

        score_list = [get_name_score(section) for section in section_name_list]
        ordered_name_list = sorted(section_name_list, key=lambda x: score_list[section_name_list.index(x)], reverse=True)
        display_name_list = ordered_name_list[:]

        memory_name = ''
        display_name_list = sorted(display_name_list)
        ordered_name_list = sorted(ordered_name_list)
        ordered_name_list = check_is_dict_sort(ordered_name_list)
        for i, section in enumerate(ordered_name_list):
            if memory_name and section.startswith(memory_name):
                # If the section has the same memory type as the previous one, use the shorter name
                display_name_list[i] = section.replace(memory_name, '& ')
                continue

            memory_name = ''
            split_name = section.split('.')
            if len(split_name) > 1:
                # If the section has a memory type, update the type and try to display the type properly
                assert len(split_name) == 3 and split_name[0] == '', 'Unexpected section name'
                memory_name = '.iram' if 'iram' in split_name[1] else\
                    '.dram' if 'dram' in split_name[1] else\
                    '.flash' if 'flash' in split_name[1] else\
                    '.' + split_name[1]
                display_name_list[i] = 'DRAM .' + split_name[2] if 'dram' in split_name[1] else\
                    'IRAM' + split_name[1].replace('iram', '') + ' .' + split_name[2] if 'iram' in split_name[1] else\
                    'Flash .' + split_name[2] if 'flash' in split_name[1] else\
                    section
                continue

            # Otherwise use its original name
            display_name_list[i] = section

        return ordered_name_list, display_name_list
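
# Illustrative sketch (not part of the original script): the section classification above is
# purely regex based, e.g.
#
#   LinkingSections.in_section('.dram0.data', 'data')            # True  (matches r'.*\.data')
#   LinkingSections.in_section('.flash.text', ['bss', 'text'])   # True  (matches r'.*\.text')
#   LinkingSections.in_section('.iram0.vectors', 'data')         # False
#
# get_display_name_order() then turns names such as '.dram0.data' into 'DRAM .data' for the
# column headers of the detailed reports.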


def scan_to_header(f, header_line):  # type: (Iterable, str) -> None
    """ Scan forward in a file until you reach 'header_line', then return """
    for line in f:
        if line.strip() == header_line:
            return
    raise RuntimeError("Didn't find line '%s' in file" % header_line)


def format_json(json_object):  # type: (Dict) -> str
    return json.dumps(json_object,
                      allow_nan=True,
                      indent=GLOBAL_JSON_INDENT,
                      separators=GLOBAL_JSON_SEPARATORS) + os.linesep


def load_map_data(map_file):  # type: (TextIO) -> Tuple[str, Dict, Dict]
    segments = load_segments(map_file)
    detected_chip = detect_target_chip(map_file)
    sections = load_sections(map_file)

    # Exclude the .dummy section, which usually marks a region shared between the I/D buses
    dummy_keys = [key for key in sections if key.endswith('.dummy')]
    if dummy_keys:
        sections.pop(*dummy_keys)

    return detected_chip, segments, sections


def load_segments(map_file):  # type: (TextIO) -> Dict
    """ Memory Configuration section is the total size of each segment """
    result = {}  # type: Dict[Any, Dict]
    scan_to_header(map_file, 'Memory Configuration')
    RE_MEMORY_SECTION = re.compile(r'(?P<name>[^ ]+) +0x(?P<origin>[\da-f]+) +0x(?P<length>[\da-f]+)')

    for line in map_file:
        m = RE_MEMORY_SECTION.match(line)
        if m is None:
            if len(result) == 0:
                continue  # whitespace or a header, before the content we want
            else:
                return result  # we're at the end of the Memory Configuration
        segment = {
            'name': m.group('name'),
            'origin': int(m.group('origin'), 16),
            'length': int(m.group('length'), 16),
        }
        if segment['name'] != '*default*':
            result[segment['name']] = segment
    raise RuntimeError('End of file while scanning memory configuration?')
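
# Illustrative sketch (not part of the original script): the "Memory Configuration" block of a
# GNU ld MAP file that load_segments() parses looks roughly like this (values are made up):
#
#   Memory Configuration
#
#   Name             Origin             Length             Attributes
#   iram0_0_seg      0x0000000040080000 0x0000000000020000 xr
#   dram0_0_seg      0x000000003ffb0000 0x000000000002c200 rw
#   *default*        0x0000000000000000 0xffffffffffffffff
#
# which would yield {'iram0_0_seg': {'name': 'iram0_0_seg', 'origin': 0x40080000, 'length': 0x20000}, ...}
# with the '*default*' entry dropped.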


def detect_target_chip(map_file):  # type: (Iterable) -> str
    ''' Detect the target chip based on the target archive name in the linker script part of the MAP file '''
    scan_to_header(map_file, 'Linker script and memory map')

    RE_TARGET = re.compile(r'project_elf_src_(.*)\.c.obj')
    # For backward compatibility with make-based builds
    RE_TARGET_MAKE = re.compile(r'^LOAD .*?/xtensa-([^-]+)-elf/')

    for line in map_file:
        m = RE_TARGET.search(line)
        if m:
            return m.group(1)

        m = RE_TARGET_MAKE.search(line)
        if m:
            return m.group(1)

        line = line.strip()
        # There could be empty line(s) between the "Linker script and memory map" header and the "LOAD lines".
        # Therefore, the line is stripped and its length is checked as well. The "LOAD lines" are between
        # START GROUP and END GROUP for older MAP files.
        if not line.startswith(('LOAD', 'START GROUP', 'END GROUP')) and len(line) > 0:
            # This break is a failsafe to not process anything load_sections() might want to analyze.
            break

    raise RuntimeError('Target not detected')


def load_sections(map_file):  # type: (TextIO) -> Dict
    """ Load section size information from the MAP file.

    Returns a dict of 'sections', where each key is a section name and the value
    is a dict with details about this section, including a "sources" key which holds a list of source file line
    information for each symbol linked into the section.

    There are two kinds of lines:
        - symbol_only: [optional space]<sym_name>
        - full line: [optional space][optional sym_name] <address> <size> [optional file_info]
    If <sym_name> doesn't exist, use the symbol name from the symbol_only line above.
    If the line is the start of a section, <file> should be empty; otherwise, if the line is a source
    line, <file> must exist or <sym_name> must be *fill*. This rule is used to tell sections apart from
    source lines.
    """

    # Check for lines which only contain the sym name (and rest is on following lines)
    RE_SYMBOL_ONLY_LINE = re.compile(r'^\s*(?P<sym_name>\S*)$')

    # Fast check to see if a line is a potential source line before running the slower full regex against it
    RE_PRE_FILTER = re.compile(r'.*0x[\da-f]+\s*0x[\da-f]+.*')

    # source file line, ie
    # 0x0000000040080400 0xa4 /home/gus/esp/32/idf/examples/get-started/hello_world/build/esp32/libesp32.a(cpu_start.o)
    # cmake build system links some object files directly, not part of any archive, so make that part optional
    # .xtensa.info 0x0000000000000000 0x38 CMakeFiles/hello-world.elf.dir/project_elf_src.c.obj
    # *fill* 0x00000000400e2967 0x1
    RE_FULL_LINE = re.compile(r'\s*(?P<sym_name>\S*) +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+)\s*(?P<file>.*)$')

    # Extract archive and object_file from the file_info field
    RE_FILE = re.compile(r'((?P<archive>[^ ]+\.a)?\(?(?P<object_file>[^ ]+\.(o|obj))\)?)')

    def dump_src_line(src):  # type: (Dict) -> str
        return '%s(%s) addr: 0x%08x, size: 0x%x+%d' % (src['sym_name'], src['file'], src['address'], src['size'], src['fill'])

    sections = {}  # type: Dict[Any, Dict]
    section = {}  # type: Dict[str, Any]
    sym_backup = ''
    for line in map_file:
        if line.strip() == 'Cross Reference Table':
            # Stop processing lines because we are at the next section in the map file
            break

        m = RE_SYMBOL_ONLY_LINE.match(line)
        if m:
            # In some cases the section name appears on the previous line; back it up here
            sym_backup = m.group('sym_name')
            continue

        if not RE_PRE_FILTER.match(line):
            # line does not match our quick check, so skip to next line
            continue

        m = RE_FULL_LINE.match(line)
        if not m:
            assert not sym_backup, 'Symbol only line must be followed by a line with address and size'
            continue

        name = m.group('sym_name') if m.group('sym_name') else sym_backup
        sym_backup = ''

        is_section = not m.group('file') and name != '*fill*'
        if is_section:
            # section
            section = {
                'name': name,
                'address': int(m.group('address'), 16),
                'size': int(m.group('size'), 16),
                'sources': [],
            }
            sections[name] = section
        else:
            # symbol
            if not section:
                continue

            # Some source lines in the rodata section don't actually take any space but still have a size.
            # Make the size of those lines zero.
            srcs = section['sources']  # type: List[Dict]
            if srcs:
                last_src = srcs[-1]
                if last_src['size'] > 0 and last_src['address'] == int(m.group('address'), 16):
                    if '.comment' != section['name'] and '.debug_str' != section['name'] and\
                            'rodata' not in last_src['sym_name']:
                        raise RuntimeError('Due to overlap with following lines, size of the line set to 0:\n %s' % dump_src_line(last_src))
                    last_src['size'] = 0

            # Count the padding size into the last valid (size > 0) source in the section
            if name == '*fill*':
                for src in reversed(srcs):
                    if src['size'] > 0:
                        src['fill'] += int(m.group('size'), 16)
                        break
                continue

            # Extract archive and file information
            n = RE_FILE.match(m.group('file'))
            assert n

            archive = n.group('archive')
            if archive is None:
                # optional named group "archive" was not matched, so assign a value to it
                archive = '(exe)'

            file = n.group('object_file')

            assert name
            source = {
                'size': int(m.group('size'), 16),
                'address': int(m.group('address'), 16),
                'archive': os.path.basename(archive),
                'object_file': os.path.basename(file),
                'sym_name': name,
                'fill': 0,  # padding size after the source
            }
            source['file'] = '%s:%s' % (source['archive'], source['object_file'])

            section['sources'].append(source)  # type: ignore

    # Validate the map file
    for section in sections.values():
        src_curr = {}  # type: Dict[str, Any]
        for src in section['sources']:
            if src['size'] == 0:
                continue

            expected_addr = src_curr['address'] + src_curr['size'] + src_curr['fill'] if src_curr else section['sources'][0]['address']
            if src['address'] != expected_addr:
                print('Warning: source line overlap:')
                print(' ' + dump_src_line(src_curr))
                print(' ' + dump_src_line(src))

            src_curr = src

    return sections
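
# Illustrative sketch (not part of the original script): shape of the dict returned by
# load_sections(), with made-up numbers and symbol names:
#
#   {
#       '.dram0.data': {
#           'name': '.dram0.data',
#           'address': 0x3ffb0000,
#           'size': 0x2100,
#           'sources': [
#               {'size': 0x30, 'address': 0x3ffb0000, 'archive': 'libesp32.a',
#                'object_file': 'cpu_start.o', 'sym_name': '.data.port_interruptNesting',
#                'fill': 0, 'file': 'libesp32.a:cpu_start.o'},
#               ...
#           ],
#       },
#       ...
#   }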


def check_target(target, map_file):  # type: (str, TextIO) -> None
    if target is None:
        raise RuntimeError('The target chip cannot be detected for {}. '
                           'Please report the issue.'.format(map_file.name))


def main():  # type: () -> None
    parser = argparse.ArgumentParser(description='idf_size - a tool to print size information from an IDF MAP file')

    parser.add_argument(
        '--json',
        help='Output results as JSON',
        action='store_true')

    parser.add_argument(
        'map_file', help='MAP file produced by linker',
        type=argparse.FileType('r'))

    parser.add_argument(
        '--archives', help='Print per-archive sizes', action='store_true')

    parser.add_argument(
        '--archive_details', help='Print detailed symbols per archive')

    parser.add_argument(
        '--files', help='Print per-file sizes', action='store_true')

    parser.add_argument(
        '--target', help='Set target chip', default=None)

    parser.add_argument(
        '--diff', help='Show the differences in comparison with another MAP file',
        metavar='ANOTHER_MAP_FILE',
        default=None,
        dest='another_map_file')

    parser.add_argument(
        '-o',
        '--output-file',
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='Print output to the specified file instead of stdout')

    args = parser.parse_args()

    detected_target, segments, sections = load_map_data(args.map_file)
    args.map_file.close()
    check_target(detected_target, args.map_file)

    if args.another_map_file:
        with open(args.another_map_file, 'r') as f:
            detected_target_diff, segments_diff, sections_diff = load_map_data(f)
            check_target(detected_target_diff, f)
            if detected_target_diff != detected_target:
                print('WARNING: The target of the reference and other MAP files is {} and {}, respectively.'
                      ''.format(detected_target, detected_target_diff))
    else:
        segments_diff, sections_diff, detected_target_diff = {}, {}, ''

    if args.target is not None:
        if args.target != detected_target or (detected_target_diff and args.target != detected_target_diff):
            print('WARNING: The detected chip target was overwritten to {} by the command line argument!'.format(args.target))
        detected_target = args.target
        detected_target_diff = args.target

    output = ''

    if not args.json or not (args.archives or args.files or args.archive_details):
        output += get_summary(args.map_file.name, segments, sections, detected_target,
                              args.json,
                              args.another_map_file, segments_diff, sections_diff, detected_target_diff, not (args.archives or args.files))

    if args.archives:
        output += get_detailed_sizes(sections, 'archive', 'Archive File', args.json, sections_diff)
    if args.files:
        output += get_detailed_sizes(sections, 'file', 'Object File', args.json, sections_diff)
    if args.archive_details:
        output += get_archive_symbols(sections, args.archive_details, args.json, sections_diff)

    args.output_file.write(output)
    args.output_file.close()


class StructureForSummary(object):
    used_dram_data, used_dram_bss, used_dram_rodata, used_dram_other, used_dram, dram_total, dram_remain = (0, ) * 7
    used_dram_ratio = 0.
    used_iram_vectors, used_iram_text, used_iram_other, used_iram, iram_total, iram_remain = (0, ) * 6
    used_iram_ratio = 0.
    used_diram_data, used_diram_bss, used_diram_text, used_diram_vectors, used_diram_rodata, used_diram_other, diram_total, used_diram, diram_remain = (0, ) * 9
    used_diram_ratio = 0.
    used_flash_text, used_flash_rodata, used_flash_other, used_flash, total_size = (0, ) * 5

    def __sub__(self, rhs):  # type: (StructureForSummary) -> StructureForSummary
        assert isinstance(rhs, StructureForSummary)
        ret = self
        for key in StructureForSummary.get_required_items():
            setattr(ret, key, getattr(self, key) - getattr(rhs, key))
        return ret

    def get_dram_overflowed(self):  # type: (StructureForSummary) -> bool
        return self.used_dram_ratio > 1.0

    def get_iram_overflowed(self):  # type: (StructureForSummary) -> bool
        return self.used_iram_ratio > 1.0

    def get_diram_overflowed(self):  # type: (StructureForSummary) -> bool
        return self.used_diram_ratio > 1.0

    @classmethod
    def get_required_items(cls):  # type: (Any) -> List
        whole_list = list(filter(lambda x: not (x.startswith('__') or x.endswith('__') or callable(getattr(cls, x))), dir(cls)))
        return whole_list

    @staticmethod
    def get(segments, sections):  # type: (List, List) -> StructureForSummary

        def get_size(sections):  # type: (Iterable) -> int
            return sum([x.len for x in sections])

        def in_diram(x):  # type: (MemRegions.Region) -> bool
            return x.region.type in (MemRegions.DRAM_ID, MemRegions.IRAM_ID) and x.region.secondary_addr > 0

        def in_dram(x):  # type: (MemRegions.Region) -> bool
            return x.region.type == MemRegions.DRAM_ID and x.region.secondary_addr == 0  # type: ignore

        def in_iram(x):  # type: (MemRegions.Region) -> bool
            return x.region.type == MemRegions.IRAM_ID and x.region.secondary_addr == 0  # type: ignore

        r = StructureForSummary()

        diram_filter = filter(in_diram, segments)
        r.diram_total = int(get_size(diram_filter) / 2)

        dram_filter = filter(in_dram, segments)
        r.dram_total = get_size(dram_filter)
        iram_filter = filter(in_iram, segments)
        r.iram_total = get_size(iram_filter)

        if r.diram_total == 0:
            r.diram_total = r.dram_total + r.iram_total

        def filter_in_section(sections, section_to_check):  # type: (Iterable[MemRegions.Region], str) -> List[MemRegions.Region]
            return list(filter(lambda x: LinkingSections.in_section(x.section, section_to_check), sections))  # type: ignore

        dram_sections = list(filter(in_dram, sections))
        iram_sections = list(filter(in_iram, sections))
        diram_sections = list(filter(in_diram, sections))
        if not diram_sections:
            diram_sections = dram_sections + iram_sections
        flash_sections = filter_in_section(sections, 'flash')

        dram_data_list = filter_in_section(dram_sections, 'data')
        dram_bss_list = filter_in_section(dram_sections, 'bss')
        dram_rodata_list = filter_in_section(dram_sections, 'rodata')
        dram_other_list = [x for x in dram_sections if x not in dram_data_list + dram_bss_list + dram_rodata_list]

        iram_vectors_list = filter_in_section(iram_sections, 'vectors')
        iram_text_list = filter_in_section(iram_sections, 'text')
        iram_other_list = [x for x in iram_sections if x not in iram_vectors_list + iram_text_list]

        diram_vectors_list = filter_in_section(diram_sections, 'vectors')
        diram_data_list = filter_in_section(diram_sections, 'data')
        diram_bss_list = filter_in_section(diram_sections, 'bss')
        diram_text_list = filter_in_section(diram_sections, 'text')
        diram_rodata_list = filter_in_section(diram_sections, 'rodata')
        diram_other_list = [x for x in diram_sections if x not in diram_data_list + diram_bss_list + diram_text_list + diram_vectors_list + diram_rodata_list]

        flash_text_list = filter_in_section(flash_sections, 'text')
        flash_rodata_list = filter_in_section(flash_sections, 'rodata')
        flash_other_list = [x for x in flash_sections if x not in flash_text_list + flash_rodata_list]

        r.used_dram_data = get_size(dram_data_list)
        r.used_dram_bss = get_size(dram_bss_list)
        r.used_dram_rodata = get_size(dram_rodata_list)
        r.used_dram_other = get_size(dram_other_list)
        r.used_dram = r.used_dram_data + r.used_dram_bss + r.used_dram_other + r.used_dram_rodata
        try:
            r.used_dram_ratio = r.used_dram / r.dram_total
        except ZeroDivisionError:
            r.used_dram_ratio = float('nan') if r.used_dram != 0 else 0
        r.dram_remain = r.dram_total - r.used_dram

        r.used_iram_vectors = get_size((iram_vectors_list))
        r.used_iram_text = get_size((iram_text_list))
        r.used_iram_other = get_size((iram_other_list))
        r.used_iram = r.used_iram_vectors + r.used_iram_text + r.used_iram_other
        try:
            r.used_iram_ratio = r.used_iram / r.iram_total
        except ZeroDivisionError:
            r.used_iram_ratio = float('nan') if r.used_iram != 0 else 0
        r.iram_remain = r.iram_total - r.used_iram

        r.used_diram_data = get_size(diram_data_list)
        r.used_diram_bss = get_size(diram_bss_list)
        r.used_diram_text = get_size(diram_text_list)
        r.used_diram_vectors = get_size(diram_vectors_list)
        r.used_diram_rodata = get_size(diram_rodata_list)
        r.used_diram_other = get_size(diram_other_list)
        r.used_diram = r.used_diram_data + r.used_diram_bss + r.used_diram_text + r.used_diram_vectors + r.used_diram_other + r.used_diram_rodata
        try:
            r.used_diram_ratio = r.used_diram / r.diram_total
        except ZeroDivisionError:
            r.used_diram_ratio = float('nan') if r.used_diram != 0 else 0
        r.diram_remain = r.diram_total - r.used_diram

        r.used_flash_text = get_size(flash_text_list)
        r.used_flash_rodata = get_size(flash_rodata_list)
        r.used_flash_other = get_size(flash_other_list)
        r.used_flash = r.used_flash_text + r.used_flash_rodata + r.used_flash_other

        # The used DRAM BSS is counted into the "Used static DRAM" but not into the "Total image size"
        r.total_size = r.used_dram - r.used_dram_bss + r.used_iram + r.used_diram - r.used_diram_bss + r.used_flash
        return r

    def get_json_dic(self):  # type: (StructureForSummary) -> collections.OrderedDict
        ret = collections.OrderedDict([
            ('dram_data', self.used_dram_data),
            ('dram_bss', self.used_dram_bss),
            ('dram_rodata', self.used_dram_rodata),
            ('dram_other', self.used_dram_other),
            ('used_dram', self.used_dram),
            ('dram_total', self.dram_total),
            ('used_dram_ratio', self.used_dram_ratio if self.used_dram_ratio is not float('nan') else 0),
            ('dram_remain', self.dram_remain),

            ('iram_vectors', self.used_iram_vectors),
            ('iram_text', self.used_iram_text),
            ('iram_other', self.used_iram_other),
            ('used_iram', self.used_iram),
            ('iram_total', self.iram_total),
            ('used_iram_ratio', self.used_iram_ratio),
            ('iram_remain', self.iram_remain),

            ('diram_data', self.used_diram_data),
            ('diram_bss', self.used_diram_bss),
            ('diram_text', self.used_diram_text),
            ('diram_vectors', self.used_diram_vectors),
            ('diram_rodata', self.used_diram_rodata),
            ('diram_other', self.used_diram_other),
            ('diram_total', self.diram_total),
            ('used_diram', self.used_diram),
            ('used_diram_ratio', self.used_diram_ratio),
            ('diram_remain', self.diram_remain),

            ('flash_code', self.used_flash_text),
            ('flash_rodata', self.used_flash_rodata),
            ('flash_other', self.used_flash_other),
            ('used_flash_non_ram', self.used_flash),  # text/data in D/I RAM not included
            ('total_size', self.total_size)  # bss not included
        ])
        assert len(ret) == len(StructureForSummary.get_required_items())
        return ret
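
# Illustrative arithmetic (not part of the original script): with made-up numbers, the
# "Total image size" computed in StructureForSummary.get() excludes zero-initialised data:
#
#   used_dram = 11200, used_dram_bss = 3100, used_iram = 38400,
#   used_diram = 0, used_diram_bss = 0, used_flash = 123456
#   total_size = 11200 - 3100 + 38400 + 0 - 0 + 123456 = 169956
#
# i.e. .bss needs RAM at run time but contributes nothing to the flashed binary.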


def get_structure_for_target(segments, sections, target):  # type: (Dict, Dict, str) -> StructureForSummary
    """
    Return StructureForSummary for a specific target
    """
    mem_regions = MemRegions(target)
    segment_layout = mem_regions.fit_segments_into_regions(segments)
    section_layout = mem_regions.fit_sections_into_regions(LinkingSections.filter_sections(sections))
    current = StructureForSummary.get(segment_layout, section_layout)
    return current


def get_summary(path, segments, sections, target,
                as_json=False,
                path_diff='', segments_diff=None, sections_diff=None, target_diff='', print_suggestions=True):
    # type: (str, Dict, Dict, str, bool, str, Optional[Dict], Optional[Dict], str, bool) -> str
    if segments_diff is None:
        segments_diff = {}
    if sections_diff is None:
        sections_diff = {}

    current = get_structure_for_target(segments, sections, target)

    if path_diff:
        diff_en = True
        mem_regions_diff = MemRegions(target_diff)
        segment_layout_diff = mem_regions_diff.fit_segments_into_regions(segments_diff)
        section_layout_diff = mem_regions_diff.fit_sections_into_regions(LinkingSections.filter_sections(sections_diff))
        reference = StructureForSummary.get(segment_layout_diff, section_layout_diff)
    else:
        diff_en = False
        reference = StructureForSummary()

    if as_json:
        current_json_dic = current.get_json_dic()
        if diff_en:
            reference_json_dic = reference.get_json_dic()
            diff_json_dic = collections.OrderedDict([
                (k, v - reference_json_dic[k]) for k, v in iteritems(current_json_dic)])
            output = format_json(collections.OrderedDict([('current', current_json_dic),
                                                          ('reference', reference_json_dic),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current_json_dic)
    else:
        class LineDef(object):
            title = ''
            name = ''

            def __init__(self, title, name):  # type: (LineDef, str, str) -> None
                self.title = title
                self.name = name

            def format_line(self):  # type: (LineDef) -> Tuple[str, str, str, str]
                return (self.title + ': {%s:>7} bytes' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        class HeadLineDef(LineDef):
            remain = ''
            ratio = ''
            total = ''
            warning_message = ''

            def __init__(self, title, name, remain, ratio, total, warning_message):  # type: (HeadLineDef, str, str, str, str, str, str) -> None
                super(HeadLineDef, self).__init__(title, name)
                self.remain = remain
                self.ratio = ratio
                self.total = total
                self.warning_message = warning_message

            def format_line(self):  # type: (HeadLineDef) -> Tuple[str, str, str, str]
                return ('%s: {%s:>7} bytes ({%s:>7} remain, {%s:.1%%} used)%s' % (self.title, self.name, self.remain, self.ratio, self.warning_message),
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '({%s:>+7} remain, {%s:>+7} total)' % (self.remain, self.total))

        class TotalLineDef(LineDef):

            def format_line(self):  # type: (TotalLineDef) -> Tuple[str, str, str, str]
                return (self.title + ': {%s:>7} bytes (.bin may be padded larger)' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        warning_message = ' Overflow detected!' + (' You can run idf.py size-files for more information.' if print_suggestions else '')

        format_list = [
            HeadLineDef('Used static DRAM', 'used_dram', remain='dram_remain', ratio='used_dram_ratio', total='dram_total',
                        warning_message=warning_message if current.get_dram_overflowed() else ''),
            LineDef(' .data size', 'used_dram_data'),
            LineDef(' .bss size', 'used_dram_bss'),
            LineDef(' .rodata size', 'used_dram_rodata'),
            LineDef(' DRAM other size', 'used_dram_other'),

            HeadLineDef('Used static IRAM', 'used_iram', remain='iram_remain', ratio='used_iram_ratio', total='iram_total',
                        warning_message=warning_message if current.get_iram_overflowed() else ''),
            LineDef(' .text size', 'used_iram_text'),
            LineDef(' .vectors size', 'used_iram_vectors'),

            HeadLineDef('Used stat D/IRAM', 'used_diram', remain='diram_remain', ratio='used_diram_ratio', total='diram_total',
                        warning_message=warning_message if current.get_diram_overflowed() else ''),
            LineDef(' .data size', 'used_diram_data'),
            LineDef(' .bss size', 'used_diram_bss'),
            LineDef(' .text size', 'used_diram_text'),
            LineDef(' .vectors size', 'used_diram_vectors'),
            LineDef(' .rodata size', 'used_diram_rodata'),
            LineDef(' other ', 'used_diram_other'),

            LineDef('Used Flash size ', 'used_flash'),
            LineDef(' .text ', 'used_flash_text'),
            LineDef(' .rodata ', 'used_flash_rodata'),

            TotalLineDef('Total image size', 'total_size')
        ]

        def convert_to_fmt_dict(summary, suffix=''):  # type: (StructureForSummary, str) -> Dict
            required_items = StructureForSummary.get_required_items()
            return dict([(key + suffix, getattr(summary, key)) for key in required_items])

        f_dic1 = convert_to_fmt_dict(current)
        if diff_en:
            f_dic2 = convert_to_fmt_dict(reference)
            f_dic_diff = convert_to_fmt_dict(current - reference)

        lf = '{:60}{:>15}{:>15} {}'  # Width for a, b, c, d columns

        def print_in_columns(a, b='', c='', d=''):
            # type: (str, Optional[str], Optional[str], Optional[str]) -> str
            return lf.format(a, b, c, d).rstrip() + os.linesep

        output = ''
        if diff_en:
            output += print_in_columns('<CURRENT> MAP file: ' + path)
            output += print_in_columns('<REFERENCE> MAP file: ' + path_diff)
            output += print_in_columns('Difference is counted as <CURRENT> - <REFERENCE>, ',
                                       'i.e. a positive number means that <CURRENT> is larger.')
            output += print_in_columns('Total sizes of <CURRENT>:', '<REFERENCE>', 'Difference', '')

            for line in format_list:
                if getattr(current, line.name) > 0 or getattr(reference, line.name) > 0 or line.name == 'total_size':
                    a, b, c, d = line.format_line()
                    output += print_in_columns(
                        a.format(**f_dic1),
                        b.format(**f_dic2),
                        c.format(**f_dic_diff) if not c.format(**f_dic_diff).startswith('+0') else '',
                        d.format(**f_dic_diff))
        else:
            output += print_in_columns('Total sizes:')

            for line in format_list:
                if getattr(current, line.name) > 0 or line.name == 'total_size':
                    a, b, c, d = line.format_line()
                    output += print_in_columns(a.format(**f_dic1))

    return output


def check_is_dict_sort(non_sort_list):  # type: (List) -> List
    '''
    Sort while keeping the order: data, bss, other, iram, diram, ram_st_total, flash_text, flash_rodata, flash_total
    '''
    start_of_other = 0
    props_sort = []  # type: List
    props_elem = ['.data', '.bss', 'other', 'iram', 'diram', 'ram_st_total', 'flash.text', 'flash.rodata', 'flash', 'flash_total']
    for i in props_elem:
        for j in non_sort_list:
            if i == 'other':
                # remember where the 'other' entries will start
                start_of_other = len(props_sort)
            elif i in j and j not in props_sort:
                props_sort.append(j)
    for j in non_sort_list:
        if j not in props_sort:
            # insert all items that belong to 'other' at the remembered position
            props_sort.insert(start_of_other, j)
    return props_sort
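
# Illustrative sketch (not part of the original script): check_is_dict_sort() keeps related
# columns together instead of using plain alphabetical order, e.g.
#
#   check_is_dict_sort(['.dram0.bss', '.flash.text', '.dram0.data', 'flash_total', 'ram_st_total'])
#   # -> ['.dram0.data', '.dram0.bss', 'ram_st_total', '.flash.text', 'flash_total']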


class StructureForDetailedSizes(object):

    @staticmethod
    def sizes_by_key(sections, key, include_padding=False):  # type: (SectionDict, str, Optional[bool]) -> Dict[str, Dict[str, int]]
        """ Takes a dict of sections (from load_sections) and returns
        a dict keyed by 'key' with aggregate output size information.

        Key can be either "archive" (for per-archive data) or "file" (for per-file data) in the result.
        """
        result = {}  # type: Dict[str, Dict[str, int]]
        for _, section in iteritems(sections):
            for s in section['sources']:
                if not s[key] in result:
                    result[s[key]] = {}
                archive = result[s[key]]
                if not section['name'] in archive:
                    archive[section['name']] = 0
                archive[section['name']] += s['size']
                if include_padding:
                    archive[section['name']] += s['fill']
        return result

    @staticmethod
    def get(sections, by_key):  # type: (SectionDict, str) -> collections.OrderedDict
        # Get the detailed structure before using the filter to remove undesired sections,
        # to show entries without desired sections
        sizes = StructureForDetailedSizes.sizes_by_key(sections, by_key)
        for key_name in sizes:
            sizes[key_name] = LinkingSections.filter_sections(sizes[key_name])

        s = []
        for key, section_dict in sizes.items():
            ram_st_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'flash')])
            flash_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'bss')])  # type: int

            section_dict['ram_st_total'] = ram_st_total
            section_dict['flash_total'] = flash_total

            sorted_dict = sorted(section_dict.items(), key=lambda elem: elem[0])
            s.append((key, collections.OrderedDict(sorted_dict)))

        s = sorted(s, key=lambda elem: elem[0])
        # do a secondary sort in order to have consistent order (for diff-ing the output)
        s = sorted(s, key=lambda elem: elem[1]['flash_total'], reverse=True)

        return collections.OrderedDict(s)
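
# Illustrative sketch (not part of the original script): StructureForDetailedSizes.get()
# produces a mapping ready for the per-archive / per-file tables, e.g. (made-up numbers):
#
#   {'libesp32.a': OrderedDict([('.dram0.bss', 2120), ('.dram0.data', 2212),
#                               ('.flash.text', 7247), ('flash_total', 9459),
#                               ('ram_st_total', 4332)]), ...}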


def get_detailed_sizes(sections, key, header, as_json=False, sections_diff=None):  # type: (Dict, str, str, bool, Dict) -> str

    key_name_set = set()
    current = StructureForDetailedSizes.get(sections, key)
    for section_dict in current.values():
        key_name_set.update(section_dict.keys())

    if sections_diff:
        reference = StructureForDetailedSizes.get(sections_diff, key)
        for section_dict in reference.values():
            key_name_set.update(section_dict.keys())
        diff_en = True
    else:
        diff_en = False

    key_name_list = list(key_name_set)
    ordered_key_list, display_name_list = LinkingSections.get_display_name_order(key_name_list)

    if as_json:
        if diff_en:
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(k,
                                                                cur_name_dic.get(k, 0) -
                                                                ref_name_dic.get(k, 0)) for k in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_header_format(disp_list=display_name_list):  # type: (List) -> str
            len_list = [len(x) for x in disp_list]
            len_list.insert(0, 24)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output(data, selection, key_list=ordered_key_list, disp_list=display_name_list):
            # type: (Dict[str, Dict[str, int]], Collection, List, List) -> str
            header_format = _get_header_format(disp_list)
            output = header_format.format(header, *disp_list)

            for k, v in iteritems(data):
                if k not in selection:
                    continue

                try:
                    _, k = k.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # k remains the same
                    pass

                def get_section_size(section_dict):  # type: (Dict) -> Callable[[str], int]
                    return lambda x: section_dict.get(x, 0)

                section_size_list = map(get_section_size(section_dict=v), key_list)
                output += header_format.format(k[:24], *(section_size_list))

            return output

        def _get_header_format_diff(disp_list=display_name_list, columns=False):  # type: (List, bool) -> str
            if columns:
                len_list = (24, ) + (7, ) * 3 * len(disp_list)
                return '|'.join(['{:>%d}' % x for x in len_list]) + os.linesep
            len_list = (24, ) + (23, ) * len(disp_list)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output_diff(curr, ref, key_list=ordered_key_list, disp_list=display_name_list):
            # type: (Dict, Dict, List, List) -> str
            # First header without Current/Ref/Diff columns
            header_format = _get_header_format_diff(columns=False)
            output = header_format.format(header, *disp_list)

            f_print = ('-' * 23, '') * len(key_list)
            f_print = f_print[0:len(key_list)]
            header_line = header_format.format('', *f_print)

            header_format = _get_header_format_diff(columns=True)
            f_print = ('<C>', '<R>', '<C>-<R>') * len(key_list)

            output += header_format.format('', *f_print)
            output += header_line

            for k, v in iteritems(curr):
                try:
                    v2 = ref[k]
                except KeyError:
                    continue

                try:
                    _, k = k.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # k remains the same
                    pass

                def _get_items(name, section_dict=v, section_dict_ref=v2):
                    # type: (str, Dict, Dict) -> Tuple[str, str, str]
                    a = section_dict.get(name, 0)
                    b = section_dict_ref.get(name, 0)
                    diff = a - b
                    # the sign is added here and not in header_format in order to be able to print empty strings
                    return (a or '', b or '', '' if diff == 0 else '{:+}'.format(diff))

                x = []  # type: List[str]
                for section in key_list:
                    x.extend(_get_items(section))

                output += header_format.format(k[:24], *(x))

            return output

        output = 'Per-{} contributions to ELF file:{}'.format(key, os.linesep)

        if diff_en:
            output += _get_output_diff(current, reference)

            in_current = frozenset(current.keys())
            in_reference = frozenset(reference.keys())
            only_in_current = in_current - in_reference
            only_in_reference = in_reference - in_current

            if len(only_in_current) > 0:
                output += 'The following entries are present in <CURRENT> only:{}'.format(os.linesep)
                output += _get_output(current, only_in_current)

            if len(only_in_reference) > 0:
                output += 'The following entries are present in <REFERENCE> only:{}'.format(os.linesep)
                output += _get_output(reference, only_in_reference)
        else:
            output += _get_output(current, current)

    return output


class StructureForArchiveSymbols(object):
    @staticmethod
    def get(archive, sections):  # type: (str, Dict) -> Dict
        interested_sections = LinkingSections.filter_sections(sections)

        result = dict([(t, {}) for t in interested_sections])  # type: Dict[str, Dict[str, int]]
        for _, section in iteritems(sections):
            section_name = section['name']
            if section_name not in interested_sections:
                continue
            for s in section['sources']:
                if archive != s['archive']:
                    continue
                s['sym_name'] = re.sub('(.text.|.literal.|.data.|.bss.|.rodata.)', '', s['sym_name'])
                result[section_name][s['sym_name']] = result[section_name].get(s['sym_name'], 0) + s['size']

        # build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
        section_symbols = collections.OrderedDict()
        for t in sorted(list(interested_sections)):
            s = sorted(result[t].items(), key=lambda k_v: str(k_v[0]))
            # do a secondary sort in order to have consistent order (for diff-ing the output)
            s = sorted(s, key=lambda k_v: int(k_v[1]), reverse=True)
            section_symbols[t] = collections.OrderedDict(s)

        return section_symbols


def get_archive_symbols(sections, archive, as_json=False, sections_diff=None):  # type: (Dict, str, bool, Dict) -> str
    diff_en = bool(sections_diff)
    current = StructureForArchiveSymbols.get(archive, sections)
    reference = StructureForArchiveSymbols.get(archive, sections_diff) if sections_diff else {}

    if as_json:
        if diff_en:
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(key,
                                                                cur_name_dic.get(key, 0) -
                                                                ref_name_dic.get(key, 0)) for key in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_item_pairs(name, section):  # type: (str, collections.OrderedDict) -> collections.OrderedDict
            return collections.OrderedDict([(key.replace(name + '.', ''), val) for key, val in iteritems(section)])

        def _get_max_len(symbols_dict):  # type: (Dict) -> Tuple[int, int]
            # the lists have 0 in them because max() doesn't work with empty lists
            names_max_len = 0
            numbers_max_len = 0
            for t, s in iteritems(symbols_dict):
                numbers_max_len = max([numbers_max_len] + [len(str(x)) for _, x in iteritems(s)])
                names_max_len = max([names_max_len] + [len(x) for x in _get_item_pairs(t, s)])

            return names_max_len, numbers_max_len

        def _get_output(section_symbols):  # type: (Dict) -> str
            output = ''
            names_max_len, numbers_max_len = _get_max_len(section_symbols)
            for t, s in iteritems(section_symbols):
                output += '{}Symbols from section: {}{}'.format(os.linesep, t, os.linesep)
                item_pairs = _get_item_pairs(t, s)
                for key, val in iteritems(item_pairs):
                    output += ' '.join([('\t{:<%d} : {:>%d}\n' % (names_max_len, numbers_max_len)).format(key, val)])
                section_total = sum([val for _, val in iteritems(item_pairs)])
                output += 'Section total: {}{}'.format(section_total, os.linesep)
            return output

        output = '{}Symbols within the archive: {} (Not all symbols may be reported){}'.format(os.linesep, archive, os.linesep)
        if diff_en:

            def _generate_line_tuple(curr, ref, name):
                # type: (collections.OrderedDict, collections.OrderedDict, str) -> Tuple[str, int, int, str]
                cur_val = curr.get(name, 0)
                ref_val = ref.get(name, 0)
                diff_val = cur_val - ref_val
                # string slicing is used just to make sure it will fit into the first column of line_format
                return ((' ' * 4 + name)[:40], cur_val, ref_val, '' if diff_val == 0 else '{:+}'.format(diff_val))

            line_format = '{:40} {:>12} {:>12} {:>25}'
            all_section_names = sorted(list(frozenset(current.keys()) | frozenset(reference.keys())))
            for section_name in all_section_names:
                current_item_pairs = _get_item_pairs(section_name, current.get(section_name, {}))
                reference_item_pairs = _get_item_pairs(section_name, reference.get(section_name, {}))
                output += os.linesep + line_format.format(section_name[:40],
                                                          '<CURRENT>',
                                                          '<REFERENCE>',
                                                          '<CURRENT> - <REFERENCE>') + os.linesep
                current_section_total = sum([val for _, val in iteritems(current_item_pairs)])
                reference_section_total = sum([val for _, val in iteritems(reference_item_pairs)])
                diff_section_total = current_section_total - reference_section_total
                all_item_names = sorted(list(frozenset(current_item_pairs.keys()) |
                                             frozenset(reference_item_pairs.keys())))
                output += os.linesep.join([line_format.format(*_generate_line_tuple(current_item_pairs,
                                                                                    reference_item_pairs,
                                                                                    n)
                                                              ).rstrip() for n in all_item_names])
                output += os.linesep if current_section_total > 0 or reference_section_total > 0 else ''
                output += line_format.format('Section total:',
                                             current_section_total,
                                             reference_section_total,
                                             '' if diff_section_total == 0 else '{:+}'.format(diff_section_total)
                                             ).rstrip() + os.linesep
        else:
            output += _get_output(current)

    return output


if __name__ == '__main__':
    main()