idf_size.py 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151
  1. #!/usr/bin/env python
  2. #
  3. # esp-idf alternative to "size" to print ELF file sizes, also analyzes
  4. # the linker map file to dump higher resolution details.
  5. #
  6. # Includes information which is not shown in "xtensa-esp32-elf-size",
  7. # or easy to parse from "xtensa-esp32-elf-objdump" or raw map files.
  8. #
  9. # SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
  10. # SPDX-License-Identifier: Apache-2.0
  11. #
from __future__ import division, print_function, unicode_literals
import argparse
import collections
import json
import math
import os.path
import re
import sys
from future.utils import iteritems
  20. try:
  21. from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, TextIO, Tuple, Union
  22. Section = Dict[str, Union[str, int]]
  23. SectionDict = Dict[str, Section]
  24. except ImportError:
  25. pass
  26. try:
  27. basestring
  28. except NameError:
  29. basestring = str
  30. GLOBAL_JSON_INDENT = 4
  31. GLOBAL_JSON_SEPARATORS = (',', ': ')
class MemRegions(object):
    """Describes the physical memory regions of a chip target.

    Used to map linker MAP segments/sections onto DRAM/IRAM/cache/RTC
    memory so that usage can be summarized per region.
    """
    # Regions determined by the chip target.
    # DIRAM is not added here. The DIRAM is indicated by the `secondary_addr` of each MemRegDef
    (DRAM_ID, IRAM_ID, CACHE_D_ID, CACHE_I_ID, RTC_FAST_D_ID, RTC_FAST_I_ID, RTC_SLOW_D_ID) = range(7)

    # The order of variables in the tuple is the same as in the soc_memory_layout.c files
    MemRegDef = collections.namedtuple('MemRegDef', ['primary_addr', 'length', 'type', 'secondary_addr'])

    class Region(object):
        # Helper class to store region information: a [start, start+len) span,
        # the MemRegDef it belongs to, and (optionally) the section name placed there.
        def __init__(self, start, length, region, section=None):
            # type: (MemRegions.Region, int, int, MemRegions.MemRegDef, Optional[str]) -> None
            self.start = start
            self.len = length
            self.region = region
            self.section = section

    @staticmethod
    def get_mem_regions(target):  # type: (str) -> List
        """Return the sorted list of MemRegDef entries for a supported target.

        Raises RuntimeError for unsupported targets.
        """
        # The target specific memory structure is deduced from soc_memory_types defined in
        # $IDF_PATH/components/soc/**/soc_memory_layout.c files.
        MemRegDef = MemRegions.MemRegDef
        # Consecutive MemRegDefs of the same type are joined into one MemRegDef
        if target == 'esp32':
            return sorted([
                MemRegDef(0x3FFAE000, 17 * 0x2000 + 4 * 0x8000 + 4 * 0x4000, MemRegions.DRAM_ID, 0),
                MemRegDef(0x40070000, 2 * 0x8000 + 16 * 0x2000, MemRegions.IRAM_ID, 0),
                MemRegDef(0x400C2000, 0xB3E000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3F400000, 0x400000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x3F800000, 0x400000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x3FF80000, 0x2000, MemRegions.RTC_FAST_D_ID, 0x400C0000),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        elif target == 'esp32s2':
            return sorted([
                MemRegDef(0x3FFB2000, 3 * 0x2000 + 18 * 0x4000, MemRegions.DRAM_ID, 0x40022000),
                MemRegDef(0x3F000000, 0x400000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x3F500000, 0xA80000, MemRegions.CACHE_D_ID, 0),
                MemRegDef(0x40080000, 0x780000, MemRegions.CACHE_I_ID, 0),
                MemRegDef(0x40070000, 0x2000, MemRegions.RTC_FAST_D_ID, 0x3FF9E000),
                MemRegDef(0x50000000, 0x2000, MemRegions.RTC_SLOW_D_ID, 0),
            ])
        else:
            raise RuntimeError('Target not detected.')

    def __init__(self, target):  # type: (MemRegions, str) -> None
        self.chip_mem_regions = self.get_mem_regions(target)
        if not self.chip_mem_regions:
            raise RuntimeError('Target {} is not implemented in idf_size'.format(target))

    def _get_first_region(self, start, length):
        # type: (int, int) -> Tuple[MemRegions.MemRegDef, int]
        """Find the region containing `start` (via primary or secondary address).

        Returns (region, usable_length) where usable_length is clipped to the
        part of `length` that fits inside that region. Raises RuntimeError if
        `start` falls outside every known region.
        """
        for region in self.chip_mem_regions:  # type: ignore
            if region.primary_addr <= start < region.primary_addr + region.length:
                return (region, min(length, region.primary_addr + region.length - start))
            if (region.secondary_addr and region.secondary_addr <= start < region.secondary_addr + region.length):
                return (region, min(length, region.secondary_addr + region.length - start))
        raise RuntimeError('Given section not found in any memory region. '
                           'Check whether the LD file is compatible with the definitions in get_mem_regions in idf_size.py')

    def _get_regions(self, start, length, name=None):  # type: (int, int, Optional[str]) -> List
        """Split the span [start, start+length) into per-region Region objects."""
        ret = []
        while length > 0:
            (region, cur_len) = self._get_first_region(start, length)
            ret.append(MemRegions.Region(start, cur_len, region, name))
            # Advance past the part consumed by this region; a span may straddle regions.
            length -= cur_len
            start += cur_len
        return ret

    def fit_segments_into_regions(self, segments):  # type: (MemRegions, Dict) -> List
        """Map each MAP-file segment (origin/length) onto chip memory regions."""
        region_list = []
        for segment in segments.values():
            sorted_segments = self._get_regions(segment['origin'], segment['length'])
            region_list.extend(sorted_segments)
        return region_list

    def fit_sections_into_regions(self, sections):  # type: (MemRegions, Dict) -> List
        """Map each MAP-file section (address/size/name) onto chip memory regions."""
        region_list = []
        for section in sections.values():
            sorted_sections = self._get_regions(section['address'], section['size'], section['name'])
            region_list.extend(sorted_sections)
        return region_list
  106. class LinkingSections(object):
  107. _section_type_dict = {k: re.compile(v) for k, v in {
  108. 'text': r'.*\.text',
  109. 'data': r'.*\.data',
  110. 'bss': r'.*\.bss',
  111. 'rodata': r'.*\.rodata',
  112. 'noinit': r'.*noinit',
  113. 'vectors': r'.*\.vectors',
  114. 'flash': r'.*flash.*',
  115. }.items()}
  116. @staticmethod
  117. def in_section(section, section_name_or_list): # type: (str, Union[str, Iterable]) -> bool
  118. if isinstance(section_name_or_list, basestring):
  119. section_name_or_list = [section_name_or_list]
  120. for section_name in section_name_or_list:
  121. if LinkingSections._section_type_dict[section_name].match(section):
  122. return True
  123. return False
  124. @staticmethod
  125. def filter_sections(sections): # type: (Dict) -> Dict
  126. return {k: v for k, v in sections.items()
  127. if LinkingSections.in_section(k, LinkingSections._section_type_dict.keys())}
  128. @staticmethod
  129. def get_display_name_order(section_name_list): # type: (List[str]) -> Tuple[List[str], List[str]]
  130. '''
  131. Return two lists, in the suggested display order.
  132. First list is the reordered section_name_list, second list is the suggested display name, corresponding to the first list
  133. '''
  134. def get_name_score(name): # type: (str) -> int
  135. score_dict = {
  136. '.dram': 30,
  137. '.iram': 20,
  138. '.flash': 10,
  139. 'ram_st_total': -10,
  140. 'flash_total': -20,
  141. '.data': 6,
  142. '.bss': 5,
  143. '.text': 4,
  144. '.rodata': 3,
  145. '.vectors': 2,
  146. '.noinit': 1,
  147. '.other': -1,
  148. }
  149. return sum([score if section in name else 0
  150. for section, score in score_dict.items()])
  151. score_list = [get_name_score(section) for section in section_name_list]
  152. ordered_name_list = sorted(section_name_list, key=lambda x: score_list[section_name_list.index(x)], reverse=True)
  153. display_name_list = ordered_name_list[:]
  154. memory_name = ''
  155. display_name_list = sorted(display_name_list)
  156. ordered_name_list = sorted(ordered_name_list)
  157. ordered_name_list = check_is_dict_sort(ordered_name_list)
  158. for i, section in enumerate(ordered_name_list):
  159. if memory_name and section.startswith(memory_name):
  160. # If the section has same memory type with the previous one, use shorter name
  161. display_name_list[i] = section.replace(memory_name, '& ')
  162. continue
  163. memory_name = ''
  164. split_name = section.split('.')
  165. if len(split_name) > 1:
  166. # If the section has a memory type, update the type and try to display the type properly
  167. assert len(split_name) == 3 and split_name[0] == '', 'Unexpected section name'
  168. memory_name = '.iram' if 'iram' in split_name[1] else\
  169. '.dram' if 'dram' in split_name[1] else\
  170. '.flash' if 'flash' in split_name[1] else\
  171. '.' + split_name[1]
  172. display_name_list[i] = 'DRAM .' + split_name[2] if 'dram' in split_name[1] else\
  173. 'IRAM' + split_name[1].replace('iram', '') + ' .' + split_name[2] if 'iram' in split_name[1] else\
  174. 'Flash .' + split_name[2] if 'flash' in split_name[1] else\
  175. section
  176. continue
  177. # Otherwise use its original name
  178. display_name_list[i] = section
  179. return ordered_name_list, display_name_list
  180. def scan_to_header(f, header_line): # type: (Iterable, str) -> None
  181. """ Scan forward in a file until you reach 'header_line', then return """
  182. for line in f:
  183. if line.strip() == header_line:
  184. return
  185. raise RuntimeError("Didn't find line '%s' in file" % header_line)
  186. def format_json(json_object): # type: (Dict) -> str
  187. return json.dumps(json_object,
  188. allow_nan=True,
  189. indent=GLOBAL_JSON_INDENT,
  190. separators=GLOBAL_JSON_SEPARATORS) + os.linesep
  191. def load_map_data(map_file): # type: (TextIO) -> Tuple[str, Dict, Dict]
  192. segments = load_segments(map_file)
  193. detected_chip = detect_target_chip(map_file)
  194. sections = load_sections(map_file)
  195. # Exclude the dummy and .text_end section, which usually means shared region among I/D buses
  196. for key in list(sections.keys()):
  197. if key.endswith(('dummy', '.text_end')):
  198. sections.pop(key)
  199. return detected_chip, segments, sections
  200. def load_segments(map_file): # type: (TextIO) -> Dict
  201. """ Memory Configuration section is the total size of each segment """
  202. result = {} # type: Dict[Any, Dict]
  203. scan_to_header(map_file, 'Memory Configuration')
  204. RE_MEMORY_SECTION = re.compile(r'(?P<name>[^ ]+) +0x(?P<origin>[\da-f]+) +0x(?P<length>[\da-f]+)')
  205. for line in map_file:
  206. m = RE_MEMORY_SECTION.match(line)
  207. if m is None:
  208. if len(result) == 0:
  209. continue # whitespace or a header, before the content we want
  210. else:
  211. return result # we're at the end of the Memory Configuration
  212. segment = {
  213. 'name': m.group('name'),
  214. 'origin': int(m.group('origin'), 16),
  215. 'length': int(m.group('length'), 16),
  216. }
  217. if segment['name'] != '*default*':
  218. result[segment['name']] = segment
  219. raise RuntimeError('End of file while scanning memory configuration?')
  220. def detect_target_chip(map_file): # type: (Iterable) -> str
  221. ''' Detect target chip based on the target archive name in the linker script part of the MAP file '''
  222. scan_to_header(map_file, 'Linker script and memory map')
  223. RE_TARGET = re.compile(r'project_elf_src_(.*)\.c.obj')
  224. # For back-compatible with make
  225. RE_TARGET_MAKE = re.compile(r'^LOAD .*?/xtensa-([^-]+)-elf/')
  226. for line in map_file:
  227. m = RE_TARGET.search(line)
  228. if m:
  229. return m.group(1)
  230. m = RE_TARGET_MAKE.search(line)
  231. if m:
  232. return m.group(1)
  233. line = line.strip()
  234. # There could be empty line(s) between the "Linker script and memory map" header and "LOAD lines". Therefore,
  235. # line stripping and length is checked as well. The "LOAD lines" are between START GROUP and END GROUP for
  236. # older MAP files.
  237. if not line.startswith(('LOAD', 'START GROUP', 'END GROUP')) and len(line) > 0:
  238. # This break is a failsafe to not process anything load_sections() might want to analyze.
  239. break
  240. raise RuntimeError('Target not detected')
def load_sections(map_file):  # type: (TextIO) -> Dict
    """ Load section size information from the MAP file.

    Returns a dict of 'sections', where each key is a section name and the value
    is a dict with details about this section, including a "sources" key which holds a list of source file line
    information for each symbol linked into the section.

    There are two kinds of lines:
        - symbol_only: [optional space]<sym_name>
        - full line: [optional space][optional sym_name] <address> <size> [optional file_info]
    If <sym_name> doesn't exist, use the symbol name from the symbol_only line above.
    If the line is the start of a section, the <file> should be empty; otherwise, if the line is for a source
    line, the <file> must exist, or the <sym_name> should be '*fill*'. This rule is used to tell sections from
    source lines.
    """
    # Check for lines which only contain the sym name (and rest is on following lines)
    RE_SYMBOL_ONLY_LINE = re.compile(r'^\s*(?P<sym_name>\S*)$')
    # Fast check to see if line is a potential source line before running the slower full regex against it
    RE_PRE_FILTER = re.compile(r'.*0x[\da-f]+\s*0x[\da-f]+.*')
    # source file line, ie
    # 0x0000000040080400 0xa4 /home/gus/esp/32/idf/examples/get-started/hello_world/build/esp32/libesp32.a(cpu_start.o)
    # cmake build system links some object files directly, not part of any archive, so make that part optional
    # .xtensa.info 0x0000000000000000 0x38 CMakeFiles/hello-world.elf.dir/project_elf_src.c.obj
    # *fill* 0x00000000400e2967 0x1
    RE_FULL_LINE = re.compile(r'\s*(?P<sym_name>\S*) +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+)\s*(?P<file>.*)$')
    # Extract archive and object_file from the file_info field
    RE_FILE = re.compile(r'((?P<archive>[^ ]+\.a)?\(?(?P<object_file>[^ ]+\.(o|obj))\)?)')

    def dump_src_line(src):  # type: (Dict) -> str
        # One-line human-readable description of a source entry, for diagnostics.
        return '%s(%s) addr: 0x%08x, size: 0x%x+%d' % (src['sym_name'], src['file'], src['address'], src['size'], src['fill'])

    sections = {}  # type: Dict[Any, Dict]
    # `section` tracks the section currently being parsed; `sym_backup` carries a
    # symbol name over from a preceding symbol-only line.
    section = {}  # type: Dict[str, Any]
    sym_backup = ''
    for line in map_file:
        if line.strip() == 'Cross Reference Table':
            # stop processing lines because we are at the next section in the map file
            break
        m = RE_SYMBOL_ONLY_LINE.match(line)
        if m:
            # In some cases the section name appears on the previous line, back it up in here
            sym_backup = m.group('sym_name')
            continue
        if not RE_PRE_FILTER.match(line):
            # line does not match our quick check, so skip to next line
            continue
        m = RE_FULL_LINE.match(line)
        if not m:
            assert not sym_backup, 'Symbol only line must be followed by a line with address and size'
            continue
        name = m.group('sym_name') if m.group('sym_name') else sym_backup
        sym_backup = ''
        is_section = not m.group('file') and name != '*fill*'
        if is_section:
            # section
            section = {
                'name': name,
                'address': int(m.group('address'), 16),
                'size': int(m.group('size'), 16),
                'sources': [],
            }
            sections[name] = section
        else:
            # symbol
            if not section:
                continue
            # There are some source lines in rodata section doesn't actually take any space, but have size
            # Make size of those sections zero
            srcs = section['sources']  # type: List[Dict]
            if srcs:
                last_src = srcs[-1]
                if last_src['size'] > 0 and last_src['address'] == int(m.group('address'), 16):
                    if '.comment' != section['name'] and '.debug_str' != section['name'] and\
                            'rodata' not in last_src['sym_name']:
                        raise RuntimeError('Due to overlap with following lines, size of the line set to 0:\n %s' % dump_src_line(last_src))
                    last_src['size'] = 0
            # Count the padding size into the last valid (size > 0) source in the section
            if name == '*fill*':
                for src in reversed(srcs):
                    if src['size'] > 0:
                        src['fill'] += int(m.group('size'), 16)
                        break
                continue
            # Extract archive and file information
            n = RE_FILE.match(m.group('file'))
            assert n
            archive = n.group('archive')
            if archive is None:
                # optional named group "archive" was not matched, so assign a value to it
                archive = '(exe)'
            file = n.group('object_file')
            assert name
            source = {
                'size': int(m.group('size'), 16),
                'address': int(m.group('address'), 16),
                'archive': os.path.basename(archive),
                'object_file': os.path.basename(file),
                'sym_name': name,
                'fill': 0,  # padding size after the source
            }
            source['file'] = '%s:%s' % (source['archive'], source['object_file'])
            section['sources'].append(source)  # type: ignore
    # Validate the map file: each non-empty source should start where the previous
    # one (plus its fill) ended; warn about any overlap.
    for section in sections.values():
        src_curr = {}  # type: Dict[str, Any]
        for src in section['sources']:
            if src['size'] == 0:
                continue
            expected_addr = src_curr['address'] + src_curr['size'] + src_curr['fill'] if src_curr else section['sources'][0]['address']
            if src['address'] != expected_addr:
                print('Warning: source line overlap:')
                print(' ' + dump_src_line(src_curr))
                print(' ' + dump_src_line(src))
            src_curr = src
    return sections
  352. def check_target(target, map_file): # type: (str, TextIO) -> None
  353. if target is None:
  354. raise RuntimeError('The target chip cannot be detected for {}. '
  355. 'Please report the issue.'.format(map_file.name))
def main():  # type: () -> None
    """Command-line entry point: parse arguments, load MAP data and print the
    requested size reports (summary, per-archive, per-file, per-symbol)."""
    parser = argparse.ArgumentParser(description='idf_size - a tool to print size information from an IDF MAP file')
    parser.add_argument(
        '--json',
        help='Output results as JSON',
        action='store_true')
    parser.add_argument(
        'map_file', help='MAP file produced by linker',
        type=argparse.FileType('r'))
    parser.add_argument(
        '--archives', help='Print per-archive sizes', action='store_true')
    parser.add_argument(
        '--archive_details', help='Print detailed symbols per archive')
    parser.add_argument(
        '--files', help='Print per-file sizes', action='store_true')
    parser.add_argument(
        '--target', help='Set target chip', default=None)
    parser.add_argument(
        '--diff', help='Show the differences in comparison with another MAP file',
        metavar='ANOTHER_MAP_FILE',
        default=None,
        dest='another_map_file')
    parser.add_argument(
        '-o',
        '--output-file',
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='Print output to the specified file instead of stdout')
    args = parser.parse_args()

    detected_target, segments, sections = load_map_data(args.map_file)
    args.map_file.close()
    check_target(detected_target, args.map_file)

    if args.another_map_file:
        # Diff mode: load the reference MAP file as well.
        with open(args.another_map_file, 'r') as f:
            detected_target_diff, segments_diff, sections_diff = load_map_data(f)
            check_target(detected_target_diff, f)
        if detected_target_diff != detected_target:
            print('WARNING: The target of the reference and other MAP files is {} and {}, respectively.'
                  ''.format(detected_target, detected_target_diff))
    else:
        segments_diff, sections_diff, detected_target_diff = {}, {}, ''

    if args.target is not None:
        # An explicit --target overrides the target detected from the MAP file(s).
        if args.target != detected_target or (detected_target_diff and args.target != detected_target_diff):
            print('WARNING: The detected chip target overwritten to {} by command line argument!'.format(args.target))
        detected_target = args.target
        detected_target_diff = args.target

    output = ''

    # The summary is printed unless JSON output of a more specific report was requested.
    if not args.json or not (args.archives or args.files or args.archive_details):
        output += get_summary(args.map_file.name, segments, sections, detected_target,
                              args.json,
                              args.another_map_file, segments_diff, sections_diff, detected_target_diff)

    if args.archives:
        output += get_detailed_sizes(sections, 'archive', 'Archive File', args.json, sections_diff)
    if args.files:
        output += get_detailed_sizes(sections, 'file', 'Object File', args.json, sections_diff)
    if args.archive_details:
        output += get_archive_symbols(sections, args.archive_details, args.json, sections_diff)

    args.output_file.write(output)
    args.output_file.close()
  415. class StructureForSummary(object):
  416. # this is from main branch
  417. # used_dram_data, used_dram_bss, used_dram_other, used_dram, dram_total, dram_remain = (0, ) * 6
  418. used_dram_data, used_dram_bss, used_dram_rodata, used_dram_other, used_dram, dram_total, dram_remain = (0, ) * 7
  419. used_dram_ratio = 0.
  420. used_iram_vectors, used_iram_text, used_iram_other, used_iram, iram_total, iram_remain = (0, ) * 6
  421. used_iram_ratio = 0.
  422. used_diram_data, used_diram_bss, used_diram_text, used_diram_vectors, used_diram_rodata, used_diram_other, diram_total, used_diram, diram_remain = (0, ) * 9
  423. used_diram_ratio = 0.
  424. used_flash_text, used_flash_rodata, used_flash_other, used_flash, total_size = (0, ) * 5
  425. def __sub__(self, rhs): # type: (StructureForSummary) -> StructureForSummary
  426. assert isinstance(rhs, StructureForSummary)
  427. ret = self
  428. for key in StructureForSummary.get_required_items():
  429. setattr(ret, key, getattr(self, key) - getattr(rhs, key))
  430. return ret
  431. @classmethod
  432. def get_required_items(cls): # type: (Any) -> List
  433. whole_list = list(filter(lambda x: not (x.startswith('__') or x.endswith('__') or callable(getattr(cls, x))), dir(cls)))
  434. return whole_list
  435. @staticmethod
  436. def get(segments, sections): # type: (List, List) -> StructureForSummary
  437. def get_size(sections): # type: (Iterable) -> int
  438. return sum([x.len for x in sections])
  439. def in_diram(x): # type: (MemRegions.Region) -> bool
  440. return x.region.type in (MemRegions.DRAM_ID, MemRegions.IRAM_ID) and x.region.secondary_addr > 0
  441. def in_dram(x): # type: (MemRegions.Region) -> bool
  442. return x.region.type == MemRegions.DRAM_ID and x.region.secondary_addr == 0 # type: ignore
  443. def in_iram(x): # type: (MemRegions.Region) -> bool
  444. return x.region.type == MemRegions.IRAM_ID and x.region.secondary_addr == 0 # type: ignore
  445. r = StructureForSummary()
  446. diram_filter = filter(in_diram, segments)
  447. r.diram_total = int(get_size(diram_filter) / 2)
  448. dram_filter = filter(in_dram, segments)
  449. r.dram_total = get_size(dram_filter)
  450. iram_filter = filter(in_iram, segments)
  451. r.iram_total = get_size(iram_filter)
  452. def filter_in_section(sections, section_to_check): # type: (Iterable[MemRegions.Region], str) -> List[MemRegions.Region]
  453. return list(filter(lambda x: LinkingSections.in_section(x.section, section_to_check), sections)) # type: ignore
  454. dram_sections = list(filter(in_dram, sections))
  455. iram_sections = list(filter(in_iram, sections))
  456. diram_sections = list(filter(in_diram, sections))
  457. flash_sections = filter_in_section(sections, 'flash')
  458. dram_data_list = filter_in_section(dram_sections, 'data')
  459. dram_bss_list = filter_in_section(dram_sections, 'bss')
  460. dram_rodata_list = filter_in_section(dram_sections, 'rodata')
  461. dram_other_list = [x for x in dram_sections if x not in dram_data_list + dram_bss_list + dram_rodata_list]
  462. iram_vectors_list = filter_in_section(iram_sections, 'vectors')
  463. iram_text_list = filter_in_section(iram_sections, 'text')
  464. iram_other_list = [x for x in iram_sections if x not in iram_vectors_list + iram_text_list]
  465. diram_vectors_list = filter_in_section(diram_sections, 'vectors')
  466. diram_data_list = filter_in_section(diram_sections, 'data')
  467. diram_bss_list = filter_in_section(diram_sections, 'bss')
  468. diram_text_list = filter_in_section(diram_sections, 'text')
  469. diram_rodata_list = filter_in_section(diram_sections, 'rodata')
  470. diram_other_list = [x for x in diram_sections if x not in diram_data_list + diram_bss_list + diram_text_list + diram_vectors_list + diram_rodata_list]
  471. flash_text_list = filter_in_section(flash_sections, 'text')
  472. flash_rodata_list = filter_in_section(flash_sections, 'rodata')
  473. flash_other_list = [x for x in flash_sections if x not in flash_text_list + flash_rodata_list]
  474. r.used_dram_data = get_size(dram_data_list)
  475. r.used_dram_bss = get_size(dram_bss_list)
  476. r.used_dram_rodata = get_size(dram_rodata_list)
  477. r.used_dram_other = get_size(dram_other_list)
  478. r.used_dram = r.used_dram_data + r.used_dram_bss + r.used_dram_other + r.used_dram_rodata
  479. try:
  480. r.used_dram_ratio = r.used_dram / r.dram_total
  481. except ZeroDivisionError:
  482. r.used_dram_ratio = float('nan') if r.used_dram != 0 else 0
  483. r.dram_remain = r.dram_total - r.used_dram
  484. r.used_iram_vectors = get_size((iram_vectors_list))
  485. r.used_iram_text = get_size((iram_text_list))
  486. r.used_iram_other = get_size((iram_other_list))
  487. r.used_iram = r.used_iram_vectors + r.used_iram_text + r.used_iram_other
  488. try:
  489. r.used_iram_ratio = r.used_iram / r.iram_total
  490. except ZeroDivisionError:
  491. r.used_iram_ratio = float('nan') if r.used_iram != 0 else 0
  492. r.iram_remain = r.iram_total - r.used_iram
  493. r.used_diram_data = get_size(diram_data_list)
  494. r.used_diram_bss = get_size(diram_bss_list)
  495. r.used_diram_text = get_size(diram_text_list)
  496. r.used_diram_vectors = get_size(diram_vectors_list)
  497. r.used_diram_rodata = get_size(diram_rodata_list)
  498. r.used_diram_other = get_size(diram_other_list)
  499. r.used_diram = r.used_diram_data + r.used_diram_bss + r.used_diram_text + r.used_diram_vectors + r.used_diram_other + r.used_diram_rodata
  500. try:
  501. r.used_diram_ratio = r.used_diram / r.diram_total
  502. except ZeroDivisionError:
  503. r.used_diram_ratio = float('nan') if r.used_diram != 0 else 0
  504. r.diram_remain = r.diram_total - r.used_diram
  505. r.used_flash_text = get_size(flash_text_list)
  506. r.used_flash_rodata = get_size(flash_rodata_list)
  507. r.used_flash_other = get_size(flash_other_list)
  508. r.used_flash = r.used_flash_text + r.used_flash_rodata + r.used_flash_other
  509. # The used DRAM BSS is counted into the "Used static DRAM" but not into the "Total image size"
  510. r.total_size = r.used_dram - r.used_dram_bss + r.used_iram + r.used_diram - r.used_diram_bss + r.used_flash
  511. return r
  512. def get_json_dic(self): # type: (StructureForSummary) -> collections.OrderedDict
  513. ret = collections.OrderedDict([
  514. ('dram_data', self.used_dram_data),
  515. ('dram_bss', self.used_dram_bss),
  516. ('dram_rodata', self.used_dram_rodata),
  517. ('dram_other', self.used_dram_other),
  518. ('used_dram', self.used_dram),
  519. ('dram_total', self.dram_total),
  520. ('used_dram_ratio', self.used_dram_ratio if self.used_dram_ratio is not float('nan') else 0),
  521. ('dram_remain', self.dram_remain),
  522. ('iram_vectors', self.used_iram_vectors),
  523. ('iram_text', self.used_iram_text),
  524. ('iram_other', self.used_iram_other),
  525. ('used_iram', self.used_iram),
  526. ('iram_total', self.iram_total),
  527. ('used_iram_ratio', self.used_iram_ratio),
  528. ('iram_remain', self.iram_remain),
  529. ('diram_data', self.used_diram_data),
  530. ('diram_bss', self.used_diram_bss),
  531. ('diram_text', self.used_diram_text),
  532. ('diram_vectors', self.used_diram_vectors),
  533. ('diram_rodata', self.used_diram_rodata),
  534. ('diram_other', self.used_diram_other),
  535. ('diram_total', self.diram_total),
  536. ('used_diram', self.used_diram),
  537. ('used_diram_ratio', self.used_diram_ratio),
  538. ('diram_remain', self.diram_remain),
  539. ('flash_code', self.used_flash_text),
  540. ('flash_rodata', self.used_flash_rodata),
  541. ('flash_other', self.used_flash_other),
  542. ('used_flash_non_ram', self.used_flash), # text/data in D/I RAM not included
  543. ('total_size', self.total_size) # bss not included
  544. ])
  545. assert len(ret) == len(StructureForSummary.get_required_items())
  546. return ret
def get_summary(path, segments, sections, target,
                as_json=False,
                path_diff='', segments_diff=None, sections_diff=None, target_diff=''):
    # type: (str, Dict, Dict, str, bool, str, Optional[Dict], Optional[Dict], str) -> str
    """Render the overall memory-usage summary for one MAP file, optionally
    diffed against a second ("reference") MAP file.

    path/segments/sections/target describe the <CURRENT> image; the *_diff
    arguments, when given, describe the <REFERENCE> image.  Returns the report
    as a string, either JSON (as_json=True) or aligned plain text.
    """
    if segments_diff is None:
        segments_diff = {}
    if sections_diff is None:
        sections_diff = {}
    # Fit the current image's segments and (filtered) sections into the
    # target's memory regions, then fold them into summary counters.
    mem_regions = MemRegions(target)
    segment_layout = mem_regions.fit_segments_into_regions(segments)
    section_layout = mem_regions.fit_sections_into_regions(LinkingSections.filter_sections(sections))
    current = StructureForSummary.get(segment_layout, section_layout)
    if path_diff:
        diff_en = True
        # Same processing for the reference image (it may be a different target).
        mem_regions_diff = MemRegions(target_diff)
        segment_layout_diff = mem_regions_diff.fit_segments_into_regions(segments_diff)
        section_layout_diff = mem_regions_diff.fit_sections_into_regions(LinkingSections.filter_sections(sections_diff))
        reference = StructureForSummary.get(segment_layout_diff, section_layout_diff)
    else:
        diff_en = False
        # All-zero placeholder keeps the code below uniform.
        reference = StructureForSummary()
    if as_json:
        current_json_dic = current.get_json_dic()
        if diff_en:
            reference_json_dic = reference.get_json_dic()
            # Key-by-key numeric difference: <CURRENT> - <REFERENCE>.
            diff_json_dic = collections.OrderedDict([
                (k, v - reference_json_dic[k]) for k, v in iteritems(current_json_dic)])
            output = format_json(collections.OrderedDict([('current', current_json_dic),
                                                          ('reference', reference_json_dic),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current_json_dic)
    else:
        class LineDef(object):
            # A plain detail line of the text report (e.g. ".data size").
            # `name` is the StructureForSummary attribute the line displays.
            title = ''
            name = ''

            def __init__(self, title, name):  # type: (LineDef, str, str) -> None
                self.title = title
                self.name = name

            def format_line(self):  # type: (LineDef) -> Tuple[str, str, str, str]
                # Returns str.format templates for the four report columns:
                # (title + current value, reference value, diff, extra info).
                return (self.title + ': {%s:>7} bytes' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        class HeadLineDef(LineDef):
            # A heading line: additionally shows remaining bytes, used ratio
            # and (in diff mode) the total-size delta.
            remain = ''
            ratio = ''
            total = ''

            def __init__(self, title, name, remain, ratio, total):  # type: (HeadLineDef, str, str, str, str, str) -> None
                super(HeadLineDef, self).__init__(title, name)
                self.remain = remain
                self.ratio = ratio
                self.total = total

            def format_line(self):  # type: (HeadLineDef) -> Tuple[str, str, str, str]
                return ('%s: {%s:>7} bytes ({%s:>7} remain, {%s:.1%%} used)' % (self.title, self.name, self.remain, self.ratio),
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '({%s:>+7} remain, {%s:>+7} total)' % (self.remain, self.total))

        class TotalLineDef(LineDef):
            # The final "Total image size" line; notes possible .bin padding.

            def format_line(self):  # type: (TotalLineDef) -> Tuple[str, str, str, str]
                return (self.title + ': {%s:>7} bytes (.bin may be padded larger)' % self.name,
                        '{%s:>7}' % self.name,
                        '{%s:+}' % self.name,
                        '')

        # Fixed order of the report lines; zero-valued lines are skipped below.
        format_list = [
            HeadLineDef('Used static DRAM', 'used_dram', remain='dram_remain', ratio='used_dram_ratio', total='dram_total'),
            LineDef(' .data size', 'used_dram_data'),
            LineDef(' .bss size', 'used_dram_bss'),
            LineDef(' .rodata size', 'used_dram_rodata'),
            LineDef(' DRAM other size', 'used_dram_other'),
            HeadLineDef('Used static IRAM', 'used_iram', remain='iram_remain', ratio='used_iram_ratio', total='iram_total'),
            LineDef(' .text size', 'used_iram_text'),
            LineDef(' .vectors size', 'used_iram_vectors'),
            HeadLineDef('Used stat D/IRAM', 'used_diram', remain='diram_remain', ratio='used_diram_ratio', total='diram_total'),
            LineDef(' .data size', 'used_diram_data'),
            LineDef(' .bss size', 'used_diram_bss'),
            LineDef(' .text size', 'used_diram_text'),
            LineDef(' .vectors size', 'used_diram_vectors'),
            LineDef(' .rodata size', 'used_diram_rodata'),
            LineDef(' other ', 'used_diram_other'),
            LineDef('Used Flash size ', 'used_flash'),
            LineDef(' .text ', 'used_flash_text'),
            LineDef(' .rodata ', 'used_flash_rodata'),
            TotalLineDef('Total image size', 'total_size')
        ]

        def convert_to_fmt_dict(summary, suffix=''):  # type: (StructureForSummary, str) -> Dict
            # Flatten a StructureForSummary into a dict usable with
            # str.format(**dic) by the templates produced in format_line().
            required_items = StructureForSummary.get_required_items()
            return dict([(key + suffix, getattr(summary, key)) for key in required_items])

        f_dic1 = convert_to_fmt_dict(current)
        if diff_en:
            f_dic2 = convert_to_fmt_dict(reference)
            f_dic_diff = convert_to_fmt_dict(current - reference)
        lf = '{:60}{:>15}{:>15} {}'  # Width for a, b, c, d columns

        def print_in_columns(a, b='', c='', d=''):
            # type: (str, Optional[str], Optional[str], Optional[str]) -> str
            # One report row; trailing blanks are stripped so short rows stay clean.
            return lf.format(a, b, c, d).rstrip() + os.linesep

        output = ''
        if diff_en:
            output += print_in_columns('<CURRENT> MAP file: ' + path)
            output += print_in_columns('<REFERENCE> MAP file: ' + path_diff)
            output += print_in_columns('Difference is counted as <CURRENT> - <REFERENCE>, ',
                                       'i.e. a positive number means that <CURRENT> is larger.')
            output += print_in_columns('Total sizes of <CURRENT>:', '<REFERENCE>', 'Difference', '')
            for line in format_list:
                # Show the line when either image uses the memory (total line always shown).
                if getattr(current, line.name) > 0 or getattr(reference, line.name) > 0 or line.name == 'total_size':
                    a, b, c, d = line.format_line()
                    output += print_in_columns(
                        a.format(**f_dic1),
                        b.format(**f_dic2),
                        # Suppress a "+0" diff column (zero difference).
                        c.format(**f_dic_diff) if not c.format(**f_dic_diff).startswith('+0') else '',
                        d.format(**f_dic_diff))
        else:
            output += print_in_columns('Total sizes:')
            for line in format_list:
                if getattr(current, line.name) > 0 or line.name == 'total_size':
                    a, b, c, d = line.format_line()
                    output += print_in_columns(a.format(**f_dic1))
    return output
  666. def check_is_dict_sort(non_sort_list): # type: (List) -> List
  667. # keeping the order data, bss, other, iram, diram, ram_st_total, flash_text, flash_rodata, flash_total
  668. start_of_other = 0
  669. props_sort = [] # type: List
  670. props_elem = ['.data', '.bss', 'other', 'iram', 'diram', 'ram_st_total', 'flash.text', 'flash.rodata', 'flash', 'flash_total']
  671. for i in props_elem:
  672. for j in non_sort_list:
  673. if i == 'other':
  674. start_of_other = len(props_sort)
  675. elif i in (j[0] if len(j[0]) > 1 else j) and (j[0] if len(j[0]) > 1 else j) not in props_sort:
  676. props_sort.append(j)
  677. for j in non_sort_list:
  678. if j not in props_sort:
  679. props_sort.insert(start_of_other, j)
  680. return props_sort
  681. class StructureForDetailedSizes(object):
  682. @staticmethod
  683. def sizes_by_key(sections, key, include_padding=False): # type: (SectionDict, str, Optional[bool]) -> Dict[str, Dict[str, int]]
  684. """ Takes a dict of sections (from load_sections) and returns
  685. a dict keyed by 'key' with aggregate output size information.
  686. Key can be either "archive" (for per-archive data) or "file" (for per-file data) in the result.
  687. """
  688. result = {} # type: Dict[str, Dict[str, int]]
  689. for _, section in iteritems(sections):
  690. for s in section['sources']:
  691. if not s[key] in result:
  692. result[s[key]] = {}
  693. archive = result[s[key]]
  694. if not section['name'] in archive:
  695. archive[section['name']] = 0
  696. archive[section['name']] += s['size']
  697. if include_padding:
  698. archive[section['name']] += s['fill']
  699. return result
  700. @staticmethod
  701. def get(sections, by_key): # type: (SectionDict, str) -> collections.OrderedDict
  702. # Get the detailed structure before using the filter to remove undesired sections,
  703. # to show entries without desired sections
  704. sizes = StructureForDetailedSizes.sizes_by_key(sections, by_key)
  705. for key_name in sizes:
  706. sizes[key_name] = LinkingSections.filter_sections(sizes[key_name])
  707. s = []
  708. for key, section_dict in sizes.items():
  709. ram_st_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'flash')])
  710. flash_total = sum([x[1] for x in section_dict.items() if not LinkingSections.in_section(x[0], 'bss')]) # type: int
  711. section_dict['ram_st_total'] = ram_st_total
  712. section_dict['flash_total'] = flash_total
  713. sorted_dict = sorted(section_dict.items(), key=lambda elem: elem[0])
  714. sorted_dict = check_is_dict_sort(sorted_dict)
  715. s.append((key, collections.OrderedDict(sorted_dict)))
  716. s = sorted(s, key=lambda elem: elem[0])
  717. # do a secondary sort in order to have consistent order (for diff-ing the output)
  718. s = sorted(s, key=lambda elem: elem[1]['flash_total'], reverse=True)
  719. return collections.OrderedDict(s)
def get_detailed_sizes(sections, key, header, as_json=False, sections_diff=None):  # type: (Dict, str, str, bool, Dict) -> str
    """Render per-archive or per-file size contributions to the ELF file.

    `key` selects the grouping ('archive' or 'file'); `header` is the label
    for the first column.  When `sections_diff` is given, a three-column
    <CURRENT>/<REFERENCE>/diff report is produced.  Returns JSON or aligned
    plain text depending on `as_json`.
    """
    # Collect all section names that occur in either image, so both reports
    # share one column layout.
    key_name_set = set()
    current = StructureForDetailedSizes.get(sections, key)
    for section_dict in current.values():
        key_name_set.update(section_dict.keys())
    if sections_diff:
        reference = StructureForDetailedSizes.get(sections_diff, key)
        for section_dict in reference.values():
            key_name_set.update(section_dict.keys())
        diff_en = True
    else:
        diff_en = False
    key_name_list = list(key_name_set)
    ordered_key_list, display_name_list = LinkingSections.get_display_name_order(key_name_list)
    if as_json:
        if diff_en:
            # Per-entry, per-section numeric differences (missing values count as 0).
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(k,
                                                                cur_name_dic.get(k, 0) -
                                                                ref_name_dic.get(k, 0)) for k in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_header_format(disp_list=display_name_list):  # type: (List) -> str
            # 24-char first column plus one right-aligned column per section.
            len_list = [len(x) for x in disp_list]
            len_list.insert(0, 24)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output(data, selection, key_list=ordered_key_list, disp_list=display_name_list):
            # type: (Dict[str, Dict[str, int]], Collection, List, List) -> str
            # Plain (non-diff) table for the entries named in `selection`.
            header_format = _get_header_format(disp_list)
            output = header_format.format(header, *disp_list)
            for k, v in iteritems(data):
                if k not in selection:
                    continue
                try:
                    _, k = k.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # k remains the same
                    pass

                def get_section_size(section_dict):  # type: (Dict) -> Callable[[str], int]
                    # Missing sections are reported as size 0.
                    return lambda x: section_dict.get(x, 0)
                section_size_list = map(get_section_size(section_dict=v), key_list)
                output += header_format.format(k[:24], *(section_size_list))
            return output

        def _get_header_format_diff(disp_list=display_name_list, columns=False):  # type: (List, bool) -> str
            # columns=True: three 7-char sub-columns (<C>|<R>|<C>-<R>) per section,
            # '|'-separated; columns=False: one 23-char column per section.
            if columns:
                len_list = (24, ) + (7, ) * 3 * len(disp_list)
                return '|'.join(['{:>%d}' % x for x in len_list]) + os.linesep
            len_list = (24, ) + (23, ) * len(disp_list)
            return ' '.join(['{:>%d}' % x for x in len_list]) + os.linesep

        def _get_output_diff(curr, ref, key_list=ordered_key_list, disp_list=display_name_list):
            # type: (Dict, Dict, List, List) -> str
            # Diff table for entries present in BOTH images (others are handled
            # separately by the caller via _get_output).
            # First header without Current/Ref/Diff columns
            header_format = _get_header_format_diff(columns=False)
            output = header_format.format(header, *disp_list)
            f_print = ('-' * 23, '') * len(key_list)
            f_print = f_print[0:len(key_list)]
            header_line = header_format.format('', *f_print)
            header_format = _get_header_format_diff(columns=True)
            f_print = ('<C>', '<R>', '<C>-<R>') * len(key_list)
            output += header_format.format('', *f_print)
            output += header_line
            for k, v in iteritems(curr):
                try:
                    v2 = ref[k]
                except KeyError:
                    # Entry exists only in <CURRENT>; skipped here.
                    continue
                try:
                    _, k = k.split(':', 1)
                    # print subheadings for key of format archive:file
                except ValueError:
                    # k remains the same
                    pass

                def _get_items(name, section_dict=v, section_dict_ref=v2):
                    # type: (str, Dict, Dict) -> Tuple[str, str, str]
                    a = section_dict.get(name, 0)
                    b = section_dict_ref.get(name, 0)
                    diff = a - b
                    # the sign is added here and not in header_format in order to be able to print empty strings
                    return (a or '', b or '', '' if diff == 0 else '{:+}'.format(diff))

                x = []  # type: List[str]
                for section in key_list:
                    x.extend(_get_items(section))
                output += header_format.format(k[:24], *(x))
            return output

        output = 'Per-{} contributions to ELF file:{}'.format(key, os.linesep)
        if diff_en:
            output += _get_output_diff(current, reference)
            # Entries existing in only one of the images are listed separately.
            in_current = frozenset(current.keys())
            in_reference = frozenset(reference.keys())
            only_in_current = in_current - in_reference
            only_in_reference = in_reference - in_current
            if len(only_in_current) > 0:
                output += 'The following entries are present in <CURRENT> only:{}'.format(os.linesep)
                output += _get_output(current, only_in_current)
            if len(only_in_reference) > 0:
                output += 'The following entries are present in <REFERENCE> only:{}'.format(os.linesep)
                output += _get_output(reference, only_in_reference)
        else:
            output += _get_output(current, current)
    return output
  830. class StructureForArchiveSymbols(object):
  831. @staticmethod
  832. def get(archive, sections): # type: (str, Dict) -> Dict
  833. interested_sections = LinkingSections.filter_sections(sections)
  834. result = dict([(t, {}) for t in interested_sections]) # type: Dict[str, Dict[str, int]]
  835. for _, section in iteritems(sections):
  836. section_name = section['name']
  837. if section_name not in interested_sections:
  838. continue
  839. for s in section['sources']:
  840. if archive != s['archive']:
  841. continue
  842. s['sym_name'] = re.sub('(.text.|.literal.|.data.|.bss.|.rodata.)', '', s['sym_name'])
  843. result[section_name][s['sym_name']] = result[section_name].get(s['sym_name'], 0) + s['size']
  844. # build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
  845. section_symbols = collections.OrderedDict()
  846. for t in sorted(list(interested_sections)):
  847. s = sorted(result[t].items(), key=lambda k_v: str(k_v[0]))
  848. # do a secondary sort in order to have consistent order (for diff-ing the output)
  849. s = sorted(s, key=lambda k_v: int(k_v[1]), reverse=True)
  850. section_symbols[t] = collections.OrderedDict(s)
  851. return section_symbols
def get_archive_symbols(sections, archive, as_json=False, sections_diff=None):  # type: (Dict, str, bool, Dict) -> str
    """Render the per-symbol size report for one archive, optionally diffed
    against a reference image's sections.  Returns JSON or plain text
    depending on `as_json`.
    """
    diff_en = bool(sections_diff)
    current = StructureForArchiveSymbols.get(archive, sections)
    reference = StructureForArchiveSymbols.get(archive, sections_diff) if sections_diff else {}
    if as_json:
        if diff_en:
            # Per-section, per-symbol numeric differences (missing symbols count as 0).
            diff_json_dic = collections.OrderedDict()
            for name in sorted(list(frozenset(current.keys()) | frozenset(reference.keys()))):
                cur_name_dic = current.get(name, {})
                ref_name_dic = reference.get(name, {})
                all_keys = sorted(list(frozenset(cur_name_dic.keys()) | frozenset(ref_name_dic.keys())))
                diff_json_dic[name] = collections.OrderedDict([(key,
                                                                cur_name_dic.get(key, 0) -
                                                                ref_name_dic.get(key, 0)) for key in all_keys])
            output = format_json(collections.OrderedDict([('current', current),
                                                          ('reference', reference),
                                                          ('diff', diff_json_dic),
                                                          ]))
        else:
            output = format_json(current)
    else:
        def _get_item_pairs(name, section):  # type: (str, collections.OrderedDict) -> collections.OrderedDict
            # Strip the '<section name>.' prefix from each symbol for display.
            return collections.OrderedDict([(key.replace(name + '.', ''), val) for key, val in iteritems(section)])

        def _get_max_len(symbols_dict):  # type: (Dict) -> Tuple[int, int]
            # Widest symbol name and widest size number, for column alignment.
            names_max_len = 0
            numbers_max_len = 0
            for t, s in iteritems(symbols_dict):
                numbers_max_len = max([numbers_max_len] + [len(str(x)) for _, x in iteritems(s)])
                names_max_len = max([names_max_len] + [len(x) for x in _get_item_pairs(t, s)])
            return names_max_len, numbers_max_len

        def _get_output(section_symbols):  # type: (Dict) -> str
            # Plain (non-diff) listing: one block per section with a total line.
            output = ''
            names_max_len, numbers_max_len = _get_max_len(section_symbols)
            for t, s in iteritems(section_symbols):
                output += '{}Symbols from section: {}{}'.format(os.linesep, t, os.linesep)
                item_pairs = _get_item_pairs(t, s)
                for key, val in iteritems(item_pairs):
                    output += ' '.join([('\t{:<%d} : {:>%d}\n' % (names_max_len,numbers_max_len)).format(key, val)])
                section_total = sum([val for _, val in iteritems(item_pairs)])
                output += 'Section total: {}{}'.format(section_total, os.linesep)
            return output

        output = '{}Symbols within the archive: {} (Not all symbols may be reported){}'.format(os.linesep, archive, os.linesep)
        if diff_en:
            def _generate_line_tuple(curr, ref, name):
                # type: (collections.OrderedDict, collections.OrderedDict, str) -> Tuple[str, int, int, str]
                cur_val = curr.get(name, 0)
                ref_val = ref.get(name, 0)
                diff_val = cur_val - ref_val
                # string slicing is used just to make sure it will fit into the first column of line_format
                return ((' ' * 4 + name)[:40], cur_val, ref_val, '' if diff_val == 0 else '{:+}'.format(diff_val))

            line_format = '{:40} {:>12} {:>12} {:>25}'
            all_section_names = sorted(list(frozenset(current.keys()) | frozenset(reference.keys())))
            for section_name in all_section_names:
                current_item_pairs = _get_item_pairs(section_name, current.get(section_name, {}))
                reference_item_pairs = _get_item_pairs(section_name, reference.get(section_name, {}))
                output += os.linesep + line_format.format(section_name[:40],
                                                          '<CURRENT>',
                                                          '<REFERENCE>',
                                                          '<CURRENT> - <REFERENCE>') + os.linesep
                current_section_total = sum([val for _, val in iteritems(current_item_pairs)])
                reference_section_total = sum([val for _, val in iteritems(reference_item_pairs)])
                diff_section_total = current_section_total - reference_section_total
                all_item_names = sorted(list(frozenset(current_item_pairs.keys()) |
                                             frozenset(reference_item_pairs.keys())))
                output += os.linesep.join([line_format.format(*_generate_line_tuple(current_item_pairs,
                                                                                    reference_item_pairs,
                                                                                    n)
                                                              ).rstrip() for n in all_item_names])
                # Separator newline only when the section actually had symbols.
                output += os.linesep if current_section_total > 0 or reference_section_total > 0 else ''
                output += line_format.format('Section total:',
                                             current_section_total,
                                             reference_section_total,
                                             '' if diff_section_total == 0 else '{:+}'.format(diff_section_total)
                                             ).rstrip() + os.linesep
        else:
            output += _get_output(current)
    return output
if __name__ == '__main__':
    # Command-line entry point; main() is defined earlier in this file.
    main()