run_binary_size_analysis.py
#!/usr/bin/env python3
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a spatial analysis against an arbitrary library.

To use, build the 'binary_size_tool' target. Then run this tool, passing
in the location of the library to be analyzed along with any other options
you desire.
"""

import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import time

import binary_size_utils
import elf_symbolizer

# Node dictionary keys. These are output in JSON read by the webapp, so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'

# The display name of the bucket where we put symbols without a path.
NAME_NO_PATH_BUCKET = '(No Path)'

# Try to keep data buckets smaller than this to avoid overwhelming the
# graphing library.
BIG_BUCKET_LIMIT = 3000
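
# For illustration only (a hand-written sketch, not tool output): after
# MakeCompactTree and MakeChildrenDictsIntoLists below have run, a tiny
# tree using the keys above could look like this, with 'p'/'b'/'s'
# marking path, bucket, and symbol nodes:
#
#   {'n': '/', 'k': 'p', 'maxDepth': 3, 'children': [
#       {'n': 'src', 'k': 'p', 'lastPathElement': True, 'children': [
#           {'n': 't', 'k': 'b', 't': 't', 'children': [
#               {'n': 'main', 'k': 's', 't': 't', 'value': 128}]}]}]}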


def _MkChild(node, name):
  child = node[NODE_CHILDREN_KEY].get(name)
  if child is None:
    child = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}}
    node[NODE_CHILDREN_KEY][name] = child
  return child


def SplitNoPathBucket(node):
  """NAME_NO_PATH_BUCKET can be too large for the graphing lib to
  handle. Split it into sub-buckets in that case."""
  root_children = node[NODE_CHILDREN_KEY]
  if NAME_NO_PATH_BUCKET in root_children:
    no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
    old_children = no_path_bucket[NODE_CHILDREN_KEY]
    count = 0
    for symbol_type, symbol_bucket in old_children.items():
      count += len(symbol_bucket[NODE_CHILDREN_KEY])
    if count > BIG_BUCKET_LIMIT:
      new_children = {}
      no_path_bucket[NODE_CHILDREN_KEY] = new_children
      current_bucket = None
      index = 0
      for symbol_type, symbol_bucket in old_children.items():
        for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].items():
          if index % BIG_BUCKET_LIMIT == 0:
            # Integer division; '/' would yield a float in Python 3.
            group_no = (index // BIG_BUCKET_LIMIT) + 1
            current_bucket = _MkChild(
                no_path_bucket,
                '%s subgroup %d' % (NAME_NO_PATH_BUCKET, group_no))
            assert NODE_TYPE_KEY not in node or node[NODE_TYPE_KEY] == 'p'
            node[NODE_TYPE_KEY] = 'p'  # p for path
          index += 1
          symbol_size = value[NODE_SYMBOL_SIZE_KEY]
          AddSymbolIntoFileNode(current_bucket, symbol_type, symbol_name,
                                symbol_size)


def MakeChildrenDictsIntoLists(node):
  largest_list_len = 0
  if NODE_CHILDREN_KEY in node:
    largest_list_len = len(node[NODE_CHILDREN_KEY])
    child_list = []
    for child in node[NODE_CHILDREN_KEY].values():
      child_largest_list_len = MakeChildrenDictsIntoLists(child)
      if child_largest_list_len > largest_list_len:
        largest_list_len = child_largest_list_len
      child_list.append(child)
    node[NODE_CHILDREN_KEY] = child_list

  return largest_list_len


def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
  """Puts a symbol into the file path node |node|.
  Returns the number of levels added to the tree (always 2)."""

  # 'node' is the file node and the first step is to find its
  # symbol-type bucket.
  node[NODE_LAST_PATH_ELEMENT_KEY] = True
  node = _MkChild(node, symbol_type)
  assert NODE_TYPE_KEY not in node or node[NODE_TYPE_KEY] == 'b'
  node[NODE_SYMBOL_TYPE_KEY] = symbol_type
  node[NODE_TYPE_KEY] = 'b'  # b for bucket

  # 'node' is now the symbol-type bucket. Make the child entry.
  node = _MkChild(node, symbol_name)
  if NODE_CHILDREN_KEY in node:
    if node[NODE_CHILDREN_KEY]:
      logging.warning(
          'A container node was used as a symbol for %s.' % symbol_name)
    # This is going to be used as a leaf, so the child list is not needed.
    del node[NODE_CHILDREN_KEY]
  node[NODE_SYMBOL_SIZE_KEY] = symbol_size
  node[NODE_SYMBOL_TYPE_KEY] = symbol_type
  node[NODE_TYPE_KEY] = 's'  # s for symbol

  return 2  # Depth of the added subtree.


def MakeCompactTree(symbols, symbol_path_origin_dir):
  result = {
      NODE_NAME_KEY: '/',
      NODE_CHILDREN_KEY: {},
      NODE_TYPE_KEY: 'p',
      NODE_MAX_DEPTH_KEY: 0
  }
  seen_symbol_with_path = False
  cwd = os.path.abspath(os.getcwd())
  for symbol_name, symbol_type, symbol_size, file_path, _address in symbols:
    if 'vtable for ' in symbol_name:
      symbol_type = '@'  # hack to categorize these separately
    # Take a path like '/foo/bar/baz' and convert it to ['foo', 'bar', 'baz'].
    if file_path and file_path != "??":
      file_path = os.path.abspath(
          os.path.join(symbol_path_origin_dir, file_path))
      # Let the output structure be relative to $CWD if inside $CWD,
      # otherwise relative to the disk root. This is to avoid
      # unnecessary click-through levels in the output.
      if file_path.startswith(cwd + os.sep):
        file_path = file_path[len(cwd):]
      if file_path.startswith('/'):
        file_path = file_path[1:]
      seen_symbol_with_path = True
    else:
      file_path = NAME_NO_PATH_BUCKET

    path_parts = file_path.split('/')

    # Find the pre-existing node in the tree, or create it if needed.
    node = result
    depth = 0
    while len(path_parts) > 0:
      path_part = path_parts.pop(0)
      if len(path_part) == 0:
        continue
      depth += 1
      node = _MkChild(node, path_part)
      assert NODE_TYPE_KEY not in node or node[NODE_TYPE_KEY] == 'p'
      node[NODE_TYPE_KEY] = 'p'  # p for path

    depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name,
                                   symbol_size)
    result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)

  if not seen_symbol_with_path:
    logging.warning('Symbols lack paths. Data will not be structured.')

  # The (no path) bucket can be extremely large if we failed to get
  # path information. Split it into subgroups if needed.
  SplitNoPathBucket(result)

  largest_list_len = MakeChildrenDictsIntoLists(result)

  if largest_list_len > BIG_BUCKET_LIMIT:
    logging.warning('There are sections with %d nodes. '
                    'Results might be unusable.' % largest_list_len)
  return result


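# The file written here is a single JavaScript statement of the form
#   var tree_data={"n":"/","k":"p",...}
# which the report boilerplate copied into --destdir by main() (index.html
# and D3SymbolTreeMap.js) loads to render the treemap.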
def DumpCompactTree(symbols, symbol_path_origin_dir, outfile):
  tree_root = MakeCompactTree(symbols, symbol_path_origin_dir)
  with open(outfile, 'w') as out:
    out.write('var tree_data=')
    # Use separators without whitespace to get a smaller file.
    json.dump(tree_root, out, separators=(',', ':'))
  print('Writing %d bytes of JSON.' % os.path.getsize(outfile))


def MakeSourceMap(symbols):
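  """Aggregates symbol sizes and counts per source file.

  Returns a dict keyed by normalized path (or '[no path]'); for example,
  a possible entry is {'path': 'foo/bar.cc', 'symbol_count': 3, 'size': 2048}.
  """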
  sources = {}
  for _sym, _symbol_type, size, path, _address in symbols:
    key = None
    if path:
      key = os.path.normpath(path)
    else:
      key = '[no path]'
    if key not in sources:
      sources[key] = {'path': path, 'symbol_count': 0, 'size': 0}
    record = sources[key]
    record['size'] += size
    record['symbol_count'] += 1
  return sources


# Regex for parsing "nm" output. A sample line looks like this:
# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
#
# The fields are: address, size, type, name, source location.
# Regular expression explained ( see also: https://xkcd.com/208 ):
# ([0-9a-f]{8,})  The address
# [\s]+           Whitespace separator
# ([0-9a-f]{8,})  The size. From here on out it's all optional.
# [\s]+           Whitespace separator
# (\S?)           The symbol type, which is any non-whitespace char
# [\s*]           Whitespace separator
# ([^\t]*)        Symbol name, any non-tab character (spaces ok!)
# [\t]?           Tab separator
# (.*)            The location (filename[:linenum|?][ (discriminator n)])
sNmPattern = re.compile(
    r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')


class Progress():

  def __init__(self):
    self.count = 0
    self.skip_count = 0
    self.collisions = 0
    self.time_last_output = time.time()
    self.count_last_output = 0
    self.disambiguations = 0
    self.was_ambiguous = 0


def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path):
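  """Runs nm on |library|, symbolizes the addresses with addr2line in
  parallel, and writes the nm output annotated with source locations
  to |outfile|."""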
  nm_output = RunNm(library, nm_binary)
  nm_output_lines = nm_output.splitlines()
  nm_output_lines_len = len(nm_output_lines)
  address_symbol = {}
  progress = Progress()

  def map_address_symbol(symbol, addr):
    progress.count += 1
    if addr in address_symbol:
      # 'Collision between %s and %s.' % (str(symbol.name),
      #                                   str(address_symbol[addr].name))
      progress.collisions += 1
    else:
      if symbol.disambiguated:
        progress.disambiguations += 1
        if symbol.was_ambiguous:
          progress.was_ambiguous += 1

      address_symbol[addr] = symbol

    progress_output()

  def progress_output():
    progress_chunk = 100
    if progress.count % progress_chunk == 0:
      time_now = time.time()
      time_spent = time_now - progress.time_last_output
      if time_spent > 1.0:
        # Only output at most once per second.
        progress.time_last_output = time_now
        chunk_size = progress.count - progress.count_last_output
        progress.count_last_output = progress.count
        if time_spent > 0:
          speed = chunk_size / time_spent
        else:
          speed = 0
        progress_percent = (100.0 * (progress.count + progress.skip_count) /
                            nm_output_lines_len)
        disambiguation_percent = 0
        if progress.disambiguations != 0:
          disambiguation_percent = (100.0 * progress.disambiguations /
                                    progress.was_ambiguous)

        sys.stdout.write(
            '\r%.1f%%: Looked up %d symbols (%d collisions, '
            '%d disambiguations where %.1f%% succeeded)'
            ' - %.1f lookups/s.' %
            (progress_percent, progress.count, progress.collisions,
             progress.disambiguations, disambiguation_percent, speed))

  # If disambiguation was disabled, remove the source path (which, when
  # set, signals the symbolizer to enable disambiguation).
  if not disambiguate:
    src_path = None
  symbolizer = elf_symbolizer.ELFSymbolizer(
      library,
      addr2line_binary,
      map_address_symbol,
      max_concurrent_jobs=jobs,
      source_root_path=src_path)
  user_interrupted = False
  try:
    for binary_line in nm_output_lines:
      line = binary_line.decode()
      match = sNmPattern.match(line)
      if match:
        location = match.group(5)
        if not location:
          addr = int(match.group(1), 16)
          size = int(match.group(2), 16)
          if addr in address_symbol:  # Already looked up; shortcut
            # ELFSymbolizer.
            map_address_symbol(address_symbol[addr], addr)
            continue
          elif size == 0:
            # Save time by not looking up empty symbols (do they even exist?)
            print('Empty symbol: ' + line)
          else:
            symbolizer.SymbolizeAsync(addr, addr)
            continue

      progress.skip_count += 1
  except KeyboardInterrupt:
    user_interrupted = True
    print('Interrupting - killing subprocesses. Please wait.')

  try:
    symbolizer.Join()
  except KeyboardInterrupt:
    # Don't abort here, since we will be finished in a few seconds.
    user_interrupted = True
    print('Patience you must have, my young Padawan.')

  print('')

  if user_interrupted:
    print('Skipping the rest of the file mapping. '
          'Output will not be fully classified.')

  symbol_path_origin_dir = os.path.dirname(os.path.abspath(library))

  with open(outfile, 'w') as out:
    for binary_line in nm_output_lines:
      line = binary_line.decode()
      match = sNmPattern.match(line)
      if match:
        location = match.group(5)
        if not location:
          addr = int(match.group(1), 16)
          symbol = address_symbol.get(addr)
          if symbol is not None:
            path = '??'
            if symbol.source_path is not None:
              path = os.path.abspath(
                  os.path.join(symbol_path_origin_dir, symbol.source_path))
            line_number = 0
            if symbol.source_line is not None:
              line_number = symbol.source_line
            out.write('%s\t%s:%d\n' % (line, path, line_number))
            continue

      out.write('%s\n' % line)

  print('%d symbols in the results.' % len(address_symbol))


def RunNm(binary, nm_binary):
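  # -C demangles C++ symbol names; --print-size emits the size column
  # that sNmPattern expects; the sort flags order symbols largest-first.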
  cmd = [
      nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort', binary
  ]
  nm_process = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  (process_output, err_output) = nm_process.communicate()

  if nm_process.returncode != 0:
    if err_output:
      raise Exception(err_output)
    else:
      raise Exception(process_output)

  return process_output


def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, addr2line_binary,
                 nm_binary, disambiguate, src_path):
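  """Returns parsed nm symbols, first running nm/addr2line on |library|
  unless an existing nm output file is supplied via |nm_infile|."""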
  if nm_infile is None:
    if outfile is None:
      outfile = tempfile.NamedTemporaryFile(delete=False).name

    if verbose:
      print('Running parallel addr2line, dumping symbols to ' + outfile)
    RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path)

    nm_infile = outfile

  elif verbose:
    print('Using nm input from ' + nm_infile)
  with open(nm_infile, 'r') as infile:
    return list(binary_size_utils.ParseNm(infile))


PAK_RESOURCE_ID_TO_STRING = {"inited": False}


def LoadPakIdsFromResourceFile(filename):
  """Given a file name, loads everything that looks like a resource id
  into PAK_RESOURCE_ID_TO_STRING."""
  with open(filename) as resource_header:
    for line in resource_header:
      if line.startswith("#define "):
        line_data = line.split()
        if len(line_data) == 3:
          try:
            resource_number = int(line_data[2])
            resource_name = line_data[1]
            PAK_RESOURCE_ID_TO_STRING[resource_number] = resource_name
          except ValueError:
            pass


def GetReadablePakResourceName(pak_file, resource_id):
  """Pak resources have a numeric identifier, which is not helpful when
  trying to locate where the footprint comes from. This does its best to
  map the number to a usable string."""
  if not PAK_RESOURCE_ID_TO_STRING['inited']:
    # Try to find resource header files generated by grit when
    # building the pak file. We'll look for files named *resources.h
    # and lines of the type:
    #    #define MY_RESOURCE_JS 1234
    PAK_RESOURCE_ID_TO_STRING['inited'] = True
    gen_dir = os.path.join(os.path.dirname(pak_file), 'gen')
    if os.path.isdir(gen_dir):
      for dirname, _dirs, files in os.walk(gen_dir):
        for filename in files:
          if filename.endswith('resources.h'):
            LoadPakIdsFromResourceFile(os.path.join(dirname, filename))
  return PAK_RESOURCE_ID_TO_STRING.get(resource_id,
                                       'Pak Resource %d' % resource_id)


def AddPakData(symbols, pak_file):
  """Adds pseudo-symbols from a pak file."""
  pak_file = os.path.abspath(pak_file)
  with open(pak_file, 'rb') as pak:
    data = pak.read()

  PAK_FILE_VERSION = 4
  # Header: two uint32s (file version, number of entries) and one uint8
  # (encoding of text resources).
  HEADER_LENGTH = 2 * 4 + 1
  INDEX_ENTRY_SIZE = 2 + 4  # Each entry is a uint16 and a uint32.
  version, num_entries, _encoding = struct.unpack('<IIB', data[:HEADER_LENGTH])
  assert version == PAK_FILE_VERSION, (
      'Unsupported pak file version (%d) in %s. Only version %d is '
      'supported.' % (version, pak_file, PAK_FILE_VERSION))
  if num_entries > 0:
    # Read the index and data.
    data = data[HEADER_LENGTH:]
    for _ in range(num_entries):
      resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
      data = data[INDEX_ENTRY_SIZE:]
      _next_id, next_offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
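      # The pak index stores num_entries + 1 entries; the extra entry
      # marks the end of the data, so this one-entry lookahead is valid
      # even for the last resource.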
      resource_size = next_offset - offset

      symbol_name = GetReadablePakResourceName(pak_file, resource_id)
      symbol_path = pak_file
      symbol_type = 'd'  # Data. Approximation.
      symbol_size = resource_size
      # Pak resources have no address; append 0 so the tuple matches the
      # 5-field (name, type, size, path, address) format used elsewhere.
      symbols.append((symbol_name, symbol_type, symbol_size, symbol_path, 0))


def _find_in_system_path(binary):
  """Locate the full path to binary in the system path or return None
  if not found."""
  system_path = os.environ["PATH"].split(os.pathsep)
  for path in system_path:
    binary_path = os.path.join(path, binary)
    if os.path.isfile(binary_path):
      return binary_path
  return None


def CheckDebugFormatSupport(library, addr2line_binary):
  """Kills the program if debug data is in an unsupported format.

  There are two common versions of the DWARF debug formats, and since we
  are right now transitioning from DWARF2 to newer formats, it's possible
  to have a mix of tools that are not compatible. Detect that and abort
  rather than produce meaningless output."""
  tool_output = subprocess.check_output([addr2line_binary,
                                         '--version']).decode()
  version_re = re.compile(r'^GNU [^ ]+ .* (\d+)\.(\d+).*?$', re.M)
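  # Matches the first line of '--version' output, e.g.
  # 'GNU addr2line (GNU Binutils) 2.38' -> ('2', '38').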
  parsed_output = version_re.match(tool_output)
  major = int(parsed_output.group(1))
  minor = int(parsed_output.group(2))
  supports_dwarf4 = major > 2 or major == 2 and minor > 22

  if supports_dwarf4:
    return

  print('Checking version of debug information in %s.' % library)
  debug_info = subprocess.check_output(
      ['readelf', '--debug-dump=info', '--dwarf-depth=1', library]).decode()
  dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
  parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
  version = int(parsed_dwarf_format_output.group(1))
  if version > 2:
    print(
        'The supplied tools only support DWARF2 debug data but the binary\n' +
        'uses DWARF%d. Update the tools or compile the binary\n' % version +
        'with -gdwarf-2.')
    sys.exit(1)


def main():
  usage = """%prog [options]

  Runs a spatial analysis on a given library, looking up the source locations
  of its symbols and calculating how much space each directory, source file,
  and symbol is taking. The result is a report that can be used to pinpoint
  the sources of large portions of the binary.

  Under normal circumstances, you only need to pass two arguments:

  %prog --library /path/to/library --destdir /path/to/output

  In this mode, the program will dump the symbols from the specified library
  and map those symbols back to source locations, producing a web-based
  report in the specified output directory.

  Other options are available via '--help'.
  """
  parser = optparse.OptionParser(usage=usage)
  parser.add_option(
      '--nm-in',
      metavar='PATH',
      help='if specified, use nm input from <path> instead of '
      'generating it. Note that source locations should be '
      'present in the file; i.e., no addr2line symbol lookups '
      'will be performed when this option is specified. '
      'Mutually exclusive with --library.')
  parser.add_option(
      '--destdir',
      metavar='PATH',
      help='write output to the specified directory. An HTML '
      'report is generated here along with supporting files; '
      'any existing report will be overwritten.')
  parser.add_option(
      '--library',
      metavar='PATH',
      help='if specified, process symbols in the library at '
      'the specified path. Mutually exclusive with --nm-in.')
  parser.add_option(
      '--pak',
      metavar='PATH',
      help='if specified, includes the contents of the '
      'specified *.pak file in the output.')
  parser.add_option(
      '--nm-binary',
      help='use the specified nm binary to analyze library. '
      'This is to be used when the nm in the path is not for '
      'the right architecture or of the right version.')
  parser.add_option(
      '--addr2line-binary',
      help='use the specified addr2line binary to analyze '
      'library. This is to be used when the addr2line in '
      'the path is not for the right architecture or '
      'of the right version.')
  parser.add_option(
      '--jobs',
      type='int',
      help='number of jobs to use for the parallel '
      'addr2line processing pool; defaults to 1. More '
      'jobs greatly improve throughput but eat RAM like '
      'popcorn, and take several gigabytes each. Start low '
      'and ramp this number up until your machine begins to '
      'struggle with RAM. '
      'This argument is only valid when using --library.')
  parser.add_option(
      '-v',
      '--verbose',
      dest='verbose',
      action='store_true',
      help='be verbose, printing lots of status information.')
  parser.add_option(
      '--nm-out',
      metavar='PATH',
      help='(deprecated) No-op. nm.out is stored in --destdir.')
  parser.add_option(
      '--no-nm-out',
      action='store_true',
      help='do not keep the nm output file. This file is useful '
      'if you want to see the fully processed nm output after '
      'the symbols have been mapped to source locations, or if '
      'you plan to run explain_binary_size_delta.py. By default '
      'the file \'nm.out\' is placed alongside the generated '
      'report. The nm.out file is only created when using '
      '--library.')
  parser.add_option(
      '--disable-disambiguation',
      action='store_true',
      help='disables the disambiguation process altogether. '
      'NOTE: depending on your toolchain, this may produce '
      'output with some symbols at the top layer if addr2line '
      'could not get the entire source path.')
  parser.add_option(
      '--source-path',
      default='./',
      help='the path to the source code of the output binary; '
      'defaults to the current directory. Used in the '
      'disambiguation process.')
  parser.add_option(
      '--check-support',
      dest='check_support',
      default=True,
      action='store_true',
      help='check that the version of the available tools is sufficient to '
      'read the data from the library given by --library.')
  parser.add_option(
      '--no-check-support', action='store_false', dest='check_support')
  opts, _args = parser.parse_args()

  if ((not opts.library) and
      (not opts.nm_in)) or (opts.library and opts.nm_in):
    parser.error('exactly one of --library or --nm-in is required')
  if opts.nm_out:
    print('WARNING: --nm-out is deprecated and has no effect.',
          file=sys.stderr)
  if opts.nm_in:
    if opts.jobs:
      print('WARNING: --jobs has no effect when used with --nm-in.',
            file=sys.stderr)
  if not opts.destdir:
    parser.error('--destdir is a required argument')
  if not opts.jobs:
    # Use the number of processors but cap between 2 and 4 since raw
    # CPU power isn't the limiting factor. It's I/O limited, memory
    # bus limited and available-memory-limited. Too many processes and
    # the computer will run out of memory and it will be slow.
    opts.jobs = max(2, min(4, multiprocessing.cpu_count()))

  if opts.addr2line_binary:
    assert os.path.isfile(opts.addr2line_binary)
    addr2line_binary = opts.addr2line_binary
  else:
    addr2line_binary = _find_in_system_path('addr2line')
    assert addr2line_binary, 'Unable to find addr2line in the path. '\
        'Use --addr2line-binary to specify location.'

  if opts.nm_binary:
    assert os.path.isfile(opts.nm_binary)
    nm_binary = opts.nm_binary
  else:
    nm_binary = _find_in_system_path('nm')
    assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
        'to specify location.'

  if opts.pak:
    assert os.path.isfile(opts.pak), 'Could not find %s' % opts.pak

  print('addr2line: %s' % addr2line_binary)
  print('nm: %s' % nm_binary)

  if opts.library and opts.check_support:
    CheckDebugFormatSupport(opts.library, addr2line_binary)

  # Prepare output directory and report guts.
  if not os.path.exists(opts.destdir):
    os.makedirs(opts.destdir, 0o755)
  nm_out = os.path.join(opts.destdir, 'nm.out')
  if opts.no_nm_out:
    nm_out = None

  # Copy report boilerplate into the output directory. This also proves that
  # the output directory is safe for writing, so there should be no problems
  # writing the nm.out file later.
  data_js_file_name = os.path.join(opts.destdir, 'data.js')
  d3_out = os.path.join(opts.destdir, 'd3')
  if not os.path.exists(d3_out):
    os.makedirs(d3_out, 0o755)
  d3_src = os.path.join(os.path.dirname(__file__), '..', '..', 'd3', 'src')
  template_src = os.path.join(os.path.dirname(__file__), 'template')
  shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out)
  shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out)
  shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir)
  shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir)

  # Run nm and/or addr2line to gather the data.
  symbols = GetNmSymbols(opts.nm_in, nm_out, opts.library, opts.jobs,
                         opts.verbose is True, addr2line_binary, nm_binary,
                         opts.disable_disambiguation is None,
                         opts.source_path)

  # Post-processing.
  if opts.pak:
    AddPakData(symbols, opts.pak)
  if opts.library:
    symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library))
  else:
    # Just a guess. Hopefully all paths in the input file are absolute.
    symbol_path_origin_dir = os.path.abspath(os.getcwd())
  # Dump JSON for the HTML report.
  DumpCompactTree(symbols, symbol_path_origin_dir, data_js_file_name)
  print('Report saved to ' + opts.destdir + '/index.html')


if __name__ == '__main__':
  sys.exit(main())