elf.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/elf.h"
6
7#include "platform/elf.h"
9#include "vm/cpu.h"
10#include "vm/dwarf.h"
11#include "vm/hash_map.h"
12#include "vm/image_snapshot.h"
13#include "vm/stack_frame.h"
14#include "vm/thread.h"
16#include "vm/zone_text_buffer.h"
17
18namespace dart {
19
20#if defined(DART_PRECOMPILER)
21
22// A wrapper around BaseWriteStream that provides methods useful for
23// writing ELF files (e.g., using ELF definitions of data sizes).
24class ElfWriteStream : public ValueObject {
25 public:
26 explicit ElfWriteStream(BaseWriteStream* stream, const Elf& elf)
27 : stream_(ASSERT_NOTNULL(stream)),
28 elf_(elf),
29 start_(stream_->Position()) {
30 // So that we can use the underlying stream's Align, as all alignments
31 // will be less than or equal to this alignment.
32 ASSERT(Utils::IsAligned(start_, Elf::kPageSize));
33 }
34
35 // Subclasses of Section may need to query the Elf object during Write(),
36 // so we store it in the ElfWriteStream for easy access.
37 const Elf& elf() const { return elf_; }
38
39 // We return positions in terms of the ELF content that has been written,
40 // ignoring any previous content on the stream.
41 intptr_t Position() const { return stream_->Position() - start_; }
42 void Align(const intptr_t alignment) {
43 ASSERT(Utils::IsPowerOfTwo(alignment));
44 ASSERT(alignment <= Elf::kPageSize);
45 stream_->Align(alignment);
46 }
47 void WriteBytes(const uint8_t* b, intptr_t size) {
48 stream_->WriteBytes(b, size);
49 }
50 void WriteByte(uint8_t value) { stream_->WriteByte(value); }
51 void WriteHalf(uint16_t value) { stream_->WriteFixed(value); }
52 void WriteWord(uint32_t value) { stream_->WriteFixed(value); }
53 void WriteAddr(compiler::target::uword value) { stream_->WriteFixed(value); }
54 void WriteOff(compiler::target::uword value) { stream_->WriteFixed(value); }
55#if defined(TARGET_ARCH_IS_64_BIT)
56 void WriteXWord(uint64_t value) { stream_->WriteFixed(value); }
57#endif
58
59 private:
60 BaseWriteStream* const stream_;
61 const Elf& elf_;
62 const intptr_t start_;
63};
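
For reference, the Write* helpers above map onto the fixed-width ELF scalar types defined by the System V gABI. A small standalone sketch of that correspondence (the typedefs are shown for illustration, not taken from platform/elf.h):

#include <cstdint>

// Standard ELF scalar widths (per the System V gABI), shown for reference:
typedef uint16_t Elf32_Half, Elf64_Half;   // WriteHalf
typedef uint32_t Elf32_Word, Elf64_Word;   // WriteWord
typedef uint32_t Elf32_Addr, Elf32_Off;    // WriteAddr / WriteOff, 32-bit targets
typedef uint64_t Elf64_Addr, Elf64_Off;    // WriteAddr / WriteOff, 64-bit targets
typedef uint64_t Elf64_Xword;              // WriteXWord, 64-bit targets only

static_assert(sizeof(Elf32_Half) == 2 && sizeof(Elf64_Xword) == 8,
              "fixed by the ELF specification");

int main() { return 0; }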
64
65static constexpr intptr_t kLinearInitValue = -1;
66
67#define DEFINE_LINEAR_FIELD_METHODS(name) \
68 intptr_t name() const { \
69 ASSERT(name##_ != kLinearInitValue); \
70 return name##_; \
71 } \
72 bool name##_is_set() const { \
73 return name##_ != kLinearInitValue; \
74 } \
75 void set_##name(intptr_t value) { \
76 ASSERT(value != kLinearInitValue); \
77 ASSERT_EQUAL(name##_, kLinearInitValue); \
78 name##_ = value; \
79 }
80
81#define DEFINE_LINEAR_FIELD(name) intptr_t name##_ = kLinearInitValue;
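
Roughly, these two macros expand to the following for a field named name, assuming kLinearInitValue == -1; this is an illustrative sketch that uses plain assert in place of the VM's ASSERT/ASSERT_EQUAL macros:

#include <cassert>
#include <cstdint>

class LinearFieldExample {
 public:
  // DEFINE_LINEAR_FIELD_METHODS(name):
  intptr_t name() const {
    assert(name_ != -1);   // Must have been set before reading.
    return name_;
  }
  bool name_is_set() const { return name_ != -1; }
  void set_name(intptr_t value) {
    assert(value != -1);   // The sentinel is never a legal value.
    assert(name_ == -1);   // "Linear": the field is set exactly once.
    name_ = value;
  }

 private:
  // DEFINE_LINEAR_FIELD(name):
  intptr_t name_ = -1;     // kLinearInitValue
};

int main() {
  LinearFieldExample example;
  example.set_name(42);
  return example.name() == 42 ? 0 : 1;
}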
82
83// We only allow for dynamic casting to a subset of section types, since
84// these are the only ones we need to distinguish at runtime.
85#define FOR_EACH_SECTION_TYPE(V) \
86 V(ReservedSection) \
87 V(SymbolTable) \
88 V(DynamicTable) \
89 V(BitsContainer) \
90 V(TextSection) V(DataSection) V(BssSection) V(PseudoSection) V(SectionTable)
91#define DEFINE_TYPE_CHECK_FOR(Type) \
92 bool Is##Type() const { \
93 return true; \
94 }
95
96#define DECLARE_SECTION_TYPE_CLASS(Type) class Type;
97FOR_EACH_SECTION_TYPE(DECLARE_SECTION_TYPE_CLASS)
98#undef DECLARE_SECTION_TYPE_CLASS
99
100class BitsContainer;
101class Segment;
102
103// Align note sections and segments to 4 byte boundaries.
104static constexpr intptr_t kNoteAlignment = 4;
105
106class Section : public ZoneAllocated {
107 public:
108 Section(elf::SectionHeaderType t,
109 bool allocate,
110 bool executable,
111 bool writable,
112 intptr_t align = compiler::target::kWordSize)
113 : type(t),
114 flags(EncodeFlags(allocate, executable, writable)),
115 alignment(align),
116 // Non-allocated sections never have a memory offset, here represented by 0.
117 memory_offset_(allocate ? kLinearInitValue : 0) {
118 // Only SHT_NULL sections (namely, the reserved section) are allowed to have
119 // an alignment of 0 (as the written section header entry for the reserved
120 // section must be all 0s).
121 ASSERT(alignment > 0 || type == elf::SectionHeaderType::SHT_NULL);
122 // Non-zero alignments must be a power of 2.
123 ASSERT(alignment == 0 || Utils::IsPowerOfTwo(alignment));
124 }
125
126 virtual ~Section() {}
127
128 // Linker view.
129
130 const elf::SectionHeaderType type;
131 const intptr_t flags;
132 const intptr_t alignment;
133
134 // These are fields that are not set for most kinds of sections, so we
135 // set them to a reasonable default.
136 intptr_t link = elf::SHN_UNDEF;
137 intptr_t info = 0;
138 intptr_t entry_size = 0;
139 // This field is set for all sections, but due to reordering, we may set it
140 // more than once.
141 intptr_t index = elf::SHN_UNDEF;
142
143#define FOR_EACH_SECTION_LINEAR_FIELD(M) \
144 M(name) \
145 M(file_offset)
146
147 FOR_EACH_SECTION_LINEAR_FIELD(DEFINE_LINEAR_FIELD_METHODS);
148
149 // Only needs to be overridden for sections that may not be allocated or
150 // for allocated sections where MemorySize() and FileSize() may differ.
151 virtual intptr_t FileSize() const {
152 if (!IsAllocated()) {
153 UNREACHABLE();
154 }
155 return MemorySize();
156 }
157
158 // Loader view.
159
160#define FOR_EACH_SEGMENT_LINEAR_FIELD(M) M(memory_offset)
161
162 FOR_EACH_SEGMENT_LINEAR_FIELD(DEFINE_LINEAR_FIELD_METHODS);
163
164 // Only needs to be overridden for sections that may be allocated.
165 virtual intptr_t MemorySize() const {
166 if (IsAllocated()) {
167 UNREACHABLE();
168 }
169 return 0;
170 }
171
172 // Other methods.
173
174 bool IsAllocated() const {
175 return (flags & elf::SHF_ALLOC) == elf::SHF_ALLOC;
176 }
177 bool IsExecutable() const {
178 return (flags & elf::SHF_EXECINSTR) == elf::SHF_EXECINSTR;
179 }
180 bool IsWritable() const { return (flags & elf::SHF_WRITE) == elf::SHF_WRITE; }
181
182 bool HasBits() const { return type != elf::SectionHeaderType::SHT_NOBITS; }
183
184 // Returns whether the section has been finalized, i.e., its size can no longer change.
185 bool HasBeenFinalized() const {
186 // Sections can grow or shrink up until Elf::ComputeOffsets has been run,
187 // which sets the file (and memory, if applicable) offsets.
188 return file_offset_is_set();
189 }
190
191#define DEFINE_BASE_TYPE_CHECKS(Type) \
192 Type* As##Type() { \
193 return Is##Type() ? reinterpret_cast<Type*>(this) : nullptr; \
194 } \
195 const Type* As##Type() const { \
196 return const_cast<Type*>(const_cast<Section*>(this)->As##Type()); \
197 } \
198 virtual bool Is##Type() const { return false; }
199
200 FOR_EACH_SECTION_TYPE(DEFINE_BASE_TYPE_CHECKS)
201#undef DEFINE_BASE_TYPE_CHECKS
202
203 // Only some sections support merging.
204 virtual bool CanMergeWith(const Section& other) const { return false; }
205 virtual void Merge(const Section& other) { UNREACHABLE(); }
206
207 // Writes the file contents of the section.
208 virtual void Write(ElfWriteStream* stream) const { UNREACHABLE(); }
209
210 virtual void WriteSectionHeader(ElfWriteStream* stream) const {
211#if defined(TARGET_ARCH_IS_32_BIT)
212 stream->WriteWord(name());
213 stream->WriteWord(static_cast<uint32_t>(type));
214 stream->WriteWord(flags);
215 stream->WriteAddr(memory_offset());
216 stream->WriteOff(file_offset());
217 stream->WriteWord(FileSize());
218 stream->WriteWord(link);
219 stream->WriteWord(info);
220 stream->WriteWord(alignment);
221 stream->WriteWord(entry_size);
222#else
223 stream->WriteWord(name());
224 stream->WriteWord(static_cast<uint32_t>(type));
225 stream->WriteXWord(flags);
226 stream->WriteAddr(memory_offset());
227 stream->WriteOff(file_offset());
228 stream->WriteXWord(FileSize());
229 stream->WriteWord(link);
230 stream->WriteWord(info);
231 stream->WriteXWord(alignment);
232 stream->WriteXWord(entry_size);
233#endif
234 }
235
236 private:
237 static intptr_t EncodeFlags(bool allocate, bool executable, bool writable) {
238 // We currently don't allow sections that are both executable and writable.
239 ASSERT(!executable || !writable);
240 intptr_t flags = 0;
241 if (allocate) flags |= elf::SHF_ALLOC;
242 if (executable) flags |= elf::SHF_EXECINSTR;
243 if (writable) flags |= elf::SHF_WRITE;
244 return flags;
245 }
246
247 FOR_EACH_SECTION_LINEAR_FIELD(DEFINE_LINEAR_FIELD);
248 FOR_EACH_SEGMENT_LINEAR_FIELD(DEFINE_LINEAR_FIELD);
249
250#undef FOR_EACH_SECTION_LINEAR_FIELD
251#undef FOR_EACH_SEGMENT_LINEAR_FIELD
252};
253
254#undef DEFINE_LINEAR_FIELD
255#undef DEFINE_LINEAR_FIELD_METHODS
256
257class Segment : public ZoneAllocated {
258 public:
259 Segment(Zone* zone,
260 Section* initial_section,
261 elf::ProgramHeaderType segment_type)
262 : type(segment_type),
263 // Flags for the segment are the same as the initial section.
264 flags(EncodeFlags(ASSERT_NOTNULL(initial_section)->IsExecutable(),
265 ASSERT_NOTNULL(initial_section)->IsWritable())),
266 sections_(zone, 0) {
267 // Unlike sections, we don't have a reserved segment with the null type,
268 // so we should never pass this value.
269 ASSERT(type != elf::ProgramHeaderType::PT_NULL);
270 // All segments should have at least one section.
271 ASSERT(initial_section != nullptr);
272 sections_.Add(initial_section);
273 }
274
275 virtual ~Segment() {}
276
277 const GrowableArray<Section*>& sections() const { return sections_; }
278
279 intptr_t Alignment() const {
280 switch (type) {
281 case elf::ProgramHeaderType::PT_LOAD:
282 return Elf::kPageSize;
283 case elf::ProgramHeaderType::PT_PHDR:
284 case elf::ProgramHeaderType::PT_DYNAMIC:
285 return compiler::target::kWordSize;
286 case elf::ProgramHeaderType::PT_NOTE:
287 return kNoteAlignment;
288 case elf::ProgramHeaderType::PT_GNU_STACK:
289 return 1;
290 default:
291 UNREACHABLE();
292 return 0;
293 }
294 }
295
296 bool IsExecutable() const { return (flags & elf::PF_X) == elf::PF_X; }
297 bool IsWritable() const { return (flags & elf::PF_W) == elf::PF_W; }
298
299 void WriteProgramHeader(ElfWriteStream* stream) const {
300#if defined(TARGET_ARCH_IS_32_BIT)
301 stream->WriteWord(static_cast<uint32_t>(type));
302 stream->WriteOff(FileOffset());
303 stream->WriteAddr(MemoryOffset()); // Virtual address.
304 stream->WriteAddr(MemoryOffset()); // Physical address.
305 stream->WriteWord(FileSize());
306 stream->WriteWord(MemorySize());
307 stream->WriteWord(flags);
308 stream->WriteWord(Alignment());
309#else
310 stream->WriteWord(static_cast<uint32_t>(type));
311 stream->WriteWord(flags);
312 stream->WriteOff(FileOffset());
313 stream->WriteAddr(MemoryOffset()); // Virtual address.
314 stream->WriteAddr(MemoryOffset()); // Physical address.
315 stream->WriteXWord(FileSize());
316 stream->WriteXWord(MemorySize());
317 stream->WriteXWord(Alignment());
318#endif
319 }
320
321 // Adds a given section to the end of this segment. Returns whether the
322 // section was successfully added.
323 bool Add(Section* section) {
324 ASSERT(section != nullptr);
325 // We can't add if memory offsets have already been calculated.
326 ASSERT(!section->memory_offset_is_set());
327 // We only add additional sections to load segments.
328 ASSERT(type == elf::ProgramHeaderType::PT_LOAD);
329 // We only add sections with the same executable and writable bits.
330 if (IsExecutable() != section->IsExecutable() ||
331 IsWritable() != section->IsWritable()) {
332 return false;
333 }
334 sections_.Add(section);
335 return true;
336 }
337
338 intptr_t FileOffset() const { return sections_[0]->file_offset(); }
339
340 intptr_t FileSize() const {
341 auto const last = sections_.Last();
342 const intptr_t end = last->file_offset() + last->FileSize();
343 return end - FileOffset();
344 }
345
346 intptr_t MemoryOffset() const { return sections_[0]->memory_offset(); }
347
348 intptr_t MemorySize() const {
349 auto const last = sections_.Last();
350 const intptr_t end = last->memory_offset() + last->MemorySize();
351 return end - MemoryOffset();
352 }
353
354 intptr_t MemoryEnd() const { return MemoryOffset() + MemorySize(); }
355
356 const elf::ProgramHeaderType type;
357 const intptr_t flags;
358
359 private:
360 static intptr_t EncodeFlags(bool executable, bool writable) {
361 intptr_t flags = elf::PF_R;
362 if (executable) flags |= elf::PF_X;
363 if (writable) flags |= elf::PF_W;
364 return flags;
365 }
366
367 GrowableArray<Section*> sections_;
368};
369
370// Represents the first entry in the section table, which should only contain
371// zero values and does not correspond to a memory segment.
372class ReservedSection : public Section {
373 public:
374 ReservedSection()
375 : Section(elf::SectionHeaderType::SHT_NULL,
376 /*allocate=*/false,
377 /*executable=*/false,
378 /*writable=*/false,
379 /*alignment=*/0) {
380 set_file_offset(0);
381 }
382
383 DEFINE_TYPE_CHECK_FOR(ReservedSection);
384 intptr_t FileSize() const { return 0; }
385};
386
387// Specifies the permissions used for the stack, notably whether the stack
388// should be executable. If absent, the stack will be executable.
389class GnuStackSection : public Section {
390 public:
391 GnuStackSection()
392 : Section(elf::SectionHeaderType::SHT_NULL,
393 /*allocate=*/false,
394 /*executable=*/false,
395 /*writable=*/true) {
396 set_file_offset(0);
397 }
398
399 intptr_t FileSize() const { return 0; }
400};
401
402class StringTable : public Section {
403 public:
404 explicit StringTable(Zone* zone, bool allocate)
405 : Section(elf::SectionHeaderType::SHT_STRTAB,
406 allocate,
407 /*executable=*/false,
408 /*writable=*/false),
409 dynamic_(allocate),
410 text_(zone, 128),
411 text_indices_(zone) {
412 Add("");
413 }
414
415 intptr_t FileSize() const { return text_.length(); }
416 intptr_t MemorySize() const { return dynamic_ ? FileSize() : 0; }
417
418 void Write(ElfWriteStream* stream) const {
419 stream->WriteBytes(reinterpret_cast<const uint8_t*>(text_.buffer()),
420 text_.length());
421 }
422
423 intptr_t Add(const char* str) {
424 ASSERT(str != nullptr);
425 if (auto const kv = text_indices_.Lookup(str)) {
426 return kv->value;
427 }
428 intptr_t offset = text_.length();
429 text_.AddString(str);
430 text_.AddChar('\0');
431 text_indices_.Insert({str, offset});
432 return offset;
433 }
434
435 const char* At(intptr_t index) const {
436 if (index >= text_.length()) return nullptr;
437 return text_.buffer() + index;
438 }
439
440 static constexpr intptr_t kNotIndexed = CStringIntMapKeyValueTrait::kNoValue;
441
442 // Returns the index of |str| if it is present in the string table
443 // and |kNotIndexed| otherwise.
444 intptr_t Lookup(const char* str) const {
445 return text_indices_.LookupValue(str);
446 }
447
448 const bool dynamic_;
449 ZoneTextBuffer text_;
450 CStringIntMap text_indices_;
451};
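
A minimal standalone sketch of the string-table behavior above: entries are NUL-terminated and concatenated, offset 0 always holds the empty string, and adding a duplicate returns the existing offset. It substitutes std::string and std::map for the VM's ZoneTextBuffer and CStringIntMap:

#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

class StringTableSketch {
 public:
  StringTableSketch() { Add(""); }  // Offset 0 is always the empty string.

  size_t Add(const std::string& s) {
    auto it = indices_.find(s);
    if (it != indices_.end()) return it->second;  // Deduplicate.
    size_t offset = text_.size();
    text_ += s;
    text_ += '\0';
    indices_[s] = offset;
    return offset;
  }

  const char* At(size_t offset) const { return text_.c_str() + offset; }

 private:
  std::string text_;
  std::map<std::string, size_t> indices_;
};

int main() {
  StringTableSketch table;
  size_t a = table.Add(".text");
  size_t b = table.Add(".text");  // Same offset as the first add.
  std::printf("%zu %zu %s\n", a, b, table.At(a));  // Prints "1 1 .text".
  return 0;
}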
452
453class SymbolTable : public Section {
454 public:
455 SymbolTable(Zone* zone, StringTable* table, bool dynamic)
456 : Section(dynamic ? elf::SectionHeaderType::SHT_DYNSYM
457 : elf::SectionHeaderType::SHT_SYMTAB,
458 dynamic,
459 /*executable=*/false,
460 /*writable=*/false),
461 zone_(zone),
462 table_(table),
463 dynamic_(dynamic),
464 symbols_(zone, 1),
465 by_label_index_(zone) {
466 link = table_->index;
467 entry_size = sizeof(elf::Symbol);
468 // The first symbol table entry is reserved and must be all zeros.
469 // (String tables always have the empty string at the 0th index.)
470 ASSERT_EQUAL(table_->Lookup(""), 0);
471 symbols_.Add({/*name_index=*/0, elf::STB_LOCAL, elf::STT_NOTYPE, /*size=*/0,
472 elf::SHN_UNDEF, /*offset=*/0, /*label=*/0});
473 // The info field on a symbol table section holds the index of the first
474 // non-local symbol, so since there are none yet, it points past the single
475 // symbol we do have.
476 info = 1;
477 }
478
479 DEFINE_TYPE_CHECK_FOR(SymbolTable)
480 const StringTable& strtab() const { return *table_; }
481 intptr_t FileSize() const { return symbols_.length() * entry_size; }
482 intptr_t MemorySize() const { return dynamic_ ? FileSize() : 0; }
483
484 struct Symbol {
485 void Write(ElfWriteStream* stream) const {
486 const intptr_t start = stream->Position();
487 ASSERT(section_index == elf::SHN_UNDEF || offset > 0);
488 stream->WriteWord(name_index);
489#if defined(TARGET_ARCH_IS_32_BIT)
490 stream->WriteAddr(offset);
491 stream->WriteWord(size);
492 stream->WriteByte(elf::SymbolInfo(binding, type));
493 stream->WriteByte(0);
494 stream->WriteHalf(section_index);
495#else
496 stream->WriteByte(elf::SymbolInfo(binding, type));
497 stream->WriteByte(0);
498 stream->WriteHalf(section_index);
499 stream->WriteAddr(offset);
500 stream->WriteXWord(size);
501#endif
502 ASSERT_EQUAL(stream->Position() - start, sizeof(elf::Symbol));
503 }
504
505 intptr_t name_index;
506 intptr_t binding;
507 intptr_t type;
508 intptr_t size;
509 // Must be updated whenever sections are reordered.
510 intptr_t section_index;
511 // Initialized to the section-relative offset, must be updated to the
512 // snapshot-relative offset before writing.
513 intptr_t offset;
514 // Only used within the VM and not written as part of the ELF file. If 0,
515 // this symbol cannot be looked up via label.
516 intptr_t label;
517
518 private:
520 };
521
522 const GrowableArray<Symbol>& symbols() const { return symbols_; }
523
524 void Initialize(const GrowableArray<Section*>& sections);
525
526 void Write(ElfWriteStream* stream) const {
527 for (const auto& symbol : symbols_) {
528 const intptr_t start = stream->Position();
529 symbol.Write(stream);
530 ASSERT_EQUAL(stream->Position() - start, entry_size);
531 }
532 }
533
534 void AddSymbol(const char* name,
535 intptr_t binding,
536 intptr_t type,
537 intptr_t size,
538 intptr_t index,
539 intptr_t offset,
540 intptr_t label) {
541 ASSERT(label > 0);
542 ASSERT(!table_->HasBeenFinalized());
543 auto const name_index = table_->Add(name);
544 ASSERT(name_index != 0);
545 const intptr_t new_index = symbols_.length();
546 symbols_.Add({name_index, binding, type, size, index, offset, label});
547 by_label_index_.Insert(label, new_index);
548 // The info field on a symbol table section holds the index of the first
549 // non-local symbol, so that local symbols can be skipped if desired. Thus,
550 // we need to make sure local symbols are before any non-local ones.
551 if (binding == elf::STB_LOCAL) {
552 if (info != new_index) {
553 // There are non-local symbols, as otherwise [info] would be the
554 // index of the new symbol. Since the order doesn't otherwise matter,
555 // swap the new local symbol with the value at index [info], so when
556 // [info] is incremented it will point just past the new local symbol.
557 ASSERT(symbols_[info].binding != elf::STB_LOCAL);
558 symbols_.Swap(info, new_index);
559 // Since by_label_index has indices into symbols_, we need to update it.
560 by_label_index_.Update({symbols_[info].label, info});
561 by_label_index_.Update({symbols_[new_index].label, new_index});
562 }
563 info += 1;
564 }
565 }
566
567 void UpdateSectionIndices(const GrowableArray<intptr_t>& index_map) {
568#if defined(DEBUG)
569 const intptr_t map_size = index_map.length();
570 // The first entry must be 0 so that symbols with index SHN_UNDEF, like
571 // the initial reserved symbol, are unchanged.
572 ASSERT_EQUAL(index_map[0], 0);
573 for (intptr_t i = 1; i < map_size; i++) {
574 ASSERT(index_map[i] != 0);
575 ASSERT(index_map[i] < map_size);
576 }
577#endif
578 for (auto& symbol : symbols_) {
579 DEBUG_ASSERT(symbol.section_index < map_size);
580 symbol.section_index = index_map[symbol.section_index];
581 }
582 }
583
584 void Finalize(const GrowableArray<intptr_t>& address_map) {
585#if defined(DEBUG)
586 const intptr_t map_size = address_map.length();
587 // The first entry must be 0 so that symbols with index SHN_UNDEF, like
588 // the initial reserved symbol, are unchanged.
589 ASSERT_EQUAL(address_map[0], 0);
590 for (intptr_t i = 1; i < map_size; i++) {
591 // No section begins at the start of the snapshot.
592 ASSERT(address_map[i] != 0);
593 }
594#endif
595 for (auto& symbol : symbols_) {
596 DEBUG_ASSERT(symbol.section_index < map_size);
597 symbol.offset += address_map[symbol.section_index];
598 }
599 }
600
601 const Symbol* FindUid(intptr_t label) const {
602 ASSERT(label > 0);
603 const intptr_t symbols_index = by_label_index_.Lookup(label);
604 if (symbols_index == 0) return nullptr; // Not found.
605 return &symbols_[symbols_index];
606 }
607
608 private:
609 Zone* const zone_;
610 StringTable* const table_;
611 const bool dynamic_;
612 GrowableArray<Symbol> symbols_;
613 // Maps positive symbol labels to indexes in symbols_. No entry for the
614 // reserved symbol, which has index 0, the same as the IntMap's kNoValue.
615 IntMap<intptr_t> by_label_index_;
616};
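
A simplified sketch of the ordering invariant AddSymbol maintains: local symbols occupy a prefix of the array and the table's info field (written as sh_info) always points just past the last local symbol. The label-to-index bookkeeping is omitted and the names are made up:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct Sym {
  const char* name;
  bool is_local;
};

// Keeps local symbols before global ones; first_global mirrors sh_info,
// the index of the first non-local symbol.
void AddSymbol(std::vector<Sym>* symbols, size_t* first_global, Sym sym) {
  size_t new_index = symbols->size();
  symbols->push_back(sym);
  if (!sym.is_local) return;
  if (*first_global != new_index) {
    // A global symbol currently sits at the boundary; swap it to the end so
    // the new local symbol joins the local prefix.
    assert(!(*symbols)[*first_global].is_local);
    std::swap((*symbols)[*first_global], (*symbols)[new_index]);
  }
  *first_global += 1;
}

int main() {
  std::vector<Sym> symbols = {{"", true}};  // Reserved symbol at index 0.
  size_t first_global = 1;
  AddSymbol(&symbols, &first_global, {"global_a", false});
  AddSymbol(&symbols, &first_global, {"local_b", true});
  for (size_t i = 0; i < symbols.size(); i++) {
    std::printf("%zu: %s (%s)\n", i, symbols[i].name,
                symbols[i].is_local ? "local" : "global");
  }
  std::printf("sh_info = %zu\n", first_global);  // 2: locals end before here.
  return 0;
}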
617
618class SymbolHashTable : public Section {
619 public:
620 SymbolHashTable(Zone* zone, SymbolTable* symtab)
621 : Section(elf::SectionHeaderType::SHT_HASH,
622 /*allocate=*/true,
623 /*executable=*/false,
624 /*writable=*/false),
625 buckets_(zone, 0),
626 chains_(zone, 0) {
627 link = symtab->index;
628 entry_size = sizeof(int32_t);
629
630 const auto& symbols = symtab->symbols();
631 const intptr_t num_symbols = symbols.length();
632 buckets_.FillWith(elf::STN_UNDEF, 0, num_symbols);
633 chains_.FillWith(elf::STN_UNDEF, 0, num_symbols);
634
635 for (intptr_t i = 1; i < num_symbols; i++) {
636 const auto& symbol = symbols[i];
637 uint32_t hash = HashSymbolName(symtab->strtab().At(symbol.name_index));
638 uint32_t probe = hash % num_symbols;
639 chains_[i] = buckets_[probe]; // next = head
640 buckets_[probe] = i; // head = symbol
641 }
642 }
643
644 intptr_t MemorySize() const {
645 return entry_size * (buckets_.length() + chains_.length() + 2);
646 }
647
648 void Write(ElfWriteStream* stream) const {
649 stream->WriteWord(buckets_.length());
650 stream->WriteWord(chains_.length());
651 for (const int32_t bucket : buckets_) {
652 stream->WriteWord(bucket);
653 }
654 for (const int32_t chain : chains_) {
655 stream->WriteWord(chain);
656 }
657 }
658
659 static uint32_t HashSymbolName(const void* p) {
660 auto* name = reinterpret_cast<const uint8_t*>(p);
661 uint32_t h = 0;
662 while (*name != '\0') {
663 h = (h << 4) + *name++;
664 uint32_t g = h & 0xf0000000;
665 h ^= g;
666 h ^= g >> 24;
667 }
668 return h;
669 }
670
671 private:
672 GrowableArray<int32_t> buckets_; // "Head"
673 GrowableArray<int32_t> chains_; // "Next"
674};
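
HashSymbolName is the standard SysV ELF hash, and the constructor above builds the classic DT_HASH layout with nbucket == nchain == the number of symbols. A standalone sketch of building and searching such a table (the non-reserved symbol names are invented for the example):

#include <cstdint>
#include <cstring>
#include <vector>

// Standard SysV ELF hash, equivalent to SymbolHashTable::HashSymbolName above.
uint32_t ElfHash(const char* name) {
  uint32_t h = 0;
  while (*name != '\0') {
    h = (h << 4) + static_cast<uint8_t>(*name++);
    const uint32_t g = h & 0xf0000000;
    h ^= g;
    h ^= g >> 24;
  }
  return h;
}

// Consumer-side lookup over DT_HASH-style bucket/chain arrays. Returns the
// symbol index, or 0 (STN_UNDEF) if the name is not present. |names| stands
// in for the dynamic string table.
int32_t Lookup(const std::vector<int32_t>& buckets,
               const std::vector<int32_t>& chains,
               const std::vector<const char*>& names,
               const char* name) {
  const size_t bucket = ElfHash(name) % buckets.size();
  for (int32_t i = buckets[bucket]; i != 0 /* STN_UNDEF */; i = chains[i]) {
    if (std::strcmp(names[i], name) == 0) return i;
  }
  return 0;
}

int main() {
  // Index 0 is the reserved symbol; the other names are made up.
  std::vector<const char*> names = {"", "vm_instructions",
                                    "isolate_instructions"};
  const int32_t n = static_cast<int32_t>(names.size());
  std::vector<int32_t> buckets(n, 0), chains(n, 0);
  for (int32_t i = 1; i < n; i++) {
    const uint32_t b = ElfHash(names[i]) % n;
    chains[i] = buckets[b];  // next = old head
    buckets[b] = i;          // head = this symbol
  }
  return Lookup(buckets, chains, names, "isolate_instructions") == 2 ? 0 : 1;
}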
675
676class DynamicTable : public Section {
677 public:
678 // The .dynamic section is expected to be writable on most Linux systems
679 // unless the dynamic linker is explicitly built with support for a read-only
680 // .dynamic section (DL_RO_DYN_SECTION).
681 DynamicTable(Zone* zone, SymbolTable* symtab, SymbolHashTable* hash)
682 : Section(elf::SectionHeaderType::SHT_DYNAMIC,
683 /*allocate=*/true,
684 /*executable=*/false,
685 /*writable=*/true),
686 symtab_(symtab),
687 hash_(hash) {
688 link = strtab().index;
689 entry_size = sizeof(elf::DynamicEntry);
690
691 AddEntry(zone, elf::DynamicEntryType::DT_HASH, kInvalidEntry);
692 AddEntry(zone, elf::DynamicEntryType::DT_STRTAB, kInvalidEntry);
693 AddEntry(zone, elf::DynamicEntryType::DT_STRSZ, kInvalidEntry);
694 AddEntry(zone, elf::DynamicEntryType::DT_SYMTAB, kInvalidEntry);
695 AddEntry(zone, elf::DynamicEntryType::DT_SYMENT, sizeof(elf::Symbol));
696 AddEntry(zone, elf::DynamicEntryType::DT_NULL, 0);
697 }
698
699 static constexpr intptr_t kInvalidEntry = -1;
700
701 DEFINE_TYPE_CHECK_FOR(DynamicTable)
702 const SymbolHashTable& hash() const { return *hash_; }
703 const SymbolTable& symtab() const { return *symtab_; }
704 const StringTable& strtab() const { return symtab().strtab(); }
705 intptr_t MemorySize() const { return entries_.length() * entry_size; }
706
707 void Write(ElfWriteStream* stream) const {
708 for (intptr_t i = 0; i < entries_.length(); i++) {
709 entries_[i]->Write(stream);
710 }
711 }
712
713 void Finalize() {
714 FinalizeEntry(elf::DynamicEntryType::DT_HASH, hash().memory_offset());
715 FinalizeEntry(elf::DynamicEntryType::DT_STRTAB, strtab().memory_offset());
716 FinalizeEntry(elf::DynamicEntryType::DT_STRSZ, strtab().MemorySize());
717 FinalizeEntry(elf::DynamicEntryType::DT_SYMTAB, symtab().memory_offset());
718 }
719
720 private:
721 struct Entry : public ZoneAllocated {
722 Entry(elf::DynamicEntryType tag, intptr_t value) : tag(tag), value(value) {}
723
724 void Write(ElfWriteStream* stream) const {
725 ASSERT(value != kInvalidEntry);
726 const intptr_t start = stream->Position();
727#if defined(TARGET_ARCH_IS_32_BIT)
728 stream->WriteWord(static_cast<uint32_t>(tag));
729 stream->WriteAddr(value);
730#else
731 stream->WriteXWord(static_cast<uint64_t>(tag));
732 stream->WriteAddr(value);
733#endif
734 ASSERT_EQUAL(stream->Position() - start, sizeof(elf::DynamicEntry));
735 }
736
737 elf::DynamicEntryType tag;
738 intptr_t value;
739 };
740
741 void AddEntry(Zone* zone, elf::DynamicEntryType tag, intptr_t value) {
742 auto const entry = new (zone) Entry(tag, value);
743 entries_.Add(entry);
744 }
745
746 void FinalizeEntry(elf::DynamicEntryType tag, intptr_t value) {
747 for (auto* entry : entries_) {
748 if (entry->tag == tag) {
749 entry->value = value;
750 break;
751 }
752 }
753 }
754
755 SymbolTable* const symtab_;
756 SymbolHashTable* const hash_;
757 GrowableArray<Entry*> entries_;
758};
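
For context, a sketch of how a consumer of the emitted .dynamic section walks it: fixed-size (tag, value) entries are scanned until DT_NULL, and the tags of interest are picked out. The tag constants below are the standard ELF values; the addresses are illustrative:

#include <cstdint>
#include <cstdio>

// One .dynamic entry as written on 64-bit targets (an Elf64_Dyn): a tag
// followed by an address or size, depending on the tag.
struct Dyn {
  uint64_t tag;
  uint64_t value;
};

// Standard tag values from the ELF specification.
constexpr uint64_t DT_NULL = 0, DT_HASH = 4, DT_STRTAB = 5, DT_SYMTAB = 6;

// A consumer scans entries until DT_NULL and picks out the tags it needs.
uint64_t FindDynamicValue(const Dyn* dynamic, uint64_t wanted_tag) {
  for (const Dyn* entry = dynamic; entry->tag != DT_NULL; entry++) {
    if (entry->tag == wanted_tag) return entry->value;
  }
  return 0;
}

int main() {
  // Illustrative addresses only.
  const Dyn table[] = {{DT_HASH, 0x1000}, {DT_STRTAB, 0x2000},
                       {DT_SYMTAB, 0x3000}, {DT_NULL, 0}};
  std::printf("DT_STRTAB = %#llx\n", static_cast<unsigned long long>(
                                         FindDynamicValue(table, DT_STRTAB)));
  return 0;
}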
759
760class BitsContainer : public Section {
761 public:
762 // Fully specified BitsContainer information. Unless otherwise specified,
763 // BitsContainers are aligned on byte boundaries (i.e., no padding is used).
764 BitsContainer(elf::SectionHeaderType type,
765 bool allocate,
766 bool executable,
767 bool writable,
768 int alignment = 1)
769 : Section(type, allocate, executable, writable, alignment) {}
770
771 // For BitsContainers used only as unallocated sections.
772 explicit BitsContainer(elf::SectionHeaderType type, intptr_t alignment = 1)
773 : BitsContainer(type,
774 /*allocate=*/false,
775 /*executable=*/false,
776 /*writable=*/false,
777 alignment) {}
778
779 // For BitsContainers used as allocated sections whose type depends on the type of the
780 // ELF file. Creates an elf::SHT_PROGBITS section if type is Snapshot,
781 // otherwise creates an elf::SHT_NOBITS section.
782 BitsContainer(Elf::Type t,
783 bool executable,
784 bool writable,
785 intptr_t alignment = 1)
786 : BitsContainer(t == Elf::Type::Snapshot
787 ? elf::SectionHeaderType::SHT_PROGBITS
788 : elf::SectionHeaderType::SHT_NOBITS,
789 /*allocate=*/true,
790 executable,
791 writable,
792 alignment) {}
793
794 DEFINE_TYPE_CHECK_FOR(BitsContainer)
795
796 bool IsNoBits() const { return type == elf::SectionHeaderType::SHT_NOBITS; }
797 bool HasBytes() const {
798 return portions_.length() != 0 && portions_[0].bytes != nullptr;
799 }
800
801 struct Portion {
802 void Write(ElfWriteStream* stream, intptr_t section_start) const {
803 ASSERT(bytes != nullptr);
804 if (relocations == nullptr) {
805 stream->WriteBytes(bytes, size);
806 return;
807 }
808 const SymbolTable& symtab = stream->elf().symtab();
809 // Resolve relocations as we write.
810 intptr_t current_pos = 0;
811 for (const auto& reloc : *relocations) {
812 // We assume here that the relocations are sorted in increasing order,
813 // with unique section offsets.
814 ASSERT(current_pos <= reloc.section_offset);
815 if (current_pos < reloc.section_offset) {
816 stream->WriteBytes(bytes + current_pos,
817 reloc.section_offset - current_pos);
818 }
819 intptr_t source_address = reloc.source_offset;
820 if (reloc.source_label > 0) {
821 auto* const source_symbol = symtab.FindUid(reloc.source_label);
822 ASSERT(source_symbol != nullptr);
823 source_address += source_symbol->offset;
824 } else if (reloc.source_label == Elf::Relocation::kSelfRelative) {
825 source_address += section_start + offset + reloc.section_offset;
826 } else {
827 ASSERT_EQUAL(reloc.source_label, Elf::Relocation::kSnapshotRelative);
828 // No change to source_address.
829 }
830 ASSERT(reloc.size_in_bytes <= kWordSize);
831 word to_write = reloc.target_offset - source_address;
832 if (reloc.target_label > 0) {
833 if (auto* const target_symbol = symtab.FindUid(reloc.target_label)) {
834 to_write += target_symbol->offset;
835 } else {
836 ASSERT_EQUAL(reloc.target_label, Elf::kBuildIdLabel);
837 ASSERT_EQUAL(reloc.target_offset, 0);
838 ASSERT_EQUAL(reloc.source_offset, 0);
839 ASSERT_EQUAL(reloc.size_in_bytes, compiler::target::kWordSize);
840 // TODO(dartbug.com/43516): Special case for snapshots with deferred
841 // sections that handles the build ID relocation in an
842 // InstructionsSection when there is no build ID.
843 to_write = Image::kNoRelocatedAddress;
844 }
845 } else if (reloc.target_label == Elf::Relocation::kSelfRelative) {
846 to_write += section_start + offset + reloc.section_offset;
847 } else {
848 ASSERT_EQUAL(reloc.target_label, Elf::Relocation::kSnapshotRelative);
849 // No change to the target address.
850 }
851 ASSERT(Utils::IsInt(reloc.size_in_bytes * kBitsPerByte, to_write));
852 stream->WriteBytes(reinterpret_cast<const uint8_t*>(&to_write),
853 reloc.size_in_bytes);
854 current_pos = reloc.section_offset + reloc.size_in_bytes;
855 }
856 stream->WriteBytes(bytes + current_pos, size - current_pos);
857 }
858
859 intptr_t offset;
860 const char* symbol_name;
861 intptr_t label;
862 const uint8_t* bytes;
863 intptr_t size;
864 const ZoneGrowableArray<Elf::Relocation>* relocations;
865 const ZoneGrowableArray<Elf::SymbolData>* symbols;
866
867 private:
869 };
870
871 const GrowableArray<Portion>& portions() const { return portions_; }
872
873 const Portion& AddPortion(
874 const uint8_t* bytes,
875 intptr_t size,
876 const ZoneGrowableArray<Elf::Relocation>* relocations = nullptr,
877 const ZoneGrowableArray<Elf::SymbolData>* symbols = nullptr,
878 const char* symbol_name = nullptr,
879 intptr_t label = 0) {
880 // Any named portion should also have a valid symbol label.
881 ASSERT(symbol_name == nullptr || label > 0);
882 ASSERT(IsNoBits() || bytes != nullptr);
883 ASSERT(bytes != nullptr || relocations == nullptr);
884 // Make sure all portions are consistent in containing bytes.
885 ASSERT(portions_.is_empty() || HasBytes() == (bytes != nullptr));
886 const intptr_t offset = Utils::RoundUp(total_size_, alignment);
887 portions_.Add(
888 {offset, symbol_name, label, bytes, size, relocations, symbols});
889 const Portion& portion = portions_.Last();
890 total_size_ = offset + size;
891 return portion;
892 }
893
894 void Write(ElfWriteStream* stream) const {
896 intptr_t start_position = stream->Position(); // Used for checks.
897 for (const auto& portion : portions_) {
898 stream->Align(alignment);
899 ASSERT_EQUAL(stream->Position(), start_position + portion.offset);
900 portion.Write(stream, memory_offset());
901 }
902 ASSERT_EQUAL(stream->Position(), start_position + total_size_);
903 }
904
905 // Returns the hash for the portion corresponding to symbol_name.
906 // Returns 0 if the portion has no bytes or no portions have that name.
907 uint32_t Hash(const char* symbol_name) const {
908 for (const auto& portion : portions_) {
909 if (strcmp(symbol_name, portion.symbol_name) == 0) {
910 if (portion.bytes == nullptr) return 0;
911 const uint32_t hash = Utils::StringHash(portion.bytes, portion.size);
912 // Ensure a non-zero return.
913 return hash == 0 ? 1 : hash;
914 }
915 }
916 return 0;
917 }
918
919 intptr_t FileSize() const { return IsNoBits() ? 0 : total_size_; }
920 intptr_t MemorySize() const { return IsAllocated() ? total_size_ : 0; }
921
922 private:
923 GrowableArray<Portion> portions_;
924 intptr_t total_size_ = 0;
925};
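
A minimal sketch of the relocation arithmetic in Portion::Write: the stored word is the distance from the source location to the target location, where each side is a symbol address, the relocation's own snapshot address (self-relative), or a bare snapshot offset (snapshot-relative). The symbol-table lookup is replaced here by precomputed base addresses and the field names are simplified:

#include <cstdint>
#include <cstdio>

// Simplified relocation record; the bases stand in for the symbol/self/snapshot
// resolution done against the SymbolTable in Portion::Write.
struct Reloc {
  intptr_t section_offset;  // Where the value is written, section-relative.
  intptr_t source_base;     // Resolved base address of the source.
  intptr_t source_offset;
  intptr_t target_base;     // Resolved base address of the target.
  intptr_t target_offset;
};

// The written word is the target address minus the source address.
intptr_t Resolve(const Reloc& r) {
  return (r.target_base + r.target_offset) - (r.source_base + r.source_offset);
}

int main() {
  // Self-relative example: a word at snapshot address 0x2010 that should hold
  // the offset to a symbol located at snapshot address 0x5000.
  Reloc r = {/*section_offset=*/0x10, /*source_base=*/0x2010,
             /*source_offset=*/0, /*target_base=*/0x5000, /*target_offset=*/0};
  std::printf("stored value: %#lx\n",
              static_cast<unsigned long>(Resolve(r)));  // 0x2ff0
  return 0;
}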
926
927class NoteSection : public BitsContainer {
928 public:
929 NoteSection()
930 : BitsContainer(elf::SectionHeaderType::SHT_NOTE,
931 /*allocate=*/true,
932 /*executable=*/false,
933 /*writable=*/false,
934 kNoteAlignment) {}
935};
936
937// Abstract bits container that allows merging by just appending the portion
938// information (with properly adjusted offsets) of the other to this one.
939class ConcatenableBitsContainer : public BitsContainer {
940 public:
941 ConcatenableBitsContainer(Elf::Type type,
942 bool executable,
943 bool writable,
944 intptr_t alignment)
945 : BitsContainer(type, executable, writable, alignment) {}
946
947 virtual bool CanMergeWith(const Section& other) const = 0;
948 virtual void Merge(const Section& other) {
949 ASSERT(other.IsBitsContainer());
950 ASSERT(CanMergeWith(other));
951 for (const auto& portion : other.AsBitsContainer()->portions()) {
952 AddPortion(portion.bytes, portion.size, portion.relocations,
953 portion.symbols, portion.symbol_name, portion.label);
954 }
955 }
956};
957
958class TextSection : public ConcatenableBitsContainer {
959 public:
960 explicit TextSection(Elf::Type t)
961 : ConcatenableBitsContainer(t,
962 /*executable=*/true,
963 /*writable=*/false,
964 ImageWriter::kTextAlignment) {}
965
966 DEFINE_TYPE_CHECK_FOR(TextSection);
967
968 virtual bool CanMergeWith(const Section& other) const {
969 return other.IsTextSection();
970 }
971};
972
973class DataSection : public ConcatenableBitsContainer {
974 public:
975 explicit DataSection(Elf::Type t)
976 : ConcatenableBitsContainer(t,
977 /*executable=*/false,
978 /*writable=*/false,
979 ImageWriter::kRODataAlignment) {}
980
981 DEFINE_TYPE_CHECK_FOR(DataSection);
982
983 virtual bool CanMergeWith(const Section& other) const {
984 return other.IsDataSection();
985 }
986};
987
988class BssSection : public ConcatenableBitsContainer {
989 public:
990 explicit BssSection(Elf::Type t)
991 : ConcatenableBitsContainer(t,
992 /*executable=*/false,
993 /*writable=*/true,
994 ImageWriter::kBssAlignment) {}
995
996 DEFINE_TYPE_CHECK_FOR(BssSection);
997
998 virtual bool CanMergeWith(const Section& other) const {
999 return other.IsBssSection();
1000 }
1001};
1002
1003// Represents portions of the file/memory space which do not correspond to
1004// sections from the section header. Should never be added to the section table,
1005// but may be added to segments.
1006class PseudoSection : public Section {
1007 public:
1008 // All PseudoSections are aligned to target word size.
1009 static constexpr intptr_t kAlignment = compiler::target::kWordSize;
1010
1011 PseudoSection(bool allocate, bool executable, bool writable)
1012 : Section(elf::SectionHeaderType::SHT_NULL,
1013 allocate,
1014 executable,
1015 writable,
1016 kAlignment) {}
1017
1018 DEFINE_TYPE_CHECK_FOR(PseudoSection)
1019
1020 void Write(ElfWriteStream* stream) const = 0;
1021};
1022
1023class ProgramTable : public PseudoSection {
1024 public:
1025 explicit ProgramTable(Zone* zone)
1026 : PseudoSection(/*allocate=*/true,
1027 /*executable=*/false,
1028 /*writable=*/false),
1029 segments_(zone, 0) {
1030 entry_size = sizeof(elf::ProgramHeader);
1031 }
1032
1033 const GrowableArray<Segment*>& segments() const { return segments_; }
1034 intptr_t SegmentCount() const { return segments_.length(); }
1035 intptr_t MemorySize() const {
1036 return segments_.length() * sizeof(elf::ProgramHeader);
1037 }
1038
1039 void Add(Segment* segment) {
1040 ASSERT(segment != nullptr);
1041 segments_.Add(segment);
1042 }
1043
1044 void Write(ElfWriteStream* stream) const;
1045
1046 private:
1047 GrowableArray<Segment*> segments_;
1048};
1049
1050// This particular PseudoSection should not appear in segments either (hence
1051// being marked non-allocated), but is directly held by the Elf object.
1052class SectionTable : public PseudoSection {
1053 public:
1054 explicit SectionTable(Zone* zone)
1055 : PseudoSection(/*allocate=*/false,
1056 /*executable=*/false,
1057 /*writable=*/false),
1058 zone_(zone),
1059 sections_(zone_, 2),
1060 shstrtab_(zone_, /*allocate=*/false) {
1061 entry_size = sizeof(elf::SectionHeader);
1062 // The section at index 0 (elf::SHN_UNDEF) must be all 0s.
1063 ASSERT_EQUAL(shstrtab_.Lookup(""), 0);
1064 Add(new (zone_) ReservedSection(), "");
1065 Add(&shstrtab_, ".shstrtab");
1066 }
1067
1068 const GrowableArray<Section*>& sections() const { return sections_; }
1069 intptr_t SectionCount() const { return sections_.length(); }
1070 intptr_t StringTableIndex() const { return shstrtab_.index; }
1071
1072 bool HasSectionNamed(const char* name) {
1073 return shstrtab_.Lookup(name) != StringTable::kNotIndexed;
1074 }
1075
1076 void Add(Section* section, const char* name = nullptr) {
1077 ASSERT(!section->IsPseudoSection());
1078 ASSERT(name != nullptr || section->name_is_set());
1079 if (name != nullptr) {
1080 // First, check for an existing section with the same table name.
1081 if (auto* const old_section = Find(name)) {
1082 ASSERT(old_section->CanMergeWith(*section));
1083 old_section->Merge(*section);
1084 return;
1085 }
1086 // No existing section with this name.
1087 const intptr_t name_index = shstrtab_.Add(name);
1088 section->set_name(name_index);
1089 }
1090 section->index = sections_.length();
1091 sections_.Add(section);
1092 }
1093
1094 Section* Find(const char* name) {
1095 const intptr_t name_index = shstrtab_.Lookup(name);
1096 if (name_index == StringTable::kNotIndexed) {
1097 // We're guaranteed that no section with this name has been added yet.
1098 return nullptr;
1099 }
1100 // We walk all sections to check for uniqueness in DEBUG mode.
1101 Section* result = nullptr;
1102 for (Section* const section : sections_) {
1103 if (section->name() == name_index) {
1104#if defined(DEBUG)
1105 ASSERT(result == nullptr);
1106 result = section;
1107#else
1108 return section;
1109#endif
1110 }
1111 }
1112 return result;
1113 }
1114
1115 intptr_t FileSize() const {
1116 return sections_.length() * sizeof(elf::SectionHeader);
1117 }
1118
1119 void Write(ElfWriteStream* stream) const;
1120
1121 // Reorders the sections to create a minimal number of segments and
1122 // creates and returns an appropriate program table.
1123 //
1124 // Also takes the static symbol table and adjusts its symbols' section
1125 // indices, since it is not recorded in sections_ for stripped outputs.
1126 ProgramTable* CreateProgramTable(SymbolTable* symtab);
1127
1128 private:
1129 Zone* const zone_;
1130 GrowableArray<Section*> sections_;
1131 StringTable shstrtab_;
1132};
1133
1134class ElfHeader : public PseudoSection {
1135 public:
1136 ElfHeader(const ProgramTable& program_table,
1137 const SectionTable& section_table)
1138 : PseudoSection(/*allocate=*/true,
1139 /*executable=*/false,
1140 /*writable=*/false),
1141 program_table_(program_table),
1142 section_table_(section_table) {}
1143
1144 intptr_t MemorySize() const { return sizeof(elf::ElfHeader); }
1145
1146 void Write(ElfWriteStream* stream) const;
1147
1148 private:
1149 const ProgramTable& program_table_;
1150 const SectionTable& section_table_;
1151};
1152
1153#undef DEFINE_TYPE_CHECK_FOR
1154#undef FOR_EACH_SECTION_TYPE
1155
1156Elf::Elf(Zone* zone, BaseWriteStream* stream, Type type, Dwarf* dwarf)
1157 : zone_(zone),
1158 unwrapped_stream_(stream),
1159 type_(type),
1160 dwarf_(dwarf),
1161 section_table_(new (zone) SectionTable(zone)) {
1162 // Separate debugging information should always have a Dwarf object.
1163 ASSERT(type_ == Type::Snapshot || dwarf_ != nullptr);
1164 // Assumed by various offset logic in this file.
1165 ASSERT_EQUAL(unwrapped_stream_->Position(), 0);
1166}
1167
1168void Elf::AddText(const char* name,
1169 intptr_t label,
1170 const uint8_t* bytes,
1171 intptr_t size,
1172 const ZoneGrowableArray<Relocation>* relocations,
1173 const ZoneGrowableArray<SymbolData>* symbols) {
1174 auto* const container = new (zone_) TextSection(type_);
1175 container->AddPortion(bytes, size, relocations, symbols, name, label);
1176 section_table_->Add(container, kTextName);
1177}
1178
1179void Elf::CreateBSS() {
1180 // Not idempotent.
1181 ASSERT(section_table_->Find(kBssName) == nullptr);
1182 // No text section means no BSS section.
1183 auto* const text_section = section_table_->Find(kTextName);
1184 if (text_section == nullptr) return;
1185 ASSERT(text_section->IsTextSection());
1186
1187 auto* const bss_container = new (zone_) BssSection(type_);
1188 for (const auto& portion : text_section->AsBitsContainer()->portions()) {
1189 size_t size;
1190 const char* symbol_name;
1191 intptr_t label;
1192 // First determine whether this is the VM's text portion or the isolate's.
1193 if (strcmp(portion.symbol_name, kVmSnapshotInstructionsAsmSymbol) == 0) {
1194 size = BSS::kVmEntryCount * compiler::target::kWordSize;
1195 symbol_name = kVmSnapshotBssAsmSymbol;
1196 label = kVmBssLabel;
1197 } else if (strcmp(portion.symbol_name,
1198 kIsolateSnapshotInstructionsAsmSymbol) == 0) {
1199 size = BSS::kIsolateGroupEntryCount * compiler::target::kWordSize;
1200 symbol_name = kIsolateSnapshotBssAsmSymbol;
1201 label = kIsolateBssLabel;
1202 } else {
1203 // Not VM or isolate text.
1204 UNREACHABLE();
1205 continue;
1206 }
1207
1208 uint8_t* bytes = nullptr;
1209 if (type_ == Type::Snapshot) {
1210 // Ideally the BSS segment would take no space in the object, but
1211 // Android's "strip" utility truncates the memory-size of our segments to
1212 // their file-size.
1213 //
1214 // Therefore we must insert zero-filled data for the BSS.
1215 bytes = zone_->Alloc<uint8_t>(size);
1216 memset(bytes, 0, size);
1217 }
1218 // For the BSS section, we add the section symbols as local symbols in the
1219 // static symbol table, as these addresses are only used for relocation.
1220 // (This matches the behavior in the assembly output.)
1221 auto* symbols = new (zone_) ZoneGrowableArray<Elf::SymbolData>();
1222 symbols->Add({symbol_name, elf::STT_SECTION, 0, size, label});
1223 bss_container->AddPortion(bytes, size, /*relocations=*/nullptr, symbols);
1224 }
1225
1226 section_table_->Add(bss_container, kBssName);
1227}
1228
1229void Elf::AddROData(const char* name,
1230 intptr_t label,
1231 const uint8_t* bytes,
1232 intptr_t size,
1233 const ZoneGrowableArray<Relocation>* relocations,
1234 const ZoneGrowableArray<SymbolData>* symbols) {
1235 auto* const container = new (zone_) DataSection(type_);
1236 container->AddPortion(bytes, size, relocations, symbols, name, label);
1237 section_table_->Add(container, kDataName);
1238}
1239
1240#if defined(DART_PRECOMPILER)
1241class DwarfElfStream : public DwarfWriteStream {
1242 public:
1243 DwarfElfStream(Zone* zone, NonStreamingWriteStream* stream)
1244 : zone_(ASSERT_NOTNULL(zone)),
1245 stream_(ASSERT_NOTNULL(stream)),
1246 relocations_(new (zone) ZoneGrowableArray<Elf::Relocation>()) {}
1247
1248 const uint8_t* buffer() const { return stream_->buffer(); }
1249 intptr_t bytes_written() const { return stream_->bytes_written(); }
1250 intptr_t Position() const { return stream_->Position(); }
1251
1252 void sleb128(intptr_t value) { stream_->WriteSLEB128(value); }
1253 void uleb128(uintptr_t value) { stream_->WriteLEB128(value); }
1254 void u1(uint8_t value) { stream_->WriteByte(value); }
1255 void u2(uint16_t value) { stream_->WriteFixed(value); }
1256 void u4(uint32_t value) { stream_->WriteFixed(value); }
1257 void u8(uint64_t value) { stream_->WriteFixed(value); }
1258 void string(const char* cstr) { // NOLINT
1259 // Unlike stream_->WriteString(), we want the null terminator written.
1260 stream_->WriteBytes(cstr, strlen(cstr) + 1);
1261 }
1262 // The prefix is ignored for DwarfElfStreams.
1263 void WritePrefixedLength(const char* unused, std::function<void()> body) {
1264 const intptr_t fixup = stream_->Position();
1265 // We assume DWARF v2 currently, so all sizes are 32-bit.
1266 u4(0);
1267 // All sizes for DWARF sections measure the size of the section data _after_
1268 // the size value.
1269 const intptr_t start = stream_->Position();
1270 body();
1271 const intptr_t end = stream_->Position();
1272 stream_->SetPosition(fixup);
1273 u4(end - start);
1274 stream_->SetPosition(end);
1275 }
1276 // Shorthand for when working directly with DwarfElfStreams.
1277 void WritePrefixedLength(std::function<void()> body) {
1278 WritePrefixedLength(nullptr, body);
1279 }
1280
1281 void OffsetFromSymbol(intptr_t label, intptr_t offset) {
1282 relocations_->Add({kAddressSize, stream_->Position(),
1283 Elf::Relocation::kSnapshotRelative, 0, label, offset});
1284 addr(0); // Resolved later.
1285 }
1286 template <typename T>
1287 void RelativeSymbolOffset(intptr_t label) {
1288 relocations_->Add({sizeof(T), stream_->Position(),
1289 Elf::Relocation::kSelfRelative, 0, label, 0});
1290 stream_->WriteFixed<T>(0); // Resolved later.
1291 }
1292 void InitializeAbstractOrigins(intptr_t size) {
1293 abstract_origins_size_ = size;
1294 abstract_origins_ = zone_->Alloc<uint32_t>(abstract_origins_size_);
1295 }
1296 void RegisterAbstractOrigin(intptr_t index) {
1297 ASSERT(abstract_origins_ != nullptr);
1298 ASSERT(index < abstract_origins_size_);
1299 abstract_origins_[index] = stream_->Position();
1300 }
1301 void AbstractOrigin(intptr_t index) { u4(abstract_origins_[index]); }
1302
1303 const ZoneGrowableArray<Elf::Relocation>* relocations() const {
1304 return relocations_;
1305 }
1306
1307 protected:
1308#if defined(TARGET_ARCH_IS_32_BIT)
1309 static constexpr intptr_t kAddressSize = kInt32Size;
1310#else
1311 static constexpr intptr_t kAddressSize = kInt64Size;
1312#endif
1313
1314 void addr(uword value) {
1315#if defined(TARGET_ARCH_IS_32_BIT)
1316 u4(value);
1317#else
1318 u8(value);
1319#endif
1320 }
1321
1322 Zone* const zone_;
1323 NonStreamingWriteStream* const stream_;
1324 ZoneGrowableArray<Elf::Relocation>* relocations_ = nullptr;
1325 uint32_t* abstract_origins_ = nullptr;
1326 intptr_t abstract_origins_size_ = -1;
1327
1328 private:
1329 DISALLOW_COPY_AND_ASSIGN(DwarfElfStream);
1330};
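
The uleb128/sleb128 methods above defer to the underlying stream's LEB128 writers. For reference, a standalone sketch of unsigned LEB128, the variable-length encoding used throughout these DWARF sections (not the VM's implementation):

#include <cstdint>
#include <cstdio>
#include <vector>

// Unsigned LEB128: 7 bits per byte, least-significant group first, with the
// high bit set on every byte except the last.
void WriteULEB128(std::vector<uint8_t>* out, uint64_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;
    out->push_back(byte);
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> bytes;
  WriteULEB128(&bytes, 624485);  // Classic example: encodes to e5 8e 26.
  for (uint8_t b : bytes) std::printf("%02x ", b);
  std::printf("\n");
  return 0;
}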
1331
1332static constexpr intptr_t kInitialDwarfBufferSize = 64 * KB;
1333#endif
1334
1335void SymbolTable::Initialize(const GrowableArray<Section*>& sections) {
1336 for (auto* const section : sections) {
1337 // The values of all added symbols are memory addresses.
1338 if (!section->IsAllocated()) continue;
1339 if (auto* const bits = section->AsBitsContainer()) {
1340 for (const auto& portion : section->AsBitsContainer()->portions()) {
1341 if (portion.symbol_name != nullptr) {
1342 // Global dynamic symbols for the content of a given section, which is
1343 // always a single structured element (and thus we use STT_OBJECT).
1344 const intptr_t binding = elf::STB_GLOBAL;
1345 const intptr_t type = elf::STT_OBJECT;
1346 // Some tools assume the static symbol table is a superset of the
1347 // dynamic symbol table when it exists and only use it, so put all
1348 // dynamic symbols there also. (see dartbug.com/41783).
1349 AddSymbol(portion.symbol_name, binding, type, portion.size,
1350 section->index, portion.offset, portion.label);
1351 }
1352 if (!dynamic_ && portion.symbols != nullptr) {
1353 for (const auto& symbol_data : *portion.symbols) {
1354 // Local static-only symbols, e.g., code payloads or RO objects.
1355 AddSymbol(symbol_data.name, elf::STB_LOCAL, symbol_data.type,
1356 symbol_data.size, section->index,
1357 portion.offset + symbol_data.offset, symbol_data.label);
1358 }
1359 }
1360 }
1361 }
1362 }
1363}
1364
1365void Elf::InitializeSymbolTables() {
1366 // Not idempotent.
1367 ASSERT(symtab_ == nullptr);
1368
1369 // Create static and dynamic symbol tables.
1370 auto* const dynstrtab = new (zone_) StringTable(zone_, /*allocate=*/true);
1371 section_table_->Add(dynstrtab, ".dynstr");
1372 auto* const dynsym =
1373 new (zone_) SymbolTable(zone_, dynstrtab, /*dynamic=*/true);
1374 section_table_->Add(dynsym, ".dynsym");
1375 dynsym->Initialize(section_table_->sections());
1376 // Now the dynamic symbol table is populated, set up the hash table and
1377 // dynamic table.
1378 auto* const hash = new (zone_) SymbolHashTable(zone_, dynsym);
1379 section_table_->Add(hash, ".hash");
1380 auto* const dynamic = new (zone_) DynamicTable(zone_, dynsym, hash);
1381 section_table_->Add(dynamic, kDynamicTableName);
1382
1383 // We only add the static string and symbol tables to the section table if
1384 // this is an unstripped output, but we always create them as they are used
1385 // to resolve relocations.
1386 auto* const strtab = new (zone_) StringTable(zone_, /*allocate=*/false);
1387 if (!IsStripped()) {
1388 section_table_->Add(strtab, ".strtab");
1389 }
1390 symtab_ = new (zone_) SymbolTable(zone_, strtab, /*dynamic=*/false);
1391 if (!IsStripped()) {
1392 section_table_->Add(symtab_, ".symtab");
1393 }
1394 symtab_->Initialize(section_table_->sections());
1395}
1396
1397void Elf::FinalizeEhFrame() {
1398#if !defined(TARGET_ARCH_IA32)
1399#if defined(TARGET_ARCH_X64)
1400 // The x86_64 psABI defines the DWARF register numbers, which differ from
1401 // the registers' usual encoding within instructions.
1402 const intptr_t DWARF_RA = 16; // No corresponding register.
1403 const intptr_t DWARF_FP = 6; // RBP
1404#else
1405 const intptr_t DWARF_RA = ConcreteRegister(LINK_REGISTER);
1406 const intptr_t DWARF_FP = FP;
1407#endif
1408
1409 // No text section added means no .eh_frame.
1410 TextSection* text_section = nullptr;
1411 if (auto* const section = section_table_->Find(kTextName)) {
1412 text_section = section->AsTextSection();
1413 ASSERT(text_section != nullptr);
1414 }
1415 // No text section added means no .eh_frame.
1416 if (text_section == nullptr) return;
1417
1418#if defined(DART_TARGET_OS_WINDOWS) && \
1419 (defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64))
1420 // Append Windows unwinding instructions to the end of .text section.
1421 { // NOLINT
1422 auto* const unwinding_instructions_frame = new (zone_) TextSection(type_);
1423 ZoneWriteStream stream(
1424 zone(),
1425 /*initial_size=*/UnwindingRecordsPlatform::SizeInBytes());
1426 uint8_t* unwinding_instructions =
1427 zone()->Alloc<uint8_t>(UnwindingRecordsPlatform::SizeInBytes());
1428
1429 intptr_t start_offset =
1430 Utils::RoundUp(text_section->FileSize(), text_section->alignment);
1431 stream.WriteBytes(UnwindingRecords::GenerateRecordsInto(
1432 start_offset, unwinding_instructions),
1433 UnwindingRecordsPlatform::SizeInBytes());
1434
1435 unwinding_instructions_frame->AddPortion(stream.buffer(),
1436 stream.bytes_written());
1437 section_table_->Add(unwinding_instructions_frame, kTextName);
1438 }
1439#endif
1440
1441 // Multiplier which will be used to scale operands of DW_CFA_offset and
1442 // DW_CFA_val_offset.
1443 const intptr_t kDataAlignment = -compiler::target::kWordSize;
1444
1445 static constexpr uint8_t DW_EH_PE_pcrel = 0x10;
1446 static constexpr uint8_t DW_EH_PE_sdata4 = 0x0b;
1447
1448 ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
1449 DwarfElfStream dwarf_stream(zone_, &stream);
1450
1451 // Emit CIE.
1452
1453 // Used to calculate offset to CIE in FDEs.
1454 const intptr_t cie_start = dwarf_stream.Position();
1455 dwarf_stream.WritePrefixedLength([&] {
1456 dwarf_stream.u4(0); // CIE
1457 dwarf_stream.u1(1); // Version (must be 1 or 3)
1458 // Augmentation String
1459 dwarf_stream.string("zR"); // NOLINT
1460 dwarf_stream.uleb128(1); // Code alignment (must be 1).
1461 dwarf_stream.sleb128(kDataAlignment); // Data alignment
1462 dwarf_stream.u1(DWARF_RA); // Return address register
1463 dwarf_stream.uleb128(1); // Augmentation size
1464 dwarf_stream.u1(DW_EH_PE_pcrel | DW_EH_PE_sdata4); // FDE encoding.
1465 // CFA is caller's SP (FP+kCallerSpSlotFromFp*kWordSize)
1466 dwarf_stream.u1(Dwarf::DW_CFA_def_cfa);
1467 dwarf_stream.uleb128(DWARF_FP);
1468 dwarf_stream.uleb128(kCallerSpSlotFromFp * compiler::target::kWordSize);
1469 });
1470
1471 // Emit rule defining that |reg| value is stored at CFA+offset.
1472 const auto cfa_offset = [&](intptr_t reg, intptr_t offset) {
1473 const intptr_t scaled_offset = offset / kDataAlignment;
1474 RELEASE_ASSERT(scaled_offset >= 0);
1475 dwarf_stream.u1(Dwarf::DW_CFA_offset | reg);
1476 dwarf_stream.uleb128(scaled_offset);
1477 };
1478
1479 // Emit an FDE covering each .text section.
1480 for (const auto& portion : text_section->portions()) {
1481#if defined(DART_TARGET_OS_WINDOWS) && \
1482 (defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64))
1483 if (portion.label == 0) {
1484 // The unwinding instructions portion has no label and needs no DWARF CFI.
1485 continue;
1486 }
1487#endif
1488 ASSERT(portion.label != 0); // Needed for relocations.
1489 dwarf_stream.WritePrefixedLength([&]() {
1490 // Offset to CIE. Note that unlike pcrel this offset is encoded
1491 // backwards: it will be subtracted from the current position.
1492 dwarf_stream.u4(stream.Position() - cie_start);
1493 // Start address as a PC relative reference.
1494 dwarf_stream.RelativeSymbolOffset<int32_t>(portion.label);
1495 dwarf_stream.u4(portion.size); // Size.
1496 dwarf_stream.u1(0); // Augmentation Data length.
1497
1498 // Caller FP at FP+kSavedCallerFpSlotFromFp*kWordSize,
1499 // where FP is CFA - kCallerSpSlotFromFp*kWordSize.
1501 cfa_offset(DWARF_FP, (kSavedCallerFpSlotFromFp - kCallerSpSlotFromFp) *
1502 compiler::target::kWordSize);
1503
1504 // Caller LR at FP+kSavedCallerPcSlotFromFp*kWordSize,
1505 // where FP is CFA - kCallerSpSlotFromFp*kWordSize
1507 cfa_offset(DWARF_RA, (kSavedCallerPcSlotFromFp - kCallerSpSlotFromFp) *
1508 compiler::target::kWordSize);
1509 });
1510 }
1511
1512 dwarf_stream.u4(0); // end of section (FDE with zero length)
1513
1514 auto* const eh_frame = new (zone_)
1515 BitsContainer(type_, /*executable=*/false, /*writable=*/false);
1516 eh_frame->AddPortion(dwarf_stream.buffer(), dwarf_stream.bytes_written(),
1517 dwarf_stream.relocations());
1518 section_table_->Add(eh_frame, ".eh_frame");
1519#endif // !defined(TARGET_ARCH_IA32)
1520}
1521
1522void Elf::FinalizeDwarfSections() {
1523 if (dwarf_ == nullptr) return;
1524
1525 // Currently we only output DWARF information involving code.
1526 ASSERT(section_table_->HasSectionNamed(kTextName));
1527
1528 auto add_debug = [&](const char* name, const DwarfElfStream& stream) {
1529 auto const container =
1530 new (zone_) BitsContainer(elf::SectionHeaderType::SHT_PROGBITS);
1531 container->AddPortion(stream.buffer(), stream.bytes_written(),
1532 stream.relocations());
1533 section_table_->Add(container, name);
1534 };
1535 {
1536 ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
1537 DwarfElfStream dwarf_stream(zone_, &stream);
1538 dwarf_->WriteAbbreviations(&dwarf_stream);
1539 add_debug(".debug_abbrev", dwarf_stream);
1540 }
1541
1542 {
1543 ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
1544 DwarfElfStream dwarf_stream(zone_, &stream);
1545 dwarf_->WriteDebugInfo(&dwarf_stream);
1546 add_debug(".debug_info", dwarf_stream);
1547 }
1548
1549 {
1550 ZoneWriteStream stream(zone(), kInitialDwarfBufferSize);
1551 DwarfElfStream dwarf_stream(zone_, &stream);
1552 dwarf_->WriteLineNumberProgram(&dwarf_stream);
1553 add_debug(".debug_line", dwarf_stream);
1554 }
1555}
1556
1557ProgramTable* SectionTable::CreateProgramTable(SymbolTable* symtab) {
1558 const intptr_t num_sections = sections_.length();
1559 // Should have at least the reserved entry in sections_.
1560 ASSERT(!sections_.is_empty());
1561 ASSERT_EQUAL(sections_[0]->alignment, 0);
1562
1563 // The new program table that collects the segments for allocated sections
1564 // and a few special segments.
1565 auto* const program_table = new (zone_) ProgramTable(zone_);
1566
1567 GrowableArray<Section*> reordered_sections(zone_, num_sections);
1568 // Maps the old indices of sections to the new ones.
1569 GrowableArray<intptr_t> index_map(zone_, num_sections);
1570 index_map.FillWith(0, 0, num_sections);
1571
1572 Segment* current_segment = nullptr;
1573 // Only called for sections in the section table (i.e., not special sections
1574 // appearing in segments only or the section table itself).
1575 auto add_to_reordered_sections = [&](Section* section) {
1576 intptr_t new_index = reordered_sections.length();
1577 index_map[section->index] = new_index;
1578 section->index = new_index;
1579 reordered_sections.Add(section);
1580 if (section->IsAllocated()) {
1581 ASSERT(current_segment != nullptr);
1582 if (!current_segment->Add(section)) {
1583 // The current segment is incompatible with the current section, so
1584 // create a new one.
1585 current_segment = new (zone_)
1586 Segment(zone_, section, elf::ProgramHeaderType::PT_LOAD);
1587 program_table->Add(current_segment);
1588 }
1589 }
1590 };
1591
1592 // The first section in the section header table is always a reserved
1593 // entry containing only 0 values, so copy it over from sections_.
1594 add_to_reordered_sections(sections_[0]);
1595
1596 // There are a few important invariants originating from Android idiosyncrasies
1597 // we are trying to maintain when ordering sections:
1598 //
1599 // - Android requires the program header table be in the first load segment,
1600 // so create PseudoSections representing the ELF header and program header
1601 // table to initialize that segment.
1602 //
1603 // - The Android dynamic linker in Jelly Bean incorrectly assumes that all
1604 // non-writable segments are contiguous. Thus we write them all together.
1605 // The bug is here: https://github.com/aosp-mirror/platform_bionic/blob/94963af28e445384e19775a838a29e6a71708179/linker/linker.c#L1991-L2001
1606 //
1607 // - On Android native libraries can be mapped directly from an APK
1608 // they are stored uncompressed in it. In such situations the name
1609 // of the mapping no longer provides enough information for libunwindstack
1610 // to find the original ELF file and instead it has to rely on heuristics
1611 // to locate program header table. These heuristics currently assume that
1612 // program header table will be located in the RO mapping which precedes
1613 // RX mapping.
1614 //
1615 // These invariants imply the following order of segments: RO (program
1616 // header, .note.gnu.build-id, .dynstr, .dynsym, .hash, .rodata
1617 // and .eh_frame), RX (.text), RW (.dynamic and .bss).
1618 //
1619 auto* const elf_header = new (zone_) ElfHeader(*program_table, *this);
1620
1621 // Self-reference to program header table. Required by Android but not by
1622 // Linux. Must appear before any PT_LOAD entries.
1623 program_table->Add(new (zone_) Segment(zone_, program_table,
1624 elf::ProgramHeaderType::PT_PHDR));
1625
1626 // Create the initial load segment which contains the ELF header and program
1627 // table.
1628 current_segment =
1629 new (zone_) Segment(zone_, elf_header, elf::ProgramHeaderType::PT_LOAD);
1630 program_table->Add(current_segment);
1631 current_segment->Add(program_table);
1632
1633 // We now do several passes over the collected sections to reorder them in
1634 // a way that minimizes segments (and thus padding) in the resulting snapshot.
1635
1636 auto add_sections_matching =
1637 [&](const std::function<bool(Section*)>& should_add) {
1638 // We order the sections in a segment so all non-NOBITS sections come
1639 // before NOBITS sections, since the former sections correspond to the
1640 // file contents for the segment.
1641 for (auto* const section : sections_) {
1642 if (!section->HasBits()) continue;
1643 if (should_add(section)) {
1644 add_to_reordered_sections(section);
1645 }
1646 }
1647 for (auto* const section : sections_) {
1648 if (section->HasBits()) continue;
1649 if (should_add(section)) {
1650 add_to_reordered_sections(section);
1651 }
1652 }
1653 };
1654
1655 // If a build ID was created, we put it right after the program table so it
1656 // can be read with a minimum number of bytes from the ELF file.
1657 auto* const build_id = Find(Elf::kBuildIdNoteName);
1658 if (build_id != nullptr) {
1659 ASSERT(build_id->type == elf::SectionHeaderType::SHT_NOTE);
1660 add_to_reordered_sections(build_id);
1661 }
1662
1663 // Now add the other non-writable, non-executable allocated sections.
1664 add_sections_matching([&](Section* section) -> bool {
1665 if (section == build_id) return false; // Already added.
1666 return section->IsAllocated() && !section->IsWritable() &&
1667 !section->IsExecutable();
1668 });
1669
1670 // Now add the executable sections in a new segment.
1671 add_sections_matching([](Section* section) -> bool {
1672 return section->IsExecutable(); // Implies IsAllocated() && !IsWritable()
1673 });
1674
1675 // Now add all the writable sections.
1676 add_sections_matching([](Section* section) -> bool {
1677 return section->IsWritable(); // Implies IsAllocated() && !IsExecutable()
1678 });
1679
1680 // We put all non-reserved unallocated sections last. Otherwise, they would
1681 // affect the file offset but not the memory offset of any following allocated
1682 // sections. Doing it in this order makes it easier to keep file and memory
1683 // offsets page-aligned with respect to each other, which is required for
1684 // some loaders.
1685 add_sections_matching([](Section* section) -> bool {
1686 // Don't re-add the initial reserved section.
1687 return !section->IsReservedSection() && !section->IsAllocated();
1688 });
1689
1690 // All sections should have been accounted for in the loops above.
1691 ASSERT_EQUAL(sections_.length(), reordered_sections.length());
1692 // Replace the content of sections_ with the reordered sections.
1693 sections_.Clear();
1694 sections_.AddArray(reordered_sections);
1695
 1696 // This must be true for uses of the map to be correct.
 1697 ASSERT_EQUAL(index_map[elf::SHN_UNDEF], elf::SHN_UNDEF);
 1698
1699 // Since the section indices have been updated, change links to match
 1700 // and update the indices of symbols in any symbol tables.
1701 for (auto* const section : sections_) {
1702 // SHN_UNDEF maps to SHN_UNDEF, so no need to check for it.
1703 section->link = index_map[section->link];
1704 if (auto* const table = section->AsSymbolTable()) {
1705 table->UpdateSectionIndices(index_map);
1706 }
1707 }
1708 if (symtab->index == elf::SHN_UNDEF) {
1709 // The output is stripped, so this wasn't finalized during the loop above.
1710 symtab->UpdateSectionIndices(index_map);
1711 }
1712
1713 // Add any special non-load segments.
1714 if (build_id != nullptr) {
1715 // Add a PT_NOTE segment for the build ID.
1716 program_table->Add(
1717 new (zone_) Segment(zone_, build_id, elf::ProgramHeaderType::PT_NOTE));
1718 }
1719
1720 // Add a PT_DYNAMIC segment for the dynamic symbol table.
1721 ASSERT(HasSectionNamed(Elf::kDynamicTableName));
1722 auto* const dynamic = Find(Elf::kDynamicTableName)->AsDynamicTable();
1723 program_table->Add(
1724 new (zone_) Segment(zone_, dynamic, elf::ProgramHeaderType::PT_DYNAMIC));
1725
 1726 // Add a PT_GNU_STACK segment to prevent the loading of our snapshot from
 1727 // switching the stack to be executable.
1728 auto* const gnu_stack = new (zone_) GnuStackSection();
1729 program_table->Add(new (zone_) Segment(zone_, gnu_stack,
1730 elf::ProgramHeaderType::PT_GNU_STACK));
1731
1732 return program_table;
1733}
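// Illustrative, self-contained sketch (added for this documentation page, not
// part of elf.cc) of the ordering policy that CreateProgramTable implements
// above: allocated sections are grouped read-only, then executable, then
// writable, unallocated sections go last, and within a group sections with
// file contents precede NOBITS sections. The SectionInfo type and function
// names here are hypothetical.
#include <algorithm>
#include <string>
#include <vector>

namespace elf_ordering_sketch {

struct SectionInfo {
  std::string name;
  bool allocated;
  bool writable;
  bool executable;
  bool has_bits;  // false for NOBITS sections such as .bss
};

// Lower ranks are placed first: RO (0), RX (1), RW (2), unallocated (3).
static int Rank(const SectionInfo& s) {
  if (!s.allocated) return 3;
  if (s.writable) return 2;
  if (s.executable) return 1;
  return 0;
}

std::vector<SectionInfo> OrderForLoadSegments(std::vector<SectionInfo> sections) {
  std::stable_sort(sections.begin(), sections.end(),
                   [](const SectionInfo& a, const SectionInfo& b) {
                     if (Rank(a) != Rank(b)) return Rank(a) < Rank(b);
                     // Within a group, file-backed sections come first.
                     return a.has_bits && !b.has_bits;
                   });
  return sections;
}

}  // namespace elf_ordering_sketch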
1734
1735void Elf::Finalize() {
1736 // Generate the build ID now that we have all user-provided sections.
1737 GenerateBuildId();
1738
1739 // We add a BSS section in all cases, even to the separate debugging
1740 // information, to ensure that relocated addresses are consistent between ELF
1741 // snapshots and the corresponding separate debugging information.
1742 CreateBSS();
1743
1744 FinalizeEhFrame();
1745 FinalizeDwarfSections();
1746
1747 // Create and initialize the dynamic and static symbol tables and any
1748 // other associated sections now that all other sections have been added.
1749 InitializeSymbolTables();
1750 // Creates an appropriate program table containing load segments for allocated
1751 // sections and any other segments needed. May reorder sections to minimize
1752 // the number of load segments, so also takes the static symbol table so
1753 // symbol section indices can be adjusted if needed.
1754 program_table_ = section_table_->CreateProgramTable(symtab_);
 1755 // Calculates file and memory offsets and finalizes symbol values in any
 1756 // symbol tables.
1757 ComputeOffsets();
1758
1759#if defined(DEBUG)
1760 if (type_ == Type::Snapshot) {
1761 // For files that will be dynamically loaded, ensure the file offsets
1762 // of allocated sections are page aligned to the memory offsets.
1763 for (auto* const segment : program_table_->segments()) {
1764 for (auto* const section : segment->sections()) {
1765 ASSERT_EQUAL(section->file_offset() % Elf::kPageSize,
1766 section->memory_offset() % Elf::kPageSize);
1767 }
1768 }
1769 }
1770#endif
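 // Worked example of the invariant checked above (the page size is chosen
 // for illustration only): with a page size of 0x1000, a section at file
 // offset 0x2340 must receive a memory offset with the same low 12 bits,
 // e.g. 0x5340, since both are 0x340 mod 0x1000. A loader can then map the
 // page-aligned file range starting at 0x2000 to a page-aligned address and
 // the section lands at its expected relative memory offset without copying.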
1771
1772 // Finally, write the ELF file contents.
1773 ElfWriteStream wrapped(unwrapped_stream_, *this);
1774
1775 auto write_section = [&](const Section* section) {
1776 wrapped.Align(section->alignment);
1777 ASSERT_EQUAL(wrapped.Position(), section->file_offset());
1778 section->Write(&wrapped);
1779 ASSERT_EQUAL(wrapped.Position(),
1780 section->file_offset() + section->FileSize());
1781 };
1782
1783 // To match ComputeOffsets, first we write allocated sections and then
1784 // unallocated sections. We access the allocated sections via the load
1785 // segments so we can properly align the stream for each entered segment.
1786 intptr_t section_index = 1; // We don't visit the reserved section.
1787 for (auto* const segment : program_table_->segments()) {
1788 if (segment->type != elf::ProgramHeaderType::PT_LOAD) continue;
1789 wrapped.Align(segment->Alignment());
1790 for (auto* const section : segment->sections()) {
1791 ASSERT(section->IsAllocated());
1792 write_section(section);
1793 if (!section->IsPseudoSection()) {
1794 ASSERT_EQUAL(section->index, section_index);
1795 section_index++;
1796 }
1797 }
1798 }
1799 const auto& sections = section_table_->sections();
1800 for (; section_index < sections.length(); section_index++) {
1801 auto* const section = sections[section_index];
1802 ASSERT(!section->IsAllocated());
1803 write_section(section);
1804 }
1805 // Finally, write the section table.
1806 write_section(section_table_);
1807}
1808
1809// For the build ID, we generate a 128-bit hash, where each 32 bits is a hash of
1810// the contents of the following segments in order:
1811//
1812// .text(VM) | .text(Isolate) | .rodata(VM) | .rodata(Isolate)
1813static constexpr const char* kBuildIdSegmentNames[]{
1814 kVmSnapshotInstructionsAsmSymbol,
1815 kIsolateSnapshotInstructionsAsmSymbol,
1816 kVmSnapshotDataAsmSymbol,
1817 kIsolateSnapshotDataAsmSymbol,
1818};
1819static constexpr intptr_t kBuildIdSegmentNamesLength =
1820 ARRAY_SIZE(kBuildIdSegmentNames);
1821// Includes the note name, but not the description.
1822static constexpr intptr_t kBuildIdHeaderSize =
1823 sizeof(elf::Note) + sizeof(elf::ELF_NOTE_GNU);
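// Layout reference (added here; it restates the standard ELF note format that
// GenerateBuildId writes below, assuming 32-bit note header fields):
//
//   +0   name_size        = sizeof(elf::ELF_NOTE_GNU), i.e. 4 for "GNU\0"
//   +4   description_size = sizeof(hashes), i.e. 16 (four 32-bit hashes)
//   +8   type             = elf::NoteType::NT_GNU_BUILD_ID
//   +12  name             = "GNU\0"          (ends the kBuildIdHeaderSize bytes)
//   +16  description      = the hashes, in kBuildIdSegmentNames order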
1824
1825void Elf::GenerateBuildId() {
1826 // Not idempotent.
1827 ASSERT(section_table_->Find(kBuildIdNoteName) == nullptr);
1828 uint32_t hashes[kBuildIdSegmentNamesLength];
1829 // Currently, we construct the build ID out of data from two different
1830 // sections: the .text section and the .rodata section.
1831 //
1832 // TODO(dartbug.com/43274): Generate build IDs for separate debugging
1833 // information for assembly snapshots.
1834 auto* const text_section = section_table_->Find(kTextName);
1835 if (text_section == nullptr) return;
1836 ASSERT(text_section->IsTextSection());
1837 auto* const text_bits = text_section->AsBitsContainer();
1838 auto* const data_section = section_table_->Find(kDataName);
1839 ASSERT(data_section == nullptr || data_section->IsDataSection());
1840 // Hash each component by first hashing the associated text section and, if
1841 // there's not one, hashing the associated data section (if any).
1842 //
1843 // Any component of the build ID which does not have an associated section
1844 // in the result is kept as 0.
1845 bool has_any_text = false;
1846 for (intptr_t i = 0; i < kBuildIdSegmentNamesLength; i++) {
1847 auto* const name = kBuildIdSegmentNames[i];
1848 hashes[i] = text_bits->Hash(name);
1849 if (hashes[i] != 0) {
1850 has_any_text = true;
1851 } else if (data_section != nullptr) {
1852 hashes[i] = data_section->AsBitsContainer()->Hash(name);
1853 }
1854 }
1855 // If none of the sections in the hash were text sections, then we don't need
1856 // a build ID, as it is only used to symbolicize non-symbolic stack traces.
1857 if (!has_any_text) return;
1858 auto const description_bytes = reinterpret_cast<uint8_t*>(hashes);
1859 const size_t description_length = sizeof(hashes);
1860 // Now that we have the description field contents, create the section.
1861 ZoneWriteStream stream(zone(), kBuildIdHeaderSize + description_length);
1862 stream.WriteFixed<decltype(elf::Note::name_size)>(sizeof(elf::ELF_NOTE_GNU));
1863 stream.WriteFixed<decltype(elf::Note::description_size)>(description_length);
1864 stream.WriteFixed<decltype(elf::Note::type)>(elf::NoteType::NT_GNU_BUILD_ID);
1865 ASSERT_EQUAL(stream.Position(), sizeof(elf::Note));
1866 stream.WriteBytes(elf::ELF_NOTE_GNU, sizeof(elf::ELF_NOTE_GNU));
1867 ASSERT_EQUAL(stream.bytes_written(), kBuildIdHeaderSize);
1868 stream.WriteBytes(description_bytes, description_length);
1869 auto* const container = new (zone_) NoteSection();
1870 container->AddPortion(stream.buffer(), stream.bytes_written(),
1871 /*relocations=*/nullptr, /*symbols=*/nullptr,
1872 kSnapshotBuildIdAsmSymbol, kBuildIdLabel);
1873 section_table_->Add(container, kBuildIdNoteName);
1874}
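// Complementary sketch (added for this documentation page, not part of the
// VM): given the raw bytes of the .note.gnu.build-id section written above, a
// consumer could recover the build ID description like this. It assumes the
// standard little-endian ELF note layout used by this file; the namespace and
// function names are hypothetical.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

namespace build_id_sketch {

// Reads a little-endian uint32_t (the host is assumed little-endian here).
static uint32_t ReadU32(const uint8_t* p) {
  uint32_t value;
  std::memcpy(&value, p, sizeof(value));
  return value;
}

// Returns the description bytes of the first NT_GNU_BUILD_ID note found, or
// an empty vector if the buffer does not contain one.
std::vector<uint8_t> ExtractBuildId(const uint8_t* note, std::size_t length) {
  constexpr uint32_t kNtGnuBuildId = 3;  // NT_GNU_BUILD_ID
  std::size_t offset = 0;
  while (offset + 12 <= length) {
    const uint32_t name_size = ReadU32(note + offset);
    const uint32_t desc_size = ReadU32(note + offset + 4);
    const uint32_t type = ReadU32(note + offset + 8);
    // Note names and descriptions are padded to 4-byte boundaries.
    const std::size_t name_end = offset + 12 + ((name_size + 3u) & ~3u);
    const std::size_t desc_end = name_end + ((desc_size + 3u) & ~3u);
    if (desc_end > length) break;
    if (type == kNtGnuBuildId && name_size == 4 &&
        std::memcmp(note + offset + 12, "GNU", 4) == 0) {
      return std::vector<uint8_t>(note + name_end, note + name_end + desc_size);
    }
    offset = desc_end;
  }
  return {};
}

}  // namespace build_id_sketch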
1875
1876void Elf::ComputeOffsets() {
1877 intptr_t file_offset = 0;
1878 intptr_t memory_offset = 0;
1879
1880 // Maps indices of allocated sections in the section table to memory offsets.
1881 const intptr_t num_sections = section_table_->SectionCount();
1882 GrowableArray<intptr_t> address_map(zone_, num_sections);
1883 address_map.Add(0); // Don't adjust offsets for symbols with index SHN_UNDEF.
1884
1885 auto calculate_section_offsets = [&](Section* section) {
1886 file_offset = Utils::RoundUp(file_offset, section->alignment);
1887 section->set_file_offset(file_offset);
1888 file_offset += section->FileSize();
1889 if (section->IsAllocated()) {
1890 memory_offset = Utils::RoundUp(memory_offset, section->alignment);
1891 section->set_memory_offset(memory_offset);
1892 memory_offset += section->MemorySize();
1893 }
1894 };
1895
1896 intptr_t section_index = 1; // We don't visit the reserved section.
1897 for (auto* const segment : program_table_->segments()) {
1898 if (segment->type != elf::ProgramHeaderType::PT_LOAD) continue;
1899 // Adjust file and memory offsets for segment alignment on entry.
1900 file_offset = Utils::RoundUp(file_offset, segment->Alignment());
1901 memory_offset = Utils::RoundUp(memory_offset, segment->Alignment());
1902 for (auto* const section : segment->sections()) {
1903 ASSERT(section->IsAllocated());
1904 calculate_section_offsets(section);
1905 if (!section->IsPseudoSection()) {
 1906 // Note: this assumes that the section header table lists all allocated
 1907 // sections before all (non-reserved) unallocated sections, and in the
 1908 // same order as the load segments in the program table.
1909 address_map.Add(section->memory_offset());
1910 ASSERT_EQUAL(section->index, section_index);
1911 section_index++;
1912 }
1913 }
1914 }
1915
1916 const auto& sections = section_table_->sections();
1917 for (; section_index < sections.length(); section_index++) {
1918 auto* const section = sections[section_index];
1919 ASSERT(!section->IsAllocated());
1920 calculate_section_offsets(section);
1921 }
1922
1923 ASSERT_EQUAL(section_index, sections.length());
1924 // Now that all sections have been handled, set the file offset for the
1925 // section table, as it will be written after the last section.
1926 calculate_section_offsets(section_table_);
1927
1928#if defined(DEBUG)
1929 // Double check that segment starts are aligned as expected.
1930 for (auto* const segment : program_table_->segments()) {
1931 ASSERT(Utils::IsAligned(segment->MemoryOffset(), segment->Alignment()));
1932 }
1933#endif
1934
1935 // This must be true for uses of the map to be correct.
1936 ASSERT_EQUAL(address_map[elf::SHN_UNDEF], 0);
1937 // Adjust addresses in symbol tables as we now have section memory offsets.
1938 // Also finalize the entries of the dynamic table, as some are memory offsets.
1939 for (auto* const section : sections) {
1940 if (auto* const table = section->AsSymbolTable()) {
1941 table->Finalize(address_map);
1942 } else if (auto* const dynamic = section->AsDynamicTable()) {
1943 dynamic->Finalize();
1944 }
1945 }
1946 // Also adjust addresses in symtab for stripped snapshots.
1947 if (IsStripped()) {
1948 ASSERT_EQUAL(symtab_->index, elf::SHN_UNDEF);
1949 symtab_->Finalize(address_map);
1950 }
1951}
1952
1953void ElfHeader::Write(ElfWriteStream* stream) const {
1954 ASSERT_EQUAL(file_offset(), 0);
1955 ASSERT_EQUAL(memory_offset(), 0);
1956#if defined(TARGET_ARCH_IS_32_BIT)
1957 uint8_t size = elf::ELFCLASS32;
1958#else
1959 uint8_t size = elf::ELFCLASS64;
1960#endif
1961 uint8_t e_ident[16] = {0x7f,
1962 'E',
1963 'L',
1964 'F',
 1965 size,
 1966 elf::ELFDATA2LSB,
 1967 elf::EV_CURRENT,
 1968 elf::ELFOSABI_SYSV,
 1969 0,
1970 0,
1971 0,
1972 0,
1973 0,
1974 0,
1975 0,
1976 0};
1977 stream->WriteBytes(e_ident, 16);
1978
1979 stream->WriteHalf(elf::ET_DYN); // Shared library.
1980
1981#if defined(TARGET_ARCH_IA32)
1982 stream->WriteHalf(elf::EM_386);
1983#elif defined(TARGET_ARCH_X64)
1984 stream->WriteHalf(elf::EM_X86_64);
1985#elif defined(TARGET_ARCH_ARM)
1986 stream->WriteHalf(elf::EM_ARM);
1987#elif defined(TARGET_ARCH_ARM64)
1988 stream->WriteHalf(elf::EM_AARCH64);
1989#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
1990 stream->WriteHalf(elf::EM_RISCV);
1991#else
1992 FATAL("Unknown ELF architecture");
1993#endif
1994
1995 stream->WriteWord(elf::EV_CURRENT); // Version
1996 stream->WriteAddr(0); // "Entry point"
 1997 stream->WriteOff(program_table_.file_offset()); // e_phoff
 1998 stream->WriteOff(section_table_.file_offset()); // e_shoff
1999
2000#if defined(TARGET_ARCH_ARM)
 2001 uword flags = elf::EF_ARM_ABI | (TargetCPUFeatures::hardfp_supported()
 2002 ? elf::EF_ARM_ABI_FLOAT_HARD
 2003 : elf::EF_ARM_ABI_FLOAT_SOFT);
2004#else
2005 uword flags = 0;
2006#endif
2007 stream->WriteWord(flags);
2008
 2009 stream->WriteHalf(sizeof(elf::ElfHeader)); // e_ehsize
 2010 stream->WriteHalf(program_table_.entry_size); // e_phentsize
 2011 stream->WriteHalf(program_table_.SegmentCount()); // e_phnum
 2012 stream->WriteHalf(section_table_.entry_size); // e_shentsize
 2013 stream->WriteHalf(section_table_.SectionCount()); // e_shnum
 2014 stream->WriteHalf(stream->elf().section_table().StringTableIndex()); // e_shstrndx
2015}
2016
2017void ProgramTable::Write(ElfWriteStream* stream) const {
2018 ASSERT(segments_.length() > 0);
2019 // Make sure all relevant segments were created by checking the type of the
2020 // first.
2021 ASSERT(segments_[0]->type == elf::ProgramHeaderType::PT_PHDR);
2022 const intptr_t start = stream->Position();
2023 // Should be immediately following the ELF header.
2024 ASSERT_EQUAL(start, sizeof(elf::ElfHeader));
2025#if defined(DEBUG)
2026 // Here, we count the number of times that a PT_LOAD writable segment is
2027 // followed by a non-writable segment. We initialize last_writable to true
2028 // so that we catch the case where the first segment is non-writable.
2029 bool last_writable = true;
2030 int non_writable_groups = 0;
2031#endif
2032 for (intptr_t i = 0; i < segments_.length(); i++) {
2033 const Segment* const segment = segments_[i];
2034 ASSERT(segment->type != elf::ProgramHeaderType::PT_NULL);
2035 ASSERT_EQUAL(i == 0, segment->type == elf::ProgramHeaderType::PT_PHDR);
2036#if defined(DEBUG)
2037 if (segment->type == elf::ProgramHeaderType::PT_LOAD) {
2038 if (last_writable && !segment->IsWritable()) {
2039 non_writable_groups++;
2040 }
2041 last_writable = segment->IsWritable();
2042 }
2043#endif
2044 const intptr_t start = stream->Position();
2045 segment->WriteProgramHeader(stream);
2046 const intptr_t end = stream->Position();
2047 ASSERT_EQUAL(end - start, entry_size);
2048 }
2049#if defined(DEBUG)
2050 // All PT_LOAD non-writable segments must be contiguous. If not, some older
2051 // Android dynamic linkers fail to handle writable segments between
2052 // non-writable ones. See https://github.com/flutter/flutter/issues/43259.
2053 ASSERT(non_writable_groups <= 1);
2054#endif
2055}
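// Standalone restatement (illustrative, added for this documentation page;
// not part of the VM) of the DEBUG check above: all non-writable PT_LOAD
// segments must form a single contiguous run. The LoadSegmentInfo type is
// hypothetical.
#include <vector>

namespace segment_check_sketch {

struct LoadSegmentInfo {
  bool is_load;
  bool writable;
};

bool NonWritableLoadSegmentsAreContiguous(
    const std::vector<LoadSegmentInfo>& segments) {
  int non_writable_groups = 0;
  bool last_writable = true;  // Also counts a leading non-writable group.
  for (const auto& segment : segments) {
    if (!segment.is_load) continue;
    if (last_writable && !segment.writable) non_writable_groups++;
    last_writable = segment.writable;
  }
  return non_writable_groups <= 1;
}

}  // namespace segment_check_sketch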
2056
2057void SectionTable::Write(ElfWriteStream* stream) const {
2058 for (intptr_t i = 0; i < sections_.length(); i++) {
2059 const Section* const section = sections_[i];
2060 ASSERT_EQUAL(i == 0, section->IsReservedSection());
2061 ASSERT_EQUAL(section->index, i);
2062 ASSERT(section->link < sections_.length());
2063 const intptr_t start = stream->Position();
2064 section->WriteSectionHeader(stream);
2065 const intptr_t end = stream->Position();
2066 ASSERT_EQUAL(end - start, entry_size);
2067 }
2068}
2069
2070#endif // DART_PRECOMPILER
2071
2072} // namespace dart