blob: b4d72b27d250f930f40a700f787251e6cde6ec7b [file] [log] [blame]
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +02001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
Elliott Hughes650be4e2013-03-05 18:47:58 -080029#include "linker_phdr.h"
30
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020031#include <errno.h>
Marcus Oaklande365f9d2013-10-10 15:19:31 +010032#include <machine/exec.h>
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020033#include <sys/mman.h>
34
Elliott Hughes650be4e2013-03-05 18:47:58 -080035#include "linker.h"
36#include "linker_debug.h"
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020037
38/**
39 TECHNICAL NOTE ON ELF LOADING.
40
41 An ELF file's program header table contains one or more PT_LOAD
42 segments, which corresponds to portions of the file that need to
43 be mapped into the process' address space.
44
45 Each loadable segment has the following important properties:
46
47 p_offset -> segment file offset
48 p_filesz -> segment file size
49 p_memsz -> segment memory size (always >= p_filesz)
50 p_vaddr -> segment's virtual address
51 p_flags -> segment flags (e.g. readable, writable, executable)
52
Elliott Hughesc6200592013-09-30 18:43:46 -070053 We will ignore the p_paddr and p_align fields of Elf_Phdr for now.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +020054
55 The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
56 ranges of virtual addresses. A few rules apply:
57
58 - the virtual address ranges should not overlap.
59
60 - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
61 between them should always be initialized to 0.
62
63 - ranges do not necessarily start or end at page boundaries. Two distinct
64 segments can have their start and end on the same page. In this case, the
65 page inherits the mapping flags of the latter segment.
66
67 Finally, the real load addrs of each segment is not p_vaddr. Instead the
68 loader decides where to load the first segment, then will load all others
69 relative to the first one to respect the initial range layout.
70
71 For example, consider the following list:
72
73 [ offset:0, filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
74 [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],
75
76 This corresponds to two segments that cover these virtual address ranges:
77
78 0x30000...0x34000
79 0x40000...0x48000
80
81 If the loader decides to load the first segment at address 0xa0000000
82 then the segments' load address ranges will be:
83
84 0xa0030000...0xa0034000
85 0xa0040000...0xa0048000
86
87 In other words, all segments must be loaded at an address that has the same
88 constant offset from their p_vaddr value. This offset is computed as the
89 difference between the first segment's load address, and its p_vaddr value.
90
91 However, in practice, segments do _not_ start at page boundaries. Since we
92 can only memory-map at page boundaries, this means that the bias is
93 computed as:
94
95 load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)
96
97 (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
98 possible wrap around UINT32_MAX for possible large p_vaddr values).
99
100 And that the phdr0_load_address must start at a page boundary, with
101 the segment's real content starting at:
102
103 phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)
104
105 Note that ELF requires the following condition to make the mmap()-ing work:
106
107 PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)
108
109 The load_bias must be added to any p_vaddr value read from the ELF file to
110 determine the corresponding memory address.
111
112 **/
113
// Translate an ELF segment permission word (PF_* bits) into the
// equivalent mmap()/mprotect() protection word (PROT_* bits).
#define MAYBE_MAP_FLAG(flags, mask, prot)  (((flags) & (mask)) ? (prot) : 0)
#define PFLAGS_TO_PROT(flags)              (MAYBE_MAP_FLAG((flags), PF_X, PROT_EXEC) | \
                                            MAYBE_MAP_FLAG((flags), PF_R, PROT_READ) | \
                                            MAYBE_MAP_FLAG((flags), PF_W, PROT_WRITE))
118
Elliott Hughes650be4e2013-03-05 18:47:58 -0800119ElfReader::ElfReader(const char* name, int fd)
120 : name_(name), fd_(fd),
121 phdr_num_(0), phdr_mmap_(NULL), phdr_table_(NULL), phdr_size_(0),
122 load_start_(NULL), load_size_(0), load_bias_(0),
123 loaded_phdr_(NULL) {
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200124}
125
Elliott Hughes650be4e2013-03-05 18:47:58 -0800126ElfReader::~ElfReader() {
127 if (fd_ != -1) {
128 close(fd_);
129 }
130 if (phdr_mmap_ != NULL) {
131 munmap(phdr_mmap_, phdr_size_);
132 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200133}
134
Elliott Hughes650be4e2013-03-05 18:47:58 -0800135bool ElfReader::Load() {
136 return ReadElfHeader() &&
137 VerifyElfHeader() &&
138 ReadProgramHeader() &&
139 ReserveAddressSpace() &&
140 LoadSegments() &&
141 FindPhdr();
142}
143
144bool ElfReader::ReadElfHeader() {
145 ssize_t rc = TEMP_FAILURE_RETRY(read(fd_, &header_, sizeof(header_)));
146 if (rc < 0) {
147 DL_ERR("can't read file \"%s\": %s", name_, strerror(errno));
148 return false;
149 }
150 if (rc != sizeof(header_)) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700151 DL_ERR("\"%s\" is too small to be an ELF executable: only found %zd bytes", name_,
152 static_cast<size_t>(rc));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800153 return false;
154 }
155 return true;
156}
157
158bool ElfReader::VerifyElfHeader() {
159 if (header_.e_ident[EI_MAG0] != ELFMAG0 ||
160 header_.e_ident[EI_MAG1] != ELFMAG1 ||
161 header_.e_ident[EI_MAG2] != ELFMAG2 ||
162 header_.e_ident[EI_MAG3] != ELFMAG3) {
163 DL_ERR("\"%s\" has bad ELF magic", name_);
164 return false;
165 }
166
Elliott Hughesc00f2cb2013-10-04 17:01:33 -0700167 // Try to give a clear diagnostic for ELF class mismatches, since they're
168 // an easy mistake to make during the 32-bit/64-bit transition period.
169 int elf_class = header_.e_ident[EI_CLASS];
170#if defined(__LP64__)
171 if (elf_class != ELFCLASS64) {
172 if (elf_class == ELFCLASS32) {
173 DL_ERR("\"%s\" is 32-bit instead of 64-bit", name_);
174 } else {
175 DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
176 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800177 return false;
178 }
Elliott Hughesc00f2cb2013-10-04 17:01:33 -0700179#else
180 if (elf_class != ELFCLASS32) {
181 if (elf_class == ELFCLASS64) {
182 DL_ERR("\"%s\" is 64-bit instead of 32-bit", name_);
183 } else {
184 DL_ERR("\"%s\" has unknown ELF class: %d", name_, elf_class);
185 }
186 return false;
187 }
188#endif
189
Elliott Hughes650be4e2013-03-05 18:47:58 -0800190 if (header_.e_ident[EI_DATA] != ELFDATA2LSB) {
191 DL_ERR("\"%s\" not little-endian: %d", name_, header_.e_ident[EI_DATA]);
192 return false;
193 }
194
195 if (header_.e_type != ET_DYN) {
196 DL_ERR("\"%s\" has unexpected e_type: %d", name_, header_.e_type);
197 return false;
198 }
199
200 if (header_.e_version != EV_CURRENT) {
201 DL_ERR("\"%s\" has unexpected e_version: %d", name_, header_.e_version);
202 return false;
203 }
204
Marcus Oaklande365f9d2013-10-10 15:19:31 +0100205 if (header_.e_machine != ELF_TARG_MACH) {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800206 DL_ERR("\"%s\" has unexpected e_machine: %d", name_, header_.e_machine);
207 return false;
208 }
209
210 return true;
211}
212
213// Loads the program header table from an ELF file into a read-only private
214// anonymous mmap-ed block.
215bool ElfReader::ReadProgramHeader() {
216 phdr_num_ = header_.e_phnum;
217
218 // Like the kernel, we only accept program header tables that
219 // are smaller than 64KiB.
Elliott Hughesc6200592013-09-30 18:43:46 -0700220 if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) {
221 DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800222 return false;
223 }
224
Elliott Hughesc6200592013-09-30 18:43:46 -0700225 Elf_Addr page_min = PAGE_START(header_.e_phoff);
226 Elf_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf_Phdr)));
227 Elf_Addr page_offset = PAGE_OFFSET(header_.e_phoff);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800228
229 phdr_size_ = page_max - page_min;
230
231 void* mmap_result = mmap(NULL, phdr_size_, PROT_READ, MAP_PRIVATE, fd_, page_min);
232 if (mmap_result == MAP_FAILED) {
233 DL_ERR("\"%s\" phdr mmap failed: %s", name_, strerror(errno));
234 return false;
235 }
236
237 phdr_mmap_ = mmap_result;
Elliott Hughesc6200592013-09-30 18:43:46 -0700238 phdr_table_ = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800239 return true;
240}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200241
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800242/* Returns the size of the extent of all the possibly non-contiguous
243 * loadable segments in an ELF program header table. This corresponds
244 * to the page-aligned size in bytes that needs to be reserved in the
245 * process' address space. If there are no loadable segments, 0 is
246 * returned.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200247 *
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800248 * If out_min_vaddr or out_max_vaddr are non-NULL, they will be
249 * set to the minimum and maximum addresses of pages to be reserved,
250 * or 0 if there is nothing to load.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200251 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700252size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
253 Elf_Addr* out_min_vaddr,
254 Elf_Addr* out_max_vaddr) {
255 Elf_Addr min_vaddr = 0xFFFFFFFFU;
256 Elf_Addr max_vaddr = 0x00000000U;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200257
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800258 bool found_pt_load = false;
Elliott Hughes46882792012-08-03 16:49:39 -0700259 for (size_t i = 0; i < phdr_count; ++i) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700260 const Elf_Phdr* phdr = &phdr_table[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200261
Elliott Hughes46882792012-08-03 16:49:39 -0700262 if (phdr->p_type != PT_LOAD) {
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200263 continue;
Elliott Hughes46882792012-08-03 16:49:39 -0700264 }
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800265 found_pt_load = true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200266
Elliott Hughes46882792012-08-03 16:49:39 -0700267 if (phdr->p_vaddr < min_vaddr) {
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200268 min_vaddr = phdr->p_vaddr;
Elliott Hughes46882792012-08-03 16:49:39 -0700269 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200270
Elliott Hughes46882792012-08-03 16:49:39 -0700271 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200272 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
Elliott Hughes46882792012-08-03 16:49:39 -0700273 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200274 }
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800275 if (!found_pt_load) {
276 min_vaddr = 0x00000000U;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200277 }
278
279 min_vaddr = PAGE_START(min_vaddr);
280 max_vaddr = PAGE_END(max_vaddr);
281
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800282 if (out_min_vaddr != NULL) {
283 *out_min_vaddr = min_vaddr;
284 }
285 if (out_max_vaddr != NULL) {
286 *out_max_vaddr = max_vaddr;
287 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200288 return max_vaddr - min_vaddr;
289}
290
Elliott Hughes650be4e2013-03-05 18:47:58 -0800291// Reserve a virtual address range big enough to hold all loadable
292// segments of a program header table. This is done by creating a
293// private anonymous mmap() with PROT_NONE.
294bool ElfReader::ReserveAddressSpace() {
Elliott Hughesc6200592013-09-30 18:43:46 -0700295 Elf_Addr min_vaddr;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800296 load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800297 if (load_size_ == 0) {
298 DL_ERR("\"%s\" has no loadable segments", name_);
299 return false;
300 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200301
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800302 uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800303 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800304 void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800305 if (start == MAP_FAILED) {
Elliott Hughesc00f2cb2013-10-04 17:01:33 -0700306 DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800307 return false;
308 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200309
Elliott Hughes650be4e2013-03-05 18:47:58 -0800310 load_start_ = start;
Brian Carlstrome7dffe12013-01-10 16:39:58 -0800311 load_bias_ = reinterpret_cast<uint8_t*>(start) - addr;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800312 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200313}
314
Elliott Hughes650be4e2013-03-05 18:47:58 -0800315bool ElfReader::LoadSegments() {
316 for (size_t i = 0; i < phdr_num_; ++i) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700317 const Elf_Phdr* phdr = &phdr_table_[i];
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200318
Elliott Hughes650be4e2013-03-05 18:47:58 -0800319 if (phdr->p_type != PT_LOAD) {
320 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200321 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800322
323 // Segment addresses in memory.
Elliott Hughesc6200592013-09-30 18:43:46 -0700324 Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
325 Elf_Addr seg_end = seg_start + phdr->p_memsz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800326
Elliott Hughesc6200592013-09-30 18:43:46 -0700327 Elf_Addr seg_page_start = PAGE_START(seg_start);
328 Elf_Addr seg_page_end = PAGE_END(seg_end);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800329
Elliott Hughesc6200592013-09-30 18:43:46 -0700330 Elf_Addr seg_file_end = seg_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800331
332 // File offsets.
Elliott Hughesc6200592013-09-30 18:43:46 -0700333 Elf_Addr file_start = phdr->p_offset;
334 Elf_Addr file_end = file_start + phdr->p_filesz;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800335
Elliott Hughesc6200592013-09-30 18:43:46 -0700336 Elf_Addr file_page_start = PAGE_START(file_start);
337 Elf_Addr file_length = file_end - file_page_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800338
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700339 if (file_length != 0) {
340 void* seg_addr = mmap((void*)seg_page_start,
341 file_length,
342 PFLAGS_TO_PROT(phdr->p_flags),
343 MAP_FIXED|MAP_PRIVATE,
344 fd_,
345 file_page_start);
346 if (seg_addr == MAP_FAILED) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700347 DL_ERR("couldn't map \"%s\" segment %zd: %s", name_, i, strerror(errno));
Brian Carlstrom82dcc792013-05-21 16:49:24 -0700348 return false;
349 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800350 }
351
352 // if the segment is writable, and does not end on a page boundary,
353 // zero-fill it until the page limit.
354 if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
355 memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
356 }
357
358 seg_file_end = PAGE_END(seg_file_end);
359
360 // seg_file_end is now the first page address after the file
361 // content. If seg_end is larger, we need to zero anything
362 // between them. This is done by using a private anonymous
363 // map for all extra pages.
364 if (seg_page_end > seg_file_end) {
365 void* zeromap = mmap((void*)seg_file_end,
366 seg_page_end - seg_file_end,
367 PFLAGS_TO_PROT(phdr->p_flags),
368 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
369 -1,
370 0);
371 if (zeromap == MAP_FAILED) {
372 DL_ERR("couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
373 return false;
374 }
375 }
376 }
377 return true;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200378}
379
Elliott Hughes105bc262012-08-15 16:56:00 -0700380/* Used internally. Used to set the protection bits of all loaded segments
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200381 * with optional extra flags (i.e. really PROT_WRITE). Used by
382 * phdr_table_protect_segments and phdr_table_unprotect_segments.
383 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700384static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
385 Elf_Addr load_bias, int extra_prot_flags) {
386 const Elf_Phdr* phdr = phdr_table;
387 const Elf_Phdr* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200388
389 for (; phdr < phdr_limit; phdr++) {
390 if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
391 continue;
392
Elliott Hughesc6200592013-09-30 18:43:46 -0700393 Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
394 Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200395
396 int ret = mprotect((void*)seg_page_start,
397 seg_page_end - seg_page_start,
398 PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
399 if (ret < 0) {
400 return -1;
401 }
402 }
403 return 0;
404}
405
406/* Restore the original protection modes for all loadable segments.
407 * You should only call this after phdr_table_unprotect_segments and
408 * applying all relocations.
409 *
410 * Input:
411 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700412 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200413 * load_bias -> load bias
414 * Return:
415 * 0 on error, -1 on failure (error code in errno).
416 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700417int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
418 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200419}
420
421/* Change the protection of all loaded segments in memory to writable.
422 * This is useful before performing relocations. Once completed, you
423 * will have to call phdr_table_protect_segments to restore the original
424 * protection flags on all segments.
425 *
426 * Note that some writable segments can also have their content turned
427 * to read-only by calling phdr_table_protect_gnu_relro. This is no
428 * performed here.
429 *
430 * Input:
431 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700432 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200433 * load_bias -> load bias
434 * Return:
435 * 0 on error, -1 on failure (error code in errno).
436 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700437int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
438 return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200439}
440
441/* Used internally by phdr_table_protect_gnu_relro and
442 * phdr_table_unprotect_gnu_relro.
443 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700444static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
445 Elf_Addr load_bias, int prot_flags) {
446 const Elf_Phdr* phdr = phdr_table;
447 const Elf_Phdr* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200448
449 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
450 if (phdr->p_type != PT_GNU_RELRO)
451 continue;
452
453 /* Tricky: what happens when the relro segment does not start
454 * or end at page boundaries?. We're going to be over-protective
455 * here and put every page touched by the segment as read-only.
456 *
457 * This seems to match Ian Lance Taylor's description of the
458 * feature at http://www.airs.com/blog/archives/189.
459 *
460 * Extract:
461 * Note that the current dynamic linker code will only work
462 * correctly if the PT_GNU_RELRO segment starts on a page
463 * boundary. This is because the dynamic linker rounds the
464 * p_vaddr field down to the previous page boundary. If
465 * there is anything on the page which should not be read-only,
466 * the program is likely to fail at runtime. So in effect the
467 * linker must only emit a PT_GNU_RELRO segment if it ensures
468 * that it starts on a page boundary.
469 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700470 Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
471 Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200472
473 int ret = mprotect((void*)seg_page_start,
474 seg_page_end - seg_page_start,
475 prot_flags);
476 if (ret < 0) {
477 return -1;
478 }
479 }
480 return 0;
481}
482
483/* Apply GNU relro protection if specified by the program header. This will
484 * turn some of the pages of a writable PT_LOAD segment to read-only, as
485 * specified by one or more PT_GNU_RELRO segments. This must be always
486 * performed after relocations.
487 *
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200488 * The areas typically covered are .got and .data.rel.ro, these are
489 * read-only from the program's POV, but contain absolute addresses
490 * that need to be relocated before use.
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200491 *
492 * Input:
493 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700494 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200495 * load_bias -> load bias
496 * Return:
497 * 0 on error, -1 on failure (error code in errno).
498 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700499int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
500 return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200501}
502
#if defined(__arm__)

#  ifndef PT_ARM_EXIDX
#    define PT_ARM_EXIDX 0x70000001      /* .ARM.exidx segment */
#  endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in tables
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno)
 */
int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count,
                             Elf_Addr load_bias,
                             Elf_Addr** arm_exidx, unsigned* arm_exidx_count) {
  const Elf_Phdr* phdr = phdr_table;
  const Elf_Phdr* phdr_limit = phdr + phdr_count;

  for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
    if (phdr->p_type != PT_ARM_EXIDX) {
      continue;
    }

    *arm_exidx = reinterpret_cast<Elf_Addr*>(load_bias + phdr->p_vaddr);
    // Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes.
    *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
    return 0;
  }
  *arm_exidx = NULL;
  *arm_exidx_count = 0;
  return -1;
}
#endif
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200541
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200542/* Return the address and size of the ELF file's .dynamic section in memory,
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200543 * or NULL if missing.
544 *
545 * Input:
546 * phdr_table -> program header table
Elliott Hughes105bc262012-08-15 16:56:00 -0700547 * phdr_count -> number of entries in tables
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200548 * load_bias -> load bias
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200549 * Output:
550 * dynamic -> address of table in memory (NULL on failure).
551 * dynamic_count -> number of items in table (0 on failure).
Chris Dearmancf239052013-01-11 15:32:20 -0800552 * dynamic_flags -> protection flags for section (unset on failure)
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200553 * Return:
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200554 * void
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200555 */
Elliott Hughesc6200592013-09-30 18:43:46 -0700556void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
557 Elf_Addr load_bias,
558 Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags) {
559 const Elf_Phdr* phdr = phdr_table;
560 const Elf_Phdr* phdr_limit = phdr + phdr_count;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200561
562 for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200563 if (phdr->p_type != PT_DYNAMIC) {
564 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200565 }
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200566
Elliott Hughesc6200592013-09-30 18:43:46 -0700567 *dynamic = reinterpret_cast<Elf_Dyn*>(load_bias + phdr->p_vaddr);
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200568 if (dynamic_count) {
569 *dynamic_count = (unsigned)(phdr->p_memsz / 8);
570 }
Chris Dearmancf239052013-01-11 15:32:20 -0800571 if (dynamic_flags) {
572 *dynamic_flags = phdr->p_flags;
573 }
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200574 return;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200575 }
Ard Biesheuvel12c78bb2012-08-14 12:30:09 +0200576 *dynamic = NULL;
577 if (dynamic_count) {
578 *dynamic_count = 0;
579 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200580}
581
Elliott Hughes650be4e2013-03-05 18:47:58 -0800582// Returns the address of the program header table as it appears in the loaded
583// segments in memory. This is in contrast with 'phdr_table_' which
584// is temporary and will be released before the library is relocated.
585bool ElfReader::FindPhdr() {
Elliott Hughesc6200592013-09-30 18:43:46 -0700586 const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200587
Elliott Hughes650be4e2013-03-05 18:47:58 -0800588 // If there is a PT_PHDR, use it directly.
Elliott Hughesc6200592013-09-30 18:43:46 -0700589 for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800590 if (phdr->p_type == PT_PHDR) {
591 return CheckPhdr(load_bias_ + phdr->p_vaddr);
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200592 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800593 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200594
Elliott Hughes650be4e2013-03-05 18:47:58 -0800595 // Otherwise, check the first loadable segment. If its file offset
596 // is 0, it starts with the ELF header, and we can trivially find the
597 // loaded program header from it.
Elliott Hughesc6200592013-09-30 18:43:46 -0700598 for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800599 if (phdr->p_type == PT_LOAD) {
600 if (phdr->p_offset == 0) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700601 Elf_Addr elf_addr = load_bias_ + phdr->p_vaddr;
602 const Elf_Ehdr* ehdr = (const Elf_Ehdr*)(void*)elf_addr;
603 Elf_Addr offset = ehdr->e_phoff;
604 return CheckPhdr((Elf_Addr)ehdr + offset);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800605 }
606 break;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200607 }
Elliott Hughes650be4e2013-03-05 18:47:58 -0800608 }
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200609
Elliott Hughes650be4e2013-03-05 18:47:58 -0800610 DL_ERR("can't find loaded phdr for \"%s\"", name_);
611 return false;
612}
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200613
Elliott Hughes650be4e2013-03-05 18:47:58 -0800614// Ensures that our program header is actually within a loadable
615// segment. This should help catch badly-formed ELF files that
616// would cause the linker to crash later when trying to access it.
Elliott Hughesc6200592013-09-30 18:43:46 -0700617bool ElfReader::CheckPhdr(Elf_Addr loaded) {
618 const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
619 Elf_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf_Phdr));
620 for (Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
Elliott Hughes650be4e2013-03-05 18:47:58 -0800621 if (phdr->p_type != PT_LOAD) {
622 continue;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200623 }
Elliott Hughesc6200592013-09-30 18:43:46 -0700624 Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
625 Elf_Addr seg_end = phdr->p_filesz + seg_start;
Elliott Hughes650be4e2013-03-05 18:47:58 -0800626 if (seg_start <= loaded && loaded_end <= seg_end) {
Elliott Hughesc6200592013-09-30 18:43:46 -0700627 loaded_phdr_ = reinterpret_cast<const Elf_Phdr*>(loaded);
Elliott Hughes650be4e2013-03-05 18:47:58 -0800628 return true;
629 }
630 }
Elliott Hughesc00f2cb2013-10-04 17:01:33 -0700631 DL_ERR("\"%s\" loaded phdr %p not in loadable segment", name_, reinterpret_cast<void*>(loaded));
Elliott Hughes650be4e2013-03-05 18:47:58 -0800632 return false;
David 'Digit' Turnerc1bd5592012-06-19 11:21:29 +0200633}