/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>

#include "linker_phdr.h"

/**
  TECHNICAL NOTE ON ELF LOADING.

  An ELF file's program header table contains one or more PT_LOAD
  segments, which correspond to portions of the file that need to
  be mapped into the process' address space.

  Each loadable segment has the following important properties:

    p_offset  -> segment file offset
    p_filesz  -> segment file size
    p_memsz   -> segment memory size (always >= p_filesz)
    p_vaddr   -> segment's virtual address
    p_flags   -> segment flags (e.g. readable, writable, executable)

  We will ignore the p_paddr and p_align fields of Elf32_Phdr for now.

  The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
  ranges of virtual addresses. A few rules apply:

  - the virtual address ranges should not overlap.

  - if a segment's p_filesz is smaller than its p_memsz, the extra bytes
    between them should always be initialized to 0.

  - ranges do not necessarily start or end at page boundaries. Two distinct
    segments can have their start and end on the same page. In this case, the
    page inherits the mapping flags of the latter segment.

  Finally, the real load address of each segment is not its p_vaddr. Instead,
  the loader decides where to load the first segment, then loads all others
  relative to the first one to preserve the initial range layout.

  For example, consider the following list:

    [ offset:0,      filesz:0x4000, memsz:0x4000, vaddr:0x30000 ],
    [ offset:0x4000, filesz:0x2000, memsz:0x8000, vaddr:0x40000 ],

  This corresponds to two segments that cover these virtual address ranges:

       0x30000...0x34000
       0x40000...0x48000

  If the loader decides to load the first segment at address 0xa0030000,
  then the segments' load address ranges will be:

       0xa0030000...0xa0034000
       0xa0040000...0xa0048000

  In other words, all segments must be loaded at an address that has the same
  constant offset from their p_vaddr value. This offset is computed as the
  difference between the first segment's load address and its p_vaddr value.

  However, in practice, segments do _not_ start at page boundaries. Since we
  can only memory-map at page boundaries, this means that the bias is
  computed as:

       load_bias = phdr0_load_address - PAGE_START(phdr0->p_vaddr)

  (NOTE: The value must be used as a 32-bit unsigned integer, to deal with
   possible wrap-around of UINT32_MAX for very large p_vaddr values.)

  Note that phdr0_load_address must itself start at a page boundary, with
  the segment's real content starting at:

       phdr0_load_address + PAGE_OFFSET(phdr0->p_vaddr)

  Note that ELF requires the following condition to make the mmap()-ing work:

      PAGE_OFFSET(phdr0->p_vaddr) == PAGE_OFFSET(phdr0->p_offset)

  The load_bias must be added to any p_vaddr value read from the ELF file to
  determine the corresponding memory address.

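  As a worked example (illustrative numbers only, assuming 4096-byte pages):
  if phdr0->p_vaddr == 0x30200 and the loader maps the segment's first page
  at phdr0_load_address == 0xa0030000, then:

       load_bias = 0xa0030000 - PAGE_START(0x30200)
                 = 0xa0030000 - 0x30000
                 = 0xa0000000

  and the segment's content starts at 0xa0030000 + PAGE_OFFSET(0x30200),
  i.e. at 0xa0030200 == load_bias + phdr0->p_vaddr.
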
 **/

#define MAYBE_MAP_FLAG(x,from,to)  (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x)          (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                                    MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                                    MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
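
/* For illustration only: PFLAGS_TO_PROT(PF_R|PF_X) evaluates to
 * (PROT_READ|PROT_EXEC), and PFLAGS_TO_PROT(PF_R|PF_W) to
 * (PROT_READ|PROT_WRITE).
 */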

/* Load the program header table from an ELF file into a read-only private
 * mmap-ed block.
 *
 * Input:
 *   fd           -> file descriptor
 *   phdr_offset  -> file offset of phdr table
 *   phdr_num     -> number of entries in the table.
 *
 * Output:
 *   phdr_mmap    -> address of mmap block in memory.
 *   phdr_size    -> size of mmap block in memory.
 *   phdr_table   -> address of first entry in memory.
 *
 * Return:
 *   -1 on error, or 0 on success.
 */
int phdr_table_load(int                fd,
                    Elf32_Addr         phdr_offset,
                    Elf32_Half         phdr_num,
                    void**             phdr_mmap,
                    Elf32_Addr*        phdr_size,
                    const Elf32_Phdr** phdr_table)
{
    Elf32_Addr page_min, page_max, page_offset;
    void* mmap_result;

    /* Just like the kernel, we only accept program header tables that
     * are smaller than 64KB. */
    if (phdr_num < 1 || phdr_num > 65536/sizeof(Elf32_Phdr)) {
        errno = EINVAL;
        return -1;
    }

    page_min = PAGE_START(phdr_offset);
    page_max = PAGE_END(phdr_offset + phdr_num*sizeof(Elf32_Phdr));
    page_offset = PAGE_OFFSET(phdr_offset);

    mmap_result = mmap(NULL,
                       page_max - page_min,
                       PROT_READ,
                       MAP_PRIVATE,
                       fd,
                       page_min);

    if (mmap_result == MAP_FAILED) {
        return -1;
    }

    *phdr_mmap = mmap_result;
    *phdr_size = page_max - page_min;
    *phdr_table = (Elf32_Phdr*)((char*)mmap_result + page_offset);

    return 0;
}

void phdr_table_unload(void* phdr_mmap, Elf32_Addr phdr_memsize)
{
    munmap(phdr_mmap, phdr_memsize);
}


/* Compute the extent of all loadable segments in an ELF program header
 * table. This corresponds to the page-aligned size in bytes that needs to be
 * reserved in the process' address space.
 *
 * This returns 0 if there are no loadable segments.
 */
Elf32_Addr phdr_table_get_load_size(const Elf32_Phdr* phdr_table,
                                    int               phdr_count)
{
    int nn;

    Elf32_Addr min_vaddr = 0xFFFFFFFFU;
    Elf32_Addr max_vaddr = 0x00000000U;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];

        if (phdr->p_type != PT_LOAD)
            continue;

        if (phdr->p_vaddr < min_vaddr)
            min_vaddr = phdr->p_vaddr;

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr)
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
    }

    if (min_vaddr > max_vaddr) {
        return 0;
    }

    min_vaddr = PAGE_START(min_vaddr);
    max_vaddr = PAGE_END(max_vaddr);

    return max_vaddr - min_vaddr;
}

/* Reserve a virtual address range big enough to hold all loadable
 * segments of a program header table. This is done by creating a
 * private anonymous mmap() with PROT_NONE.
 *
 * Input:
 *   phdr_table    -> program header table
 *   phdr_count    -> number of entries in the table
 *   required_base -> for prelinked libraries, mandatory load address
 *                    of the first loadable segment. 0 otherwise.
 * Output:
 *   load_start    -> first page of reserved address space range
 *   load_size     -> size in bytes of reserved address space range
 *   load_bias     -> load bias, as described in technical note above.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_reserve_memory(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        required_base,
                          void**            load_start,
                          Elf32_Addr*       load_size,
                          Elf32_Addr*       load_bias)
{
    Elf32_Addr size = phdr_table_get_load_size(phdr_table, phdr_count);
    void*      start;
    int        nn, mmap_flags;

    if (size == 0) {
        errno = EINVAL;
        return -1;
    }

    mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
    if (required_base != 0)
        mmap_flags |= MAP_FIXED;

    start = mmap((void*)required_base, size, PROT_NONE, mmap_flags, -1, 0);
    if (start == MAP_FAILED) {
        return -1;
    }

    *load_start = start;
    *load_size  = size;
    *load_bias  = 0;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        if (phdr->p_type == PT_LOAD) {
            *load_bias = (Elf32_Addr)start - PAGE_START(phdr->p_vaddr);
            break;
        }
    }
    return 0;
}

/* Map all loadable segments in the process' address space.
 * This assumes you already called phdr_table_reserve_memory to
 * reserve the address space range for the library.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_start  -> start address of reserved memory range.
 *   load_size   -> size of reserved memory range.
 *   load_bias   -> load offset.
 *   fd          -> input file descriptor.
 *
 * Return:
 *   0 on success, -1 otherwise. Error code in errno.
 */
int
phdr_table_load_segments(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         void*             load_start,
                         Elf32_Addr        load_size,
                         Elf32_Addr        load_bias,
                         int               fd)
{
    int nn;

    for (nn = 0; nn < phdr_count; nn++) {
        const Elf32_Phdr* phdr = &phdr_table[nn];
        void* seg_addr;

        if (phdr->p_type != PT_LOAD)
            continue;

        /* Segment addresses in memory */
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = seg_start + phdr->p_memsz;

        Elf32_Addr seg_page_start = PAGE_START(seg_start);
        Elf32_Addr seg_page_end   = PAGE_END(seg_end);

        Elf32_Addr seg_file_end   = seg_start + phdr->p_filesz;

        /* File offsets */
        Elf32_Addr file_start = phdr->p_offset;
        Elf32_Addr file_end   = file_start + phdr->p_filesz;

        Elf32_Addr file_page_start = PAGE_START(file_start);
        Elf32_Addr file_page_end   = PAGE_END(file_end);

        seg_addr = mmap((void*)seg_page_start,
                        file_end - file_page_start,
                        PFLAGS_TO_PROT(phdr->p_flags),
                        MAP_FIXED|MAP_PRIVATE,
                        fd,
                        file_page_start);

        if (seg_addr == MAP_FAILED) {
            return -1;
        }

        /* If the segment is writable, and does not end on a page boundary,
         * zero-fill it until the page limit. */
        if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) {
            memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end));
        }

        seg_file_end = PAGE_END(seg_file_end);

        /* seg_file_end is now the first page address after the file
         * content. If seg_end is larger, we need to zero anything
         * between them. This is done by using a private anonymous
         * map for all extra pages.
         */
        if (seg_page_end > seg_file_end) {
            void* zeromap = mmap((void*)seg_file_end,
                                 seg_page_end - seg_file_end,
                                 PFLAGS_TO_PROT(phdr->p_flags),
                                 MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
                                 -1,
                                 0);
            if (zeromap == MAP_FAILED) {
                return -1;
            }
        }
    }
    return 0;
}
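
/* A rough sketch of the typical loading sequence using the functions above
 * (error handling and unmapping omitted; 'fd', 'ehdr' and 'required_base'
 * are assumed to be provided by the caller, with 'ehdr' pointing at the
 * file's Elf32_Ehdr):
 *
 *     void* phdr_mmap; Elf32_Addr phdr_size; const Elf32_Phdr* phdr_table;
 *     phdr_table_load(fd, ehdr->e_phoff, ehdr->e_phnum,
 *                     &phdr_mmap, &phdr_size, &phdr_table);
 *
 *     void* load_start; Elf32_Addr load_size, load_bias;
 *     phdr_table_reserve_memory(phdr_table, ehdr->e_phnum, required_base,
 *                               &load_start, &load_size, &load_bias);
 *
 *     phdr_table_load_segments(phdr_table, ehdr->e_phnum,
 *                              load_start, load_size, load_bias, fd);
 *
 *     ... apply relocations, then phdr_table_protect_gnu_relro() ...
 */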

/* Used internally to set the protection bits of all loaded segments
 * with optional extra flags (in practice, PROT_WRITE). Segments that
 * are already writable are left untouched. Used by
 * phdr_table_protect_segments and phdr_table_unprotect_segments below.
 */
static int
_phdr_table_set_load_prot(const Elf32_Phdr* phdr_table,
                          int               phdr_count,
                          Elf32_Addr        load_bias,
                          int               extra_prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
            continue;

        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Restore the original protection modes for all loadable segments.
 * You should only call this after phdr_table_unprotect_segments and
 * applying all relocations.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_segments(const Elf32_Phdr* phdr_table,
                            int               phdr_count,
                            Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, 0);
}

/* Change the protection of all loaded segments in memory to writable.
 * This is useful before performing relocations. Once completed, you
 * will have to call phdr_table_protect_segments to restore the original
 * protection flags on all segments.
 *
 * Note that some writable segments can also have their content turned
 * to read-only by calling phdr_table_protect_gnu_relro. This is not
 * performed here.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_segments(const Elf32_Phdr* phdr_table,
                              int               phdr_count,
                              Elf32_Addr        load_bias)
{
    return _phdr_table_set_load_prot(phdr_table, phdr_count,
                                     load_bias, PROT_WRITE);
}
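
/* Typical use around relocation (a rough sketch; error handling omitted):
 *
 *     phdr_table_unprotect_segments(phdr_table, phdr_count, load_bias);
 *     ... apply relocations here ...
 *     phdr_table_protect_segments(phdr_table, phdr_count, load_bias);
 */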

/* Used internally by phdr_table_protect_gnu_relro and
 * phdr_table_unprotect_gnu_relro.
 */
static int
_phdr_table_set_gnu_relro_prot(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias,
                               int               prot_flags)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_GNU_RELRO)
            continue;

        /* Tricky: what happens when the relro segment does not start
         * or end at page boundaries? We're going to be over-protective
         * here and apply the new protection to every page touched by
         * the segment.
         *
         * This seems to match Ian Lance Taylor's description of the
         * feature at http://www.airs.com/blog/archives/189.
         *
         * Extract:
         *    Note that the current dynamic linker code will only work
         *    correctly if the PT_GNU_RELRO segment starts on a page
         *    boundary. This is because the dynamic linker rounds the
         *    p_vaddr field down to the previous page boundary. If
         *    there is anything on the page which should not be read-only,
         *    the program is likely to fail at runtime. So in effect the
         *    linker must only emit a PT_GNU_RELRO segment if it ensures
         *    that it starts on a page boundary.
         */
        Elf32_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
        Elf32_Addr seg_page_end   = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;

        int ret = mprotect((void*)seg_page_start,
                           seg_page_end - seg_page_start,
                           prot_flags);
        if (ret < 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply GNU relro protection if specified by the program header. This will
 * turn some of the pages of a writable PT_LOAD segment to read-only, as
 * specified by one or more PT_GNU_RELRO segments. This must always be
 * performed after relocations.
 *
 * NOTE: One must call phdr_table_unprotect_gnu_relro() before calling
 * the library's destructors, in order to ensure that the .dynamic
 * section is writable (as well as the .data.rel.ro section, which
 * might contain the content of static constant C++ objects that
 * need to be destroyed).
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_protect_gnu_relro(const Elf32_Phdr* phdr_table,
                             int               phdr_count,
                             Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ);
}

/* Un-apply GNU relro protection if specified by the program header.
 * See the comment for phdr_table_protect_gnu_relro.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   0 on success, -1 on failure (error code in errno).
 */
int
phdr_table_unprotect_gnu_relro(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias)
{
    return _phdr_table_set_gnu_relro_prot(phdr_table,
                                          phdr_count,
                                          load_bias,
                                          PROT_READ|PROT_WRITE);
}

#ifdef ANDROID_ARM_LINKER

# ifndef PT_ARM_EXIDX
#  define PT_ARM_EXIDX    0x70000001      /* .ARM.exidx segment */
# endif

/* Return the address and size of the .ARM.exidx section in memory,
 * if present.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Output:
 *   arm_exidx       -> address of table in memory (NULL on failure).
 *   arm_exidx_count -> number of items in table (0 on failure).
 * Return:
 *   0 on success, -1 on failure (_no_ error code in errno).
 */
int
phdr_table_get_arm_exidx(const Elf32_Phdr* phdr_table,
                         int               phdr_count,
                         Elf32_Addr        load_bias,
                         Elf32_Addr**      arm_exidx,
                         unsigned*         arm_exidx_count)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_ARM_EXIDX)
            continue;

        *arm_exidx = (Elf32_Addr*)(load_bias + phdr->p_vaddr);
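        /* Each .ARM.exidx entry is a pair of 32-bit words, i.e. 8 bytes. */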
        *arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
        return 0;
    }
    *arm_exidx = NULL;
    *arm_exidx_count = 0;
    return -1;
}
#endif /* ANDROID_ARM_LINKER */

/* Return the address of the ELF file's .dynamic section in memory,
 * or NULL if missing.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   Address of the PT_DYNAMIC segment in memory, or NULL if not
 *   found (_no_ error code in errno).
 */
Elf32_Addr*
phdr_table_get_dynamic_section(const Elf32_Phdr* phdr_table,
                               int               phdr_count,
                               Elf32_Addr        load_bias)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_DYNAMIC) {
            return (Elf32_Addr*)(load_bias + phdr->p_vaddr);
        }
    }
    return NULL;
}

/* Return the address of the program header table as it appears in the loaded
 * segments in memory. This is in contrast with the input 'phdr_table' which
 * is temporary and will be released before the library is relocated.
 *
 * Input:
 *   phdr_table  -> program header table
 *   phdr_count  -> number of entries in the table
 *   load_bias   -> load bias
 * Return:
 *   Address of the loaded program header table on success (it has
 *   'phdr_count' entries), or NULL on failure (no error code).
 */
const Elf32_Phdr*
phdr_table_get_loaded_phdr(const Elf32_Phdr* phdr_table,
                           int               phdr_count,
                           Elf32_Addr        load_bias)
{
    const Elf32_Phdr* phdr = phdr_table;
    const Elf32_Phdr* phdr_limit = phdr + phdr_count;
    Elf32_Addr loaded = 0;
    Elf32_Addr loaded_end;

    /* If there is a PT_PHDR, use it directly */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_PHDR) {
            loaded = load_bias + phdr->p_vaddr;
            goto CHECK;
        }
    }

    /* Otherwise, check the first loadable segment. If its file offset
     * is 0, it starts with the ELF header, and we can trivially find the
     * loaded program header from it. */
    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type == PT_LOAD) {
            if (phdr->p_offset == 0) {
                Elf32_Addr  elf_addr = load_bias + phdr->p_vaddr;
                const Elf32_Ehdr* ehdr = (const Elf32_Ehdr*)(void*)elf_addr;
                Elf32_Addr  offset = ehdr->e_phoff;
                loaded = (Elf32_Addr)ehdr + offset;
                goto CHECK;
            }
            break;
        }
    }

    /* We didn't find it; let the client know. The caller may be able to
     * keep a copy of the input phdr_table instead. */
    return NULL;

CHECK:
    /* Ensure that our program header is actually within a loadable
     * segment. This should help catch badly-formed ELF files that
     * would cause the linker to crash later when trying to access it.
     */
    loaded_end = loaded + phdr_count*sizeof(Elf32_Phdr);

    for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
        if (phdr->p_type != PT_LOAD)
            continue;
        Elf32_Addr seg_start = phdr->p_vaddr + load_bias;
        Elf32_Addr seg_end   = phdr->p_filesz + seg_start;

        if (seg_start <= loaded && loaded_end <= seg_end) {
            return (const Elf32_Phdr*)loaded;
        }
    }
    return NULL;
}