#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif


#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE "perf"
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index into @syms
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

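/*
 * Return the 1-based index of the section whose [sh_addr, sh_addr + sh_size)
 * range contains @addr, or (size_t)-1 if no section matches.
 */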
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the .plt by
 * synthesizing its symbols, which aren't in the symtabs (be it .dynsym or
 * .symtab). Always look at the original dso, not at debuginfo packages,
 * which have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

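/*
 * Scan the note sections for an NT_GNU_BUILD_ID note and copy up to @size
 * bytes of its descriptor (the build-id) into @bf, zero-padding the rest.
 * Returns the descriptor size on success, or -1 if no usable note is found.
 */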
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

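/*
 * Read a build-id from a sysfs notes file (typically /sys/kernel/notes or a
 * module's notes directory).  These files contain raw ELF note records
 * rather than a full ELF image, so the note headers are parsed by hand.
 */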
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

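/*
 * Record in dso->needs_swap whether the DSO's byte order (EI_DATA) differs
 * from the host's, as determined with a runtime endianness probe.
 */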
static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

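/*
 * Decompress a compressed kernel module to a temporary file and return an
 * open fd for it.  The temporary file is unlinked right away, so the fd is
 * the only remaining reference.  Returns -1 on failure.
 */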
static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

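/*
 * Open @name, validate it as an ELF image (including checking its build-id
 * against @dso when one is known), and cache the sections needed later on:
 * .symtab, .dynsym and .opd.  Most failure paths record the reason in
 * dso->load_errno.
 */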
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				ehdr.e_type == ET_REL ||
				dso__is_vdso(dso) ||
				elf_section_by_name(elf, &ehdr, &shdr,
						     ".gnu.prelink_undo",
						     NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

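/*
 * Load symbols from @syms_ss, using section data from @runtime_ss, run them
 * through @filter and insert the survivors into the dso's symbol tree.
 * Kernel and module mappings are remapped and adjusted along the way.
 * Returns the number of symbols added, or a negative value on error.
 */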
int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have SHN_ABS in their st_shndx) fail at elf_getscn(), and
		 * that marks the whole load as a failure, so already loaded
		 * symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					if (kmaps) {
						map_groups__remove(kmaps, map);
						map_groups__insert(kmaps, map);
					}
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmaps, curr_map);
				/*
				 * The new DSO should go to the kernel DSOS
				 */
				dsos__add(&map->groups->machine->kernel_dsos,
					  curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++
		 * sources.  DWARF DW_compile_unit has this, but we don't
		 * always have access to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

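/*
 * Walk the program headers and call @mapfn for each PT_LOAD segment:
 * executable (PF_X) segments when @exe is true, readable (PF_R) segments
 * otherwise.
 */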
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

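/*
 * Copy @len bytes from @from at @from_offs to @to at @to_offs, one page at a
 * time.  read() is used rather than mmap() because proc files cannot be
 * mmapped.
 */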
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

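/*
 * Clone the ELF header of @from into @to, zeroing the section-related fields
 * and reserving room for @count program headers.
 */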
static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr = addr;
	phdr->p_paddr = 0;
	phdr->p_filesz = len;
	phdr->p_memsz = len;
	phdr->p_align = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

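/*
 * If the symbol range [s, e) lies within the LOAD segment [start, end),
 * record its address, file offset and (segment-clamped) length in @p.  The
 * first match wins.
 */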
static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too. Because it contains only code sections, the resulting kcore is
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

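/*
 * Extract a single chunk of kcore (kce->len bytes at file offset kce->offs,
 * mapped at kce->addr) into a temporary single-segment ELF file whose name
 * is left in kce->extract_filename.
 */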
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

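/*
 * libelf requires elf_version(EV_CURRENT) to be called before any other
 * libelf function, so do it once at startup.
 */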
void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}