blob: 62742e46c01036278e424fe477f9fab58df7835f [file] [log] [blame]
Namhyung Kime5a18452012-08-06 13:41:20 +09001#include <fcntl.h>
2#include <stdio.h>
3#include <errno.h>
4#include <string.h>
5#include <unistd.h>
6#include <inttypes.h>
7
8#include "symbol.h"
Waiman Long8fa7d872014-09-29 16:07:28 -04009#include "machine.h"
Vladimir Nikulichev922d0e42014-04-17 08:27:01 -070010#include "vdso.h"
Arnaldo Carvalho de Meloc506c962013-12-11 09:15:00 -030011#include <symbol/kallsyms.h>
Namhyung Kime5a18452012-08-06 13:41:20 +090012#include "debug.h"
13
David Aherne370a3d2015-02-18 19:33:37 -050014#ifndef EM_AARCH64
15#define EM_AARCH64 183 /* ARM 64 bit */
16#endif
17
18
Arnaldo Carvalho de Meloaaba4e12014-11-24 17:10:52 -030019#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
20extern char *cplus_demangle(const char *, int);
21
22static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
23{
24 return cplus_demangle(c, i);
25}
26#else
27#ifdef NO_DEMANGLE
28static inline char *bfd_demangle(void __maybe_unused *v,
29 const char __maybe_unused *c,
30 int __maybe_unused i)
31{
32 return NULL;
33}
34#else
35#define PACKAGE 'perf'
36#include <bfd.h>
37#endif
38#endif
39
Ingo Molnar89fe8082013-09-30 12:07:11 +020040#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
Adrian Huntere955d5c2013-09-13 16:49:30 +030041static int elf_getphdrnum(Elf *elf, size_t *dst)
42{
43 GElf_Ehdr gehdr;
44 GElf_Ehdr *ehdr;
45
46 ehdr = gelf_getehdr(elf, &gehdr);
47 if (!ehdr)
48 return -1;
49
50 *dst = ehdr->e_phnum;
51
52 return 0;
53}
54#endif
55
Namhyung Kime5a18452012-08-06 13:41:20 +090056#ifndef NT_GNU_BUILD_ID
57#define NT_GNU_BUILD_ID 3
58#endif
59
60/**
61 * elf_symtab__for_each_symbol - iterate thru all the symbols
62 *
63 * @syms: struct elf_symtab instance to iterate
64 * @idx: uint32_t idx
65 * @sym: GElf_Sym iterator
66 */
67#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
68 for (idx = 0, gelf_getsym(syms, idx, &sym);\
69 idx < nr_syms; \
70 idx++, gelf_getsym(syms, idx, &sym))
71
72static inline uint8_t elf_sym__type(const GElf_Sym *sym)
73{
74 return GELF_ST_TYPE(sym->st_info);
75}
76
Vinson Lee4e310502015-02-09 16:29:37 -080077#ifndef STT_GNU_IFUNC
78#define STT_GNU_IFUNC 10
79#endif
80
Namhyung Kime5a18452012-08-06 13:41:20 +090081static inline int elf_sym__is_function(const GElf_Sym *sym)
82{
Adrian Huntera2f3b6b2014-07-14 13:02:33 +030083 return (elf_sym__type(sym) == STT_FUNC ||
84 elf_sym__type(sym) == STT_GNU_IFUNC) &&
Namhyung Kime5a18452012-08-06 13:41:20 +090085 sym->st_name != 0 &&
86 sym->st_shndx != SHN_UNDEF;
87}
88
89static inline bool elf_sym__is_object(const GElf_Sym *sym)
90{
91 return elf_sym__type(sym) == STT_OBJECT &&
92 sym->st_name != 0 &&
93 sym->st_shndx != SHN_UNDEF;
94}
95
96static inline int elf_sym__is_label(const GElf_Sym *sym)
97{
98 return elf_sym__type(sym) == STT_NOTYPE &&
99 sym->st_name != 0 &&
100 sym->st_shndx != SHN_UNDEF &&
101 sym->st_shndx != SHN_ABS;
102}
103
104static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
105{
106 switch (type) {
107 case MAP__FUNCTION:
108 return elf_sym__is_function(sym);
109 case MAP__VARIABLE:
110 return elf_sym__is_object(sym);
111 default:
112 return false;
113 }
114}
115
116static inline const char *elf_sym__name(const GElf_Sym *sym,
117 const Elf_Data *symstrs)
118{
119 return symstrs->d_buf + sym->st_name;
120}
121
122static inline const char *elf_sec__name(const GElf_Shdr *shdr,
123 const Elf_Data *secstrs)
124{
125 return secstrs->d_buf + shdr->sh_name;
126}
127
128static inline int elf_sec__is_text(const GElf_Shdr *shdr,
129 const Elf_Data *secstrs)
130{
131 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
132}
133
134static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
135 const Elf_Data *secstrs)
136{
137 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
138}
139
140static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
141 enum map_type type)
142{
143 switch (type) {
144 case MAP__FUNCTION:
145 return elf_sec__is_text(shdr, secstrs);
146 case MAP__VARIABLE:
147 return elf_sec__is_data(shdr, secstrs);
148 default:
149 return false;
150 }
151}
152
/*
 * Map a virtual address to the index of the section whose
 * [sh_addr, sh_addr + sh_size) range contains it.
 *
 * Section indices start at 1 — index 0 is the reserved null section —
 * hence cnt's initial value.
 *
 * Returns the 1-based section index, or -1 when no section covers
 * @addr.  NOTE(review): the return type is size_t, so -1 wraps to
 * SIZE_MAX; callers presumably treat any out-of-range index as
 * "not found" — confirm at call sites.
 */
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		/* Half-open interval: start inclusive, end exclusive. */
		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}
171
/*
 * Find an ELF section by exact name match.
 *
 * @elf:  libelf descriptor to search
 * @ep:   ELF header, used for e_shstrndx (the section-name string table)
 * @shp:  out: filled with the matching section's header
 * @name: section name to look for
 * @idx:  optional out: 1-based index of the matching section
 *
 * Returns the matching Elf_Scn, or NULL when no section matches or the
 * section-name string table cannot be read.
 */
Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		/* elf_strptr() may return NULL on a bad sh_name offset. */
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}
197
198#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
199 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
200 idx < nr_entries; \
201 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
202
203#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
204 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
205 idx < nr_entries; \
206 ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
207
208/*
209 * We need to check if we have a .dynsym, so that we can handle the
210 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
211 * .dynsym or .symtab).
212 * And always look at the original dso, not at debuginfo packages, that
213 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
214 */
/*
 * Synthesize "name@plt" symbols for every PLT entry by walking the
 * .rela.plt/.rel.plt relocation section and resolving each entry's
 * symbol through .dynsym.  New symbols are inserted into
 * dso->symbols[map->type] unless rejected by @filter.
 *
 * Returns the number of symbols added on success, 0 on failure (with a
 * debug message) — failures are not fatal to the caller.
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	/* No dynamic symbol table means there is nothing to resolve against. */
	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	/* Prefer RELA-style relocations, fall back to REL. */
	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	/*
	 * From here on a bail-out is a real error: err != 0 makes the
	 * epilogue print a diagnostic instead of returning nr.
	 */
	err = -1;

	/* The reloc section must reference the .dynsym we were given. */
	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	/*
	 * The two branches below are identical except for the reloc
	 * record type (GElf_Rela vs GElf_Rel).  Each PLT slot is
	 * sh_entsize bytes; the first slot is skipped because
	 * plt_offset is advanced before the symbol is created.
	 */
	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}
343
344/*
345 * Align offset to 4 bytes as needed for note name and descriptor data.
346 */
347#define NOTE_ALIGN(n) (((n) + 3) & -4U)
348
/*
 * Extract the GNU build-id from an already-open ELF image.
 *
 * @elf:  libelf descriptor
 * @bf:   out buffer for the raw build-id bytes (zero-padded to @size)
 * @size: capacity of @bf; must be at least BUILD_ID_SIZE
 *
 * Returns the note's descriptor size (the build-id length) on success,
 * -1 on any failure.
 */
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		/* No note section at all: give up. */
		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	/*
	 * Walk the note records: each is a GElf_Nhdr followed by a
	 * 4-byte-aligned name and a 4-byte-aligned descriptor.
	 */
	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		/* Build-id note: type NT_GNU_BUILD_ID, owner "GNU". */
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				/* Zero-pad so short ids compare cleanly. */
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}
427
428int filename__read_build_id(const char *filename, void *bf, size_t size)
429{
430 int fd, err = -1;
431 Elf *elf;
432
433 if (size < BUILD_ID_SIZE)
434 goto out;
435
436 fd = open(filename, O_RDONLY);
437 if (fd < 0)
438 goto out;
439
440 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
441 if (elf == NULL) {
442 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
443 goto out_close;
444 }
445
446 err = elf_read_build_id(elf, bf, size);
447
448 elf_end(elf);
449out_close:
450 close(fd);
451out:
452 return err;
453}
454
/*
 * Read a GNU build-id from a sysfs notes file (e.g. /sys/kernel/notes),
 * which is a raw stream of ELF note records rather than an ELF image.
 *
 * The file is consumed sequentially: each iteration reads one note
 * header, then either the matching name+descriptor (for a build-id
 * note) or skips the record wholesale.
 *
 * Returns 0 on success with @build_id filled (zero-padded to @size),
 * -1 on failure.
 */
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		/* Short read means end of notes: stop. */
		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		/* Name and descriptor are 4-byte aligned in the stream. */
		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					/* Zero-pad short ids. */
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			/* Not a build-id note: skip name + descriptor. */
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}
499
500int filename__read_debuglink(const char *filename, char *debuglink,
501 size_t size)
502{
503 int fd, err = -1;
504 Elf *elf;
505 GElf_Ehdr ehdr;
506 GElf_Shdr shdr;
507 Elf_Data *data;
508 Elf_Scn *sec;
509 Elf_Kind ek;
510
511 fd = open(filename, O_RDONLY);
512 if (fd < 0)
513 goto out;
514
515 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
516 if (elf == NULL) {
517 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
518 goto out_close;
519 }
520
521 ek = elf_kind(elf);
522 if (ek != ELF_K_ELF)
Chenggang Qin784f3392013-10-11 08:27:57 +0800523 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900524
525 if (gelf_getehdr(elf, &ehdr) == NULL) {
526 pr_err("%s: cannot get elf header.\n", __func__);
Chenggang Qin784f3392013-10-11 08:27:57 +0800527 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900528 }
529
530 sec = elf_section_by_name(elf, &ehdr, &shdr,
531 ".gnu_debuglink", NULL);
532 if (sec == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800533 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900534
535 data = elf_getdata(sec, NULL);
536 if (data == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800537 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900538
539 /* the start of this section is a zero-terminated string */
540 strncpy(debuglink, data->d_buf, size);
541
Stephane Eranian0d3dc5e2014-02-20 10:32:55 +0900542 err = 0;
543
Chenggang Qin784f3392013-10-11 08:27:57 +0800544out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900545 elf_end(elf);
Namhyung Kime5a18452012-08-06 13:41:20 +0900546out_close:
547 close(fd);
548out:
549 return err;
550}
551
552static int dso__swap_init(struct dso *dso, unsigned char eidata)
553{
554 static unsigned int const endian = 1;
555
556 dso->needs_swap = DSO_SWAP__NO;
557
558 switch (eidata) {
559 case ELFDATA2LSB:
560 /* We are big endian, DSO is little endian. */
561 if (*(unsigned char const *)&endian != 1)
562 dso->needs_swap = DSO_SWAP__YES;
563 break;
564
565 case ELFDATA2MSB:
566 /* We are little endian, DSO is big endian. */
567 if (*(unsigned char const *)&endian != 0)
568 dso->needs_swap = DSO_SWAP__YES;
569 break;
570
571 default:
572 pr_err("unrecognized DSO data encoding %d\n", eidata);
573 return -EINVAL;
574 }
575
576 return 0;
577}
578
/*
 * Decompress a compressed kernel module (e.g. .ko.gz/.ko.xz) into an
 * anonymous temporary file so it can be parsed as a plain ELF.
 *
 * Only binary types that may legitimately be compressed are accepted.
 * The compression format is taken from the file-name extension of
 * @name, falling back to the extension of dso->name (the build-id
 * cache stores files under a hash with no extension).
 *
 * Returns an open fd for the decompressed image (already unlinked, so
 * it vanishes on close), or -1 on failure.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd;
	const char *ext = strrchr(name, '.');
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (!ext || !is_supported_compression(ext + 1)) {
		/* Fall back to the DSO's own name for the extension. */
		ext = strrchr(dso->name, '.');
		if (!ext || !is_supported_compression(ext + 1))
			return -1;
	}

	fd = mkstemp(tmpbuf);
	if (fd < 0)
		return -1;

	if (!decompress_to_file(ext + 1, name, fd)) {
		close(fd);
		fd = -1;
	}

	/* Unlink now; the open fd keeps the file alive until closed. */
	unlink(tmpbuf);

	return fd;
}
610
Cody P Schafer3aafe5a2012-08-10 15:23:02 -0700611bool symsrc__possibly_runtime(struct symsrc *ss)
612{
613 return ss->dynsym || ss->opdsec;
614}
615
Cody P Schaferd26cd122012-08-10 15:23:00 -0700616bool symsrc__has_symtab(struct symsrc *ss)
617{
618 return ss->symtab != NULL;
619}
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700620
/*
 * Tear down a symsrc set up by symsrc__init(): free the duplicated
 * path name, release the libelf handle, then close the fd.
 */
void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}
627
/*
 * Open @name as a symbol source for @dso, populating @ss with the
 * libelf handle, the .symtab/.dynsym/.opd sections (when present and
 * of the expected type), and derived flags.
 *
 * Compressed kernel modules are transparently decompressed first.
 * Images whose build-id does not match the dso's recorded one are
 * rejected outright.
 *
 * Returns 0 on success (caller must eventually symsrc__destroy()),
 * -1 on failure with all acquired resources released.
 */
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso))
		fd = decompress_kmodule(dso, name, type);
	else
		fd = open(name, O_RDONLY);

	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	/* Record whether values from this DSO must be byte-swapped. */
	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	/* Sections found by name must also have the expected sh_type. */
	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
			NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
			&ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	/* .opd holds function descriptors on some ABIs (e.g. ppc64). */
	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
			&ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	/*
	 * Decide whether symbol values must be adjusted by section
	 * offsets; userland binaries additionally consider the vdso
	 * and prelinked objects.
	 */
	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				ehdr.e_type == ET_REL ||
				dso__is_vdso(dso) ||
				elf_section_by_name(elf, &ehdr, &shdr,
						     ".gnu.prelink_undo",
						     NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}
718
Adrian Hunter39b12f782013-08-07 14:38:47 +0300719/**
720 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
721 * @kmap: kernel maps and relocation reference symbol
722 *
723 * This function returns %true if we are dealing with the kernel maps and the
724 * relocation reference symbol has not yet been found. Otherwise %false is
725 * returned.
726 */
727static bool ref_reloc_sym_not_found(struct kmap *kmap)
728{
729 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
730 !kmap->ref_reloc_sym->unrelocated_addr;
731}
732
733/**
734 * ref_reloc - kernel relocation offset.
735 * @kmap: kernel maps and relocation reference symbol
736 *
737 * This function returns the offset of kernel addresses as determined by using
738 * the relocation reference symbol i.e. if the kernel has not been relocated
739 * then the return value is zero.
740 */
741static u64 ref_reloc(struct kmap *kmap)
742{
743 if (kmap && kmap->ref_reloc_sym &&
744 kmap->ref_reloc_sym->unrelocated_addr)
745 return kmap->ref_reloc_sym->addr -
746 kmap->ref_reloc_sym->unrelocated_addr;
747 return 0;
748}
749
Avi Kivity763122a2014-09-13 07:15:05 +0300750static bool want_demangle(bool is_kernel_sym)
751{
752 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
753}
754
/*
 * Load symbols for @dso/@map from a symbol source, using @runtime_ss
 * for section data/addresses when @syms_ss is a separate debuginfo
 * file.  Handles kernel/module map remapping, ppc64 .opd function
 * descriptors, ARM mapping/thumb symbols, per-section sub-maps for
 * kernel objects, and optional C++ demangling.
 *
 * Returns the number of symbols added, or -1 on error.
 */
int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		/* Otherwise fall back to the dynamic symbol table. */
		syms_ss->symtab = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	/* sh_link of a symtab points at its string table. */
	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	/* Section names come from the *runtime* image's shstrtab. */
	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso. For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		/*
		 * ppc64 function descriptors: the symbol points into
		 * .opd; the real entry address is the descriptor's
		 * first word.  Re-resolve the section index too.
		 */
		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
					sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * has a value of SHN_ABS in its st_shndx) failed at
		 * elf_getscn().  And it marks the loading as a failure so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			/*
			 * Symbols in other sections (e.g. .init.text)
			 * get their own per-section sub-map and dso,
			 * named "<dso><section>".
			 */
			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				/*
				 * The new DSO should go to the kernel DSOS
				 */
				dsos__add(&map->groups->machine->kernel_dsos,
					  curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}
1066
Adrian Hunter8e0cf962013-08-07 14:38:51 +03001067static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1068{
1069 GElf_Phdr phdr;
1070 size_t i, phdrnum;
1071 int err;
1072 u64 sz;
1073
1074 if (elf_getphdrnum(elf, &phdrnum))
1075 return -1;
1076
1077 for (i = 0; i < phdrnum; i++) {
1078 if (gelf_getphdr(elf, i, &phdr) == NULL)
1079 return -1;
1080 if (phdr.p_type != PT_LOAD)
1081 continue;
1082 if (exe) {
1083 if (!(phdr.p_flags & PF_X))
1084 continue;
1085 } else {
1086 if (!(phdr.p_flags & PF_R))
1087 continue;
1088 }
1089 sz = min(phdr.p_memsz, phdr.p_filesz);
1090 if (!sz)
1091 continue;
1092 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1093 if (err)
1094 return err;
1095 }
1096 return 0;
1097}
1098
1099int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1100 bool *is_64_bit)
1101{
1102 int err;
1103 Elf *elf;
1104
1105 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1106 if (elf == NULL)
1107 return -1;
1108
1109 if (is_64_bit)
1110 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1111
1112 err = elf_read_maps(elf, exe, mapfn, data);
1113
1114 elf_end(elf);
1115 return err;
1116}
1117
Adrian Hunter2b5b8bb2014-07-22 16:17:59 +03001118enum dso_type dso__type_fd(int fd)
1119{
1120 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1121 GElf_Ehdr ehdr;
1122 Elf_Kind ek;
1123 Elf *elf;
1124
1125 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1126 if (elf == NULL)
1127 goto out;
1128
1129 ek = elf_kind(elf);
1130 if (ek != ELF_K_ELF)
1131 goto out_end;
1132
1133 if (gelf_getclass(elf) == ELFCLASS64) {
1134 dso_type = DSO__TYPE_64BIT;
1135 goto out_end;
1136 }
1137
1138 if (gelf_getehdr(elf, &ehdr) == NULL)
1139 goto out_end;
1140
1141 if (ehdr.e_machine == EM_X86_64)
1142 dso_type = DSO__TYPE_X32BIT;
1143 else
1144 dso_type = DSO__TYPE_32BIT;
1145out_end:
1146 elf_end(elf);
1147out:
1148 return dso_type;
1149}
1150
Adrian Hunterafba19d2013-10-09 15:01:12 +03001151static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1152{
1153 ssize_t r;
1154 size_t n;
1155 int err = -1;
1156 char *buf = malloc(page_size);
1157
1158 if (buf == NULL)
1159 return -1;
1160
1161 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1162 goto out;
1163
1164 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1165 goto out;
1166
1167 while (len) {
1168 n = page_size;
1169 if (len < n)
1170 n = len;
1171 /* Use read because mmap won't work on proc files */
1172 r = read(from, buf, n);
1173 if (r < 0)
1174 goto out;
1175 if (!r)
1176 break;
1177 n = r;
1178 r = write(to, buf, n);
1179 if (r < 0)
1180 goto out;
1181 if ((size_t)r != n)
1182 goto out;
1183 len -= n;
1184 }
1185
1186 err = 0;
1187out:
1188 free(buf);
1189 return err;
1190}
1191
/*
 * Handle for one kcore ELF image, either an existing one being read
 * (kcore__open) or a new one being written (kcore__init).
 */
struct kcore {
	int fd;			/* underlying file descriptor */
	int elfclass;		/* ELFCLASS32 or ELFCLASS64 */
	Elf *elf;		/* libelf descriptor for fd */
	GElf_Ehdr ehdr;		/* copy of the ELF header */
};
1198
1199static int kcore__open(struct kcore *kcore, const char *filename)
1200{
1201 GElf_Ehdr *ehdr;
1202
1203 kcore->fd = open(filename, O_RDONLY);
1204 if (kcore->fd == -1)
1205 return -1;
1206
1207 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1208 if (!kcore->elf)
1209 goto out_close;
1210
1211 kcore->elfclass = gelf_getclass(kcore->elf);
1212 if (kcore->elfclass == ELFCLASSNONE)
1213 goto out_end;
1214
1215 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1216 if (!ehdr)
1217 goto out_end;
1218
1219 return 0;
1220
1221out_end:
1222 elf_end(kcore->elf);
1223out_close:
1224 close(kcore->fd);
1225 return -1;
1226}
1227
1228static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1229 bool temp)
1230{
1231 GElf_Ehdr *ehdr;
1232
1233 kcore->elfclass = elfclass;
1234
1235 if (temp)
1236 kcore->fd = mkstemp(filename);
1237 else
1238 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1239 if (kcore->fd == -1)
1240 return -1;
1241
1242 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1243 if (!kcore->elf)
1244 goto out_close;
1245
1246 if (!gelf_newehdr(kcore->elf, elfclass))
1247 goto out_end;
1248
1249 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1250 if (!ehdr)
1251 goto out_end;
1252
1253 return 0;
1254
1255out_end:
1256 elf_end(kcore->elf);
1257out_close:
1258 close(kcore->fd);
1259 unlink(filename);
1260 return -1;
1261}
1262
/* Release the libelf descriptor and close the underlying fd. */
static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}
1268
/*
 * Build the ELF header of the extracted kcore @to from the original
 * @from, and allocate @count program headers.  Section-related fields
 * are zeroed because the extract contains only PT_LOAD segments; the
 * program header table is placed immediately after the ELF header.
 * Returns 0 on success, -1 on libelf failure.
 */
static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	/* Header sizes depend on the ELF class of the original. */
	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}
1304
1305static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1306 u64 addr, u64 len)
1307{
1308 GElf_Phdr gphdr;
1309 GElf_Phdr *phdr;
1310
1311 phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
1312 if (!phdr)
1313 return -1;
1314
1315 phdr->p_type = PT_LOAD;
1316 phdr->p_flags = PF_R | PF_W | PF_X;
1317 phdr->p_offset = offset;
1318 phdr->p_vaddr = addr;
1319 phdr->p_paddr = 0;
1320 phdr->p_filesz = len;
1321 phdr->p_memsz = len;
1322 phdr->p_align = page_size;
1323
1324 if (!gelf_update_phdr(kcore->elf, idx, phdr))
1325 return -1;
1326
1327 return 0;
1328}
1329
/*
 * Flush the constructed ELF (header + program headers) to disk.
 * Returns the size of the written image, or -1 on error, per
 * elf_update() semantics.
 */
static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}
1334
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001335struct phdr_data {
1336 off_t offset;
1337 u64 addr;
1338 u64 len;
1339};
1340
/*
 * Address bounds gathered from kallsyms/modules, used to decide which
 * two code segments (kernel proper + modules) to copy out of kcore.
 * Zero values mean the corresponding symbol/address was not seen.
 */
struct kcore_copy_info {
	u64 stext;		/* address of _stext, if present */
	u64 etext;		/* address of _etext, if present */
	u64 first_symbol;	/* lowest non-module function symbol */
	u64 last_symbol;	/* highest non-module function symbol */
	u64 first_module;	/* lowest module load address */
	u64 last_module_symbol;	/* highest module function symbol */
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};
1351
1352static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1353 u64 start)
1354{
1355 struct kcore_copy_info *kci = arg;
1356
1357 if (!symbol_type__is_a(type, MAP__FUNCTION))
1358 return 0;
1359
1360 if (strchr(name, '[')) {
1361 if (start > kci->last_module_symbol)
1362 kci->last_module_symbol = start;
1363 return 0;
1364 }
1365
1366 if (!kci->first_symbol || start < kci->first_symbol)
1367 kci->first_symbol = start;
1368
1369 if (!kci->last_symbol || start > kci->last_symbol)
1370 kci->last_symbol = start;
1371
1372 if (!strcmp(name, "_stext")) {
1373 kci->stext = start;
1374 return 0;
1375 }
1376
1377 if (!strcmp(name, "_etext")) {
1378 kci->etext = start;
1379 return 0;
1380 }
1381
1382 return 0;
1383}
1384
1385static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1386 const char *dir)
1387{
1388 char kallsyms_filename[PATH_MAX];
1389
1390 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1391
1392 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1393 return -1;
1394
1395 if (kallsyms__parse(kallsyms_filename, kci,
1396 kcore_copy__process_kallsyms) < 0)
1397 return -1;
1398
1399 return 0;
1400}
1401
1402static int kcore_copy__process_modules(void *arg,
1403 const char *name __maybe_unused,
1404 u64 start)
1405{
1406 struct kcore_copy_info *kci = arg;
1407
1408 if (!kci->first_module || start < kci->first_module)
1409 kci->first_module = start;
1410
1411 return 0;
1412}
1413
1414static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1415 const char *dir)
1416{
1417 char modules_filename[PATH_MAX];
1418
1419 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1420
1421 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1422 return -1;
1423
1424 if (modules__parse(modules_filename, kci,
1425 kcore_copy__process_modules) < 0)
1426 return -1;
1427
1428 return 0;
1429}
1430
1431static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1432 u64 s, u64 e)
1433{
1434 if (p->addr || s < start || s >= end)
1435 return;
1436
1437 p->addr = s;
1438 p->offset = (s - start) + pgoff;
1439 p->len = e < end ? e - s : end - s;
1440}
1441
1442static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1443{
1444 struct kcore_copy_info *kci = data;
1445 u64 end = start + len;
1446
1447 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1448 kci->etext);
1449
1450 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1451 kci->last_module_symbol);
1452
1453 return 0;
1454}
1455
1456static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1457{
1458 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1459 return -1;
1460
1461 return 0;
1462}
1463
1464static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1465 Elf *elf)
1466{
1467 if (kcore_copy__parse_kallsyms(kci, dir))
1468 return -1;
1469
1470 if (kcore_copy__parse_modules(kci, dir))
1471 return -1;
1472
1473 if (kci->stext)
1474 kci->stext = round_down(kci->stext, page_size);
1475 else
1476 kci->stext = round_down(kci->first_symbol, page_size);
1477
1478 if (kci->etext) {
1479 kci->etext = round_up(kci->etext, page_size);
1480 } else if (kci->last_symbol) {
1481 kci->etext = round_up(kci->last_symbol, page_size);
1482 kci->etext += page_size;
1483 }
1484
1485 kci->first_module = round_down(kci->first_module, page_size);
1486
1487 if (kci->last_module_symbol) {
1488 kci->last_module_symbol = round_up(kci->last_module_symbol,
1489 page_size);
1490 kci->last_module_symbol += page_size;
1491 }
1492
1493 if (!kci->stext || !kci->etext)
1494 return -1;
1495
1496 if (kci->first_module && !kci->last_module_symbol)
1497 return -1;
1498
1499 return kcore_copy__read_maps(kci, elf);
1500}
1501
1502static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1503 const char *name)
1504{
1505 char from_filename[PATH_MAX];
1506 char to_filename[PATH_MAX];
1507
1508 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1509 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1510
1511 return copyfile_mode(from_filename, to_filename, 0400);
1512}
1513
1514static int kcore_copy__unlink(const char *dir, const char *name)
1515{
1516 char filename[PATH_MAX];
1517
1518 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1519
1520 return unlink(filename);
1521}
1522
1523static int kcore_copy__compare_fds(int from, int to)
1524{
1525 char *buf_from;
1526 char *buf_to;
1527 ssize_t ret;
1528 size_t len;
1529 int err = -1;
1530
1531 buf_from = malloc(page_size);
1532 buf_to = malloc(page_size);
1533 if (!buf_from || !buf_to)
1534 goto out;
1535
1536 while (1) {
1537 /* Use read because mmap won't work on proc files */
1538 ret = read(from, buf_from, page_size);
1539 if (ret < 0)
1540 goto out;
1541
1542 if (!ret)
1543 break;
1544
1545 len = ret;
1546
1547 if (readn(to, buf_to, len) != (int)len)
1548 goto out;
1549
1550 if (memcmp(buf_from, buf_to, len))
1551 goto out;
1552 }
1553
1554 err = 0;
1555out:
1556 free(buf_to);
1557 free(buf_from);
1558 return err;
1559}
1560
/*
 * Open both files read-only and compare their contents with
 * kcore_copy__compare_fds().  Returns 0 if identical, -1 otherwise
 * (including when either file cannot be opened).
 */
static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int err = -1;
	int to, from = open(from_filename, O_RDONLY);

	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to >= 0) {
		err = kcore_copy__compare_fds(from, to);
		close(to);
	}

	close(from);
	return err;
}
1581
1582static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1583 const char *name)
1584{
1585 char from_filename[PATH_MAX];
1586 char to_filename[PATH_MAX];
1587
1588 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1589 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1590
1591 return kcore_copy__compare_files(from_filename, to_filename);
1592}
1593
1594/**
1595 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1596 * @from_dir: from directory
1597 * @to_dir: to directory
1598 *
1599 * This function copies kallsyms, modules and kcore files from one directory to
1600 * another. kallsyms and modules are copied entirely. Only code segments are
1601 * copied from kcore. It is assumed that two segments suffice: one for the
1602 * kernel proper and one for all the modules. The code segments are determined
1603 * from kallsyms and modules files. The kernel map starts at _stext or the
1604 * lowest function symbol, and ends at _etext or the highest function symbol.
1605 * The module map starts at the lowest module address and ends at the highest
1606 * module symbol. Start addresses are rounded down to the nearest page. End
1607 * addresses are rounded up to the nearest page. An extra page is added to the
1608 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1609 * symbol too. Because it contains only code sections, the resulting kcore is
1610 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1611 * is not the same for the kernel map and the modules map. That happens because
1612 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1613 * kallsyms and modules files are compared with their copies to check that
1614 * modules have not been loaded or unloaded while the copies were taking place.
1615 *
1616 * Return: %0 on success, %-1 on failure.
1617 */
1618int kcore_copy(const char *from_dir, const char *to_dir)
1619{
1620 struct kcore kcore;
1621 struct kcore extract;
1622 size_t count = 2;
1623 int idx = 0, err = -1;
1624 off_t offset = page_size, sz, modules_offset = 0;
1625 struct kcore_copy_info kci = { .stext = 0, };
1626 char kcore_filename[PATH_MAX];
1627 char extract_filename[PATH_MAX];
1628
1629 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1630 return -1;
1631
1632 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1633 goto out_unlink_kallsyms;
1634
1635 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1636 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1637
1638 if (kcore__open(&kcore, kcore_filename))
1639 goto out_unlink_modules;
1640
1641 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1642 goto out_kcore_close;
1643
1644 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1645 goto out_kcore_close;
1646
1647 if (!kci.modules_map.addr)
1648 count -= 1;
1649
1650 if (kcore__copy_hdr(&kcore, &extract, count))
1651 goto out_extract_close;
1652
1653 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1654 kci.kernel_map.len))
1655 goto out_extract_close;
1656
1657 if (kci.modules_map.addr) {
1658 modules_offset = offset + kci.kernel_map.len;
1659 if (kcore__add_phdr(&extract, idx, modules_offset,
1660 kci.modules_map.addr, kci.modules_map.len))
1661 goto out_extract_close;
1662 }
1663
1664 sz = kcore__write(&extract);
1665 if (sz < 0 || sz > offset)
1666 goto out_extract_close;
1667
1668 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1669 kci.kernel_map.len))
1670 goto out_extract_close;
1671
1672 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1673 extract.fd, modules_offset,
1674 kci.modules_map.len))
1675 goto out_extract_close;
1676
1677 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1678 goto out_extract_close;
1679
1680 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1681 goto out_extract_close;
1682
1683 err = 0;
1684
1685out_extract_close:
1686 kcore__close(&extract);
1687 if (err)
1688 unlink(extract_filename);
1689out_kcore_close:
1690 kcore__close(&kcore);
1691out_unlink_modules:
1692 if (err)
1693 kcore_copy__unlink(to_dir, "modules");
1694out_unlink_kallsyms:
1695 if (err)
1696 kcore_copy__unlink(to_dir, "kallsyms");
1697
1698 return err;
1699}
1700
Adrian Hunterafba19d2013-10-09 15:01:12 +03001701int kcore_extract__create(struct kcore_extract *kce)
1702{
1703 struct kcore kcore;
1704 struct kcore extract;
1705 size_t count = 1;
1706 int idx = 0, err = -1;
1707 off_t offset = page_size, sz;
1708
1709 if (kcore__open(&kcore, kce->kcore_filename))
1710 return -1;
1711
1712 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1713 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1714 goto out_kcore_close;
1715
1716 if (kcore__copy_hdr(&kcore, &extract, count))
1717 goto out_extract_close;
1718
1719 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1720 goto out_extract_close;
1721
1722 sz = kcore__write(&extract);
1723 if (sz < 0 || sz > offset)
1724 goto out_extract_close;
1725
1726 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1727 goto out_extract_close;
1728
1729 err = 0;
1730
1731out_extract_close:
1732 kcore__close(&extract);
1733 if (err)
1734 unlink(kce->extract_filename);
1735out_kcore_close:
1736 kcore__close(&kcore);
1737
1738 return err;
1739}
1740
/* Remove the temporary file created by kcore_extract__create(). */
void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}
1745
Namhyung Kime5a18452012-08-06 13:41:20 +09001746void symbol__elf_init(void)
1747{
1748 elf_version(EV_CURRENT);
1749}