#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "demangle-java.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64 183  /* ARM 64 bit */
#endif


#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
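/*
 * bfd.h refuses to be included unless PACKAGE (or PACKAGE_VERSION) is
 * defined; only the presence of the definition matters here, not its value.
 */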
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
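/*
 * Usage sketch, assuming syms/symstrs were obtained via elf_getdata():
 *
 *	GElf_Sym sym;
 *	uint32_t idx;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym)
 *		pr_debug("%s\n", elf_sym__name(&sym, symstrs));
 */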

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
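/*
 * SHT_REL entries carry no explicit addend while SHT_RELA entries do;
 * dso__synthesize_plt_symbols() below only needs GELF_R_SYM() from either,
 * so it simply picks the iterator matching the section type it found.
 */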

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
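/* e.g. NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8: sizes are padded to 4-byte boundaries. */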

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

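	/*
	 * Each note entry is a GElf_Nhdr (n_namesz, n_descsz, n_type)
	 * followed by the name and then the descriptor, each padded to a
	 * 4-byte boundary.  For the GNU build-id note the name is "GNU",
	 * the type is NT_GNU_BUILD_ID and the descriptor holds the build id
	 * bytes (typically 20 for SHA-1).
	 */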
	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

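/*
 * Same note walk as above, but reading a raw note stream straight from a
 * sysfs file (e.g. /sys/kernel/notes or a module's notes directory) instead
 * of going through libelf.
 */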
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;
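	/*
	 * Host-endianness probe: the first byte of 'endian' reads as 1 on a
	 * little-endian host and 0 on a big-endian one.
	 */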

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

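	/*
	 * Unlink the temporary file right away; the decompressed image stays
	 * accessible through the returned fd until it is closed.
	 */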
	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
			NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
			&ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
			&ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER)
		ss->adjust_symbols = true;
	else
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}
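/*
 * Hypothetical example: if the symtab has the reference symbol (say _stext)
 * at 0xffffffff81000000 but kallsyms reports it at 0xffffffff81200000 (e.g.
 * under KASLR), ref_reloc() yields 0x200000, which is then added to section
 * addresses below.
 */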

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	GElf_Shdr tshdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
		dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	/*
	 * Handle any relocation of vdso necessary because older kernels
	 * attempted to prelink vdso to its virtual address.
	 */
	if (dso__is_vdso(dso))
		map->reloc = map->start - dso->text_offset;

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/*
		 * Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so they would confuse the profile
		 * output:
		 */
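		/* ($a = ARM code, $t = Thumb code, $x = A64 code, $d = data) */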
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have SHN_ABS in their st_shndx) fail at elf_getscn(), which
		 * marks the loading as a failure, so already loaded symbols
		 * cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		arch__elf_sym_adjust(&sym);

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps. Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					if (kmaps) {
						map__get(map);
						map_groups__remove(kmaps, map);
						map_groups__insert(kmaps, map);
						map__put(map);
					}
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				dso__put(curr_dso);
				if (curr_map == NULL) {
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmaps, curr_map);
				/*
				 * Add it before we drop the reference to curr_map,
				 * i.e. while we still are sure to have a reference
				 * to this DSO via curr_map->dso.
				 */
				dsos__add(&map->groups->machine->dsos, curr_dso);
				/* kmaps already got it */
				map__put(curr_map);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++
		 * sources.  DWARF DW_compile_unit has this, but we don't
		 * always have access to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled == NULL)
				demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr phdr = {
		.p_type		= PT_LOAD,
		.p_flags	= PF_R | PF_W | PF_X,
		.p_offset	= offset,
		.p_vaddr	= addr,
		.p_paddr	= 0,
		.p_filesz	= len,
		.p_memsz	= len,
		.p_align	= page_size,
	};

	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too. Because it contains only code sections, the resulting kcore is
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}