#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "demangle-java.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif


#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym)	\
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

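/*
 * Usage sketch (illustrative only, not called from this file): once the
 * symbol table data ("syms"), its entry count ("nr_syms") and its string
 * table ("symstrs") have been fetched with elf_getdata(), the iterator
 * visits every entry:
 *
 *	uint32_t idx;
 *	GElf_Sym sym;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		if (elf_sym__is_function(&sym))
 *			pr_debug("%s\n", elf_sym__name(&sym, symstrs));
 *	}
 */
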
static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

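/*
 * Example (sketch): looking up the ".text" section header, as done in
 * dso__load_sym() below:
 *
 *	GElf_Shdr shdr;
 *	Elf_Scn *sec = elf_section_by_name(elf, &ehdr, &shdr, ".text", NULL);
 *
 * On success the section header is copied into "shdr" and, when a
 * non-NULL "idx" pointer is passed, the 1-based section index is stored
 * through it.
 */
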
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

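/*
 * Worked example for the function above (values are illustrative): with
 * shdr_plt.sh_entsize == 16 and a first .rela.plt entry whose .dynsym
 * symbol is "malloc", the synthesized symbol is named "malloc@plt" and
 * placed at shdr_plt.sh_offset + 16, i.e. one PLT slot past the PLT
 * header.
 */
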
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

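/* For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8. */
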
static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

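/*
 * The note parsed above has the usual ELF note layout (sketch):
 *
 *	GElf_Nhdr { n_namesz = 4, n_descsz = 20, n_type = NT_GNU_BUILD_ID }
 *	"GNU\0"                     (name, padded to a 4-byte boundary)
 *	<20-byte build-id>          (descriptor, copied into "bf")
 *
 * n_descsz is 20 for the common SHA-1 style build-id but may differ.
 */
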
int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

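/*
 * Usage sketch (hypothetical caller): read a DSO's build-id and format it
 * as a hex string:
 *
 *	u8 build_id[BUILD_ID_SIZE];
 *	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
 *
 *	if (filename__read_build_id("/bin/ls", build_id, sizeof(build_id)) > 0)
 *		build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
 *
 * The return value is the note descriptor size on success, so "> 0" means
 * a build-id was found.
 */
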
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				ehdr.e_type == ET_REL ||
				dso__is_vdso(dso) ||
				elf_section_by_name(elf, &ehdr, &shdr,
						     ".gnu.prelink_undo",
						     NULL) != NULL);
	} else {
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
	}

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

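/*
 * Typical life cycle of a symsrc (a sketch of how dso__load() in symbol.c
 * drives this file, simplified to a single source):
 *
 *	struct symsrc ss;
 *
 *	if (symsrc__init(&ss, dso, name, type) == 0) {
 *		if (symsrc__has_symtab(&ss))
 *			dso__load_sym(dso, map, &ss, &ss, filter, 0);
 *		symsrc__destroy(&ss);
 *	}
 *
 * The real caller keeps separate "syms" and "runtime" sources so that a
 * stripped runtime image can borrow symbols from a debuginfo file.
 */
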
/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

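/*
 * Example (addresses are illustrative): if kallsyms reports _stext at
 * 0xffffffff9d800000 while vmlinux has it at 0xffffffff81000000,
 * ref_reloc() returns the difference, 0x1c800000, which is later added to
 * section addresses when the kernel map is remapped in dso__load_sym().
 */
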
static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	GElf_Shdr tshdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
		dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	/*
	 * Handle any relocation of vdso necessary because older kernels
	 * attempted to prelink vdso to its virtual address.
	 */
	if (dso__is_vdso(dso))
		map->reloc = map->start - dso->text_offset;

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn().  And it marks the loading as a failure so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		arch__elf_sym_adjust(&sym);

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					if (kmaps) {
						map__get(map);
						map_groups__remove(kmaps, map);
						map_groups__insert(kmaps, map);
						map__put(map);
					}
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				dso__put(curr_dso);
				if (curr_map == NULL) {
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmaps, curr_map);
				/*
				 * Add it before we drop the reference to curr_map,
				 * i.e. while we still are sure to have a reference
				 * to this DSO via curr_map->dso.
				 */
				dsos__add(&map->groups->machine->dsos, curr_dso);
				/* kmaps already got it */
				map__put(curr_map);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources.
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled == NULL)
				demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

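/*
 * Usage sketch (hypothetical callback): print every executable PT_LOAD
 * segment of an already opened ELF fd:
 *
 *	static int print_map(u64 start, u64 len, u64 pgoff, void *data __maybe_unused)
 *	{
 *		printf("%#" PRIx64 " %#" PRIx64 " %#" PRIx64 "\n", start, len, pgoff);
 *		return 0;
 *	}
 *
 *	file__read_maps(fd, true, print_map, NULL, NULL);
 *
 * kcore_copy__read_maps() below uses the same mechanism via elf_read_maps().
 */
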
enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr phdr = {
		.p_type = PT_LOAD,
		.p_flags = PF_R | PF_W | PF_X,
		.p_offset = offset,
		.p_vaddr = addr,
		.p_paddr = 0,
		.p_filesz = len,
		.p_memsz = len,
		.p_align = page_size,
	};

	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

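/*
 * The struct kcore helpers above are combined as follows (see kcore_copy()
 * and kcore_extract__create() below): kcore__open() the source,
 * kcore__init() the destination, kcore__copy_hdr() to clone the ELF header
 * with a chosen number of program headers, kcore__add_phdr() once per
 * copied segment, then kcore__write() to lay out the headers before the
 * segment payloads are appended with copy_bytes().
 */
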
struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another.  kallsyms and modules are copied entirely.  Only code segments are
 * copied from kcore.  It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules.  The code segments are determined
 * from kallsyms and modules files.  The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol.  Start addresses are rounded down to the nearest page.  End
 * addresses are rounded up to the nearest page.  An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too.  Because it contains only code sections, the resulting kcore is
 * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map.  That happens because
 * the data is copied adjacently whereas the original kcore has gaps.  Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

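/*
 * Resulting layout of the extracted kcore (sketch, offsets depend on
 * page_size): the kernel text segment is copied to file offset page_size,
 * and the modules segment, when present, follows immediately at
 * page_size + kci.kernel_map.len, even though the two regions are not
 * adjacent in the source /proc/kcore.
 */
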
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}