#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "demangle-java.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif


#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
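/*
 * Usage sketch (mirrors how dso__load_sym() below walks a symbol table):
 * given the section's Elf_Data *syms and its nr_syms, each iteration fills
 * @sym with the idx-th GElf_Sym.
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		if (elf_sym__is_function(&sym))
 *			... handle the function symbol ...
 *	}
 */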

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the indexes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
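/*
 * For example, NOTE_ALIGN(4) == 4 and NOTE_ALIGN(5) == 8: sizes are rounded up
 * to the next multiple of 4, matching the padding of ELF note fields.
 */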

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
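	/*
	 * Each note entry is a GElf_Nhdr followed by the name and then the
	 * descriptor, each padded to a 4-byte boundary (hence NOTE_ALIGN above).
	 */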
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER)
		ss->adjust_symbols = true;
	else
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

void __weak arch__sym_update(struct symbol *s __maybe_unused,
			     GElf_Sym *sym __maybe_unused) { }

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	GElf_Shdr tshdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
		dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	/*
	 * Handle any relocation of vdso necessary because older kernels
	 * attempted to prelink vdso to its virtual address.
	 */
	if (dso__is_vdso(dso))
		map->reloc = map->start - dso->text_offset;

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so they would confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn().  And that marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					if (kmaps) {
						map__get(map);
						map_groups__remove(kmaps, map);
						map_groups__insert(kmaps, map);
						map__put(map);
					}
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				dso__put(curr_dso);
				if (curr_map == NULL) {
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmaps, curr_map);
				/*
				 * Add it before we drop the reference to curr_map,
				 * i.e. while we still are sure to have a reference
				 * to this DSO via curr_map->dso.
				 */
				dsos__add(&map->groups->machine->dsos, curr_dso);
				/* kmaps already got it */
				map__put(curr_map);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
				|| (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources.
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled == NULL)
				demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		arch__sym_update(f, &sym);

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr phdr = {
		.p_type   = PT_LOAD,
		.p_flags  = PF_R | PF_W | PF_X,
		.p_offset = offset,
		.p_vaddr  = addr,
		.p_paddr  = 0,
		.p_filesz = len,
		.p_memsz  = len,
		.p_align  = page_size,
	};

	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

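/*
 * Record where the region [s, e) lands in the copied kcore: if it falls inside
 * the program segment [start, end) that is mapped at file offset pgoff, note
 * its address, file offset and length in @p (only the first match is kept).
 */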
static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another.  kallsyms and modules are copied entirely.  Only code segments are
 * copied from kcore.  It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules.  The code segments are determined
 * from kallsyms and modules files.  The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol.  Start addresses are rounded down to the nearest page.  End
 * addresses are rounded up to the nearest page.  An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too.  Because it contains only code sections, the resulting kcore is
 * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map.  That happens because
 * the data is copied adjacently whereas the original kcore has gaps.  Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}