#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64 183  /* ARM 64 bit */
#endif

typedef Elf64_Nhdr GElf_Nhdr;

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF &&
	       sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

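/*
 * Map an address to the index of the ELF section that contains it, or
 * (size_t)-1 if no section covers the address.
 */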
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

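/*
 * Try C++ (bfd), then Java, then Rust demangling of @elf_name.  Returns a
 * newly allocated string that the caller must free, or NULL if demangling is
 * disabled or no demangler recognized the name.
 */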
static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
{
	int demangle_flags = verbose ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS;
	char *demangled = NULL;

	/*
	 * We need to figure out if the object was created from C++ sources.
	 * DWARF DW_compile_unit has this, but we don't always have access
	 * to it...
	 */
	if (!want_demangle(dso->kernel || kmodule))
		return demangled;

	demangled = bfd_demangle(NULL, elf_name, demangle_flags);
	if (demangled == NULL)
		demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
	else if (rust_is_mangled(demangled))
		/*
		 * The input to Rust demangling is the BFD-demangled
		 * name, which is then Rust-demangled in place.
		 */
		rust_demangle_sym(demangled);

	return demangled;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map)
Namhyung Kime5a18452012-08-06 13:41:20 +0900258{
259 uint32_t nr_rel_entries, idx;
260 GElf_Sym sym;
261 u64 plt_offset;
262 GElf_Shdr shdr_plt;
263 struct symbol *f;
264 GElf_Shdr shdr_rel_plt, shdr_dynsym;
265 Elf_Data *reldata, *syms, *symstrs;
266 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
267 size_t dynsym_idx;
268 GElf_Ehdr ehdr;
269 char sympltname[1024];
270 Elf *elf;
Cody P Schafera44f6052012-08-10 15:22:59 -0700271 int nr = 0, symidx, err = 0;
Namhyung Kime5a18452012-08-06 13:41:20 +0900272
David Ahernf47b58b2012-08-19 09:47:14 -0600273 if (!ss->dynsym)
274 return 0;
275
Cody P Schafera44f6052012-08-10 15:22:59 -0700276 elf = ss->elf;
277 ehdr = ss->ehdr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900278
Cody P Schafera44f6052012-08-10 15:22:59 -0700279 scn_dynsym = ss->dynsym;
280 shdr_dynsym = ss->dynshdr;
281 dynsym_idx = ss->dynsym_idx;
Namhyung Kime5a18452012-08-06 13:41:20 +0900282
Namhyung Kime5a18452012-08-06 13:41:20 +0900283 if (scn_dynsym == NULL)
284 goto out_elf_end;
285
286 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
287 ".rela.plt", NULL);
288 if (scn_plt_rel == NULL) {
289 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
290 ".rel.plt", NULL);
291 if (scn_plt_rel == NULL)
292 goto out_elf_end;
293 }
294
295 err = -1;
296
297 if (shdr_rel_plt.sh_link != dynsym_idx)
298 goto out_elf_end;
299
300 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
301 goto out_elf_end;
302
303 /*
304 * Fetch the relocation section to find the idxes to the GOT
305 * and the symbols in the .dynsym they refer to.
306 */
307 reldata = elf_getdata(scn_plt_rel, NULL);
308 if (reldata == NULL)
309 goto out_elf_end;
310
311 syms = elf_getdata(scn_dynsym, NULL);
312 if (syms == NULL)
313 goto out_elf_end;
314
315 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
316 if (scn_symstrs == NULL)
317 goto out_elf_end;
318
319 symstrs = elf_getdata(scn_symstrs, NULL);
320 if (symstrs == NULL)
321 goto out_elf_end;
322
Cody P Schafer52f9ddb2012-08-10 15:22:51 -0700323 if (symstrs->d_size == 0)
324 goto out_elf_end;
325
Namhyung Kime5a18452012-08-06 13:41:20 +0900326 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
327 plt_offset = shdr_plt.sh_offset;
328
329 if (shdr_rel_plt.sh_type == SHT_RELA) {
330 GElf_Rela pos_mem, *pos;
331
332 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
333 nr_rel_entries) {
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200334 const char *elf_name = NULL;
335 char *demangled = NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900336 symidx = GELF_R_SYM(pos->r_info);
337 plt_offset += shdr_plt.sh_entsize;
338 gelf_getsym(syms, symidx, &sym);
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200339
340 elf_name = elf_sym__name(&sym, symstrs);
341 demangled = demangle_sym(dso, 0, elf_name);
342 if (demangled != NULL)
343 elf_name = demangled;
Namhyung Kime5a18452012-08-06 13:41:20 +0900344 snprintf(sympltname, sizeof(sympltname),
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200345 "%s@plt", elf_name);
346 free(demangled);
Namhyung Kime5a18452012-08-06 13:41:20 +0900347
348 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
349 STB_GLOBAL, sympltname);
350 if (!f)
351 goto out_elf_end;
352
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300353 symbols__insert(&dso->symbols[map->type], f);
354 ++nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900355 }
356 } else if (shdr_rel_plt.sh_type == SHT_REL) {
357 GElf_Rel pos_mem, *pos;
358 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
359 nr_rel_entries) {
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200360 const char *elf_name = NULL;
361 char *demangled = NULL;
Namhyung Kime5a18452012-08-06 13:41:20 +0900362 symidx = GELF_R_SYM(pos->r_info);
363 plt_offset += shdr_plt.sh_entsize;
364 gelf_getsym(syms, symidx, &sym);
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200365
366 elf_name = elf_sym__name(&sym, symstrs);
367 demangled = demangle_sym(dso, 0, elf_name);
368 if (demangled != NULL)
369 elf_name = demangled;
Namhyung Kime5a18452012-08-06 13:41:20 +0900370 snprintf(sympltname, sizeof(sympltname),
Milian Wolff2a8d41b2016-08-30 13:41:02 +0200371 "%s@plt", elf_name);
372 free(demangled);
Namhyung Kime5a18452012-08-06 13:41:20 +0900373
374 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
375 STB_GLOBAL, sympltname);
376 if (!f)
377 goto out_elf_end;
378
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300379 symbols__insert(&dso->symbols[map->type], f);
380 ++nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900381 }
382 }
383
384 err = 0;
385out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900386 if (err == 0)
387 return nr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900388 pr_debug("%s: problems reading %s PLT info.\n",
389 __func__, dso->long_name);
390 return 0;
391}
392
393/*
394 * Align offset to 4 bytes as needed for note name and descriptor data.
395 */
396#define NOTE_ALIGN(n) (((n) + 3) & -4U)
397
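/*
 * Look up the NT_GNU_BUILD_ID note in the usual note sections and copy its
 * descriptor (the build id) into @bf, zero padding up to @size.  Returns the
 * descriptor size on success, or -1 if no build id note was found.
 */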
398static int elf_read_build_id(Elf *elf, void *bf, size_t size)
399{
400 int err = -1;
401 GElf_Ehdr ehdr;
402 GElf_Shdr shdr;
403 Elf_Data *data;
404 Elf_Scn *sec;
405 Elf_Kind ek;
406 void *ptr;
407
408 if (size < BUILD_ID_SIZE)
409 goto out;
410
411 ek = elf_kind(elf);
412 if (ek != ELF_K_ELF)
413 goto out;
414
415 if (gelf_getehdr(elf, &ehdr) == NULL) {
416 pr_err("%s: cannot get elf header.\n", __func__);
417 goto out;
418 }
419
	/*
	 * Check the following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
426 do {
427 sec = elf_section_by_name(elf, &ehdr, &shdr,
428 ".note.gnu.build-id", NULL);
429 if (sec)
430 break;
431
432 sec = elf_section_by_name(elf, &ehdr, &shdr,
433 ".notes", NULL);
434 if (sec)
435 break;
436
437 sec = elf_section_by_name(elf, &ehdr, &shdr,
438 ".note", NULL);
439 if (sec)
440 break;
441
442 return err;
443
444 } while (0);
445
446 data = elf_getdata(sec, NULL);
447 if (data == NULL)
448 goto out;
449
450 ptr = data->d_buf;
451 while (ptr < (data->d_buf + data->d_size)) {
452 GElf_Nhdr *nhdr = ptr;
453 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
454 descsz = NOTE_ALIGN(nhdr->n_descsz);
455 const char *name;
456
457 ptr += sizeof(*nhdr);
458 name = ptr;
459 ptr += namesz;
460 if (nhdr->n_type == NT_GNU_BUILD_ID &&
461 nhdr->n_namesz == sizeof("GNU")) {
462 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
463 size_t sz = min(size, descsz);
464 memcpy(bf, ptr, sz);
465 memset(bf + sz, 0, size - sz);
466 err = descsz;
467 break;
468 }
469 }
470 ptr += descsz;
471 }
472
473out:
474 return err;
475}
476
477int filename__read_build_id(const char *filename, void *bf, size_t size)
478{
479 int fd, err = -1;
480 Elf *elf;
481
482 if (size < BUILD_ID_SIZE)
483 goto out;
484
485 fd = open(filename, O_RDONLY);
486 if (fd < 0)
487 goto out;
488
489 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
490 if (elf == NULL) {
491 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
492 goto out_close;
493 }
494
495 err = elf_read_build_id(elf, bf, size);
496
497 elf_end(elf);
498out_close:
499 close(fd);
500out:
501 return err;
502}
503
504int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
505{
506 int fd, err = -1;
507
508 if (size < BUILD_ID_SIZE)
509 goto out;
510
511 fd = open(filename, O_RDONLY);
512 if (fd < 0)
513 goto out;
514
515 while (1) {
516 char bf[BUFSIZ];
517 GElf_Nhdr nhdr;
518 size_t namesz, descsz;
519
520 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
521 break;
522
523 namesz = NOTE_ALIGN(nhdr.n_namesz);
524 descsz = NOTE_ALIGN(nhdr.n_descsz);
525 if (nhdr.n_type == NT_GNU_BUILD_ID &&
526 nhdr.n_namesz == sizeof("GNU")) {
527 if (read(fd, bf, namesz) != (ssize_t)namesz)
528 break;
529 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
530 size_t sz = min(descsz, size);
531 if (read(fd, build_id, sz) == (ssize_t)sz) {
532 memset(build_id + sz, 0, size - sz);
533 err = 0;
534 break;
535 }
536 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
537 break;
538 } else {
539 int n = namesz + descsz;
Arnaldo Carvalho de Meloddc0ec32017-01-03 15:19:21 -0300540
541 if (n > (int)sizeof(bf)) {
542 n = sizeof(bf);
543 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
544 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
545 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900546 if (read(fd, bf, n) != n)
547 break;
548 }
549 }
550 close(fd);
551out:
552 return err;
553}
554
555int filename__read_debuglink(const char *filename, char *debuglink,
556 size_t size)
557{
558 int fd, err = -1;
559 Elf *elf;
560 GElf_Ehdr ehdr;
561 GElf_Shdr shdr;
562 Elf_Data *data;
563 Elf_Scn *sec;
564 Elf_Kind ek;
565
566 fd = open(filename, O_RDONLY);
567 if (fd < 0)
568 goto out;
569
570 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
571 if (elf == NULL) {
572 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
573 goto out_close;
574 }
575
576 ek = elf_kind(elf);
577 if (ek != ELF_K_ELF)
Chenggang Qin784f3392013-10-11 08:27:57 +0800578 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900579
580 if (gelf_getehdr(elf, &ehdr) == NULL) {
581 pr_err("%s: cannot get elf header.\n", __func__);
Chenggang Qin784f3392013-10-11 08:27:57 +0800582 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900583 }
584
585 sec = elf_section_by_name(elf, &ehdr, &shdr,
586 ".gnu_debuglink", NULL);
587 if (sec == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800588 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900589
590 data = elf_getdata(sec, NULL);
591 if (data == NULL)
Chenggang Qin784f3392013-10-11 08:27:57 +0800592 goto out_elf_end;
Namhyung Kime5a18452012-08-06 13:41:20 +0900593
594 /* the start of this section is a zero-terminated string */
595 strncpy(debuglink, data->d_buf, size);
596
Stephane Eranian0d3dc5e2014-02-20 10:32:55 +0900597 err = 0;
598
Chenggang Qin784f3392013-10-11 08:27:57 +0800599out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +0900600 elf_end(elf);
Namhyung Kime5a18452012-08-06 13:41:20 +0900601out_close:
602 close(fd);
603out:
604 return err;
605}
606
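/*
 * Work out whether values read from this DSO need byte swapping by comparing
 * its ELF data encoding with the endianness of the running host.
 */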
607static int dso__swap_init(struct dso *dso, unsigned char eidata)
608{
609 static unsigned int const endian = 1;
610
611 dso->needs_swap = DSO_SWAP__NO;
612
613 switch (eidata) {
614 case ELFDATA2LSB:
615 /* We are big endian, DSO is little endian. */
616 if (*(unsigned char const *)&endian != 1)
617 dso->needs_swap = DSO_SWAP__YES;
618 break;
619
620 case ELFDATA2MSB:
621 /* We are little endian, DSO is big endian. */
622 if (*(unsigned char const *)&endian != 0)
623 dso->needs_swap = DSO_SWAP__YES;
624 break;
625
626 default:
627 pr_err("unrecognized DSO data encoding %d\n", eidata);
628 return -EINVAL;
629 }
630
631 return 0;
632}
633
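/*
 * Decompress a compressed kernel module into a temporary file and return an
 * open file descriptor to it, or -1 on failure.  The temporary file is
 * unlinked before returning, so it vanishes once the descriptor is closed.
 */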
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900634static int decompress_kmodule(struct dso *dso, const char *name,
635 enum dso_binary_type type)
636{
Jiri Olsa914f85c2015-02-12 22:27:50 +0100637 int fd = -1;
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900638 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
Jiri Olsa914f85c2015-02-12 22:27:50 +0100639 struct kmod_path m;
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900640
Namhyung Kim0b064f42015-01-29 17:06:42 +0900641 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
642 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
643 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900644 return -1;
645
Jiri Olsa914f85c2015-02-12 22:27:50 +0100646 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
647 name = dso->long_name;
648
649 if (kmod_path__parse_ext(&m, name) || !m.comp)
650 return -1;
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900651
652 fd = mkstemp(tmpbuf);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300653 if (fd < 0) {
654 dso->load_errno = errno;
Jiri Olsa914f85c2015-02-12 22:27:50 +0100655 goto out;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300656 }
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900657
Jiri Olsa914f85c2015-02-12 22:27:50 +0100658 if (!decompress_to_file(m.ext, name, fd)) {
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300659 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900660 close(fd);
661 fd = -1;
662 }
663
664 unlink(tmpbuf);
665
Jiri Olsa914f85c2015-02-12 22:27:50 +0100666out:
667 free(m.ext);
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900668 return fd;
669}
670
Cody P Schafer3aafe5a2012-08-10 15:23:02 -0700671bool symsrc__possibly_runtime(struct symsrc *ss)
672{
673 return ss->dynsym || ss->opdsec;
674}
675
Cody P Schaferd26cd122012-08-10 15:23:00 -0700676bool symsrc__has_symtab(struct symsrc *ss)
677{
678 return ss->symtab != NULL;
679}
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700680
681void symsrc__destroy(struct symsrc *ss)
Namhyung Kime5a18452012-08-06 13:41:20 +0900682{
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -0300683 zfree(&ss->name);
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700684 elf_end(ss->elf);
685 close(ss->fd);
686}
687
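/*
 * Weak default: symbol addresses need adjusting when the image is an
 * executable or a relocatable object; architectures may override this.
 */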
Naveen N. Raod2332092015-04-28 17:35:35 +0530688bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
689{
690 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
691}
692
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700693int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
694 enum dso_binary_type type)
695{
Namhyung Kime5a18452012-08-06 13:41:20 +0900696 int err = -1;
Namhyung Kime5a18452012-08-06 13:41:20 +0900697 GElf_Ehdr ehdr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900698 Elf *elf;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700699 int fd;
700
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300701 if (dso__needs_decompress(dso)) {
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900702 fd = decompress_kmodule(dso, name, type);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300703 if (fd < 0)
704 return -1;
705 } else {
Namhyung Kimc00c48f2014-11-04 10:14:27 +0900706 fd = open(name, O_RDONLY);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300707 if (fd < 0) {
708 dso->load_errno = errno;
709 return -1;
710 }
711 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900712
713 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
714 if (elf == NULL) {
715 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300716 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
Namhyung Kime5a18452012-08-06 13:41:20 +0900717 goto out_close;
718 }
719
720 if (gelf_getehdr(elf, &ehdr) == NULL) {
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300721 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
Namhyung Kime5a18452012-08-06 13:41:20 +0900722 pr_debug("%s: cannot get elf header.\n", __func__);
723 goto out_elf_end;
724 }
725
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300726 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
727 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
Namhyung Kime5a18452012-08-06 13:41:20 +0900728 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300729 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900730
731 /* Always reject images with a mismatched build-id: */
Masami Hiramatsu428aff82016-08-26 01:24:42 +0900732 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
Namhyung Kime5a18452012-08-06 13:41:20 +0900733 u8 build_id[BUILD_ID_SIZE];
734
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300735 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
736 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
Namhyung Kime5a18452012-08-06 13:41:20 +0900737 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300738 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900739
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300740 if (!dso__build_id_equal(dso, build_id)) {
Naveen N. Rao468f3d22015-04-25 01:14:46 +0530741 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300742 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
Namhyung Kime5a18452012-08-06 13:41:20 +0900743 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300744 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900745 }
746
Adrian Hunterc6d8f2a2014-07-14 13:02:41 +0300747 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
748
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700749 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
750 NULL);
751 if (ss->symshdr.sh_type != SHT_SYMTAB)
752 ss->symtab = NULL;
753
754 ss->dynsym_idx = 0;
755 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
756 &ss->dynsym_idx);
757 if (ss->dynshdr.sh_type != SHT_DYNSYM)
758 ss->dynsym = NULL;
759
760 ss->opdidx = 0;
761 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
762 &ss->opdidx);
763 if (ss->opdshdr.sh_type != SHT_PROGBITS)
764 ss->opdsec = NULL;
765
Wang Nan99e87f72016-04-07 10:24:31 +0000766 if (dso->kernel == DSO_TYPE_USER)
767 ss->adjust_symbols = true;
768 else
Naveen N. Raod2332092015-04-28 17:35:35 +0530769 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700770
771 ss->name = strdup(name);
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300772 if (!ss->name) {
773 dso->load_errno = errno;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700774 goto out_elf_end;
Arnaldo Carvalho de Melo18425f12015-03-24 11:49:02 -0300775 }
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700776
777 ss->elf = elf;
778 ss->fd = fd;
779 ss->ehdr = ehdr;
780 ss->type = type;
781
782 return 0;
783
784out_elf_end:
785 elf_end(elf);
786out_close:
787 close(fd);
788 return err;
789}
790
Adrian Hunter39b12f782013-08-07 14:38:47 +0300791/**
792 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
793 * @kmap: kernel maps and relocation reference symbol
794 *
795 * This function returns %true if we are dealing with the kernel maps and the
796 * relocation reference symbol has not yet been found. Otherwise %false is
797 * returned.
798 */
799static bool ref_reloc_sym_not_found(struct kmap *kmap)
800{
801 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
802 !kmap->ref_reloc_sym->unrelocated_addr;
803}
804
805/**
806 * ref_reloc - kernel relocation offset.
807 * @kmap: kernel maps and relocation reference symbol
808 *
809 * This function returns the offset of kernel addresses as determined by using
810 * the relocation reference symbol i.e. if the kernel has not been relocated
811 * then the return value is zero.
812 */
813static u64 ref_reloc(struct kmap *kmap)
814{
815 if (kmap && kmap->ref_reloc_sym &&
816 kmap->ref_reloc_sym->unrelocated_addr)
817 return kmap->ref_reloc_sym->addr -
818 kmap->ref_reloc_sym->unrelocated_addr;
819 return 0;
820}
821
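/* Weak hook for architectures to tweak a new symbol; the default is a no-op. */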
Naveen N. Rao0b3c2262016-04-12 14:40:50 +0530822void __weak arch__sym_update(struct symbol *s __maybe_unused,
823 GElf_Sym *sym __maybe_unused) { }
Ananth N Mavinakayanahallic50fc0a2015-04-28 17:35:38 +0530824
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -0300825int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
826 struct symsrc *runtime_ss, int kmodule)
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700827{
828 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
Wang Nanba927322015-04-07 08:22:45 +0000829 struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700830 struct map *curr_map = map;
831 struct dso *curr_dso = dso;
832 Elf_Data *symstrs, *secstrs;
833 uint32_t nr_syms;
834 int err = -1;
835 uint32_t idx;
836 GElf_Ehdr ehdr;
Cody P Schafer261360b2012-08-10 15:23:01 -0700837 GElf_Shdr shdr;
Wang Nan73cdf0c2016-02-26 09:31:49 +0000838 GElf_Shdr tshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700839 Elf_Data *syms, *opddata = NULL;
840 GElf_Sym sym;
Cody P Schafer261360b2012-08-10 15:23:01 -0700841 Elf_Scn *sec, *sec_strndx;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700842 Elf *elf;
843 int nr = 0;
Adrian Hunter39b12f782013-08-07 14:38:47 +0300844 bool remap_kernel = false, adjust_kernel_syms = false;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700845
Wang Nanba927322015-04-07 08:22:45 +0000846 if (kmap && !kmaps)
847 return -1;
848
Cody P Schafer261360b2012-08-10 15:23:01 -0700849 dso->symtab_type = syms_ss->type;
Adrian Hunterc6d8f2a2014-07-14 13:02:41 +0300850 dso->is_64_bit = syms_ss->is_64_bit;
Adrian Hunter0131c4e2013-08-07 14:38:50 +0300851 dso->rel = syms_ss->ehdr.e_type == ET_REL;
852
853 /*
854 * Modules may already have symbols from kallsyms, but those symbols
855 * have the wrong values for the dso maps, so remove them.
856 */
857 if (kmodule && syms_ss->symtab)
858 symbols__delete(&dso->symbols[map->type]);
Cody P Schafer005f9292012-08-10 15:22:58 -0700859
Cody P Schafer261360b2012-08-10 15:23:01 -0700860 if (!syms_ss->symtab) {
Anton Blanchardd0b0d042014-09-09 08:59:29 +1000861 /*
862 * If the vmlinux is stripped, fail so we will fall back
863 * to using kallsyms. The vmlinux runtime symbols aren't
864 * of much use.
865 */
866 if (dso->kernel)
867 goto out_elf_end;
868
Cody P Schafer261360b2012-08-10 15:23:01 -0700869 syms_ss->symtab = syms_ss->dynsym;
870 syms_ss->symshdr = syms_ss->dynshdr;
Cody P Schaferd26cd122012-08-10 15:23:00 -0700871 }
872
Cody P Schafer261360b2012-08-10 15:23:01 -0700873 elf = syms_ss->elf;
874 ehdr = syms_ss->ehdr;
875 sec = syms_ss->symtab;
876 shdr = syms_ss->symshdr;
Cody P Schaferb68e2f92012-08-10 15:22:57 -0700877
Anton Blanchard50de1a02016-08-13 11:55:33 +1000878 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
879 ".text", NULL))
Wang Nan73cdf0c2016-02-26 09:31:49 +0000880 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
881
Cody P Schafer261360b2012-08-10 15:23:01 -0700882 if (runtime_ss->opdsec)
883 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
Namhyung Kime5a18452012-08-06 13:41:20 +0900884
885 syms = elf_getdata(sec, NULL);
886 if (syms == NULL)
887 goto out_elf_end;
888
889 sec = elf_getscn(elf, shdr.sh_link);
890 if (sec == NULL)
891 goto out_elf_end;
892
893 symstrs = elf_getdata(sec, NULL);
894 if (symstrs == NULL)
895 goto out_elf_end;
896
Adrian Hunterf247fb82014-07-31 09:00:46 +0300897 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
Namhyung Kime5a18452012-08-06 13:41:20 +0900898 if (sec_strndx == NULL)
899 goto out_elf_end;
900
901 secstrs = elf_getdata(sec_strndx, NULL);
902 if (secstrs == NULL)
903 goto out_elf_end;
904
905 nr_syms = shdr.sh_size / shdr.sh_entsize;
906
907 memset(&sym, 0, sizeof(sym));
Adrian Hunter39b12f782013-08-07 14:38:47 +0300908
909 /*
910 * The kernel relocation symbol is needed in advance in order to adjust
911 * kernel maps correctly.
912 */
913 if (ref_reloc_sym_not_found(kmap)) {
914 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
915 const char *elf_name = elf_sym__name(&sym, symstrs);
916
917 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
918 continue;
919 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
Adrian Hunter91767532014-01-29 16:14:36 +0200920 map->reloc = kmap->ref_reloc_sym->addr -
921 kmap->ref_reloc_sym->unrelocated_addr;
Adrian Hunter39b12f782013-08-07 14:38:47 +0300922 break;
923 }
924 }
925
Adrian Hunterf0ee3b42015-08-14 15:50:06 +0300926 /*
927 * Handle any relocation of vdso necessary because older kernels
928 * attempted to prelink vdso to its virtual address.
929 */
Wang Nan73cdf0c2016-02-26 09:31:49 +0000930 if (dso__is_vdso(dso))
931 map->reloc = map->start - dso->text_offset;
Adrian Hunterf0ee3b42015-08-14 15:50:06 +0300932
Adrian Hunter39b12f782013-08-07 14:38:47 +0300933 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
934 /*
935 * Initial kernel and module mappings do not map to the dso. For
936 * function mappings, flag the fixups.
937 */
938 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
939 remap_kernel = true;
940 adjust_kernel_syms = dso->adjust_symbols;
941 }
Namhyung Kime5a18452012-08-06 13:41:20 +0900942 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
943 struct symbol *f;
944 const char *elf_name = elf_sym__name(&sym, symstrs);
945 char *demangled = NULL;
946 int is_label = elf_sym__is_label(&sym);
947 const char *section_name;
Cody P Schafer261360b2012-08-10 15:23:01 -0700948 bool used_opd = false;
Namhyung Kime5a18452012-08-06 13:41:20 +0900949
Namhyung Kime5a18452012-08-06 13:41:20 +0900950 if (!is_label && !elf_sym__is_a(&sym, map->type))
951 continue;
952
953 /* Reject ARM ELF "mapping symbols": these aren't unique and
954 * don't identify functions, so will confuse the profile
955 * output: */
Victor Kamensky4886f2c2015-01-26 22:34:01 -0800956 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
957 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
958 && (elf_name[2] == '\0' || elf_name[2] == '.'))
Namhyung Kime5a18452012-08-06 13:41:20 +0900959 continue;
960 }
961
Cody P Schafer261360b2012-08-10 15:23:01 -0700962 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
963 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
Namhyung Kime5a18452012-08-06 13:41:20 +0900964 u64 *opd = opddata->d_buf + offset;
965 sym.st_value = DSO__SWAP(dso, u64, *opd);
Cody P Schafer261360b2012-08-10 15:23:01 -0700966 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
967 sym.st_value);
968 used_opd = true;
Namhyung Kime5a18452012-08-06 13:41:20 +0900969 }
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(), and that marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
979 if (sym.st_shndx == SHN_ABS)
980 continue;
Namhyung Kime5a18452012-08-06 13:41:20 +0900981
Cody P Schafer261360b2012-08-10 15:23:01 -0700982 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
Namhyung Kime5a18452012-08-06 13:41:20 +0900983 if (!sec)
984 goto out_elf_end;
985
986 gelf_getshdr(sec, &shdr);
987
988 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
989 continue;
990
991 section_name = elf_sec__name(&shdr, secstrs);
992
993 /* On ARM, symbols for thumb functions have 1 added to
994 * the symbol address as a flag - remove it */
995 if ((ehdr.e_machine == EM_ARM) &&
996 (map->type == MAP__FUNCTION) &&
997 (sym.st_value & 1))
998 --sym.st_value;
999
Adrian Hunter39b12f782013-08-07 14:38:47 +03001000 if (dso->kernel || kmodule) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001001 char dso_name[PATH_MAX];
1002
Adrian Hunter39b12f782013-08-07 14:38:47 +03001003 /* Adjust symbol to map to file offset */
1004 if (adjust_kernel_syms)
1005 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1006
Namhyung Kime5a18452012-08-06 13:41:20 +09001007 if (strcmp(section_name,
1008 (curr_dso->short_name +
1009 dso->short_name_len)) == 0)
1010 goto new_symbol;
1011
1012 if (strcmp(section_name, ".text") == 0) {
Adrian Hunter39b12f782013-08-07 14:38:47 +03001013 /*
1014 * The initial kernel mapping is based on
1015 * kallsyms and identity maps. Overwrite it to
1016 * map to the kernel dso.
1017 */
1018 if (remap_kernel && dso->kernel) {
1019 remap_kernel = false;
1020 map->start = shdr.sh_addr +
1021 ref_reloc(kmap);
1022 map->end = map->start + shdr.sh_size;
1023 map->pgoff = shdr.sh_offset;
1024 map->map_ip = map__map_ip;
1025 map->unmap_ip = map__unmap_ip;
1026 /* Ensure maps are correctly ordered */
Wang Nanba927322015-04-07 08:22:45 +00001027 if (kmaps) {
Arnaldo Carvalho de Melo84c2caf2015-05-25 16:59:56 -03001028 map__get(map);
Wang Nanba927322015-04-07 08:22:45 +00001029 map_groups__remove(kmaps, map);
1030 map_groups__insert(kmaps, map);
Arnaldo Carvalho de Melo84c2caf2015-05-25 16:59:56 -03001031 map__put(map);
Wang Nanba927322015-04-07 08:22:45 +00001032 }
Adrian Hunter39b12f782013-08-07 14:38:47 +03001033 }
1034
Adrian Hunter0131c4e2013-08-07 14:38:50 +03001035 /*
1036 * The initial module mapping is based on
1037 * /proc/modules mapped to offset zero.
1038 * Overwrite it to map to the module dso.
1039 */
1040 if (remap_kernel && kmodule) {
1041 remap_kernel = false;
1042 map->pgoff = shdr.sh_offset;
1043 }
1044
Namhyung Kime5a18452012-08-06 13:41:20 +09001045 curr_map = map;
1046 curr_dso = dso;
1047 goto new_symbol;
1048 }
1049
Adrian Hunter0131c4e2013-08-07 14:38:50 +03001050 if (!kmap)
1051 goto new_symbol;
1052
Namhyung Kime5a18452012-08-06 13:41:20 +09001053 snprintf(dso_name, sizeof(dso_name),
1054 "%s%s", dso->short_name, section_name);
1055
Wang Nanba927322015-04-07 08:22:45 +00001056 curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
Namhyung Kime5a18452012-08-06 13:41:20 +09001057 if (curr_map == NULL) {
1058 u64 start = sym.st_value;
1059
1060 if (kmodule)
1061 start += map->start + shdr.sh_offset;
1062
1063 curr_dso = dso__new(dso_name);
1064 if (curr_dso == NULL)
1065 goto out_elf_end;
1066 curr_dso->kernel = dso->kernel;
1067 curr_dso->long_name = dso->long_name;
1068 curr_dso->long_name_len = dso->long_name_len;
1069 curr_map = map__new2(start, curr_dso,
1070 map->type);
Masami Hiramatsue7a78652015-12-09 11:11:18 +09001071 dso__put(curr_dso);
Namhyung Kime5a18452012-08-06 13:41:20 +09001072 if (curr_map == NULL) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001073 goto out_elf_end;
1074 }
Adrian Hunter39b12f782013-08-07 14:38:47 +03001075 if (adjust_kernel_syms) {
1076 curr_map->start = shdr.sh_addr +
1077 ref_reloc(kmap);
1078 curr_map->end = curr_map->start +
1079 shdr.sh_size;
1080 curr_map->pgoff = shdr.sh_offset;
1081 } else {
1082 curr_map->map_ip = identity__map_ip;
1083 curr_map->unmap_ip = identity__map_ip;
1084 }
Namhyung Kime5a18452012-08-06 13:41:20 +09001085 curr_dso->symtab_type = dso->symtab_type;
Wang Nanba927322015-04-07 08:22:45 +00001086 map_groups__insert(kmaps, curr_map);
				/*
				 * Add it before we drop the reference to curr_map,
				 * i.e. while we still are sure to have a reference
				 * to this DSO via curr_map->dso.
				 */
1092 dsos__add(&map->groups->machine->dsos, curr_dso);
Masami Hiramatsu8d5c3402015-11-18 15:40:27 +09001093 /* kmaps already got it */
1094 map__put(curr_map);
Namhyung Kime5a18452012-08-06 13:41:20 +09001095 dso__set_loaded(curr_dso, map->type);
1096 } else
1097 curr_dso = curr_map->dso;
1098
1099 goto new_symbol;
1100 }
1101
Cody P Schafer261360b2012-08-10 15:23:01 -07001102 if ((used_opd && runtime_ss->adjust_symbols)
1103 || (!used_opd && syms_ss->adjust_symbols)) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001104 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1105 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1106 (u64)sym.st_value, (u64)shdr.sh_addr,
1107 (u64)shdr.sh_offset);
1108 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1109 }
Avi Kivity950b8352014-01-22 21:58:46 +02001110new_symbol:
Milian Wolff2a8d41b2016-08-30 13:41:02 +02001111 demangled = demangle_sym(dso, kmodule, elf_name);
1112 if (demangled != NULL)
1113 elf_name = demangled;
Namhyung Kime71e7942014-07-31 14:47:42 +09001114
Namhyung Kime5a18452012-08-06 13:41:20 +09001115 f = symbol__new(sym.st_value, sym.st_size,
1116 GELF_ST_BIND(sym.st_info), elf_name);
1117 free(demangled);
1118 if (!f)
1119 goto out_elf_end;
1120
Naveen N. Rao0b3c2262016-04-12 14:40:50 +05301121 arch__sym_update(f, &sym);
1122
Arnaldo Carvalho de Melobe39db92016-09-01 19:25:52 -03001123 __symbols__insert(&curr_dso->symbols[curr_map->type], f, dso->kernel);
1124 nr++;
Namhyung Kime5a18452012-08-06 13:41:20 +09001125 }
1126
1127 /*
1128 * For misannotated, zeroed, ASM function sizes.
1129 */
1130 if (nr > 0) {
Namhyung Kime5a18452012-08-06 13:41:20 +09001131 symbols__fixup_end(&dso->symbols[map->type]);
Arnaldo Carvalho de Melo432746f2016-09-01 11:00:23 -03001132 symbols__fixup_duplicate(&dso->symbols[map->type]);
Namhyung Kime5a18452012-08-06 13:41:20 +09001133 if (kmap) {
			/*
			 * We need to fix this up here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
Wang Nanba927322015-04-07 08:22:45 +00001138 __map_groups__fixup_end(kmaps, map->type);
Namhyung Kime5a18452012-08-06 13:41:20 +09001139 }
1140 }
1141 err = nr;
1142out_elf_end:
Namhyung Kime5a18452012-08-06 13:41:20 +09001143 return err;
1144}
1145
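/*
 * Call @mapfn for every PT_LOAD program header, restricted to executable
 * segments when @exe is true and to readable segments otherwise.
 */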
Adrian Hunter8e0cf962013-08-07 14:38:51 +03001146static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1147{
1148 GElf_Phdr phdr;
1149 size_t i, phdrnum;
1150 int err;
1151 u64 sz;
1152
1153 if (elf_getphdrnum(elf, &phdrnum))
1154 return -1;
1155
1156 for (i = 0; i < phdrnum; i++) {
1157 if (gelf_getphdr(elf, i, &phdr) == NULL)
1158 return -1;
1159 if (phdr.p_type != PT_LOAD)
1160 continue;
1161 if (exe) {
1162 if (!(phdr.p_flags & PF_X))
1163 continue;
1164 } else {
1165 if (!(phdr.p_flags & PF_R))
1166 continue;
1167 }
1168 sz = min(phdr.p_memsz, phdr.p_filesz);
1169 if (!sz)
1170 continue;
1171 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1172 if (err)
1173 return err;
1174 }
1175 return 0;
1176}
1177
1178int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1179 bool *is_64_bit)
1180{
1181 int err;
1182 Elf *elf;
1183
1184 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1185 if (elf == NULL)
1186 return -1;
1187
1188 if (is_64_bit)
1189 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1190
1191 err = elf_read_maps(elf, exe, mapfn, data);
1192
1193 elf_end(elf);
1194 return err;
1195}
1196
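/*
 * Classify the ELF image behind @fd as 64-bit, x32 (EM_X86_64 with a 32-bit
 * ELF class) or 32-bit.
 */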
Adrian Hunter2b5b8bb2014-07-22 16:17:59 +03001197enum dso_type dso__type_fd(int fd)
1198{
1199 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1200 GElf_Ehdr ehdr;
1201 Elf_Kind ek;
1202 Elf *elf;
1203
1204 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1205 if (elf == NULL)
1206 goto out;
1207
1208 ek = elf_kind(elf);
1209 if (ek != ELF_K_ELF)
1210 goto out_end;
1211
1212 if (gelf_getclass(elf) == ELFCLASS64) {
1213 dso_type = DSO__TYPE_64BIT;
1214 goto out_end;
1215 }
1216
1217 if (gelf_getehdr(elf, &ehdr) == NULL)
1218 goto out_end;
1219
1220 if (ehdr.e_machine == EM_X86_64)
1221 dso_type = DSO__TYPE_X32BIT;
1222 else
1223 dso_type = DSO__TYPE_32BIT;
1224out_end:
1225 elf_end(elf);
1226out:
1227 return dso_type;
1228}
1229
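/*
 * Copy @len bytes from @from at @from_offs to @to at @to_offs, one page at a
 * time.  Plain read()/write() is used because mmap does not work on /proc
 * files such as /proc/kcore.
 */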
Adrian Hunterafba19d2013-10-09 15:01:12 +03001230static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1231{
1232 ssize_t r;
1233 size_t n;
1234 int err = -1;
1235 char *buf = malloc(page_size);
1236
1237 if (buf == NULL)
1238 return -1;
1239
1240 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1241 goto out;
1242
1243 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1244 goto out;
1245
1246 while (len) {
1247 n = page_size;
1248 if (len < n)
1249 n = len;
1250 /* Use read because mmap won't work on proc files */
1251 r = read(from, buf, n);
1252 if (r < 0)
1253 goto out;
1254 if (!r)
1255 break;
1256 n = r;
1257 r = write(to, buf, n);
1258 if (r < 0)
1259 goto out;
1260 if ((size_t)r != n)
1261 goto out;
1262 len -= n;
1263 }
1264
1265 err = 0;
1266out:
1267 free(buf);
1268 return err;
1269}
1270
1271struct kcore {
1272 int fd;
1273 int elfclass;
1274 Elf *elf;
1275 GElf_Ehdr ehdr;
1276};
1277
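/* Open an existing kcore image read-only and cache its ELF class and header. */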
1278static int kcore__open(struct kcore *kcore, const char *filename)
1279{
1280 GElf_Ehdr *ehdr;
1281
1282 kcore->fd = open(filename, O_RDONLY);
1283 if (kcore->fd == -1)
1284 return -1;
1285
1286 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1287 if (!kcore->elf)
1288 goto out_close;
1289
1290 kcore->elfclass = gelf_getclass(kcore->elf);
1291 if (kcore->elfclass == ELFCLASSNONE)
1292 goto out_end;
1293
1294 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1295 if (!ehdr)
1296 goto out_end;
1297
1298 return 0;
1299
1300out_end:
1301 elf_end(kcore->elf);
1302out_close:
1303 close(kcore->fd);
1304 return -1;
1305}
1306
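/*
 * Create a new kcore output file (a mkstemp template when @temp is true) and
 * prepare a writable ELF handle of the given class for it.
 */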
1307static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1308 bool temp)
1309{
Adrian Hunterafba19d2013-10-09 15:01:12 +03001310 kcore->elfclass = elfclass;
1311
1312 if (temp)
1313 kcore->fd = mkstemp(filename);
1314 else
1315 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1316 if (kcore->fd == -1)
1317 return -1;
1318
1319 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1320 if (!kcore->elf)
1321 goto out_close;
1322
1323 if (!gelf_newehdr(kcore->elf, elfclass))
1324 goto out_end;
1325
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001326 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
Adrian Hunterafba19d2013-10-09 15:01:12 +03001327
1328 return 0;
1329
1330out_end:
1331 elf_end(kcore->elf);
1332out_close:
1333 close(kcore->fd);
1334 unlink(filename);
1335 return -1;
1336}
1337
1338static void kcore__close(struct kcore *kcore)
1339{
1340 elf_end(kcore->elf);
1341 close(kcore->fd);
1342}
1343
1344static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1345{
1346 GElf_Ehdr *ehdr = &to->ehdr;
1347 GElf_Ehdr *kehdr = &from->ehdr;
1348
1349 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1350 ehdr->e_type = kehdr->e_type;
1351 ehdr->e_machine = kehdr->e_machine;
1352 ehdr->e_version = kehdr->e_version;
1353 ehdr->e_entry = 0;
1354 ehdr->e_shoff = 0;
1355 ehdr->e_flags = kehdr->e_flags;
1356 ehdr->e_phnum = count;
1357 ehdr->e_shentsize = 0;
1358 ehdr->e_shnum = 0;
1359 ehdr->e_shstrndx = 0;
1360
1361 if (from->elfclass == ELFCLASS32) {
1362 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1363 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1364 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1365 } else {
1366 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1367 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1368 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1369 }
1370
1371 if (!gelf_update_ehdr(to->elf, ehdr))
1372 return -1;
1373
1374 if (!gelf_newphdr(to->elf, count))
1375 return -1;
1376
1377 return 0;
1378}
1379
1380static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1381 u64 addr, u64 len)
1382{
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001383 GElf_Phdr phdr = {
1384 .p_type = PT_LOAD,
1385 .p_flags = PF_R | PF_W | PF_X,
1386 .p_offset = offset,
1387 .p_vaddr = addr,
1388 .p_paddr = 0,
1389 .p_filesz = len,
1390 .p_memsz = len,
1391 .p_align = page_size,
1392 };
Adrian Hunterafba19d2013-10-09 15:01:12 +03001393
Adrian Hunterb5cabbc2015-09-24 13:05:22 +03001394 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
Adrian Hunterafba19d2013-10-09 15:01:12 +03001395 return -1;
1396
1397 return 0;
1398}
1399
1400static off_t kcore__write(struct kcore *kcore)
1401{
1402 return elf_update(kcore->elf, ELF_C_WRITE);
1403}
1404
Adrian Hunterfc1b6912013-10-14 16:57:29 +03001405struct phdr_data {
1406 off_t offset;
1407 u64 addr;
1408 u64 len;
1409};
1410
1411struct kcore_copy_info {
1412 u64 stext;
1413 u64 etext;
1414 u64 first_symbol;
1415 u64 last_symbol;
1416 u64 first_module;
1417 u64 last_module_symbol;
1418 struct phdr_data kernel_map;
1419 struct phdr_data modules_map;
1420};
1421
1422static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1423 u64 start)
1424{
1425 struct kcore_copy_info *kci = arg;
1426
1427 if (!symbol_type__is_a(type, MAP__FUNCTION))
1428 return 0;
1429
1430 if (strchr(name, '[')) {
1431 if (start > kci->last_module_symbol)
1432 kci->last_module_symbol = start;
1433 return 0;
1434 }
1435
1436 if (!kci->first_symbol || start < kci->first_symbol)
1437 kci->first_symbol = start;
1438
1439 if (!kci->last_symbol || start > kci->last_symbol)
1440 kci->last_symbol = start;
1441
1442 if (!strcmp(name, "_stext")) {
1443 kci->stext = start;
1444 return 0;
1445 }
1446
1447 if (!strcmp(name, "_etext")) {
1448 kci->etext = start;
1449 return 0;
1450 }
1451
1452 return 0;
1453}
1454
1455static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1456 const char *dir)
1457{
1458 char kallsyms_filename[PATH_MAX];
1459
1460 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1461
1462 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1463 return -1;
1464
1465 if (kallsyms__parse(kallsyms_filename, kci,
1466 kcore_copy__process_kallsyms) < 0)
1467 return -1;
1468
1469 return 0;
1470}
1471
1472static int kcore_copy__process_modules(void *arg,
1473 const char *name __maybe_unused,
1474 u64 start)
1475{
1476 struct kcore_copy_info *kci = arg;
1477
1478 if (!kci->first_module || start < kci->first_module)
1479 kci->first_module = start;
1480
1481 return 0;
1482}
1483
1484static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1485 const char *dir)
1486{
1487 char modules_filename[PATH_MAX];
1488
1489 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1490
1491 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1492 return -1;
1493
1494 if (modules__parse(modules_filename, kci,
1495 kcore_copy__process_modules) < 0)
1496 return -1;
1497
1498 return 0;
1499}
1500
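/*
 * If the symbol range [s, e) starts inside the kcore segment [start, end) and
 * @p has not been filled in yet, record its address, file offset and length.
 */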
1501static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1502 u64 s, u64 e)
1503{
1504 if (p->addr || s < start || s >= end)
1505 return;
1506
1507 p->addr = s;
1508 p->offset = (s - start) + pgoff;
1509 p->len = e < end ? e - s : end - s;
1510}
1511
1512static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1513{
1514 struct kcore_copy_info *kci = data;
1515 u64 end = start + len;
1516
1517 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1518 kci->etext);
1519
1520 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1521 kci->last_module_symbol);
1522
1523 return 0;
1524}
1525
1526static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1527{
1528 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
1529 return -1;
1530
1531 return 0;
1532}
1533
1534static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1535 Elf *elf)
1536{
1537 if (kcore_copy__parse_kallsyms(kci, dir))
1538 return -1;
1539
1540 if (kcore_copy__parse_modules(kci, dir))
1541 return -1;
1542
1543 if (kci->stext)
1544 kci->stext = round_down(kci->stext, page_size);
1545 else
1546 kci->stext = round_down(kci->first_symbol, page_size);
1547
1548 if (kci->etext) {
1549 kci->etext = round_up(kci->etext, page_size);
1550 } else if (kci->last_symbol) {
1551 kci->etext = round_up(kci->last_symbol, page_size);
1552 kci->etext += page_size;
1553 }
1554
1555 kci->first_module = round_down(kci->first_module, page_size);
1556
1557 if (kci->last_module_symbol) {
1558 kci->last_module_symbol = round_up(kci->last_module_symbol,
1559 page_size);
1560 kci->last_module_symbol += page_size;
1561 }
1562
1563 if (!kci->stext || !kci->etext)
1564 return -1;
1565
1566 if (kci->first_module && !kci->last_module_symbol)
1567 return -1;
1568
1569 return kcore_copy__read_maps(kci, elf);
1570}
1571
1572static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1573 const char *name)
1574{
1575 char from_filename[PATH_MAX];
1576 char to_filename[PATH_MAX];
1577
1578 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1579 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1580
1581 return copyfile_mode(from_filename, to_filename, 0400);
1582}
1583
1584static int kcore_copy__unlink(const char *dir, const char *name)
1585{
1586 char filename[PATH_MAX];
1587
1588 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1589
1590 return unlink(filename);
1591}
1592
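/* Compare the contents readable from @from against @to; 0 if identical, -1 otherwise. */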
1593static int kcore_copy__compare_fds(int from, int to)
1594{
1595 char *buf_from;
1596 char *buf_to;
1597 ssize_t ret;
1598 size_t len;
1599 int err = -1;
1600
1601 buf_from = malloc(page_size);
1602 buf_to = malloc(page_size);
1603 if (!buf_from || !buf_to)
1604 goto out;
1605
1606 while (1) {
1607 /* Use read because mmap won't work on proc files */
1608 ret = read(from, buf_from, page_size);
1609 if (ret < 0)
1610 goto out;
1611
1612 if (!ret)
1613 break;
1614
1615 len = ret;
1616
1617 if (readn(to, buf_to, len) != (int)len)
1618 goto out;
1619
1620 if (memcmp(buf_from, buf_to, len))
1621 goto out;
1622 }
1623
1624 err = 0;
1625out:
1626 free(buf_to);
1627 free(buf_from);
1628 return err;
1629}
1630
1631static int kcore_copy__compare_files(const char *from_filename,
1632 const char *to_filename)
1633{
1634 int from, to, err = -1;
1635
1636 from = open(from_filename, O_RDONLY);
1637 if (from < 0)
1638 return -1;
1639
1640 to = open(to_filename, O_RDONLY);
1641 if (to < 0)
1642 goto out_close_from;
1643
1644 err = kcore_copy__compare_fds(from, to);
1645
1646 close(to);
1647out_close_from:
1648 close(from);
1649 return err;
1650}
1651
1652static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1653 const char *name)
1654{
1655 char from_filename[PATH_MAX];
1656 char to_filename[PATH_MAX];
1657
1658 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1659 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1660
1661 return kcore_copy__compare_files(from_filename, to_filename);
1662}
1663
1664/**
1665 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1666 * @from_dir: from directory
1667 * @to_dir: to directory
1668 *
1669 * This function copies kallsyms, modules and kcore files from one directory to
1670 * another. kallsyms and modules are copied entirely. Only code segments are
1671 * copied from kcore. It is assumed that two segments suffice: one for the
1672 * kernel proper and one for all the modules. The code segments are determined
1673 * from kallsyms and modules files. The kernel map starts at _stext or the
1674 * lowest function symbol, and ends at _etext or the highest function symbol.
1675 * The module map starts at the lowest module address and ends at the highest
1676 * module symbol. Start addresses are rounded down to the nearest page. End
1677 * addresses are rounded up to the nearest page. An extra page is added to the
1678 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1679 * symbol too. Because it contains only code sections, the resulting kcore is
1680 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1681 * is not the same for the kernel map and the modules map. That happens because
1682 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1683 * kallsyms and modules files are compared with their copies to check that
1684 * modules have not been loaded or unloaded while the copies were taking place.
1685 *
1686 * Return: %0 on success, %-1 on failure.
1687 */
1688int kcore_copy(const char *from_dir, const char *to_dir)
1689{
1690 struct kcore kcore;
1691 struct kcore extract;
1692 size_t count = 2;
1693 int idx = 0, err = -1;
1694 off_t offset = page_size, sz, modules_offset = 0;
1695 struct kcore_copy_info kci = { .stext = 0, };
1696 char kcore_filename[PATH_MAX];
1697 char extract_filename[PATH_MAX];
1698
1699 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1700 return -1;
1701
1702 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1703 goto out_unlink_kallsyms;
1704
1705 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1706 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1707
1708 if (kcore__open(&kcore, kcore_filename))
1709 goto out_unlink_modules;
1710
1711 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1712 goto out_kcore_close;
1713
1714 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1715 goto out_kcore_close;
1716
1717 if (!kci.modules_map.addr)
1718 count -= 1;
1719
1720 if (kcore__copy_hdr(&kcore, &extract, count))
1721 goto out_extract_close;
1722
1723 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1724 kci.kernel_map.len))
1725 goto out_extract_close;
1726
1727 if (kci.modules_map.addr) {
1728 modules_offset = offset + kci.kernel_map.len;
1729 if (kcore__add_phdr(&extract, idx, modules_offset,
1730 kci.modules_map.addr, kci.modules_map.len))
1731 goto out_extract_close;
1732 }
1733
1734 sz = kcore__write(&extract);
1735 if (sz < 0 || sz > offset)
1736 goto out_extract_close;
1737
1738 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1739 kci.kernel_map.len))
1740 goto out_extract_close;
1741
1742 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1743 extract.fd, modules_offset,
1744 kci.modules_map.len))
1745 goto out_extract_close;
1746
1747 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1748 goto out_extract_close;
1749
1750 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1751 goto out_extract_close;
1752
1753 err = 0;
1754
1755out_extract_close:
1756 kcore__close(&extract);
1757 if (err)
1758 unlink(extract_filename);
1759out_kcore_close:
1760 kcore__close(&kcore);
1761out_unlink_modules:
1762 if (err)
1763 kcore_copy__unlink(to_dir, "modules");
1764out_unlink_kallsyms:
1765 if (err)
1766 kcore_copy__unlink(to_dir, "kallsyms");
1767
1768 return err;
1769}
1770
Adrian Hunterafba19d2013-10-09 15:01:12 +03001771int kcore_extract__create(struct kcore_extract *kce)
1772{
1773 struct kcore kcore;
1774 struct kcore extract;
1775 size_t count = 1;
1776 int idx = 0, err = -1;
1777 off_t offset = page_size, sz;
1778
1779 if (kcore__open(&kcore, kce->kcore_filename))
1780 return -1;
1781
1782 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1783 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1784 goto out_kcore_close;
1785
1786 if (kcore__copy_hdr(&kcore, &extract, count))
1787 goto out_extract_close;
1788
1789 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1790 goto out_extract_close;
1791
1792 sz = kcore__write(&extract);
1793 if (sz < 0 || sz > offset)
1794 goto out_extract_close;
1795
1796 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1797 goto out_extract_close;
1798
1799 err = 0;
1800
1801out_extract_close:
1802 kcore__close(&extract);
1803 if (err)
1804 unlink(kce->extract_filename);
1805out_kcore_close:
1806 kcore__close(&kcore);
1807
1808 return err;
1809}
1810
1811void kcore_extract__delete(struct kcore_extract *kce)
1812{
1813 unlink(kce->extract_filename);
1814}
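/*
 * Illustrative sketch, not part of the original file: the typical
 * create/use/delete lifecycle of a kcore extract.  The parameter types
 * and placeholder values are assumptions made for the example.
 */
#if 0
static void example_extract_region(char *kcore_filename, u64 addr,
				   u64 offs, u64 len)
{
	struct kcore_extract kce = {
		.kcore_filename = kcore_filename,
		.addr = addr,	/* map start address of the region */
		.offs = offs,	/* offset of the region within kcore */
		.len  = len,	/* length of the region to extract */
	};

	if (kcore_extract__create(&kce))
		return;

	/* ... map or read kce.extract_filename here ... */

	kcore_extract__delete(&kce);
}
#endif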
1815
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03001816#ifdef HAVE_GELF_GETNOTE_SUPPORT
Hemant Kumar060fa0c2016-07-01 17:03:46 +09001817/**
1818 * populate_sdt_note : Parse raw data and identify SDT note
1819 * @elf: elf of the opened file
1820 * @data: raw data of a section with description offset applied
1821 * @len: note description size
1823 * @sdt_notes: list to add the SDT note to
1824 *
1825 * Responsible for parsing the @data in section .note.stapsdt in @elf and,
1826 * if it is an SDT note, appending it to the @sdt_notes list.
1827 */
1828static int populate_sdt_note(Elf **elf, const char *data, size_t len,
1829 struct list_head *sdt_notes)
1830{
1831 const char *provider, *name;
1832 struct sdt_note *tmp = NULL;
1833 GElf_Ehdr ehdr;
1834 GElf_Addr base_off = 0;
1835 GElf_Shdr shdr;
1836 int ret = -EINVAL;
1837
1838 union {
1839 Elf64_Addr a64[NR_ADDR];
1840 Elf32_Addr a32[NR_ADDR];
1841 } buf;
1842
1843 Elf_Data dst = {
1844 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
1845 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
1846 .d_off = 0, .d_align = 0
1847 };
1848 Elf_Data src = {
1849 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
1850 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
1851 .d_align = 0
1852 };
1853
1854 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
1855 if (!tmp) {
1856 ret = -ENOMEM;
1857 goto out_err;
1858 }
1859
1860 INIT_LIST_HEAD(&tmp->note_list);
1861
1862 if (len < dst.d_size + 3)
1863 goto out_free_note;
1864
1865 /* Translation from file representation to memory representation */
1866 if (gelf_xlatetom(*elf, &dst, &src,
1867 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
1868 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
1869 goto out_free_note;
1870 }
1871
1872 /* Populate the fields of sdt_note */
1873 provider = data + dst.d_size;
1874
1875 name = (const char *)memchr(provider, '\0', data + len - provider);
1876 if (name++ == NULL)
1877 goto out_free_note;
1878
1879 tmp->provider = strdup(provider);
1880 if (!tmp->provider) {
1881 ret = -ENOMEM;
1882 goto out_free_note;
1883 }
1884 tmp->name = strdup(name);
1885 if (!tmp->name) {
1886 ret = -ENOMEM;
1887 goto out_free_prov;
1888 }
1889
1890 if (gelf_getclass(*elf) == ELFCLASS32) {
1891 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
1892 tmp->bit32 = true;
1893 } else {
1894 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
1895 tmp->bit32 = false;
1896 }
1897
1898 if (!gelf_getehdr(*elf, &ehdr)) {
1899 pr_debug("%s : cannot get elf header.\n", __func__);
1900 ret = -EBADF;
1901 goto out_free_name;
1902 }
1903
1904	/* Adjust for the prelink effect:
1905	 * Find the .stapsdt.base section.
1906	 * This section helps us handle prelinking (if present).
1907	 * Compare the retrieved file offset of the base section with the
1908	 * base address in the description of the SDT note. If they differ,
1909	 * adjust the note location accordingly.
1910	 */
1911 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL)) {
1912 base_off = shdr.sh_offset;
1913 if (base_off) {
1914 if (tmp->bit32)
1915 tmp->addr.a32[0] = tmp->addr.a32[0] + base_off -
1916 tmp->addr.a32[1];
1917 else
1918 tmp->addr.a64[0] = tmp->addr.a64[0] + base_off -
1919 tmp->addr.a64[1];
1920 }
1921 }
1922
1923 list_add_tail(&tmp->note_list, sdt_notes);
1924 return 0;
1925
1926out_free_name:
1927 free(tmp->name);
1928out_free_prov:
1929 free(tmp->provider);
1930out_free_note:
1931 free(tmp);
1932out_err:
1933 return ret;
1934}
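/*
 * Illustrative layout, inferred from the parsing above rather than from
 * a normative description of the SDT format: the note description holds
 * NR_ADDR addresses followed by NUL-terminated strings, e.g. for a
 * 64-bit ELF:
 *
 *   addr.a64[0]  probe location
 *   addr.a64[1]  address of the .stapsdt.base section at link time
 *   addr.a64[2]  semaphore address (commonly; not used by this function)
 *   "provider\0name\0..."
 *
 * The prelink adjustment above then rebases the location as:
 *   a64[0] += sh_offset(SDT_BASE_SCN) - a64[1]
 */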
1935
1936/**
1937 * construct_sdt_notes_list : constructs a list of SDT notes
1938 * @elf : elf to look into
1939 * @sdt_notes : empty list_head
1940 *
1941 * Scans the sections in 'elf' for the section
1942 * .note.stapsdt. It then calls populate_sdt_note() to find
1943 * the SDT events and populate 'sdt_notes'.
1944 */
1945static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
1946{
1947 GElf_Ehdr ehdr;
1948 Elf_Scn *scn = NULL;
1949 Elf_Data *data;
1950 GElf_Shdr shdr;
1951 size_t shstrndx, next;
1952 GElf_Nhdr nhdr;
1953 size_t name_off, desc_off, offset;
1954 int ret = 0;
1955
1956 if (gelf_getehdr(elf, &ehdr) == NULL) {
1957 ret = -EBADF;
1958 goto out_ret;
1959 }
1960 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
1961 ret = -EBADF;
1962 goto out_ret;
1963 }
1964
1965 /* Look for the required section */
1966 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
1967 if (!scn) {
1968 ret = -ENOENT;
1969 goto out_ret;
1970 }
1971
1972 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
1973 ret = -ENOENT;
1974 goto out_ret;
1975 }
1976
1977 data = elf_getdata(scn, NULL);
1978
1979 /* Get the SDT notes */
1980 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
1981 &desc_off)) > 0; offset = next) {
1982 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
1983 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
1984 sizeof(SDT_NOTE_NAME))) {
1985 /* Check the type of the note */
1986 if (nhdr.n_type != SDT_NOTE_TYPE)
1987 goto out_ret;
1988
1989 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
1990 nhdr.n_descsz, sdt_notes);
1991 if (ret < 0)
1992 goto out_ret;
1993 }
1994 }
1995 if (list_empty(sdt_notes))
1996 ret = -ENOENT;
1997
1998out_ret:
1999 return ret;
2000}
2001
2002/**
2003 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2004 * @head : empty list_head
2005 * @target : file to find SDT notes from
2006 *
2007 * This opens the @target file, initializes
2008 * the ELF, and then calls construct_sdt_notes_list() to fill @head.
2009 */
2010int get_sdt_note_list(struct list_head *head, const char *target)
2011{
2012 Elf *elf;
2013 int fd, ret;
2014
2015 fd = open(target, O_RDONLY);
2016 if (fd < 0)
2017 return -EBADF;
2018
2019 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2020 if (!elf) {
2021 ret = -EBADF;
2022 goto out_close;
2023 }
2024 ret = construct_sdt_notes_list(elf, head);
2025 elf_end(elf);
2026out_close:
2027 close(fd);
2028 return ret;
2029}
2030
2031/**
2032 * cleanup_sdt_note_list : free the sdt notes' list
2033 * @sdt_notes: sdt notes' list
2034 *
2035 * Free up the SDT notes in @sdt_notes.
2036 * Returns the number of SDT notes freed.
2037 */
2038int cleanup_sdt_note_list(struct list_head *sdt_notes)
2039{
2040 struct sdt_note *tmp, *pos;
2041 int nr_free = 0;
2042
2043 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2044 list_del(&pos->note_list);
2045 free(pos->name);
2046 free(pos->provider);
2047 free(pos);
2048 nr_free++;
2049 }
2050 return nr_free;
2051}
2052
2053/**
2054 * sdt_notes__get_count: Counts the number of SDT events
2055 * @start: list_head of the sdt_notes list
2056 *
2057 * Returns the number of SDT notes in the list.
2058 */
2059int sdt_notes__get_count(struct list_head *start)
2060{
2061 struct sdt_note *sdt_ptr;
2062 int count = 0;
2063
2064 list_for_each_entry(sdt_ptr, start, note_list)
2065 count++;
2066 return count;
2067}
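/*
 * Illustrative sketch, not part of the original file: enumerating the
 * SDT markers of a binary with the helpers above.  The target path is a
 * placeholder.
 */
#if 0
static void example_list_sdt_notes(void)
{
	LIST_HEAD(sdt_notes);
	struct sdt_note *pos;

	if (get_sdt_note_list(&sdt_notes, "/usr/bin/example") < 0)
		return;

	pr_debug("found %d SDT notes\n", sdt_notes__get_count(&sdt_notes));

	list_for_each_entry(pos, &sdt_notes, note_list)
		pr_debug("%s:%s\n", pos->provider, pos->name);

	cleanup_sdt_note_list(&sdt_notes);
}
#endif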
Arnaldo Carvalho de Melo1c1a3a42016-07-12 12:19:09 -03002068#endif
Hemant Kumar060fa0c2016-07-01 17:03:46 +09002069
Namhyung Kime5a18452012-08-06 13:41:20 +09002070void symbol__elf_init(void)
2071{
2072 elf_version(EV_CURRENT);
2073}