#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
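/*
 * Illustrative use of the iterator above (dso__load_sym() further below is
 * the real call site): nr_syms comes from the symtab section header and each
 * iteration fills @sym with the next GElf_Sym.
 *
 *	uint32_t idx, nr_syms = shdr.sh_size / shdr.sh_entsize;
 *	GElf_Sym sym;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		const char *name = elf_sym__name(&sym, symstrs);
 *		...
 *	}
 */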

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}
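/*
 * Note: elf_addr_to_index() returns the 1-based index of the section that
 * contains @addr, or (size_t)-1 when no section covers it; dso__load_sym()
 * below feeds the result back into sym.st_shndx when resolving PPC64 .opd
 * function descriptors.
 */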

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
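/*
 * These mirror elf_symtab__for_each_symbol() for relocation sections:
 * dso__synthesize_plt_symbols() below walks .rel.plt/.rela.plt with them to
 * pair each PLT slot with the .dynsym entry it resolves to.
 */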

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the indexes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}
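/*
 * Note on the loops above: plt_offset starts at .plt's sh_offset and is
 * advanced by sh_entsize *before* each symbol is created, so the reserved
 * first PLT slot gets no synthesized "@plt" symbol and relocation entry N
 * maps to PLT slot N + 1.
 */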

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
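/* For example, NOTE_ALIGN(4) == 4 and NOTE_ALIGN(5) == 8. */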

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}
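/*
 * Both the loop above and sysfs__read_build_id() below rely on the standard
 * ELF note layout: an Nhdr (namesz, descsz, type) followed by the name and
 * then the descriptor, each padded to a 4-byte boundary, with the GNU
 * build-id carried in the descriptor of an NT_GNU_BUILD_ID note named "GNU".
 */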

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}
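/*
 * The constant 'endian' above is the usual host-endianness probe: its first
 * byte is 1 on a little-endian host and 0 on a big-endian one, so comparing
 * that byte against the DSO's EI_DATA encoding tells us whether byte swapping
 * (DSO_SWAP__YES) is needed when reading the DSO's data.
 */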

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	fd = open(name, O_RDONLY);
	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				      ehdr.e_type == ET_REL ||
				      dso__is_vdso(dso) ||
				      elf_section_by_name(elf, &ehdr, &shdr,
							  ".gnu.prelink_undo",
							  NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}
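/*
 * Lifecycle sketch (illustrative; the real driver is dso__load() in
 * util/symbol.c): a symsrc is initialized from a candidate file, probed with
 * symsrc__has_symtab()/symsrc__possibly_runtime(), fed to dso__load_sym() and
 * finally released with symsrc__destroy():
 *
 *	struct symsrc ss;
 *
 *	if (symsrc__init(&ss, dso, name, type) < 0)
 *		return -1;
 *	...
 *	symsrc__destroy(&ss);
 */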

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		syms_ss->symtab = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso. For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM) {
			if (!strcmp(elf_name, "$a") ||
			    !strcmp(elf_name, "$d") ||
			    !strcmp(elf_name, "$t"))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(). And it marks the loading as a failure so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps. Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				dsos__add(&dso->node, curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
				|| (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++
		 * sources. DWARF DW_compile_unit has this, but we don't always
		 * have access to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}
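/*
 * Note: on success dso__load_sym() returns the number of symbols added to the
 * dso (possibly zero); it returns -1 when the ELF data could not be read.
 */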

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}
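/*
 * The callback handed to elf_read_maps()/file__read_maps() is invoked once
 * per matching PT_LOAD segment as mapfn(vaddr, size, file_offset, data), and
 * a non-zero return aborts the walk; kcore_copy__read_map() further below is
 * one such callback.
 */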

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr = addr;
	phdr->p_paddr = 0;
	phdr->p_filesz = len;
	phdr->p_memsz = len;
	phdr->p_align = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}
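/*
 * kcore_copy__map() records a wanted range [s, e) that starts inside a
 * PT_LOAD segment [start, end): the file offset of s is its distance from the
 * segment start plus the segment's own file offset (pgoff), and the length is
 * clipped to the end of the segment. It fills @p only once.
 */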

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too. Because it contains only code sections, the resulting kcore is
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}
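/*
 * Usage note (illustrative): since kallsyms, modules and kcore all live under
 * /proc, a caller wanting a private snapshot of the running kernel image can
 * do kcore_copy("/proc", to_dir), which copies /proc/kallsyms, /proc/modules
 * and a trimmed /proc/kcore into to_dir; the destination directory is up to
 * the caller.
 */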

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}
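/*
 * symbol__elf_init() must run before any of the elf_begin() calls in this
 * file: libelf requires the application to declare the ELF version it expects
 * via elf_version() before it will open files.
 */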