#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include <symbol/kallsyms.h>
#include "debug.h"

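/*
 * Fallback for libelf versions that do not provide elf_getphdrnum(): read the
 * program header count straight from the ELF header.  Note that this simple
 * version does not handle the PN_XNUM extension used for very large counts.
 */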
#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

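/*
 * Note type of the GNU build-id note emitted by the linker; defined here for
 * toolchains whose elf.h does not provide it.
 */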
#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
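/*
 * A minimal usage sketch (mirroring the loops further down in this file):
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		const char *name = elf_sym__name(&sym, symstrs);
 *		...
 *	}
 */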

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_FUNC &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

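/*
 * Map an address back to the index of the section that contains it.  Section
 * indexes start at 1 (index 0 is the reserved null section), hence cnt starts
 * at 1.  Returns (size_t)-1 if no section contains the address.
 */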
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (!strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			break;
		}
		++cnt;
	}

	return sec;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the indexes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

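	/*
	 * Both loops below advance plt_offset before creating the symbol, so
	 * the first PLT slot gets no symbol and each "<name>@plt" symbol
	 * points at its own entry.
	 */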
	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
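/* For example, NOTE_ALIGN(4) == 4 and NOTE_ALIGN(5) == 8. */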

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

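/*
 * Read a build-id from a raw note stream exposed by sysfs (such as
 * /sys/kernel/notes), parsing the note headers directly rather than going
 * through libelf.
 */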
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

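/*
 * Record in dso->needs_swap whether symbol data read from this DSO must be
 * byte-swapped, by comparing the DSO's EI_DATA encoding against the host
 * endianness (probed via the 'endian' constant below).
 */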
static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

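/*
 * A symsrc can serve as the runtime image if it has dynamic symbols
 * (.dynsym) or, on ppc64, an .opd function-descriptor section.
 */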
bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	fd = open(name, O_RDONLY);
	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
			NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
			&ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
			&ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				ehdr.e_type == ET_REL ||
				elf_section_by_name(elf, &ehdr, &shdr,
						     ".gnu.prelink_undo",
						     NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has the kernel relocation symbol not been found yet.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol, i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

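/*
 * Load symbols for 'dso' from 'syms_ss' (the image providing the symbol
 * table) while taking run-time section layout from 'runtime_ss'.  Returns
 * the number of symbols added, or a negative value on error.
 */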
int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM) {
			if (!strcmp(elf_name, "$a") ||
			    !strcmp(elf_name, "$d") ||
			    !strcmp(elf_name, "$t"))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(), and that marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				dsos__add(&dso->node, curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++
		 * sources.  DWARF DW_compile_unit has this, but we don't
		 * always have access to it...
		 */
		if (symbol_conf.demangle) {
			demangled = bfd_demangle(NULL, elf_name,
						 DMGL_PARAMS | DMGL_ANSI);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

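/*
 * Walk the program headers and report each PT_LOAD segment to 'mapfn' as
 * (vaddr, size, file offset).  Only executable segments are reported when
 * 'exe' is set, otherwise only readable ones.
 */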
static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

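/*
 * Copy 'len' bytes from 'from' at 'from_offs' to 'to' at 'to_offs' in
 * page_size chunks, using plain read()/write() since /proc files such as
 * /proc/kcore cannot be mmapped.
 */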
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

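/*
 * State for one kcore-style ELF image being read or written: the file
 * descriptor, the libelf handle, its ELF class and its ELF header.
 */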
struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

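/*
 * Copy the ELF header from 'from' to 'to', dropping the section headers and
 * reserving room for 'count' program headers in the output file.
 */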
static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type   = PT_LOAD;
	phdr->p_flags  = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr  = addr;
	phdr->p_paddr  = 0;
	phdr->p_filesz = len;
	phdr->p_memsz  = len;
	phdr->p_align  = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

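/*
 * If 'p' is still unset and 's' falls inside the [start, end) segment, record
 * the map: its address, its file offset within the segment, and its length
 * clipped to the end of the segment.
 */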
static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another.  kallsyms and modules are copied entirely.  Only code segments are
 * copied from kcore.  It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules.  The code segments are determined
 * from kallsyms and modules files.  The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol.  Start addresses are rounded down to the nearest page.  End
 * addresses are rounded up to the nearest page.  An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too.  Because it contains only code sections, the resulting kcore is
 * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map.  That happens because
 * the data is copied adjacently whereas the original kcore has gaps.  Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

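/*
 * Write a temporary ELF file (named via the PERF_KCORE_EXTRACT template)
 * containing a single PT_LOAD segment of kce->len bytes copied from
 * kce->offs in the kcore file; remove it again with kcore_extract__delete().
 */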
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}