11 #include <symbol/kallsyms.h>
/*
 * Fallback elf_getphdrnum() for libelf builds that lack it: fetch the
 * program-header count via the ELF header instead.
 * NOTE(review): this listing is a sampled/garbled extraction (stray leading
 * line numbers, dropped lines/braces); restore the full body from the
 * original source before compiling.
 */
14 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
15 static int elf_getphdrnum(Elf *elf, size_t *dst)
20 ehdr = gelf_getehdr(elf, &gehdr);
/* Fallback: older elf.h headers do not define the GNU build-id note type. */
/* NOTE(review): the matching #endif was dropped by the extraction. */
30 #ifndef NT_GNU_BUILD_ID
31 #define NT_GNU_BUILD_ID 3
/*
 * Iterate over every entry of an ELF symtab, filling @sym on each step via
 * gelf_getsym().
 * NOTE(review): garbled extraction -- the kernel-doc comment delimiters and
 * the loop-condition line (presumably "idx < nr_syms;") were dropped;
 * restore from the original source.
 */
35 * elf_symtab__for_each_symbol - iterate thru all the symbols
37 * @syms: struct elf_symtab instance to iterate
39 * @sym: GElf_Sym iterator
41 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
42 for (idx = 0, gelf_getsym(syms, idx, &sym);\
44 idx++, gelf_getsym(syms, idx, &sym))
46 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
48 return GELF_ST_TYPE(sym->st_info);
51 static inline int elf_sym__is_function(const GElf_Sym *sym)
53 return (elf_sym__type(sym) == STT_FUNC ||
54 elf_sym__type(sym) == STT_GNU_IFUNC) &&
56 sym->st_shndx != SHN_UNDEF;
59 static inline bool elf_sym__is_object(const GElf_Sym *sym)
61 return elf_sym__type(sym) == STT_OBJECT &&
63 sym->st_shndx != SHN_UNDEF;
66 static inline int elf_sym__is_label(const GElf_Sym *sym)
68 return elf_sym__type(sym) == STT_NOTYPE &&
70 sym->st_shndx != SHN_UNDEF &&
71 sym->st_shndx != SHN_ABS;
74 static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
78 return elf_sym__is_function(sym);
80 return elf_sym__is_object(sym);
86 static inline const char *elf_sym__name(const GElf_Sym *sym,
87 const Elf_Data *symstrs)
89 return symstrs->d_buf + sym->st_name;
92 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
93 const Elf_Data *secstrs)
95 return secstrs->d_buf + shdr->sh_name;
98 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
99 const Elf_Data *secstrs)
101 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
104 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
105 const Elf_Data *secstrs)
107 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
110 static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
115 return elf_sec__is_text(shdr, secstrs);
117 return elf_sec__is_data(shdr, secstrs);
123 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
129 while ((sec = elf_nextscn(elf, sec)) != NULL) {
130 gelf_getshdr(sec, &shdr);
132 if ((addr >= shdr.sh_addr) &&
133 (addr < (shdr.sh_addr + shdr.sh_size)))
142 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
143 GElf_Shdr *shp, const char *name, size_t *idx)
148 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
149 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
152 while ((sec = elf_nextscn(elf, sec)) != NULL) {
155 gelf_getshdr(sec, shp);
156 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
157 if (str && !strcmp(name, str)) {
/*
 * Iterate the SHT_REL entries of @reldata: 0 <= idx < nr_entries, with @pos
 * pointing at the current entry (backed by @pos_mem).
 * (Garbled listing repaired: the dropped loop-condition line restored --
 * verify against the original source.)
 */
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
/*
 * Iterate the SHT_RELA entries of @reldata: 0 <= idx < nr_entries, with
 * @pos pointing at the current entry (backed by @pos_mem).
 * (Garbled listing repaired: the dropped loop-condition line restored --
 * verify against the original source.)
 */
#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
/*
 * dso__synthesize_plt_symbols - synthesize one "name@plt" symbol per PLT
 * entry by walking the .rela.plt/.rel.plt relocations against .dynsym.
 * NOTE(review): this listing is a sampled/garbled extraction (stray leading
 * line numbers, dropped lines/braces); restore the full body from the
 * original source before compiling.
 */
179 * We need to check if we have a .dynsym, so that we can handle the
180 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
181 * .dynsym or .symtab).
182 * And always look at the original dso, not at debuginfo packages, that
183 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
185 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
186 symbol_filter_t filter)
188 uint32_t nr_rel_entries, idx;
193 GElf_Shdr shdr_rel_plt, shdr_dynsym;
194 Elf_Data *reldata, *syms, *symstrs;
195 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
198 char sympltname[1024];
200 int nr = 0, symidx, err = 0;
/* .dynsym section/header/index were already located by symsrc__init(). */
208 scn_dynsym = ss->dynsym;
209 shdr_dynsym = ss->dynshdr;
210 dynsym_idx = ss->dynsym_idx;
212 if (scn_dynsym == NULL)
/* Try the RELA-style PLT relocation section first, then the REL style. */
215 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
217 if (scn_plt_rel == NULL) {
218 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
220 if (scn_plt_rel == NULL)
/* The relocation section must link back to the .dynsym we found. */
226 if (shdr_rel_plt.sh_link != dynsym_idx)
229 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
233 * Fetch the relocation section to find the idxes to the GOT
234 * and the symbols in the .dynsym they refer to.
236 reldata = elf_getdata(scn_plt_rel, NULL);
240 syms = elf_getdata(scn_dynsym, NULL);
/* String table holding the dynamic symbol names. */
244 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
245 if (scn_symstrs == NULL)
248 symstrs = elf_getdata(scn_symstrs, NULL);
252 if (symstrs->d_size == 0)
255 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
256 plt_offset = shdr_plt.sh_offset;
/* One synthesized "name@plt" symbol per relocation entry (RELA form). */
258 if (shdr_rel_plt.sh_type == SHT_RELA) {
259 GElf_Rela pos_mem, *pos;
261 elf_section__for_each_rela(reldata, pos, pos_mem, idx,
263 symidx = GELF_R_SYM(pos->r_info);
264 plt_offset += shdr_plt.sh_entsize;
265 gelf_getsym(syms, symidx, &sym);
266 snprintf(sympltname, sizeof(sympltname),
267 "%s@plt", elf_sym__name(&sym, symstrs));
269 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
270 STB_GLOBAL, sympltname);
274 if (filter && filter(map, f))
277 symbols__insert(&dso->symbols[map->type], f);
/* Same walk for REL-form relocations (no explicit addend). */
281 } else if (shdr_rel_plt.sh_type == SHT_REL) {
282 GElf_Rel pos_mem, *pos;
283 elf_section__for_each_rel(reldata, pos, pos_mem, idx,
285 symidx = GELF_R_SYM(pos->r_info);
286 plt_offset += shdr_plt.sh_entsize;
287 gelf_getsym(syms, symidx, &sym);
288 snprintf(sympltname, sizeof(sympltname),
289 "%s@plt", elf_sym__name(&sym, symstrs));
291 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
292 STB_GLOBAL, sympltname);
296 if (filter && filter(map, f))
299 symbols__insert(&dso->symbols[map->type], f);
/* Error path: report which dso's PLT info could not be read. */
309 pr_debug("%s: problems reading %s PLT info.\n",
310 __func__, dso->long_name);
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 * (n + 3) & ~3 rounds n up to the next multiple of 4.
 * (Garbled listing repaired: comment delimiters restored.)
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
/*
 * elf_read_build_id - extract the GNU build-id note from an open Elf into
 * @bf (at most @size bytes; any tail is zero-filled).
 * NOTE(review): sampled/garbled listing -- declarations, returns and braces
 * were dropped; restore the full body from the original source.
 */
319 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
329 if (size < BUILD_ID_SIZE)
336 if (gelf_getehdr(elf, &ehdr) == NULL) {
337 pr_err("%s: cannot get elf header.\n", __func__);
/* Look in the usual note sections, in order of preference. */
342 * Check following sections for notes:
343 * '.note.gnu.build-id'
345 * '.note' (VDSO specific)
348 sec = elf_section_by_name(elf, &ehdr, &shdr,
349 ".note.gnu.build-id", NULL);
353 sec = elf_section_by_name(elf, &ehdr, &shdr,
358 sec = elf_section_by_name(elf, &ehdr, &shdr,
367 data = elf_getdata(sec, NULL);
/* Walk the note entries; name/desc are 4-byte aligned per the note format. */
372 while (ptr < (data->d_buf + data->d_size)) {
373 GElf_Nhdr *nhdr = ptr;
374 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
375 descsz = NOTE_ALIGN(nhdr->n_descsz);
378 ptr += sizeof(*nhdr);
/* Match the "GNU" build-id note, copy the descriptor, zero the rest. */
381 if (nhdr->n_type == NT_GNU_BUILD_ID &&
382 nhdr->n_namesz == sizeof("GNU")) {
383 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
384 size_t sz = min(size, descsz);
386 memset(bf + sz, 0, size - sz);
/*
 * filename__read_build_id - open @filename as ELF and extract its build-id
 * into @bf via elf_read_build_id().
 * NOTE(review): sampled/garbled listing -- error paths, elf_end()/close()
 * cleanup and braces were dropped; restore from the original source.
 */
398 int filename__read_build_id(const char *filename, void *bf, size_t size)
403 if (size < BUILD_ID_SIZE)
406 fd = open(filename, O_RDONLY);
410 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
412 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
416 err = elf_read_build_id(elf, bf, size);
/*
 * sysfs__read_build_id - read a build-id note from a sysfs file (raw note
 * stream, not a full ELF image) into @build_id.
 * NOTE(review): sampled/garbled listing -- the read loop scaffolding,
 * buffer declaration for bf, and cleanup were dropped; restore from the
 * original source.
 */
425 int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
429 if (size < BUILD_ID_SIZE)
432 fd = open(filename, O_RDONLY);
439 size_t namesz, descsz;
/* Read one note header, then its padded name and descriptor. */
441 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
444 namesz = NOTE_ALIGN(nhdr.n_namesz);
445 descsz = NOTE_ALIGN(nhdr.n_descsz);
446 if (nhdr.n_type == NT_GNU_BUILD_ID &&
447 nhdr.n_namesz == sizeof("GNU")) {
448 if (read(fd, bf, namesz) != (ssize_t)namesz)
450 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
451 size_t sz = min(descsz, size);
452 if (read(fd, build_id, sz) == (ssize_t)sz) {
453 memset(build_id + sz, 0, size - sz);
/* Not the note we want: skip over its payload and keep scanning. */
457 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
460 int n = namesz + descsz;
461 if (read(fd, bf, n) != n)
/*
 * filename__read_debuglink - copy the contents of the .gnu_debuglink
 * section (the name of the separate debuginfo file) into @debuglink.
 * NOTE(review): sampled/garbled listing -- restore the full body from the
 * original source.  Also note: strncpy() does not NUL-terminate @debuglink
 * when the section string fills all @size bytes -- confirm callers or add
 * explicit termination.
 */
470 int filename__read_debuglink(const char *filename, char *debuglink,
481 fd = open(filename, O_RDONLY);
485 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
487 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
495 if (gelf_getehdr(elf, &ehdr) == NULL) {
496 pr_err("%s: cannot get elf header.\n", __func__);
500 sec = elf_section_by_name(elf, &ehdr, &shdr,
501 ".gnu_debuglink", NULL);
505 data = elf_getdata(sec, NULL);
509 /* the start of this section is a zero-terminated string */
510 strncpy(debuglink, data->d_buf, size);
/*
 * dso__swap_init - decide whether symbol values from this DSO need
 * byte-swapping by comparing the host endianness (probed via the `endian`
 * integer) with the DSO's EI_DATA encoding.
 * NOTE(review): sampled/garbled listing -- the switch on @eidata and the
 * braces were dropped; restore the full body from the original source.
 */
522 static int dso__swap_init(struct dso *dso, unsigned char eidata)
/* Host-endianness probe: first byte of 1 is 1 on little-endian hosts. */
524 static unsigned int const endian = 1;
526 dso->needs_swap = DSO_SWAP__NO;
530 /* We are big endian, DSO is little endian. */
531 if (*(unsigned char const *)&endian != 1)
532 dso->needs_swap = DSO_SWAP__YES;
536 /* We are little endian, DSO is big endian. */
537 if (*(unsigned char const *)&endian != 0)
538 dso->needs_swap = DSO_SWAP__YES;
542 pr_err("unrecognized DSO data encoding %d\n", eidata);
/*
 * decompress_kmodule - decompress a compressed kernel module to a temporary
 * file and return its fd (the temp file is meant to be used immediately).
 * NOTE(review): sampled/garbled listing -- returns, unlink of the temp file
 * and braces were dropped; restore the full body from the original source.
 */
549 static int decompress_kmodule(struct dso *dso, const char *name,
550 enum dso_binary_type type)
553 const char *ext = strrchr(name, '.');
554 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
/* Only applies to compressed kmodule symtab types matching this dso. */
556 if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
557 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
558 type != dso->symtab_type)
561 if (!ext || !is_supported_compression(ext + 1))
564 fd = mkstemp(tmpbuf);
568 if (!decompress_to_file(ext + 1, name, fd)) {
578 bool symsrc__possibly_runtime(struct symsrc *ss)
580 return ss->dynsym || ss->opdsec;
583 bool symsrc__has_symtab(struct symsrc *ss)
585 return ss->symtab != NULL;
/* Release the resources held by @ss. */
/* NOTE(review): the entire body was dropped by the extraction -- restore it
 * from the original source. */
588 void symsrc__destroy(struct symsrc *ss)
/*
 * symsrc__init - open @name as an ELF symbol source for @dso: open/
 * decompress the file, validate the build-id, and locate the .symtab,
 * .dynsym and .opd sections.
 * NOTE(review): sampled/garbled listing -- error labels, cleanup and braces
 * were dropped; restore the full body from the original source.
 */
595 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
596 enum dso_binary_type type)
/* Compressed kernel modules are decompressed to a temp file first. */
603 if (dso__needs_decompress(dso))
604 fd = decompress_kmodule(dso, name, type);
606 fd = open(name, O_RDONLY);
611 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
613 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
617 if (gelf_getehdr(elf, &ehdr) == NULL) {
618 pr_debug("%s: cannot get elf header.\n", __func__);
622 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
625 /* Always reject images with a mismatched build-id: */
626 if (dso->has_build_id) {
627 u8 build_id[BUILD_ID_SIZE];
629 if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
632 if (!dso__build_id_equal(dso, build_id))
636 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
/* Locate the sections of interest; reject ones of the wrong type. */
638 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
640 if (ss->symshdr.sh_type != SHT_SYMTAB)
644 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
646 if (ss->dynshdr.sh_type != SHT_DYNSYM)
650 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
652 if (ss->opdshdr.sh_type != SHT_PROGBITS)
/* Userspace DSOs need symbol adjustment for more e_type cases. */
655 if (dso->kernel == DSO_TYPE_USER) {
657 ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
658 ehdr.e_type == ET_REL ||
660 elf_section_by_name(elf, &ehdr, &shdr,
664 ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
665 ehdr.e_type == ET_REL;
668 ss->name = strdup(name);
687 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
688 * @kmap: kernel maps and relocation reference symbol
690 * This function returns %true if we are dealing with the kernel maps and the
691 * relocation reference symbol has not yet been found. Otherwise %false is
694 static bool ref_reloc_sym_not_found(struct kmap *kmap)
696 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
697 !kmap->ref_reloc_sym->unrelocated_addr;
701 * ref_reloc - kernel relocation offset.
702 * @kmap: kernel maps and relocation reference symbol
704 * This function returns the offset of kernel addresses as determined by using
705 * the relocation reference symbol i.e. if the kernel has not been relocated
706 * then the return value is zero.
708 static u64 ref_reloc(struct kmap *kmap)
710 if (kmap && kmap->ref_reloc_sym &&
711 kmap->ref_reloc_sym->unrelocated_addr)
712 return kmap->ref_reloc_sym->addr -
713 kmap->ref_reloc_sym->unrelocated_addr;
717 static bool want_demangle(bool is_kernel_sym)
719 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
/*
 * dso__load_sym - load symbols for @dso from @syms_ss (the symtab source),
 * adjusting values via @runtime_ss, optionally filtering each symbol, and
 * splitting kernel/module sections into per-section maps as needed.
 * NOTE(review): this listing is a sampled/garbled extraction (stray leading
 * line numbers, many dropped lines/braces); restore the full body from the
 * original source before compiling.
 */
722 int dso__load_sym(struct dso *dso, struct map *map,
723 struct symsrc *syms_ss, struct symsrc *runtime_ss,
724 symbol_filter_t filter, int kmodule)
726 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
727 struct map *curr_map = map;
728 struct dso *curr_dso = dso;
729 Elf_Data *symstrs, *secstrs;
735 Elf_Data *syms, *opddata = NULL;
737 Elf_Scn *sec, *sec_strndx;
740 bool remap_kernel = false, adjust_kernel_syms = false;
742 dso->symtab_type = syms_ss->type;
743 dso->is_64_bit = syms_ss->is_64_bit;
744 dso->rel = syms_ss->ehdr.e_type == ET_REL;
747 * Modules may already have symbols from kallsyms, but those symbols
748 * have the wrong values for the dso maps, so remove them.
750 if (kmodule && syms_ss->symtab)
751 symbols__delete(&dso->symbols[map->type]);
/* No .symtab: fall back to .dynsym (userland) or fail (vmlinux). */
753 if (!syms_ss->symtab) {
755 * If the vmlinux is stripped, fail so we will fall back
756 * to using kallsyms. The vmlinux runtime symbols aren't
762 syms_ss->symtab = syms_ss->dynsym;
763 syms_ss->symshdr = syms_ss->dynshdr;
767 ehdr = syms_ss->ehdr;
768 sec = syms_ss->symtab;
769 shdr = syms_ss->symshdr;
771 if (runtime_ss->opdsec)
772 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
774 syms = elf_getdata(sec, NULL);
/* Symbol-name string table is linked from the symtab header. */
778 sec = elf_getscn(elf, shdr.sh_link);
782 symstrs = elf_getdata(sec, NULL);
786 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
787 if (sec_strndx == NULL)
790 secstrs = elf_getdata(sec_strndx, NULL);
794 nr_syms = shdr.sh_size / shdr.sh_entsize;
796 memset(&sym, 0, sizeof(sym));
799 * The kernel relocation symbol is needed in advance in order to adjust
800 * kernel maps correctly.
802 if (ref_reloc_sym_not_found(kmap)) {
803 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
804 const char *elf_name = elf_sym__name(&sym, symstrs);
806 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
808 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
809 map->reloc = kmap->ref_reloc_sym->addr -
810 kmap->ref_reloc_sym->unrelocated_addr;
815 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
817 * Initial kernel and module mappings do not map to the dso. For
818 * function mappings, flag the fixups.
820 if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
822 adjust_kernel_syms = dso->adjust_symbols;
/* Main pass: one iteration per symtab entry. */
824 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
826 const char *elf_name = elf_sym__name(&sym, symstrs);
827 char *demangled = NULL;
828 int is_label = elf_sym__is_label(&sym);
829 const char *section_name;
830 bool used_opd = false;
832 if (!is_label && !elf_sym__is_a(&sym, map->type))
835 /* Reject ARM ELF "mapping symbols": these aren't unique and
836 * don't identify functions, so will confuse the profile
838 if (ehdr.e_machine == EM_ARM) {
839 if (!strcmp(elf_name, "$a") ||
840 !strcmp(elf_name, "$d") ||
841 !strcmp(elf_name, "$t"))
/* Symbols pointing into .opd are resolved through the descriptor. */
845 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
846 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
847 u64 *opd = opddata->d_buf + offset;
848 sym.st_value = DSO__SWAP(dso, u64, *opd);
849 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
854 * When loading symbols in a data mapping, ABS symbols (which
855 * has a value of SHN_ABS in its st_shndx) failed at
856 * elf_getscn(). And it marks the loading as a failure so
857 * already loaded symbols cannot be fixed up.
859 * I'm not sure what should be done. Just ignore them for now.
862 if (sym.st_shndx == SHN_ABS)
865 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
869 gelf_getshdr(sec, &shdr);
871 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
874 section_name = elf_sec__name(&shdr, secstrs);
876 /* On ARM, symbols for thumb functions have 1 added to
877 * the symbol address as a flag - remove it */
878 if ((ehdr.e_machine == EM_ARM) &&
879 (map->type == MAP__FUNCTION) &&
/* Kernel/module symbols may need remapping into per-section maps. */
883 if (dso->kernel || kmodule) {
884 char dso_name[PATH_MAX];
886 /* Adjust symbol to map to file offset */
887 if (adjust_kernel_syms)
888 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
890 if (strcmp(section_name,
891 (curr_dso->short_name +
892 dso->short_name_len)) == 0)
895 if (strcmp(section_name, ".text") == 0) {
897 * The initial kernel mapping is based on
898 * kallsyms and identity maps. Overwrite it to
899 * map to the kernel dso.
901 if (remap_kernel && dso->kernel) {
902 remap_kernel = false;
903 map->start = shdr.sh_addr +
905 map->end = map->start + shdr.sh_size;
906 map->pgoff = shdr.sh_offset;
907 map->map_ip = map__map_ip;
908 map->unmap_ip = map__unmap_ip;
909 /* Ensure maps are correctly ordered */
910 map_groups__remove(kmap->kmaps, map);
911 map_groups__insert(kmap->kmaps, map);
915 * The initial module mapping is based on
916 * /proc/modules mapped to offset zero.
917 * Overwrite it to map to the module dso.
919 if (remap_kernel && kmodule) {
920 remap_kernel = false;
921 map->pgoff = shdr.sh_offset;
/* Non-.text kernel sections get their own "<dso><section>" map/dso. */
932 snprintf(dso_name, sizeof(dso_name),
933 "%s%s", dso->short_name, section_name);
935 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
936 if (curr_map == NULL) {
937 u64 start = sym.st_value;
940 start += map->start + shdr.sh_offset;
942 curr_dso = dso__new(dso_name);
943 if (curr_dso == NULL)
945 curr_dso->kernel = dso->kernel;
946 curr_dso->long_name = dso->long_name;
947 curr_dso->long_name_len = dso->long_name_len;
948 curr_map = map__new2(start, curr_dso,
950 if (curr_map == NULL) {
951 dso__delete(curr_dso);
954 if (adjust_kernel_syms) {
955 curr_map->start = shdr.sh_addr +
957 curr_map->end = curr_map->start +
959 curr_map->pgoff = shdr.sh_offset;
961 curr_map->map_ip = identity__map_ip;
962 curr_map->unmap_ip = identity__map_ip;
964 curr_dso->symtab_type = dso->symtab_type;
965 map_groups__insert(kmap->kmaps, curr_map);
967 * The new DSO should go to the kernel DSOS
969 dsos__add(&map->groups->machine->kernel_dsos,
971 dso__set_loaded(curr_dso, map->type);
973 curr_dso = curr_map->dso;
/* Userland path: adjust st_value from vaddr to file offset if needed. */
978 if ((used_opd && runtime_ss->adjust_symbols)
979 || (!used_opd && syms_ss->adjust_symbols)) {
980 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
981 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
982 (u64)sym.st_value, (u64)shdr.sh_addr,
983 (u64)shdr.sh_offset);
984 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
988 * We need to figure out if the object was created from C++ sources
989 * DWARF DW_compile_unit has this, but we don't always have access
992 if (want_demangle(dso->kernel || kmodule)) {
993 int demangle_flags = DMGL_NO_OPTS;
995 demangle_flags = DMGL_PARAMS | DMGL_ANSI;
997 demangled = bfd_demangle(NULL, elf_name, demangle_flags);
998 if (demangled != NULL)
999 elf_name = demangled;
1001 f = symbol__new(sym.st_value, sym.st_size,
1002 GELF_ST_BIND(sym.st_info), elf_name);
1007 if (filter && filter(curr_map, f))
1010 symbols__insert(&curr_dso->symbols[curr_map->type], f);
1016 * For misannotated, zeroed, ASM function sizes.
1019 symbols__fixup_duplicate(&dso->symbols[map->type]);
1020 symbols__fixup_end(&dso->symbols[map->type]);
1023 * We need to fixup this here too because we create new
1024 * maps here, for things like vsyscall sections.
1026 __map_groups__fixup_end(kmap->kmaps, map->type);
/*
 * elf_read_maps - iterate the PT_LOAD program headers of @elf and invoke
 * @mapfn(vaddr, size, offset, data) for each executable (or, for non-exe,
 * readable) segment.
 * NOTE(review): sampled/garbled listing -- declarations, continue/return
 * statements and braces were dropped; restore from the original source.
 */
1034 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1041 if (elf_getphdrnum(elf, &phdrnum))
1044 for (i = 0; i < phdrnum; i++) {
1045 if (gelf_getphdr(elf, i, &phdr) == NULL)
1047 if (phdr.p_type != PT_LOAD)
/* Executables: only PF_X segments; otherwise require PF_R. */
1050 if (!(phdr.p_flags & PF_X))
1053 if (!(phdr.p_flags & PF_R))
1056 sz = min(phdr.p_memsz, phdr.p_filesz);
1059 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
/*
 * file__read_maps - open @fd as ELF, report its class via @is_64_bit, and
 * walk its loadable segments through elf_read_maps().
 * NOTE(review): sampled/garbled listing -- restore the full body from the
 * original source.
 */
1066 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1072 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1077 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1079 err = elf_read_maps(elf, exe, mapfn, data);
/*
 * dso__type_fd - classify the ELF at @fd as 64-bit, x32 (EM_X86_64 with
 * 32-bit class) or 32-bit; DSO__TYPE_UNKNOWN when it is not readable ELF.
 * NOTE(review): sampled/garbled listing -- cleanup and braces were dropped;
 * restore from the original source.
 */
1085 enum dso_type dso__type_fd(int fd)
1087 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1092 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1097 if (ek != ELF_K_ELF)
1100 if (gelf_getclass(elf) == ELFCLASS64) {
1101 dso_type = DSO__TYPE_64BIT;
/* 32-bit class: x32 ABI when the machine is still EM_X86_64. */
1105 if (gelf_getehdr(elf, &ehdr) == NULL)
1108 if (ehdr.e_machine == EM_X86_64)
1109 dso_type = DSO__TYPE_X32BIT;
1111 dso_type = DSO__TYPE_32BIT;
/*
 * copy_bytes - copy @len bytes from offset @from_offs of fd @from to offset
 * @to_offs of fd @to, through a page_size bounce buffer.
 * NOTE(review): sampled/garbled listing -- the copy loop scaffolding,
 * error handling and free(buf) were dropped; restore from the original
 * source.
 */
1118 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1123 char *buf = malloc(page_size);
1128 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1131 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1138 /* Use read because mmap won't work on proc files */
1139 r = read(from, buf, n);
1145 r = write(to, buf, n);
/*
 * kcore__open - open an existing kcore image read-only and cache its fd,
 * Elf handle, ELF class and header in @kcore.
 * NOTE(review): sampled/garbled listing -- error labels, close(fd) and
 * braces were dropped; restore from the original source.
 */
1166 static int kcore__open(struct kcore *kcore, const char *filename)
1170 kcore->fd = open(filename, O_RDONLY);
1171 if (kcore->fd == -1)
1174 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1178 kcore->elfclass = gelf_getclass(kcore->elf);
1179 if (kcore->elfclass == ELFCLASSNONE)
1182 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
/* Error path: tear down the Elf handle before failing. */
1189 elf_end(kcore->elf);
/*
 * kcore__init - create a new kcore output file (a mkstemp temp file or an
 * exclusive 0400 file, depending on the dropped "temp" flag) and start an
 * ELF image of class @elfclass in it.
 * NOTE(review): sampled/garbled listing -- the temp/non-temp branch,
 * error labels and braces were dropped; restore from the original source.
 */
1195 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1200 kcore->elfclass = elfclass;
1203 kcore->fd = mkstemp(filename);
1205 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1206 if (kcore->fd == -1)
1209 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1213 if (!gelf_newehdr(kcore->elf, elfclass))
1216 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
/* Error path: tear down the Elf handle before failing. */
1223 elf_end(kcore->elf);
1230 static void kcore__close(struct kcore *kcore)
1232 elf_end(kcore->elf);
/*
 * kcore__copy_hdr - build the output ELF header from the source kcore's
 * header, with @count program headers, no section headers, and class-sized
 * ehdr/phdr layout constants.
 * NOTE(review): sampled/garbled listing -- several field assignments,
 * returns and braces were dropped; restore from the original source.
 */
1236 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1238 GElf_Ehdr *ehdr = &to->ehdr;
1239 GElf_Ehdr *kehdr = &from->ehdr;
1241 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1242 ehdr->e_type = kehdr->e_type;
1243 ehdr->e_machine = kehdr->e_machine;
1244 ehdr->e_version = kehdr->e_version;
1247 ehdr->e_flags = kehdr->e_flags;
/* The copy carries only @count PT_LOAD segments and no sections. */
1248 ehdr->e_phnum = count;
1249 ehdr->e_shentsize = 0;
1251 ehdr->e_shstrndx = 0;
/* Layout constants depend on the 32/64-bit ELF class. */
1253 if (from->elfclass == ELFCLASS32) {
1254 ehdr->e_phoff = sizeof(Elf32_Ehdr);
1255 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
1256 ehdr->e_phentsize = sizeof(Elf32_Phdr);
1258 ehdr->e_phoff = sizeof(Elf64_Ehdr);
1259 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
1260 ehdr->e_phentsize = sizeof(Elf64_Phdr);
1263 if (!gelf_update_ehdr(to->elf, ehdr))
1266 if (!gelf_newphdr(to->elf, count))
/*
 * kcore__add_phdr - fill program header @idx of the output kcore as an
 * RWX PT_LOAD segment of @len bytes at vaddr @addr / file offset @offset.
 * NOTE(review): sampled/garbled listing -- p_paddr assignment, returns and
 * braces were dropped; restore from the original source.
 */
1272 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
1278 phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
1282 phdr->p_type = PT_LOAD;
1283 phdr->p_flags = PF_R | PF_W | PF_X;
1284 phdr->p_offset = offset;
1285 phdr->p_vaddr = addr;
1287 phdr->p_filesz = len;
1288 phdr->p_memsz = len;
1289 phdr->p_align = page_size;
1291 if (!gelf_update_phdr(kcore->elf, idx, phdr))
1297 static off_t kcore__write(struct kcore *kcore)
1299 return elf_update(kcore->elf, ELF_C_WRITE);
/*
 * Bookkeeping for kcore_copy(): symbol/module address bounds gathered from
 * kallsyms/modules, plus the two output segments (kernel text and modules).
 * NOTE(review): sampled/garbled listing -- several fields (stext, etext,
 * first/last symbol, first_module) were dropped; restore from the original
 * source.
 */
1308 struct kcore_copy_info {
1314 u64 last_module_symbol;
1315 struct phdr_data kernel_map;
1316 struct phdr_data modules_map;
/*
 * kallsyms__parse() callback: track the lowest/highest function symbol,
 * the highest module symbol (names containing '['), and _stext/_etext.
 * NOTE(review): sampled/garbled listing -- returns, the _stext/_etext
 * assignments and braces were dropped; restore from the original source.
 */
1319 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
1322 struct kcore_copy_info *kci = arg;
1324 if (!symbol_type__is_a(type, MAP__FUNCTION))
/* Module symbols are marked with a bracketed module name suffix. */
1327 if (strchr(name, '[')) {
1328 if (start > kci->last_module_symbol)
1329 kci->last_module_symbol = start;
1333 if (!kci->first_symbol || start < kci->first_symbol)
1334 kci->first_symbol = start;
1336 if (!kci->last_symbol || start > kci->last_symbol)
1337 kci->last_symbol = start;
1339 if (!strcmp(name, "_stext")) {
1344 if (!strcmp(name, "_etext")) {
/*
 * Parse @dir/kallsyms through kcore_copy__process_kallsyms(), refusing
 * restricted files.
 * NOTE(review): sampled/garbled listing -- returns and braces were dropped;
 * restore from the original source.
 */
1352 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
1355 char kallsyms_filename[PATH_MAX];
1357 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
1359 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
1362 if (kallsyms__parse(kallsyms_filename, kci,
1363 kcore_copy__process_kallsyms) < 0)
/*
 * modules__parse() callback: record the lowest module start address.
 * NOTE(review): sampled/garbled listing -- the return statement and braces
 * were dropped; restore from the original source.
 */
1369 static int kcore_copy__process_modules(void *arg,
1370 const char *name __maybe_unused,
1373 struct kcore_copy_info *kci = arg;
1375 if (!kci->first_module || start < kci->first_module)
1376 kci->first_module = start;
/*
 * Parse @dir/modules through kcore_copy__process_modules(), refusing
 * restricted files.
 * NOTE(review): sampled/garbled listing -- returns and braces were dropped;
 * restore from the original source.
 */
1381 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
1384 char modules_filename[PATH_MAX];
1386 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
1388 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
1391 if (modules__parse(modules_filename, kci,
1392 kcore_copy__process_modules) < 0)
/*
 * kcore_copy__map - if the wanted range [s, e) (dropped parameter names --
 * confirm against the original signature) falls inside the PT_LOAD segment
 * [start, end) with file offset @pgoff, record addr/offset/len in @p.
 * Skips if @p was already filled.
 * NOTE(review): sampled/garbled listing -- the p->addr assignment and
 * braces were dropped; restore from the original source.
 */
1398 static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
1401 if (p->addr || s < start || s >= end)
1405 p->offset = (s - start) + pgoff;
1406 p->len = e < end ? e - s : end - s;
/*
 * elf_read_maps() callback: try to place the kernel text range and the
 * modules range within the current PT_LOAD segment.
 * NOTE(review): sampled/garbled listing -- the etext argument and return
 * were dropped; restore from the original source.
 */
1409 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
1411 struct kcore_copy_info *kci = data;
1412 u64 end = start + len;
1414 kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
1417 kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
1418 kci->last_module_symbol);
1423 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
1425 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
/*
 * kcore_copy__calc_maps - compute the page-aligned kernel-text and modules
 * ranges from kallsyms/modules, then locate them within kcore's PT_LOAD
 * segments.
 * NOTE(review): sampled/garbled listing -- several branches, returns and
 * braces were dropped; restore from the original source.
 */
1431 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
1434 if (kcore_copy__parse_kallsyms(kci, dir))
1437 if (kcore_copy__parse_modules(kci, dir))
/* Kernel text: prefer _stext/_etext, else fall back to symbol extremes. */
1441 kci->stext = round_down(kci->stext, page_size);
1443 kci->stext = round_down(kci->first_symbol, page_size);
1446 kci->etext = round_up(kci->etext, page_size);
1447 } else if (kci->last_symbol) {
1448 kci->etext = round_up(kci->last_symbol, page_size);
/* Extra page to, hopefully, cover the highest symbol itself. */
1449 kci->etext += page_size;
1452 kci->first_module = round_down(kci->first_module, page_size);
1454 if (kci->last_module_symbol) {
1455 kci->last_module_symbol = round_up(kci->last_module_symbol,
1457 kci->last_module_symbol += page_size;
1460 if (!kci->stext || !kci->etext)
1463 if (kci->first_module && !kci->last_module_symbol)
1466 return kcore_copy__read_maps(kci, elf);
1469 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
1472 char from_filename[PATH_MAX];
1473 char to_filename[PATH_MAX];
1475 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1476 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1478 return copyfile_mode(from_filename, to_filename, 0400);
1481 static int kcore_copy__unlink(const char *dir, const char *name)
1483 char filename[PATH_MAX];
1485 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
1487 return unlink(filename);
/*
 * kcore_copy__compare_fds - stream-compare the contents of two open fds
 * using page-sized bounce buffers; returns 0 when identical.
 * NOTE(review): sampled/garbled listing -- the loop scaffolding, EOF
 * handling, buffer frees and returns were dropped; restore from the
 * original source.
 */
1490 static int kcore_copy__compare_fds(int from, int to)
1498 buf_from = malloc(page_size);
1499 buf_to = malloc(page_size);
1500 if (!buf_from || !buf_to)
1504 /* Use read because mmap won't work on proc files */
1505 ret = read(from, buf_from, page_size);
1514 if (readn(to, buf_to, len) != (int)len)
1517 if (memcmp(buf_from, buf_to, len))
/*
 * Open both paths read-only and compare their contents with
 * kcore_copy__compare_fds(); returns 0 when identical, -1 on error.
 * (Garbled listing repaired: open checks, close() cleanup and braces
 * restored -- verify against the original source.)
 */
static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}
1549 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
1552 char from_filename[PATH_MAX];
1553 char to_filename[PATH_MAX];
1555 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
1556 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
1558 return kcore_copy__compare_files(from_filename, to_filename);
/*
 * NOTE(review): this listing is a sampled/garbled extraction (stray leading
 * line numbers, dropped lines/braces, lost kernel-doc delimiters); restore
 * the full body from the original source before compiling.
 */
1562 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
1563 * @from_dir: from directory
1564 * @to_dir: to directory
1566 * This function copies kallsyms, modules and kcore files from one directory to
1567 * another. kallsyms and modules are copied entirely. Only code segments are
1568 * copied from kcore. It is assumed that two segments suffice: one for the
1569 * kernel proper and one for all the modules. The code segments are determined
1570 * from kallsyms and modules files. The kernel map starts at _stext or the
1571 * lowest function symbol, and ends at _etext or the highest function symbol.
1572 * The module map starts at the lowest module address and ends at the highest
1573 * module symbol. Start addresses are rounded down to the nearest page. End
1574 * addresses are rounded up to the nearest page. An extra page is added to the
1575 * highest kernel symbol and highest module symbol to, hopefully, encompass that
1576 * symbol too. Because it contains only code sections, the resulting kcore is
1577 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
1578 * is not the same for the kernel map and the modules map. That happens because
1579 * the data is copied adjacently whereas the original kcore has gaps. Finally,
1580 * kallsyms and modules files are compared with their copies to check that
1581 * modules have not been loaded or unloaded while the copies were taking place.
1583 * Return: %0 on success, %-1 on failure.
1585 int kcore_copy(const char *from_dir, const char *to_dir)
1588 struct kcore extract;
1590 int idx = 0, err = -1;
1591 off_t offset = page_size, sz, modules_offset = 0;
1592 struct kcore_copy_info kci = { .stext = 0, };
1593 char kcore_filename[PATH_MAX];
1594 char extract_filename[PATH_MAX];
/* Copy the text companions first; they are compared again at the end. */
1596 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
1599 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
1600 goto out_unlink_kallsyms;
1602 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
1603 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
1605 if (kcore__open(&kcore, kcore_filename))
1606 goto out_unlink_modules;
1608 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
1609 goto out_kcore_close;
1611 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
1612 goto out_kcore_close;
1614 if (!kci.modules_map.addr)
1617 if (kcore__copy_hdr(&kcore, &extract, count))
1618 goto out_extract_close;
/* Program header 0: kernel text; header 1 (optional): modules. */
1620 if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
1621 kci.kernel_map.len))
1622 goto out_extract_close;
1624 if (kci.modules_map.addr) {
1625 modules_offset = offset + kci.kernel_map.len;
1626 if (kcore__add_phdr(&extract, idx, modules_offset,
1627 kci.modules_map.addr, kci.modules_map.len))
1628 goto out_extract_close;
1631 sz = kcore__write(&extract);
1632 if (sz < 0 || sz > offset)
1633 goto out_extract_close;
/* Copy the raw segment bytes adjacently after the headers. */
1635 if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
1636 kci.kernel_map.len))
1637 goto out_extract_close;
1639 if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
1640 extract.fd, modules_offset,
1641 kci.modules_map.len))
1642 goto out_extract_close;
/* Abort if modules changed while we were copying. */
1644 if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
1645 goto out_extract_close;
1647 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
1648 goto out_extract_close;
1653 kcore__close(&extract);
1655 unlink(extract_filename);
1657 kcore__close(&kcore);
1660 kcore_copy__unlink(to_dir, "modules");
1661 out_unlink_kallsyms:
1663 kcore_copy__unlink(to_dir, "kallsyms");
/*
 * kcore_extract__create - extract a single [addr, addr+len) range of kcore
 * into a temp file (PERF_KCORE_EXTRACT template) as a one-segment ELF.
 * NOTE(review): sampled/garbled listing -- the success path, labels and
 * braces were partially dropped; restore from the original source.
 */
1668 int kcore_extract__create(struct kcore_extract *kce)
1671 struct kcore extract;
1673 int idx = 0, err = -1;
1674 off_t offset = page_size, sz;
1676 if (kcore__open(&kcore, kce->kcore_filename))
1679 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
1680 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
1681 goto out_kcore_close;
1683 if (kcore__copy_hdr(&kcore, &extract, count))
1684 goto out_extract_close;
1686 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
1687 goto out_extract_close;
1689 sz = kcore__write(&extract);
1690 if (sz < 0 || sz > offset)
1691 goto out_extract_close;
1693 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
1694 goto out_extract_close;
1699 kcore__close(&extract);
/* Error path: remove the partially written extract file. */
1701 unlink(kce->extract_filename);
1703 kcore__close(&kcore);
1708 void kcore_extract__delete(struct kcore_extract *kce)
1710 unlink(kce->extract_filename);
1713 void symbol__elf_init(void)
1715 elf_version(EV_CURRENT);