GNU Linux-libre 4.14.266-gnu1
[releases.git] / tools / perf / util / symbol.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <linux/kernel.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <sys/param.h>
11 #include <fcntl.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 #include "annotate.h"
15 #include "build-id.h"
16 #include "util.h"
17 #include "debug.h"
18 #include "machine.h"
19 #include "symbol.h"
20 #include "strlist.h"
21 #include "intlist.h"
22 #include "namespaces.h"
23 #include "header.h"
24 #include "path.h"
25 #include "sane_ctype.h"
26
27 #include <elf.h>
28 #include <limits.h>
29 #include <symbol/kallsyms.h>
30 #include <sys/utsname.h>
31
32 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
33 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
34 static bool symbol__is_idle(const char *name);
35
36 int vmlinux_path__nr_entries;
37 char **vmlinux_path;
38
39 struct symbol_conf symbol_conf = {
40         .use_modules            = true,
41         .try_vmlinux_path       = true,
42         .annotate_src           = true,
43         .demangle               = true,
44         .demangle_kernel        = false,
45         .cumulate_callchain     = true,
46         .show_hist_headers      = true,
47         .symfs                  = "",
48         .event_group            = true,
49 };
50
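/*
 * Candidate symbol table sources, tried in this order by dso__load()
 * below until both a symtab source and a runtime source are found.
 */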
51 static enum dso_binary_type binary_type_symtab[] = {
52         DSO_BINARY_TYPE__KALLSYMS,
53         DSO_BINARY_TYPE__GUEST_KALLSYMS,
54         DSO_BINARY_TYPE__JAVA_JIT,
55         DSO_BINARY_TYPE__DEBUGLINK,
56         DSO_BINARY_TYPE__BUILD_ID_CACHE,
57         DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
58         DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
59         DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
60         DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
61         DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
62         DSO_BINARY_TYPE__GUEST_KMODULE,
63         DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
64         DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
65         DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
66         DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
67         DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
68         DSO_BINARY_TYPE__NOT_FOUND,
69 };
70
71 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
72
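/*
 * Map an nm/kallsyms symbol type character onto a perf map type:
 * text symbols ('T', 'W') belong to MAP__FUNCTION, data ('D') to
 * MAP__VARIABLE.
 */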
73 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
74 {
75         symbol_type = toupper(symbol_type);
76
77         switch (map_type) {
78         case MAP__FUNCTION:
79                 return symbol_type == 'T' || symbol_type == 'W';
80         case MAP__VARIABLE:
81                 return symbol_type == 'D';
82         default:
83                 return false;
84         }
85 }
86
87 static int prefix_underscores_count(const char *str)
88 {
89         const char *tail = str;
90
91         while (*tail == '_')
92                 tail++;
93
94         return tail - str;
95 }
96
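/*
 * Weak default implementations; an architecture can override these to
 * adjust how symbol end addresses are fixed up and how symbol names
 * are normalized and compared.
 */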
97 void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
98 {
99         p->end = c->start;
100 }
101
102 const char * __weak arch__normalize_symbol_name(const char *name)
103 {
104         return name;
105 }
106
107 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
108 {
109         return strcmp(namea, nameb);
110 }
111
112 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
113                                         unsigned int n)
114 {
115         return strncmp(namea, nameb, n);
116 }
117
118 int __weak arch__choose_best_symbol(struct symbol *syma,
119                                     struct symbol *symb __maybe_unused)
120 {
121         /* Avoid "SyS" kernel syscall aliases */
122         if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
123                 return SYMBOL_B;
124         if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
125                 return SYMBOL_B;
126
127         return SYMBOL_A;
128 }
129
130 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
131 {
132         s64 a;
133         s64 b;
134         size_t na, nb;
135
136         /* Prefer a symbol with non-zero length */
137         a = syma->end - syma->start;
138         b = symb->end - symb->start;
139         if ((b == 0) && (a > 0))
140                 return SYMBOL_A;
141         else if ((a == 0) && (b > 0))
142                 return SYMBOL_B;
143
144         /* Prefer a non-weak symbol over a weak one */
145         a = syma->binding == STB_WEAK;
146         b = symb->binding == STB_WEAK;
147         if (b && !a)
148                 return SYMBOL_A;
149         if (a && !b)
150                 return SYMBOL_B;
151
152         /* Prefer a global symbol over a non-global one */
153         a = syma->binding == STB_GLOBAL;
154         b = symb->binding == STB_GLOBAL;
155         if (a && !b)
156                 return SYMBOL_A;
157         if (b && !a)
158                 return SYMBOL_B;
159
160         /* Prefer a symbol with less underscores */
161         a = prefix_underscores_count(syma->name);
162         b = prefix_underscores_count(symb->name);
163         if (b > a)
164                 return SYMBOL_A;
165         else if (a > b)
166                 return SYMBOL_B;
167
168         /* Choose the symbol with the longest name */
169         na = strlen(syma->name);
170         nb = strlen(symb->name);
171         if (na > nb)
172                 return SYMBOL_A;
173         else if (na < nb)
174                 return SYMBOL_B;
175
176         return arch__choose_best_symbol(syma, symb);
177 }
178
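/*
 * Drop duplicate symbols sharing a start address, keeping the one
 * preferred by choose_best_symbol(), unless aliases are explicitly
 * allowed.
 */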
179 void symbols__fixup_duplicate(struct rb_root *symbols)
180 {
181         struct rb_node *nd;
182         struct symbol *curr, *next;
183
184         if (symbol_conf.allow_aliases)
185                 return;
186
187         nd = rb_first(symbols);
188
189         while (nd) {
190                 curr = rb_entry(nd, struct symbol, rb_node);
191 again:
192                 nd = rb_next(&curr->rb_node);
193                 next = rb_entry(nd, struct symbol, rb_node);
194
195                 if (!nd)
196                         break;
197
198                 if (curr->start != next->start)
199                         continue;
200
201                 if (choose_best_symbol(curr, next) == SYMBOL_A) {
202                         rb_erase(&next->rb_node, symbols);
203                         symbol__delete(next);
204                         goto again;
205                 } else {
206                         nd = rb_next(&curr->rb_node);
207                         rb_erase(&curr->rb_node, symbols);
208                         symbol__delete(curr);
209                 }
210         }
211 }
212
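/*
 * kallsyms provides no symbol sizes: give every zero-length symbol an
 * end address derived from the next symbol's start (via the arch hook)
 * and make a guess for the last entry.
 */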
213 void symbols__fixup_end(struct rb_root *symbols)
214 {
215         struct rb_node *nd, *prevnd = rb_first(symbols);
216         struct symbol *curr, *prev;
217
218         if (prevnd == NULL)
219                 return;
220
221         curr = rb_entry(prevnd, struct symbol, rb_node);
222
223         for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
224                 prev = curr;
225                 curr = rb_entry(nd, struct symbol, rb_node);
226
227                 if (prev->end == prev->start && prev->end != curr->start)
228                         arch__symbols__fixup_end(prev, curr);
229         }
230
231         /* Last entry */
232         if (curr->end == curr->start)
233                 curr->end = roundup(curr->start, 4096) + 4096;
234 }
235
236 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
237 {
238         struct maps *maps = &mg->maps[type];
239         struct map *next, *curr;
240
241         pthread_rwlock_wrlock(&maps->lock);
242
243         curr = maps__first(maps);
244         if (curr == NULL)
245                 goto out_unlock;
246
247         for (next = map__next(curr); next; next = map__next(curr)) {
248                 if (!curr->end)
249                         curr->end = next->start;
250                 curr = next;
251         }
252
253         /*
254          * We still don't have the actual symbols, so guess the
255          * last map's final address.
256          */
257         if (!curr->end)
258                 curr->end = ~0ULL;
259
260 out_unlock:
261         pthread_rwlock_unlock(&maps->lock);
262 }
263
264 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
265 {
266         size_t namelen = strlen(name) + 1;
267         struct symbol *sym = calloc(1, (symbol_conf.priv_size +
268                                         sizeof(*sym) + namelen));
269         if (sym == NULL)
270                 return NULL;
271
272         if (symbol_conf.priv_size) {
273                 if (symbol_conf.init_annotation) {
274                         struct annotation *notes = (void *)sym;
275                         pthread_mutex_init(&notes->lock, NULL);
276                 }
277                 sym = ((void *)sym) + symbol_conf.priv_size;
278         }
279
280         sym->start   = start;
281         sym->end     = len ? start + len : start;
282         sym->binding = binding;
283         sym->namelen = namelen - 1;
284
285         pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
286                   __func__, name, start, sym->end);
287         memcpy(sym->name, name, namelen);
288
289         return sym;
290 }
291
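/*
 * symbol__new() above allocates symbol_conf.priv_size bytes of private
 * data (e.g. struct annotation) in front of the struct symbol it
 * returns, so symbol__delete() must free from that lower address.
 */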
292 void symbol__delete(struct symbol *sym)
293 {
294         free(((void *)sym) - symbol_conf.priv_size);
295 }
296
297 void symbols__delete(struct rb_root *symbols)
298 {
299         struct symbol *pos;
300         struct rb_node *next = rb_first(symbols);
301
302         while (next) {
303                 pos = rb_entry(next, struct symbol, rb_node);
304                 next = rb_next(&pos->rb_node);
305                 rb_erase(&pos->rb_node, symbols);
306                 symbol__delete(pos);
307         }
308 }
309
310 void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
311 {
312         struct rb_node **p = &symbols->rb_node;
313         struct rb_node *parent = NULL;
314         const u64 ip = sym->start;
315         struct symbol *s;
316
317         if (kernel) {
318                 const char *name = sym->name;
319                 /*
320                  * ppc64 uses function descriptors and prepends a '.' to the
321                  * name of every function entry point. Remove it.
322                  */
323                 if (name[0] == '.')
324                         name++;
325                 sym->idle = symbol__is_idle(name);
326         }
327
328         while (*p != NULL) {
329                 parent = *p;
330                 s = rb_entry(parent, struct symbol, rb_node);
331                 if (ip < s->start)
332                         p = &(*p)->rb_left;
333                 else
334                         p = &(*p)->rb_right;
335         }
336         rb_link_node(&sym->rb_node, parent, p);
337         rb_insert_color(&sym->rb_node, symbols);
338 }
339
340 void symbols__insert(struct rb_root *symbols, struct symbol *sym)
341 {
342         __symbols__insert(symbols, sym, false);
343 }
344
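/*
 * Address lookup: a symbol covers [start, end), except that a
 * zero-length symbol matches exactly its start address.
 */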
345 static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
346 {
347         struct rb_node *n;
348
349         if (symbols == NULL)
350                 return NULL;
351
352         n = symbols->rb_node;
353
354         while (n) {
355                 struct symbol *s = rb_entry(n, struct symbol, rb_node);
356
357                 if (ip < s->start)
358                         n = n->rb_left;
359                 else if (ip > s->end || (ip == s->end && ip != s->start))
360                         n = n->rb_right;
361                 else
362                         return s;
363         }
364
365         return NULL;
366 }
367
368 static struct symbol *symbols__first(struct rb_root *symbols)
369 {
370         struct rb_node *n = rb_first(symbols);
371
372         if (n)
373                 return rb_entry(n, struct symbol, rb_node);
374
375         return NULL;
376 }
377
378 static struct symbol *symbols__last(struct rb_root *symbols)
379 {
380         struct rb_node *n = rb_last(symbols);
381
382         if (n)
383                 return rb_entry(n, struct symbol, rb_node);
384
385         return NULL;
386 }
387
388 static struct symbol *symbols__next(struct symbol *sym)
389 {
390         struct rb_node *n = rb_next(&sym->rb_node);
391
392         if (n)
393                 return rb_entry(n, struct symbol, rb_node);
394
395         return NULL;
396 }
397
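/*
 * A second rb tree keyed by symbol name is kept in dso->symbol_names so
 * that dso__find_symbol_by_name() can binary search by name.
 */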
398 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
399 {
400         struct rb_node **p = &symbols->rb_node;
401         struct rb_node *parent = NULL;
402         struct symbol_name_rb_node *symn, *s;
403
404         symn = container_of(sym, struct symbol_name_rb_node, sym);
405
406         while (*p != NULL) {
407                 parent = *p;
408                 s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
409                 if (strcmp(sym->name, s->sym.name) < 0)
410                         p = &(*p)->rb_left;
411                 else
412                         p = &(*p)->rb_right;
413         }
414         rb_link_node(&symn->rb_node, parent, p);
415         rb_insert_color(&symn->rb_node, symbols);
416 }
417
418 static void symbols__sort_by_name(struct rb_root *symbols,
419                                   struct rb_root *source)
420 {
421         struct rb_node *nd;
422
423         for (nd = rb_first(source); nd; nd = rb_next(nd)) {
424                 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
425                 symbols__insert_by_name(symbols, pos);
426         }
427 }
428
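/*
 * Versioned symbols look like "name@@version" for the default version.
 * When only default versions are wanted, compare just the part that
 * precedes the "@@" marker.
 */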
429 int symbol__match_symbol_name(const char *name, const char *str,
430                               enum symbol_tag_include includes)
431 {
432         const char *versioning;
433
434         if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
435             (versioning = strstr(name, "@@"))) {
436                 int len = strlen(str);
437
438                 if (len < versioning - name)
439                         len = versioning - name;
440
441                 return arch__compare_symbol_names_n(name, str, len);
442         } else
443                 return arch__compare_symbol_names(name, str);
444 }
445
446 static struct symbol *symbols__find_by_name(struct rb_root *symbols,
447                                             const char *name,
448                                             enum symbol_tag_include includes)
449 {
450         struct rb_node *n;
451         struct symbol_name_rb_node *s = NULL;
452
453         if (symbols == NULL)
454                 return NULL;
455
456         n = symbols->rb_node;
457
458         while (n) {
459                 int cmp;
460
461                 s = rb_entry(n, struct symbol_name_rb_node, rb_node);
462                 cmp = symbol__match_symbol_name(s->sym.name, name, includes);
463
464                 if (cmp > 0)
465                         n = n->rb_left;
466                 else if (cmp < 0)
467                         n = n->rb_right;
468                 else
469                         break;
470         }
471
472         if (n == NULL)
473                 return NULL;
474
475         if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
476                 /* return first symbol that has same name (if any) */
477                 for (n = rb_prev(n); n; n = rb_prev(n)) {
478                         struct symbol_name_rb_node *tmp;
479
480                         tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
481                         if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
482                                 break;
483
484                         s = tmp;
485                 }
486
487         return &s->sym;
488 }
489
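/*
 * dso__find_symbol() caches its last lookup per map type; the cache is
 * reset when the symbol trees are rebuilt and updated when a newly
 * inserted symbol covers the cached address.
 */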
490 void dso__reset_find_symbol_cache(struct dso *dso)
491 {
492         enum map_type type;
493
494         for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
495                 dso->last_find_result[type].addr   = 0;
496                 dso->last_find_result[type].symbol = NULL;
497         }
498 }
499
500 void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
501 {
502         __symbols__insert(&dso->symbols[type], sym, dso->kernel);
503
504         /* update the symbol cache if necessary */
505         if (dso->last_find_result[type].addr >= sym->start &&
506             (dso->last_find_result[type].addr < sym->end ||
507             sym->start == sym->end)) {
508                 dso->last_find_result[type].symbol = sym;
509         }
510 }
511
512 struct symbol *dso__find_symbol(struct dso *dso,
513                                 enum map_type type, u64 addr)
514 {
515         if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) {
516                 dso->last_find_result[type].addr   = addr;
517                 dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
518         }
519
520         return dso->last_find_result[type].symbol;
521 }
522
523 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
524 {
525         return symbols__first(&dso->symbols[type]);
526 }
527
528 struct symbol *dso__last_symbol(struct dso *dso, enum map_type type)
529 {
530         return symbols__last(&dso->symbols[type]);
531 }
532
533 struct symbol *dso__next_symbol(struct symbol *sym)
534 {
535         return symbols__next(sym);
536 }
537
538 struct symbol *symbol__next_by_name(struct symbol *sym)
539 {
540         struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
541         struct rb_node *n = rb_next(&s->rb_node);
542
543         return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
544 }
545
546 /*
547  * Returns the first symbol that matches @name.
548  */
549 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
550                                         const char *name)
551 {
552         struct symbol *s = symbols__find_by_name(&dso->symbol_names[type], name,
553                                                  SYMBOL_TAG_INCLUDE__NONE);
554         if (!s)
555                 s = symbols__find_by_name(&dso->symbol_names[type], name,
556                                           SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
557         return s;
558 }
559
560 void dso__sort_by_name(struct dso *dso, enum map_type type)
561 {
562         dso__set_sorted_by_name(dso, type);
563         return symbols__sort_by_name(&dso->symbol_names[type],
564                                      &dso->symbols[type]);
565 }
566
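/*
 * Parse a /proc/modules style file: each line starts with the module
 * name and size and ends with the 0x-prefixed load address; the
 * callback gets the name wrapped in brackets, e.g. "[e1000e]".
 */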
567 int modules__parse(const char *filename, void *arg,
568                    int (*process_module)(void *arg, const char *name,
569                                          u64 start, u64 size))
570 {
571         char *line = NULL;
572         size_t n;
573         FILE *file;
574         int err = 0;
575
576         file = fopen(filename, "r");
577         if (file == NULL)
578                 return -1;
579
580         while (1) {
581                 char name[PATH_MAX];
582                 u64 start, size;
583                 char *sep, *endptr;
584                 ssize_t line_len;
585
586                 line_len = getline(&line, &n, file);
587                 if (line_len < 0) {
588                         if (feof(file))
589                                 break;
590                         err = -1;
591                         goto out;
592                 }
593
594                 if (!line) {
595                         err = -1;
596                         goto out;
597                 }
598
599                 line[--line_len] = '\0'; /* \n */
600
601                 sep = strrchr(line, 'x');
602                 if (sep == NULL)
603                         continue;
604
605                 hex2u64(sep + 1, &start);
606
607                 sep = strchr(line, ' ');
608                 if (sep == NULL)
609                         continue;
610
611                 *sep = '\0';
612
613                 scnprintf(name, sizeof(name), "[%s]", line);
614
615                 size = strtoul(sep + 1, &endptr, 0);
616                 if (*endptr != ' ' && *endptr != '\t')
617                         continue;
618
619                 err = process_module(arg, name, start, size);
620                 if (err)
621                         break;
622         }
623 out:
624         free(line);
625         fclose(file);
626         return err;
627 }
628
629 struct process_kallsyms_args {
630         struct map *map;
631         struct dso *dso;
632 };
633
634 /*
635  * These are idle loop symbols in the kernel image, so make sure that
636  * the name being checked comes from a kernel DSO.
637  */
638 static bool symbol__is_idle(const char *name)
639 {
640         const char * const idle_symbols[] = {
641                 "cpu_idle",
642                 "cpu_startup_entry",
643                 "intel_idle",
644                 "default_idle",
645                 "native_safe_halt",
646                 "enter_idle",
647                 "exit_idle",
648                 "mwait_idle",
649                 "mwait_idle_with_hints",
650                 "poll_idle",
651                 "ppc64_runlatch_off",
652                 "pseries_dedicated_idle_sleep",
653                 NULL
654         };
655         int i;
656
657         for (i = 0; idle_symbols[i]; i++) {
658                 if (!strcmp(idle_symbols[i], name))
659                         return true;
660         }
661
662         return false;
663 }
664
665 static int map__process_kallsym_symbol(void *arg, const char *name,
666                                        char type, u64 start)
667 {
668         struct symbol *sym;
669         struct process_kallsyms_args *a = arg;
670         struct rb_root *root = &a->dso->symbols[a->map->type];
671
672         if (!symbol_type__is_a(type, a->map->type))
673                 return 0;
674
675         /*
676          * module symbols are not sorted so we add all
677          * symbols, setting length to 0, and rely on
678          * symbols__fixup_end() to fix it up.
679          */
680         sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
681         if (sym == NULL)
682                 return -ENOMEM;
683         /*
684          * The symbols are split per map later, in dso__split_kallsyms(),
685          * once the maps have been split per module.
686          */
687         __symbols__insert(root, sym, !strchr(name, '['));
688
689         return 0;
690 }
691
692 /*
693  * Loads the function entries in /proc/kallsyms into kernel_map->dso,
694  * so that we can in the next step set the symbol ->end address and then
695  * call kernel_maps__split_kallsyms.
696  */
697 static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
698                                   struct map *map)
699 {
700         struct process_kallsyms_args args = { .map = map, .dso = dso, };
701         return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
702 }
703
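/*
 * With kcore-derived maps in place, move each kallsyms symbol into the
 * map covering its address and rebase it to that map's file offset;
 * symbols that fall outside every map are dropped.
 */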
704 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
705 {
706         struct map_groups *kmaps = map__kmaps(map);
707         struct map *curr_map;
708         struct symbol *pos;
709         int count = 0;
710         struct rb_root old_root = dso->symbols[map->type];
711         struct rb_root *root = &dso->symbols[map->type];
712         struct rb_node *next = rb_first(root);
713
714         if (!kmaps)
715                 return -1;
716
717         *root = RB_ROOT;
718
719         while (next) {
720                 char *module;
721
722                 pos = rb_entry(next, struct symbol, rb_node);
723                 next = rb_next(&pos->rb_node);
724
725                 rb_erase_init(&pos->rb_node, &old_root);
726
727                 module = strchr(pos->name, '\t');
728                 if (module)
729                         *module = '\0';
730
731                 curr_map = map_groups__find(kmaps, map->type, pos->start);
732
733                 if (!curr_map) {
734                         symbol__delete(pos);
735                         continue;
736                 }
737
738                 pos->start -= curr_map->start - curr_map->pgoff;
739                 if (pos->end)
740                         pos->end -= curr_map->start - curr_map->pgoff;
741                 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
742                 ++count;
743         }
744
745         /* Symbols have been adjusted */
746         dso->adjust_symbols = 1;
747
748         return count;
749 }
750
751 /*
752  * Split the symbols into maps, making sure there are no overlaps, i.e. the
753  * kernel range is broken into several maps, named [kernel].N, as we don't have
754  * the original ELF section names that vmlinux has.
755  */
756 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
757 {
758         struct map_groups *kmaps = map__kmaps(map);
759         struct machine *machine;
760         struct map *curr_map = map;
761         struct symbol *pos;
762         int count = 0, moved = 0;
763         struct rb_root *root = &dso->symbols[map->type];
764         struct rb_node *next = rb_first(root);
765         int kernel_range = 0;
766
767         if (!kmaps)
768                 return -1;
769
770         machine = kmaps->machine;
771
772         while (next) {
773                 char *module;
774
775                 pos = rb_entry(next, struct symbol, rb_node);
776                 next = rb_next(&pos->rb_node);
777
778                 module = strchr(pos->name, '\t');
779                 if (module) {
780                         if (!symbol_conf.use_modules)
781                                 goto discard_symbol;
782
783                         *module++ = '\0';
784
785                         if (strcmp(curr_map->dso->short_name, module)) {
786                                 if (curr_map != map &&
787                                     dso->kernel == DSO_TYPE_GUEST_KERNEL &&
788                                     machine__is_default_guest(machine)) {
789                                         /*
790                                          * We assume all symbols of a module are
791                                          * contiguous in kallsyms, so curr_map
792                                          * points to a module and all its
793                                          * symbols are in its kmap. Mark it as
794                                          * loaded.
795                                          */
796                                         dso__set_loaded(curr_map->dso,
797                                                         curr_map->type);
798                                 }
799
800                                 curr_map = map_groups__find_by_name(kmaps,
801                                                         map->type, module);
802                                 if (curr_map == NULL) {
803                                         pr_debug("%s/proc/{kallsyms,modules} "
804                                                  "inconsistency while looking "
805                                                  "for \"%s\" module!\n",
806                                                  machine->root_dir, module);
807                                         curr_map = map;
808                                         goto discard_symbol;
809                                 }
810
811                                 if (curr_map->dso->loaded &&
812                                     !machine__is_default_guest(machine))
813                                         goto discard_symbol;
814                         }
815                         /*
816                          * So that we look just like we get from .ko files,
817                          * i.e. not prelinked, relative to map->start.
818                          */
819                         pos->start = curr_map->map_ip(curr_map, pos->start);
820                         pos->end   = curr_map->map_ip(curr_map, pos->end);
821                 } else if (curr_map != map) {
822                         char dso_name[PATH_MAX];
823                         struct dso *ndso;
824
825                         if (delta) {
826                                 /* Kernel was relocated at boot time */
827                                 pos->start -= delta;
828                                 pos->end -= delta;
829                         }
830
831                         if (count == 0) {
832                                 curr_map = map;
833                                 goto add_symbol;
834                         }
835
836                         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
837                                 snprintf(dso_name, sizeof(dso_name),
838                                         "[guest.kernel].%d",
839                                         kernel_range++);
840                         else
841                                 snprintf(dso_name, sizeof(dso_name),
842                                         "[kernel].%d",
843                                         kernel_range++);
844
845                         ndso = dso__new(dso_name);
846                         if (ndso == NULL)
847                                 return -1;
848
849                         ndso->kernel = dso->kernel;
850
851                         curr_map = map__new2(pos->start, ndso, map->type);
852                         if (curr_map == NULL) {
853                                 dso__put(ndso);
854                                 return -1;
855                         }
856
857                         curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
858                         map_groups__insert(kmaps, curr_map);
859                         ++kernel_range;
860                 } else if (delta) {
861                         /* Kernel was relocated at boot time */
862                         pos->start -= delta;
863                         pos->end -= delta;
864                 }
865 add_symbol:
866                 if (curr_map != map) {
867                         rb_erase(&pos->rb_node, root);
868                         symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
869                         ++moved;
870                 } else
871                         ++count;
872
873                 continue;
874 discard_symbol:
875                 rb_erase(&pos->rb_node, root);
876                 symbol__delete(pos);
877         }
878
879         if (curr_map != map &&
880             dso->kernel == DSO_TYPE_GUEST_KERNEL &&
881             machine__is_default_guest(kmaps->machine)) {
882                 dso__set_loaded(curr_map->dso, curr_map->type);
883         }
884
885         return count + moved;
886 }
887
888 bool symbol__restricted_filename(const char *filename,
889                                  const char *restricted_filename)
890 {
891         bool restricted = false;
892
893         if (symbol_conf.kptr_restrict) {
894                 char *r = realpath(filename, NULL);
895
896                 if (r != NULL) {
897                         restricted = strcmp(r, restricted_filename) == 0;
898                         free(r);
899                         return restricted;
900                 }
901         }
902
903         return restricted;
904 }
905
906 struct module_info {
907         struct rb_node rb_node;
908         char *name;
909         u64 start;
910 };
911
912 static void add_module(struct module_info *mi, struct rb_root *modules)
913 {
914         struct rb_node **p = &modules->rb_node;
915         struct rb_node *parent = NULL;
916         struct module_info *m;
917
918         while (*p != NULL) {
919                 parent = *p;
920                 m = rb_entry(parent, struct module_info, rb_node);
921                 if (strcmp(mi->name, m->name) < 0)
922                         p = &(*p)->rb_left;
923                 else
924                         p = &(*p)->rb_right;
925         }
926         rb_link_node(&mi->rb_node, parent, p);
927         rb_insert_color(&mi->rb_node, modules);
928 }
929
930 static void delete_modules(struct rb_root *modules)
931 {
932         struct module_info *mi;
933         struct rb_node *next = rb_first(modules);
934
935         while (next) {
936                 mi = rb_entry(next, struct module_info, rb_node);
937                 next = rb_next(&mi->rb_node);
938                 rb_erase(&mi->rb_node, modules);
939                 zfree(&mi->name);
940                 free(mi);
941         }
942 }
943
944 static struct module_info *find_module(const char *name,
945                                        struct rb_root *modules)
946 {
947         struct rb_node *n = modules->rb_node;
948
949         while (n) {
950                 struct module_info *m;
951                 int cmp;
952
953                 m = rb_entry(n, struct module_info, rb_node);
954                 cmp = strcmp(name, m->name);
955                 if (cmp < 0)
956                         n = n->rb_left;
957                 else if (cmp > 0)
958                         n = n->rb_right;
959                 else
960                         return m;
961         }
962
963         return NULL;
964 }
965
966 static int __read_proc_modules(void *arg, const char *name, u64 start,
967                                u64 size __maybe_unused)
968 {
969         struct rb_root *modules = arg;
970         struct module_info *mi;
971
972         mi = zalloc(sizeof(struct module_info));
973         if (!mi)
974                 return -ENOMEM;
975
976         mi->name = strdup(name);
977         mi->start = start;
978
979         if (!mi->name) {
980                 free(mi);
981                 return -ENOMEM;
982         }
983
984         add_module(mi, modules);
985
986         return 0;
987 }
988
989 static int read_proc_modules(const char *filename, struct rb_root *modules)
990 {
991         if (symbol__restricted_filename(filename, "/proc/modules"))
992                 return -1;
993
994         if (modules__parse(filename, modules, __read_proc_modules)) {
995                 delete_modules(modules);
996                 return -1;
997         }
998
999         return 0;
1000 }
1001
1002 int compare_proc_modules(const char *from, const char *to)
1003 {
1004         struct rb_root from_modules = RB_ROOT;
1005         struct rb_root to_modules = RB_ROOT;
1006         struct rb_node *from_node, *to_node;
1007         struct module_info *from_m, *to_m;
1008         int ret = -1;
1009
1010         if (read_proc_modules(from, &from_modules))
1011                 return -1;
1012
1013         if (read_proc_modules(to, &to_modules))
1014                 goto out_delete_from;
1015
1016         from_node = rb_first(&from_modules);
1017         to_node = rb_first(&to_modules);
1018         while (from_node) {
1019                 if (!to_node)
1020                         break;
1021
1022                 from_m = rb_entry(from_node, struct module_info, rb_node);
1023                 to_m = rb_entry(to_node, struct module_info, rb_node);
1024
1025                 if (from_m->start != to_m->start ||
1026                     strcmp(from_m->name, to_m->name))
1027                         break;
1028
1029                 from_node = rb_next(from_node);
1030                 to_node = rb_next(to_node);
1031         }
1032
1033         if (!from_node && !to_node)
1034                 ret = 0;
1035
1036         delete_modules(&to_modules);
1037 out_delete_from:
1038         delete_modules(&from_modules);
1039
1040         return ret;
1041 }
1042
1043 static int do_validate_kcore_modules(const char *filename, struct map *map,
1044                                   struct map_groups *kmaps)
1045 {
1046         struct rb_root modules = RB_ROOT;
1047         struct map *old_map;
1048         int err;
1049
1050         err = read_proc_modules(filename, &modules);
1051         if (err)
1052                 return err;
1053
1054         old_map = map_groups__first(kmaps, map->type);
1055         while (old_map) {
1056                 struct map *next = map_groups__next(old_map);
1057                 struct module_info *mi;
1058
1059                 if (old_map == map || old_map->start == map->start) {
1060                         /* The kernel map */
1061                         old_map = next;
1062                         continue;
1063                 }
1064
1065                 /* Module must be in memory at the same address */
1066                 mi = find_module(old_map->dso->short_name, &modules);
1067                 if (!mi || mi->start != old_map->start) {
1068                         err = -EINVAL;
1069                         goto out;
1070                 }
1071
1072                 old_map = next;
1073         }
1074 out:
1075         delete_modules(&modules);
1076         return err;
1077 }
1078
1079 /*
1080  * If kallsyms is referenced by name then we look for filename in the same
1081  * directory.
1082  */
1083 static bool filename_from_kallsyms_filename(char *filename,
1084                                             const char *base_name,
1085                                             const char *kallsyms_filename)
1086 {
1087         char *name;
1088
1089         strcpy(filename, kallsyms_filename);
1090         name = strrchr(filename, '/');
1091         if (!name)
1092                 return false;
1093
1094         name += 1;
1095
1096         if (!strcmp(name, "kallsyms")) {
1097                 strcpy(name, base_name);
1098                 return true;
1099         }
1100
1101         return false;
1102 }
1103
1104 static int validate_kcore_modules(const char *kallsyms_filename,
1105                                   struct map *map)
1106 {
1107         struct map_groups *kmaps = map__kmaps(map);
1108         char modules_filename[PATH_MAX];
1109
1110         if (!kmaps)
1111                 return -EINVAL;
1112
1113         if (!filename_from_kallsyms_filename(modules_filename, "modules",
1114                                              kallsyms_filename))
1115                 return -EINVAL;
1116
1117         if (do_validate_kcore_modules(modules_filename, map, kmaps))
1118                 return -EINVAL;
1119
1120         return 0;
1121 }
1122
1123 static int validate_kcore_addresses(const char *kallsyms_filename,
1124                                     struct map *map)
1125 {
1126         struct kmap *kmap = map__kmap(map);
1127
1128         if (!kmap)
1129                 return -EINVAL;
1130
1131         if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1132                 u64 start;
1133
1134                 if (kallsyms__get_function_start(kallsyms_filename,
1135                                                  kmap->ref_reloc_sym->name, &start))
1136                         return -ENOENT;
1137                 if (start != kmap->ref_reloc_sym->addr)
1138                         return -EINVAL;
1139         }
1140
1141         return validate_kcore_modules(kallsyms_filename, map);
1142 }
1143
1144 struct kcore_mapfn_data {
1145         struct dso *dso;
1146         enum map_type type;
1147         struct list_head maps;
1148 };
1149
1150 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1151 {
1152         struct kcore_mapfn_data *md = data;
1153         struct map *map;
1154
1155         map = map__new2(start, md->dso, md->type);
1156         if (map == NULL)
1157                 return -ENOMEM;
1158
1159         map->end = map->start + len;
1160         map->pgoff = pgoff;
1161
1162         list_add(&map->node, &md->maps);
1163
1164         return 0;
1165 }
1166
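/*
 * Replace the single kernel map with maps built from kcore's program
 * headers, after checking that kallsyms and the modules file still
 * match the running layout, so object code can be read from kcore via
 * dso__data_read_addr().
 */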
1167 static int dso__load_kcore(struct dso *dso, struct map *map,
1168                            const char *kallsyms_filename)
1169 {
1170         struct map_groups *kmaps = map__kmaps(map);
1171         struct machine *machine;
1172         struct kcore_mapfn_data md;
1173         struct map *old_map, *new_map, *replacement_map = NULL;
1174         bool is_64_bit;
1175         int err, fd;
1176         char kcore_filename[PATH_MAX];
1177         struct symbol *sym;
1178
1179         if (!kmaps)
1180                 return -EINVAL;
1181
1182         machine = kmaps->machine;
1183
1184         /* This function requires that the map is the kernel map */
1185         if (map != machine->vmlinux_maps[map->type])
1186                 return -EINVAL;
1187
1188         if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1189                                              kallsyms_filename))
1190                 return -EINVAL;
1191
1192         /* Modules and kernel must be present at their original addresses */
1193         if (validate_kcore_addresses(kallsyms_filename, map))
1194                 return -EINVAL;
1195
1196         md.dso = dso;
1197         md.type = map->type;
1198         INIT_LIST_HEAD(&md.maps);
1199
1200         fd = open(kcore_filename, O_RDONLY);
1201         if (fd < 0) {
1202                 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1203                          kcore_filename);
1204                 return -EINVAL;
1205         }
1206
1207         /* Read new maps into temporary lists */
1208         err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
1209                               &is_64_bit);
1210         if (err)
1211                 goto out_err;
1212         dso->is_64_bit = is_64_bit;
1213
1214         if (list_empty(&md.maps)) {
1215                 err = -EINVAL;
1216                 goto out_err;
1217         }
1218
1219         /* Remove old maps */
1220         old_map = map_groups__first(kmaps, map->type);
1221         while (old_map) {
1222                 struct map *next = map_groups__next(old_map);
1223
1224                 if (old_map != map)
1225                         map_groups__remove(kmaps, old_map);
1226                 old_map = next;
1227         }
1228
1229         /* Find the kernel map using the first symbol */
1230         sym = dso__first_symbol(dso, map->type);
1231         list_for_each_entry(new_map, &md.maps, node) {
1232                 if (sym && sym->start >= new_map->start &&
1233                     sym->start < new_map->end) {
1234                         replacement_map = new_map;
1235                         break;
1236                 }
1237         }
1238
1239         if (!replacement_map)
1240                 replacement_map = list_entry(md.maps.next, struct map, node);
1241
1242         /* Add new maps */
1243         while (!list_empty(&md.maps)) {
1244                 new_map = list_entry(md.maps.next, struct map, node);
1245                 list_del_init(&new_map->node);
1246                 if (new_map == replacement_map) {
1247                         map->start      = new_map->start;
1248                         map->end        = new_map->end;
1249                         map->pgoff      = new_map->pgoff;
1250                         map->map_ip     = new_map->map_ip;
1251                         map->unmap_ip   = new_map->unmap_ip;
1252                         /* Ensure maps are correctly ordered */
1253                         map__get(map);
1254                         map_groups__remove(kmaps, map);
1255                         map_groups__insert(kmaps, map);
1256                         map__put(map);
1257                 } else {
1258                         map_groups__insert(kmaps, new_map);
1259                 }
1260
1261                 map__put(new_map);
1262         }
1263
1264         /*
1265          * Set the data type and long name so that kcore can be read via
1266          * dso__data_read_addr().
1267          */
1268         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1269                 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1270         else
1271                 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1272         dso__set_long_name(dso, strdup(kcore_filename), true);
1273
1274         close(fd);
1275
1276         if (map->type == MAP__FUNCTION)
1277                 pr_debug("Using %s for kernel object code\n", kcore_filename);
1278         else
1279                 pr_debug("Using %s for kernel data\n", kcore_filename);
1280
1281         return 0;
1282
1283 out_err:
1284         while (!list_empty(&md.maps)) {
1285                 map = list_entry(md.maps.next, struct map, node);
1286                 list_del_init(&map->node);
1287                 map__put(map);
1288         }
1289         close(fd);
1290         return -EINVAL;
1291 }
1292
1293 /*
1294  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1295  * delta based on the relocation reference symbol.
1296  */
1297 static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1298 {
1299         struct kmap *kmap = map__kmap(map);
1300         u64 addr;
1301
1302         if (!kmap)
1303                 return -1;
1304
1305         if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1306                 return 0;
1307
1308         if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1309                 return -1;
1310
1311         *delta = addr - kmap->ref_reloc_sym->addr;
1312         return 0;
1313 }
1314
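/*
 * Top level kallsyms loading: parse all symbols into one tree, fix up
 * ends and duplicates, then split them per kcore map when a usable
 * kcore is found, otherwise per module while applying the boot-time
 * relocation delta.
 */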
1315 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1316                          struct map *map, bool no_kcore)
1317 {
1318         u64 delta = 0;
1319
1320         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1321                 return -1;
1322
1323         if (dso__load_all_kallsyms(dso, filename, map) < 0)
1324                 return -1;
1325
1326         if (kallsyms__delta(map, filename, &delta))
1327                 return -1;
1328
1329         symbols__fixup_end(&dso->symbols[map->type]);
1330         symbols__fixup_duplicate(&dso->symbols[map->type]);
1331
1332         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1333                 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1334         else
1335                 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1336
1337         if (!no_kcore && !dso__load_kcore(dso, map, filename))
1338                 return dso__split_kallsyms_for_kcore(dso, map);
1339         else
1340                 return dso__split_kallsyms(dso, map, delta);
1341 }
1342
1343 int dso__load_kallsyms(struct dso *dso, const char *filename,
1344                        struct map *map)
1345 {
1346         return __dso__load_kallsyms(dso, filename, map, false);
1347 }
1348
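/*
 * Load a /tmp/perf-<pid>.map style file: each line is
 * "<start> <size> <symbol name>" with start and size in hex, the format
 * JITs use to describe their generated code to perf.
 */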
1349 static int dso__load_perf_map(const char *map_path, struct dso *dso,
1350                               struct map *map)
1351 {
1352         char *line = NULL;
1353         size_t n;
1354         FILE *file;
1355         int nr_syms = 0;
1356
1357         file = fopen(map_path, "r");
1358         if (file == NULL)
1359                 goto out_failure;
1360
1361         while (!feof(file)) {
1362                 u64 start, size;
1363                 struct symbol *sym;
1364                 int line_len, len;
1365
1366                 line_len = getline(&line, &n, file);
1367                 if (line_len < 0)
1368                         break;
1369
1370                 if (!line)
1371                         goto out_failure;
1372
1373                 line[--line_len] = '\0'; /* \n */
1374
1375                 len = hex2u64(line, &start);
1376
1377                 len++;
1378                 if (len + 2 >= line_len)
1379                         continue;
1380
1381                 len += hex2u64(line + len, &size);
1382
1383                 len++;
1384                 if (len + 2 >= line_len)
1385                         continue;
1386
1387                 sym = symbol__new(start, size, STB_GLOBAL, line + len);
1388
1389                 if (sym == NULL)
1390                         goto out_delete_line;
1391
1392                 symbols__insert(&dso->symbols[map->type], sym);
1393                 nr_syms++;
1394         }
1395
1396         free(line);
1397         fclose(file);
1398
1399         return nr_syms;
1400
1401 out_delete_line:
1402         free(line);
1403 out_failure:
1404         return -1;
1405 }
1406
1407 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1408                                            enum dso_binary_type type)
1409 {
1410         switch (type) {
1411         case DSO_BINARY_TYPE__JAVA_JIT:
1412         case DSO_BINARY_TYPE__DEBUGLINK:
1413         case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1414         case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1415         case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1416         case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1417         case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1418         case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1419                 return !kmod && dso->kernel == DSO_TYPE_USER;
1420
1421         case DSO_BINARY_TYPE__KALLSYMS:
1422         case DSO_BINARY_TYPE__VMLINUX:
1423         case DSO_BINARY_TYPE__KCORE:
1424                 return dso->kernel == DSO_TYPE_KERNEL;
1425
1426         case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1427         case DSO_BINARY_TYPE__GUEST_VMLINUX:
1428         case DSO_BINARY_TYPE__GUEST_KCORE:
1429                 return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1430
1431         case DSO_BINARY_TYPE__GUEST_KMODULE:
1432         case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1433         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1434         case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1435                 /*
1436                  * kernel modules know their symtab type - it's set when
1437                  * creating a module dso in machine__findnew_module_map().
1438                  */
1439                 return kmod && dso->symtab_type == type;
1440
1441         case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1442         case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1443                 return true;
1444
1445         case DSO_BINARY_TYPE__NOT_FOUND:
1446         default:
1447                 return false;
1448         }
1449 }
1450
1451 /* Checks for the existence of the perf-<pid>.map file in two different
1452  * locations.  First, if the process is in a separate mount namespace, check in
1453  * that namespace using the pid of the innermost pid namespace.  If it's not in a
1454  * namespace, or the file can't be found there, try in the mount namespace of
1455  * the tracing process using our view of its pid.
1456  */
1457 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1458                               struct nsinfo **nsip)
1459 {
1460         struct nscookie nsc;
1461         struct nsinfo *nsi;
1462         struct nsinfo *nnsi;
1463         int rc = -1;
1464
1465         nsi = *nsip;
1466
1467         if (nsi->need_setns) {
1468                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1469                 nsinfo__mountns_enter(nsi, &nsc);
1470                 rc = access(filebuf, R_OK);
1471                 nsinfo__mountns_exit(&nsc);
1472                 if (rc == 0)
1473                         return rc;
1474         }
1475
1476         nnsi = nsinfo__copy(nsi);
1477         if (nnsi) {
1478                 nsinfo__put(nsi);
1479
1480                 nnsi->need_setns = false;
1481                 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1482                 *nsip = nnsi;
1483                 rc = 0;
1484         }
1485
1486         return rc;
1487 }
1488
1489 int dso__load(struct dso *dso, struct map *map)
1490 {
1491         char *name;
1492         int ret = -1;
1493         u_int i;
1494         struct machine *machine;
1495         char *root_dir = (char *) "";
1496         int ss_pos = 0;
1497         struct symsrc ss_[2];
1498         struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1499         bool kmod;
1500         bool perfmap;
1501         unsigned char build_id[BUILD_ID_SIZE];
1502         struct nscookie nsc;
1503         char newmapname[PATH_MAX];
1504         const char *map_path = dso->long_name;
1505
1506         perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1507         if (perfmap) {
1508                 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1509                     sizeof(newmapname), &dso->nsinfo) == 0)) {
1510                         map_path = newmapname;
1511                 }
1512         }
1513
1514         nsinfo__mountns_enter(dso->nsinfo, &nsc);
1515         pthread_mutex_lock(&dso->lock);
1516
1517         /* check again under the dso->lock */
1518         if (dso__loaded(dso, map->type)) {
1519                 ret = 1;
1520                 goto out;
1521         }
1522
1523         if (map->groups && map->groups->machine)
1524                 machine = map->groups->machine;
1525         else
1526                 machine = NULL;
1527
1528         if (dso->kernel) {
1529                 if (dso->kernel == DSO_TYPE_KERNEL)
1530                         ret = dso__load_kernel_sym(dso, map);
1531                 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1532                         ret = dso__load_guest_kernel_sym(dso, map);
1533
1534                 if (machine__is(machine, "x86_64"))
1535                         machine__map_x86_64_entry_trampolines(machine, dso);
1536                 goto out;
1537         }
1538
1539         dso->adjust_symbols = 0;
1540
1541         if (perfmap) {
1542                 struct stat st;
1543
1544                 if (lstat(map_path, &st) < 0)
1545                         goto out;
1546
1547                 if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1548                         pr_warning("File %s not owned by current user or root, "
1549                                    "ignoring it (use -f to override).\n", map_path);
1550                         goto out;
1551                 }
1552
1553                 ret = dso__load_perf_map(map_path, dso, map);
1554                 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1555                                              DSO_BINARY_TYPE__NOT_FOUND;
1556                 goto out;
1557         }
1558
1559         if (machine)
1560                 root_dir = machine->root_dir;
1561
1562         name = malloc(PATH_MAX);
1563         if (!name)
1564                 goto out;
1565
1566         kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1567                 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1568                 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1569                 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1570
1571
1572         /*
1573          * Read the build id if possible. This is required for
1574          * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1575          */
1576         if (!dso->has_build_id &&
1577             is_regular_file(dso->long_name)) {
1578             __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1579             if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1580                 dso__set_build_id(dso, build_id);
1581         }
1582
1583         /*
1584          * Iterate over candidate debug images.
1585          * Keep track of "interesting" ones (those which have a symtab, dynsym,
1586          * and/or opd section) for processing.
1587          */
1588         for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1589                 struct symsrc *ss = &ss_[ss_pos];
1590                 bool next_slot = false;
1591                 bool is_reg;
1592                 bool nsexit;
1593                 int sirc;
1594
1595                 enum dso_binary_type symtab_type = binary_type_symtab[i];
1596
1597                 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1598                     symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1599
1600                 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1601                         continue;
1602
1603                 if (dso__read_binary_type_filename(dso, symtab_type,
1604                                                    root_dir, name, PATH_MAX))
1605                         continue;
1606
1607                 if (nsexit)
1608                         nsinfo__mountns_exit(&nsc);
1609
1610                 is_reg = is_regular_file(name);
1611                 sirc = symsrc__init(ss, dso, name, symtab_type);
1612
1613                 if (nsexit)
1614                         nsinfo__mountns_enter(dso->nsinfo, &nsc);
1615
1616                 if (!is_reg || sirc < 0) {
1617                         if (sirc >= 0)
1618                                 symsrc__destroy(ss);
1619                         continue;
1620                 }
1621
1622                 if (!syms_ss && symsrc__has_symtab(ss)) {
1623                         syms_ss = ss;
1624                         next_slot = true;
1625                         if (!dso->symsrc_filename)
1626                                 dso->symsrc_filename = strdup(name);
1627                 }
1628
1629                 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1630                         runtime_ss = ss;
1631                         next_slot = true;
1632                 }
1633
1634                 if (next_slot) {
1635                         ss_pos++;
1636
1637                         if (syms_ss && runtime_ss)
1638                                 break;
1639                 } else {
1640                         symsrc__destroy(ss);
1641                 }
1642
1643         }
1644
1645         if (!runtime_ss && !syms_ss)
1646                 goto out_free;
1647
1648         if (runtime_ss && !syms_ss) {
1649                 syms_ss = runtime_ss;
1650         }
1651
1652         /* We'll have to hope for the best */
1653         if (!runtime_ss && syms_ss)
1654                 runtime_ss = syms_ss;
1655
1656         if (syms_ss)
1657                 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1658         else
1659                 ret = -1;
1660
1661         if (ret > 0) {
1662                 int nr_plt;
1663
1664                 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map);
1665                 if (nr_plt > 0)
1666                         ret += nr_plt;
1667         }
1668
1669         for (; ss_pos > 0; ss_pos--)
1670                 symsrc__destroy(&ss_[ss_pos - 1]);
1671 out_free:
1672         free(name);
1673         if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1674                 ret = 0;
1675 out:
1676         dso__set_loaded(dso, map->type);
1677         pthread_mutex_unlock(&dso->lock);
1678         nsinfo__mountns_exit(&nsc);
1679
1680         return ret;
1681 }
1682
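/*
 * Linearly search the maps of the given type for one whose dso short
 * name matches @name, under the maps read lock.
 */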
1683 struct map *map_groups__find_by_name(struct map_groups *mg,
1684                                      enum map_type type, const char *name)
1685 {
1686         struct maps *maps = &mg->maps[type];
1687         struct map *map;
1688
1689         pthread_rwlock_rdlock(&maps->lock);
1690
1691         for (map = maps__first(maps); map; map = map__next(map)) {
1692                 if (map->dso && strcmp(map->dso->short_name, name) == 0)
1693                         goto out_unlock;
1694         }
1695
1696         map = NULL;
1697
1698 out_unlock:
1699         pthread_rwlock_unlock(&maps->lock);
1700         return map;
1701 }
1702
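/*
 * Load symbols from one specific vmlinux image.  An absolute path is
 * used as-is, anything else is looked up under the configured symfs;
 * on success the dso's binary type and long name are switched to the
 * image that was actually used.
 */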
1703 int dso__load_vmlinux(struct dso *dso, struct map *map,
1704                       const char *vmlinux, bool vmlinux_allocated)
1705 {
1706         int err = -1;
1707         struct symsrc ss;
1708         char symfs_vmlinux[PATH_MAX];
1709         enum dso_binary_type symtab_type;
1710
1711         if (vmlinux[0] == '/')
1712                 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1713         else
1714                 symbol__join_symfs(symfs_vmlinux, vmlinux);
1715
1716         if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1717                 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1718         else
1719                 symtab_type = DSO_BINARY_TYPE__VMLINUX;
1720
1721         if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1722                 return -1;
1723
1724         err = dso__load_sym(dso, map, &ss, &ss, 0);
1725         symsrc__destroy(&ss);
1726
1727         if (err > 0) {
1728                 if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1729                         dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1730                 else
1731                         dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1732                 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1733                 dso__set_loaded(dso, map->type);
1734                 pr_debug("Using %s for symbols\n", symfs_vmlinux);
1735         }
1736
1737         return err;
1738 }
1739
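/*
 * Walk the vmlinux_path array built by vmlinux_path__init() and stop at
 * the first image that yields symbols; if none does, fall back to a
 * vmlinux located through the build-id cache (unless
 * symbol_conf.ignore_vmlinux_buildid is set).
 */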
1740 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1741 {
1742         int i, err = 0;
1743         char *filename = NULL;
1744
1745         pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1746                  vmlinux_path__nr_entries + 1);
1747
1748         for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1749                 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1750                 if (err > 0)
1751                         goto out;
1752         }
1753
1754         if (!symbol_conf.ignore_vmlinux_buildid)
1755                 filename = dso__build_id_filename(dso, NULL, 0, false);
1756         if (filename != NULL) {
1757                 err = dso__load_vmlinux(dso, map, filename, true);
1758                 if (err > 0)
1759                         goto out;
1760                 free(filename);
1761         }
1762 out:
1763         return err;
1764 }
1765
1766 static bool visible_dir_filter(const char *name, struct dirent *d)
1767 {
1768         if (d->d_type != DT_DIR)
1769                 return false;
1770         return lsdir_no_dot_filter(name, d);
1771 }
1772
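/*
 * Scan the subdirectories of @dir for a kallsyms file whose addresses
 * validate against the given map; on a match, copy that kallsyms path
 * back into @dir and return 0.
 */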
1773 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1774 {
1775         char kallsyms_filename[PATH_MAX];
1776         int ret = -1;
1777         struct strlist *dirs;
1778         struct str_node *nd;
1779
1780         dirs = lsdir(dir, visible_dir_filter);
1781         if (!dirs)
1782                 return -1;
1783
1784         strlist__for_each_entry(nd, dirs) {
1785                 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1786                           "%s/%s/kallsyms", dir, nd->s);
1787                 if (!validate_kcore_addresses(kallsyms_filename, map)) {
1788                         strlcpy(dir, kallsyms_filename, dir_sz);
1789                         ret = 0;
1790                         break;
1791                 }
1792         }
1793
1794         strlist__delete(dirs);
1795
1796         return ret;
1797 }
1798
1799 /*
1800  * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1801  * since access(R_OK) only checks with real UID/GID but open() uses effective
1802  * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1803  */
1804 static bool filename__readable(const char *file)
1805 {
1806         int fd = open(file, O_RDONLY);
1807         if (fd < 0)
1808                 return false;
1809         close(fd);
1810         return true;
1811 }
1812
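/*
 * Choose a kallsyms file for this kernel dso: the running kernel's
 * /proc/kallsyms when the build-id matches the host (and /proc/kcore
 * checks out), otherwise a matching kcore directory or cached kallsyms
 * copy from the build-id cache.  Returns a strdup()'d path or NULL.
 */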
1813 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1814 {
1815         u8 host_build_id[BUILD_ID_SIZE];
1816         char sbuild_id[SBUILD_ID_SIZE];
1817         bool is_host = false;
1818         char path[PATH_MAX];
1819
1820         if (!dso->has_build_id) {
1821                 /*
1822                  * Last resort, if we don't have a build-id and couldn't find
1823                  * any vmlinux file, try the running kernel kallsyms table.
1824                  */
1825                 goto proc_kallsyms;
1826         }
1827
1828         if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1829                                  sizeof(host_build_id)) == 0)
1830                 is_host = dso__build_id_equal(dso, host_build_id);
1831
1832         /* Try a fast path for /proc/kallsyms if possible */
1833         if (is_host) {
1834                 /*
1835                  * Do not check the build-id cache, unless we know we cannot use
1836                  * /proc/kcore or the module maps don't match /proc/kallsyms.
1837                  * To check readability of /proc/kcore, do not use access(R_OK),
1838                  * since reading /proc/kcore requires CAP_SYS_RAWIO and access()
1839                  * cannot check for that.
1840                  */
1841                 if (filename__readable("/proc/kcore") &&
1842                     !validate_kcore_addresses("/proc/kallsyms", map))
1843                         goto proc_kallsyms;
1844         }
1845
1846         build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1847
1848         /* Find kallsyms in build-id cache with kcore */
1849         scnprintf(path, sizeof(path), "%s/%s/%s",
1850                   buildid_dir, DSO__NAME_KCORE, sbuild_id);
1851
1852         if (!find_matching_kcore(map, path, sizeof(path)))
1853                 return strdup(path);
1854
1855         /* Use current /proc/kallsyms if possible */
1856         if (is_host) {
1857 proc_kallsyms:
1858                 return strdup("/proc/kallsyms");
1859         }
1860
1861         /* Finally, find a cache of kallsyms */
1862         if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1863                 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1864                        sbuild_id);
1865                 return NULL;
1866         }
1867
1868         return strdup(path);
1869 }
1870
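/*
 * Resolution order for host kernel symbols: a user-supplied kallsyms
 * file (symbol_conf.kallsyms_name), a user-supplied vmlinux, the
 * vmlinux_path, and finally whatever dso__find_kallsyms() turns up.
 */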
1871 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1872 {
1873         int err;
1874         const char *kallsyms_filename = NULL;
1875         char *kallsyms_allocated_filename = NULL;
1876         /*
1877          * Step 1: if the user specified a kallsyms or vmlinux filename, use
1878          * it and only it, reporting errors to the user if it cannot be used.
1879          *
1880          * For instance, try to analyse an ARM perf.data file _without_ a
1881          * build-id, or if the user specifies the wrong path to the right
1882          * vmlinux file, obviously we can't fall back to another vmlinux (an
1883          * x86_64 one, on the machine where analysis is being performed, say),
1884          * or worse, /proc/kallsyms.
1885          *
1886          * If the specified file _has_ a build-id and there is a build-id
1887          * section in the perf.data file, we will still do the expected
1888          * validation in dso__load_vmlinux and will bail out if they don't
1889          * match.
1890          */
1891         if (symbol_conf.kallsyms_name != NULL) {
1892                 kallsyms_filename = symbol_conf.kallsyms_name;
1893                 goto do_kallsyms;
1894         }
1895
1896         if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1897                 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
1898         }
1899
1900         if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1901                 err = dso__load_vmlinux_path(dso, map);
1902                 if (err > 0)
1903                         return err;
1904         }
1905
1906         /* do not try local files if a symfs was given */
1907         if (symbol_conf.symfs[0] != 0)
1908                 return -1;
1909
1910         kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1911         if (!kallsyms_allocated_filename)
1912                 return -1;
1913
1914         kallsyms_filename = kallsyms_allocated_filename;
1915
1916 do_kallsyms:
1917         err = dso__load_kallsyms(dso, kallsyms_filename, map);
1918         if (err > 0)
1919                 pr_debug("Using %s for symbols\n", kallsyms_filename);
1920         free(kallsyms_allocated_filename);
1921
1922         if (err > 0 && !dso__is_kcore(dso)) {
1923                 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1924                 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
1925                 map__fixup_start(map);
1926                 map__fixup_end(map);
1927         }
1928
1929         return err;
1930 }
1931
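/*
 * Guest kernels are resolved either from the vmlinux/kallsyms files
 * supplied for the default guest or from <root_dir>/proc/kallsyms of
 * the specific guest machine.
 */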
1932 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
1933 {
1934         int err;
1935         const char *kallsyms_filename = NULL;
1936         struct machine *machine;
1937         char path[PATH_MAX];
1938
1939         if (!map->groups) {
1940                 pr_debug("Guest kernel map has no pointer to groups\n");
1941                 return -1;
1942         }
1943         machine = map->groups->machine;
1944
1945         if (machine__is_default_guest(machine)) {
1946                 /*
1947                  * If the user specified a vmlinux filename, use it and only
1948                  * it, reporting errors to the user if it cannot be used.
1949                  * Otherwise use the guest kallsyms file given on the command line.
1950                  */
1951                 if (symbol_conf.default_guest_vmlinux_name != NULL) {
1952                         err = dso__load_vmlinux(dso, map,
1953                                                 symbol_conf.default_guest_vmlinux_name,
1954                                                 false);
1955                         return err;
1956                 }
1957
1958                 kallsyms_filename = symbol_conf.default_guest_kallsyms;
1959                 if (!kallsyms_filename)
1960                         return -1;
1961         } else {
1962                 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1963                 kallsyms_filename = path;
1964         }
1965
1966         err = dso__load_kallsyms(dso, kallsyms_filename, map);
1967         if (err > 0)
1968                 pr_debug("Using %s for symbols\n", kallsyms_filename);
1969         if (err > 0 && !dso__is_kcore(dso)) {
1970                 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1971                 machine__mmap_name(machine, path, sizeof(path));
1972                 dso__set_long_name(dso, strdup(path), true);
1973                 map__fixup_start(map);
1974                 map__fixup_end(map);
1975         }
1976
1977         return err;
1978 }
1979
1980 static void vmlinux_path__exit(void)
1981 {
1982         while (--vmlinux_path__nr_entries >= 0)
1983                 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1984         vmlinux_path__nr_entries = 0;
1985
1986         zfree(&vmlinux_path);
1987 }
1988
1989 static const char * const vmlinux_paths[] = {
1990         "vmlinux",
1991         "/boot/vmlinux"
1992 };
1993
1994 static const char * const vmlinux_paths_upd[] = {
1995         "/boot/vmlinux-%s",
1996         "/usr/lib/debug/boot/vmlinux-%s",
1997         "/lib/modules/%s/build/vmlinux",
1998         "/usr/lib/debug/lib/modules/%s/vmlinux",
1999         "/usr/lib/debug/boot/vmlinux-%s.debug"
2000 };
2001
2002 static int vmlinux_path__add(const char *new_entry)
2003 {
2004         vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2005         if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2006                 return -1;
2007         ++vmlinux_path__nr_entries;
2008
2009         return 0;
2010 }
2011
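/*
 * Build the vmlinux_path array searched by dso__load_vmlinux_path():
 * the fixed names first, then the kernel-version-specific locations,
 * which are skipped when a symfs is in use.
 */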
2012 static int vmlinux_path__init(struct perf_env *env)
2013 {
2014         struct utsname uts;
2015         char bf[PATH_MAX];
2016         char *kernel_version;
2017         unsigned int i;
2018
2019         vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2020                               ARRAY_SIZE(vmlinux_paths_upd)));
2021         if (vmlinux_path == NULL)
2022                 return -1;
2023
2024         for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2025                 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2026                         goto out_fail;
2027
2028         /* only try kernel version if no symfs was given */
2029         if (symbol_conf.symfs[0] != 0)
2030                 return 0;
2031
2032         if (env) {
2033                 kernel_version = env->os_release;
2034         } else {
2035                 if (uname(&uts) < 0)
2036                         goto out_fail;
2037
2038                 kernel_version = uts.release;
2039         }
2040
2041         for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2042                 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2043                 if (vmlinux_path__add(bf) < 0)
2044                         goto out_fail;
2045         }
2046
2047         return 0;
2048
2049 out_fail:
2050         vmlinux_path__exit();
2051         return -1;
2052 }
2053
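/*
 * Turn a filter string into a strlist; a NULL string simply leaves the
 * list unset.  Also records that some symbol filtering is in effect.
 */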
2054 int setup_list(struct strlist **list, const char *list_str,
2055                       const char *list_name)
2056 {
2057         if (list_str == NULL)
2058                 return 0;
2059
2060         *list = strlist__new(list_str, NULL);
2061         if (!*list) {
2062                 pr_err("problems parsing %s list\n", list_name);
2063                 return -1;
2064         }
2065
2066         symbol_conf.has_filter = true;
2067         return 0;
2068 }
2069
2070 int setup_intlist(struct intlist **list, const char *list_str,
2071                   const char *list_name)
2072 {
2073         if (list_str == NULL)
2074                 return 0;
2075
2076         *list = intlist__new(list_str);
2077         if (!*list) {
2078                 pr_err("problems parsing %s list\n", list_name);
2079                 return -1;
2080         }
2081         return 0;
2082 }
2083
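/*
 * Mirror the kernel's kptr_restrict policy: for unprivileged users any
 * non-zero value hides kernel addresses, for root only a value of 2
 * does.
 */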
2084 static bool symbol__read_kptr_restrict(void)
2085 {
2086         bool value = false;
2087         FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2088
2089         if (fp != NULL) {
2090                 char line[8];
2091
2092                 if (fgets(line, sizeof(line), fp) != NULL)
2093                         value = ((geteuid() != 0) || (getuid() != 0)) ?
2094                                         (atoi(line) != 0) :
2095                                         (atoi(line) == 2);
2096
2097                 fclose(fp);
2098         }
2099
2100         return value;
2101 }
2102
2103 int symbol__annotation_init(void)
2104 {
2105         if (symbol_conf.init_annotation)
2106                 return 0;
2107
2108         if (symbol_conf.initialized) {
2109                 pr_err("Annotation needs to be initialized before symbol__init()\n");
2110                 return -1;
2111         }
2112
2113         symbol_conf.priv_size += sizeof(struct annotation);
2114         symbol_conf.init_annotation = true;
2115         return 0;
2116 }
2117
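/*
 * One-time setup of the symbol subsystem: size the per-symbol private
 * area, initialize the ELF loader and the vmlinux search path, and
 * parse the dso/comm/pid/tid/symbol filter lists from symbol_conf.
 */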
2118 int symbol__init(struct perf_env *env)
2119 {
2120         const char *symfs;
2121
2122         if (symbol_conf.initialized)
2123                 return 0;
2124
2125         symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2126
2127         symbol__elf_init();
2128
2129         if (symbol_conf.sort_by_name)
2130                 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2131                                           sizeof(struct symbol));
2132
2133         if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2134                 return -1;
2135
2136         if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2137                 pr_err("'.' is the only invalid --field-separator argument\n");
2138                 return -1;
2139         }
2140
2141         if (setup_list(&symbol_conf.dso_list,
2142                        symbol_conf.dso_list_str, "dso") < 0)
2143                 return -1;
2144
2145         if (setup_list(&symbol_conf.comm_list,
2146                        symbol_conf.comm_list_str, "comm") < 0)
2147                 goto out_free_dso_list;
2148
2149         if (setup_intlist(&symbol_conf.pid_list,
2150                        symbol_conf.pid_list_str, "pid") < 0)
2151                 goto out_free_comm_list;
2152
2153         if (setup_intlist(&symbol_conf.tid_list,
2154                        symbol_conf.tid_list_str, "tid") < 0)
2155                 goto out_free_pid_list;
2156
2157         if (setup_list(&symbol_conf.sym_list,
2158                        symbol_conf.sym_list_str, "symbol") < 0)
2159                 goto out_free_tid_list;
2160
2161         if (setup_list(&symbol_conf.bt_stop_list,
2162                        symbol_conf.bt_stop_list_str, "symbol") < 0)
2163                 goto out_free_sym_list;
2164
2165         /*
2166          * A symfs path of "/" is identical to "";
2167          * reset it here for simplicity.
2168          */
2169         symfs = realpath(symbol_conf.symfs, NULL);
2170         if (symfs == NULL)
2171                 symfs = symbol_conf.symfs;
2172         if (strcmp(symfs, "/") == 0)
2173                 symbol_conf.symfs = "";
2174         if (symfs != symbol_conf.symfs)
2175                 free((void *)symfs);
2176
2177         symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2178
2179         symbol_conf.initialized = true;
2180         return 0;
2181
2182 out_free_sym_list:
2183         strlist__delete(symbol_conf.sym_list);
2184 out_free_tid_list:
2185         intlist__delete(symbol_conf.tid_list);
2186 out_free_pid_list:
2187         intlist__delete(symbol_conf.pid_list);
2188 out_free_comm_list:
2189         strlist__delete(symbol_conf.comm_list);
2190 out_free_dso_list:
2191         strlist__delete(symbol_conf.dso_list);
2192         return -1;
2193 }
2194
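/*
 * Undo symbol__init(): release the filter lists and the vmlinux path
 * and mark the subsystem as uninitialized.
 */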
2195 void symbol__exit(void)
2196 {
2197         if (!symbol_conf.initialized)
2198                 return;
2199         strlist__delete(symbol_conf.bt_stop_list);
2200         strlist__delete(symbol_conf.sym_list);
2201         strlist__delete(symbol_conf.dso_list);
2202         strlist__delete(symbol_conf.comm_list);
2203         intlist__delete(symbol_conf.tid_list);
2204         intlist__delete(symbol_conf.pid_list);
2205         vmlinux_path__exit();
2206         symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2207         symbol_conf.bt_stop_list = NULL;
2208         symbol_conf.initialized = false;
2209 }
2210
2211 int symbol__config_symfs(const struct option *opt __maybe_unused,
2212                          const char *dir, int unset __maybe_unused)
2213 {
2214         char *bf = NULL;
2215         int ret;
2216
2217         symbol_conf.symfs = strdup(dir);
2218         if (symbol_conf.symfs == NULL)
2219                 return -ENOMEM;
2220
2221         /* Skip the locally configured build-id cache if a symfs is given,
2222          * and point the buildid dir at symfs/.debug instead.
2223          */
2224         ret = asprintf(&bf, "%s/%s", dir, ".debug");
2225         if (ret < 0)
2226                 return -ENOMEM;
2227
2228         set_buildid_dir(bf);
2229
2230         free(bf);
2231         return 0;
2232 }