GNU Linux-libre 4.14.290-gnu1
releases.git: tools/perf/util/machine.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include "callchain.h"
7 #include "debug.h"
8 #include "event.h"
9 #include "evsel.h"
10 #include "hist.h"
11 #include "machine.h"
12 #include "map.h"
13 #include "sort.h"
14 #include "strlist.h"
15 #include "thread.h"
16 #include "vdso.h"
17 #include <stdbool.h>
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <unistd.h>
21 #include "unwind.h"
22 #include "linux/hash.h"
23 #include "asm/bug.h"
24
25 #include "sane_ctype.h"
26 #include <symbol/kallsyms.h>
27
28 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
29
30 static void dsos__init(struct dsos *dsos)
31 {
32         INIT_LIST_HEAD(&dsos->head);
33         dsos->root = RB_ROOT;
34         pthread_rwlock_init(&dsos->lock, NULL);
35 }
36
37 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
38 {
39         memset(machine, 0, sizeof(*machine));
40         map_groups__init(&machine->kmaps, machine);
41         RB_CLEAR_NODE(&machine->rb_node);
42         dsos__init(&machine->dsos);
43
44         machine->threads = RB_ROOT;
45         pthread_rwlock_init(&machine->threads_lock, NULL);
46         machine->nr_threads = 0;
47         INIT_LIST_HEAD(&machine->dead_threads);
48         machine->last_match = NULL;
49
50         machine->vdso_info = NULL;
51         machine->env = NULL;
52
53         machine->pid = pid;
54
55         machine->id_hdr_size = 0;
56         machine->kptr_restrict_warned = false;
57         machine->comm_exec = false;
58         machine->kernel_start = 0;
59
60         memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));
61
62         machine->root_dir = strdup(root_dir);
63         if (machine->root_dir == NULL)
64                 return -ENOMEM;
65
66         if (pid != HOST_KERNEL_ID) {
67                 struct thread *thread = machine__findnew_thread(machine, -1,
68                                                                 pid);
69                 char comm[64];
70
71                 if (thread == NULL)
72                         return -ENOMEM;
73
74                 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
75                 thread__set_comm(thread, comm, 0);
76                 thread__put(thread);
77         }
78
79         machine->current_tid = NULL;
80
81         return 0;
82 }
83
84 struct machine *machine__new_host(void)
85 {
86         struct machine *machine = malloc(sizeof(*machine));
87
88         if (machine != NULL) {
89                 machine__init(machine, "", HOST_KERNEL_ID);
90
91                 if (machine__create_kernel_maps(machine) < 0)
92                         goto out_delete;
93         }
94
95         return machine;
96 out_delete:
97         free(machine);
98         return NULL;
99 }
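
/*
 * Example: typical lifecycle of a host machine. This is an illustrative
 * sketch only; the calling context and error handling are abbreviated and
 * hypothetical, not taken from any particular tool.
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine == NULL)
 *		return -ENOMEM;
 *	... resolve samples against machine ...
 *	machine__delete(machine);
 */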
100
101 struct machine *machine__new_kallsyms(void)
102 {
103         struct machine *machine = machine__new_host();
104         /*
105          * FIXME:
106          * 1) MAP__FUNCTION will go away when we stop loading separate maps for
107          *    functions and data objects.
108          * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
109          *    ask to bypass the kcore parsing code, once that code is fixed
110          *    to create a map per module.
111          */
112         if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
113                 machine__delete(machine);
114                 machine = NULL;
115         }
116
117         return machine;
118 }
119
120 static void dsos__purge(struct dsos *dsos)
121 {
122         struct dso *pos, *n;
123
124         pthread_rwlock_wrlock(&dsos->lock);
125
126         list_for_each_entry_safe(pos, n, &dsos->head, node) {
127                 RB_CLEAR_NODE(&pos->rb_node);
128                 pos->root = NULL;
129                 list_del_init(&pos->node);
130                 dso__put(pos);
131         }
132
133         pthread_rwlock_unlock(&dsos->lock);
134 }
135
136 static void dsos__exit(struct dsos *dsos)
137 {
138         dsos__purge(dsos);
139         pthread_rwlock_destroy(&dsos->lock);
140 }
141
142 void machine__delete_threads(struct machine *machine)
143 {
144         struct rb_node *nd;
145
146         pthread_rwlock_wrlock(&machine->threads_lock);
147         nd = rb_first(&machine->threads);
148         while (nd) {
149                 struct thread *t = rb_entry(nd, struct thread, rb_node);
150
151                 nd = rb_next(nd);
152                 __machine__remove_thread(machine, t, false);
153         }
154         pthread_rwlock_unlock(&machine->threads_lock);
155 }
156
157 void machine__exit(struct machine *machine)
158 {
159         if (machine == NULL)
160                 return;
161
162         machine__destroy_kernel_maps(machine);
163         map_groups__exit(&machine->kmaps);
164         dsos__exit(&machine->dsos);
165         machine__exit_vdso(machine);
166         zfree(&machine->root_dir);
167         zfree(&machine->current_tid);
168         pthread_rwlock_destroy(&machine->threads_lock);
169 }
170
171 void machine__delete(struct machine *machine)
172 {
173         if (machine) {
174                 machine__exit(machine);
175                 free(machine);
176         }
177 }
178
179 void machines__init(struct machines *machines)
180 {
181         machine__init(&machines->host, "", HOST_KERNEL_ID);
182         machines->guests = RB_ROOT;
183 }
184
185 void machines__exit(struct machines *machines)
186 {
187         machine__exit(&machines->host);
188         /* XXX exit guest */
189 }
190
191 struct machine *machines__add(struct machines *machines, pid_t pid,
192                               const char *root_dir)
193 {
194         struct rb_node **p = &machines->guests.rb_node;
195         struct rb_node *parent = NULL;
196         struct machine *pos, *machine = malloc(sizeof(*machine));
197
198         if (machine == NULL)
199                 return NULL;
200
201         if (machine__init(machine, root_dir, pid) != 0) {
202                 free(machine);
203                 return NULL;
204         }
205
206         while (*p != NULL) {
207                 parent = *p;
208                 pos = rb_entry(parent, struct machine, rb_node);
209                 if (pid < pos->pid)
210                         p = &(*p)->rb_left;
211                 else
212                         p = &(*p)->rb_right;
213         }
214
215         rb_link_node(&machine->rb_node, parent, p);
216         rb_insert_color(&machine->rb_node, &machines->guests);
217
218         return machine;
219 }
220
221 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
222 {
223         struct rb_node *nd;
224
225         machines->host.comm_exec = comm_exec;
226
227         for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
228                 struct machine *machine = rb_entry(nd, struct machine, rb_node);
229
230                 machine->comm_exec = comm_exec;
231         }
232 }
233
234 struct machine *machines__find(struct machines *machines, pid_t pid)
235 {
236         struct rb_node **p = &machines->guests.rb_node;
237         struct rb_node *parent = NULL;
238         struct machine *machine;
239         struct machine *default_machine = NULL;
240
241         if (pid == HOST_KERNEL_ID)
242                 return &machines->host;
243
244         while (*p != NULL) {
245                 parent = *p;
246                 machine = rb_entry(parent, struct machine, rb_node);
247                 if (pid < machine->pid)
248                         p = &(*p)->rb_left;
249                 else if (pid > machine->pid)
250                         p = &(*p)->rb_right;
251                 else
252                         return machine;
253                 if (!machine->pid)
254                         default_machine = machine;
255         }
256
257         return default_machine;
258 }
259
260 struct machine *machines__findnew(struct machines *machines, pid_t pid)
261 {
262         char path[PATH_MAX];
263         const char *root_dir = "";
264         struct machine *machine = machines__find(machines, pid);
265
266         if (machine && (machine->pid == pid))
267                 goto out;
268
269         if ((pid != HOST_KERNEL_ID) &&
270             (pid != DEFAULT_GUEST_KERNEL_ID) &&
271             (symbol_conf.guestmount)) {
272                 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
273                 if (access(path, R_OK)) {
274                         static struct strlist *seen;
275
276                         if (!seen)
277                                 seen = strlist__new(NULL, NULL);
278
279                         if (!strlist__has_entry(seen, path)) {
280                                 pr_err("Can't access file %s\n", path);
281                                 strlist__add(seen, path);
282                         }
283                         machine = NULL;
284                         goto out;
285                 }
286                 root_dir = path;
287         }
288
289         machine = machines__add(machines, pid, root_dir);
290 out:
291         return machine;
292 }
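
/*
 * Example: the host and any guests are usually handled through a single
 * struct machines; machines__findnew() creates a guest machine on demand.
 * Illustrative sketch only, the guest pid shown is hypothetical.
 *
 *	struct machines machines;
 *	struct machine *guest;
 *
 *	machines__init(&machines);
 *	guest = machines__findnew(&machines, 1234);
 *	...
 *	machines__exit(&machines);
 */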
293
294 void machines__process_guests(struct machines *machines,
295                               machine__process_t process, void *data)
296 {
297         struct rb_node *nd;
298
299         for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
300                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
301                 process(pos, data);
302         }
303 }
304
305 char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
306 {
307         if (machine__is_host(machine))
308                 snprintf(bf, size, "[%s]", "kernel.kallsyms");
309         else if (machine__is_default_guest(machine))
310                 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
311         else {
312                 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
313                          machine->pid);
314         }
315
316         return bf;
317 }
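
/*
 * For reference, the names produced above look like:
 *
 *	host machine                    "[kernel.kallsyms]"
 *	default guest machine           "[guest.kernel.kallsyms]"
 *	guest machine with pid 1234     "[guest.kernel.kallsyms.1234]"
 *
 * (the pid 1234 is only an example)
 */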
318
319 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
320 {
321         struct rb_node *node;
322         struct machine *machine;
323
324         machines->host.id_hdr_size = id_hdr_size;
325
326         for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
327                 machine = rb_entry(node, struct machine, rb_node);
328                 machine->id_hdr_size = id_hdr_size;
329         }
330
331         return;
332 }
333
334 static void machine__update_thread_pid(struct machine *machine,
335                                        struct thread *th, pid_t pid)
336 {
337         struct thread *leader;
338
339         if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
340                 return;
341
342         th->pid_ = pid;
343
344         if (th->pid_ == th->tid)
345                 return;
346
347         leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
348         if (!leader)
349                 goto out_err;
350
351         if (!leader->mg)
352                 leader->mg = map_groups__new(machine);
353
354         if (!leader->mg)
355                 goto out_err;
356
357         if (th->mg == leader->mg)
358                 return;
359
360         if (th->mg) {
361                 /*
362                  * Maps are created from MMAP events which provide the pid and
363                  * tid.  Consequently there should never be any maps on a thread
364                  * with an unknown pid.  Just print an error if there are.
365                  */
366                 if (!map_groups__empty(th->mg))
367                         pr_err("Discarding thread maps for %d:%d\n",
368                                th->pid_, th->tid);
369                 map_groups__put(th->mg);
370         }
371
372         th->mg = map_groups__get(leader->mg);
373 out_put:
374         thread__put(leader);
375         return;
376 out_err:
377         pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
378         goto out_put;
379 }
380
381 /*
382  * The caller must eventually drop the thread->refcnt returned by a
383  * successful lookup or a newly inserted thread.
384  */
385 static struct thread *____machine__findnew_thread(struct machine *machine,
386                                                   pid_t pid, pid_t tid,
387                                                   bool create)
388 {
389         struct rb_node **p = &machine->threads.rb_node;
390         struct rb_node *parent = NULL;
391         struct thread *th;
392
393         /*
394          * Front-end cache - TID lookups come in blocks,
395          * so most of the time we don't have to look up
396          * the full rbtree:
397          */
398         th = machine->last_match;
399         if (th != NULL) {
400                 if (th->tid == tid) {
401                         machine__update_thread_pid(machine, th, pid);
402                         return thread__get(th);
403                 }
404
405                 machine->last_match = NULL;
406         }
407
408         while (*p != NULL) {
409                 parent = *p;
410                 th = rb_entry(parent, struct thread, rb_node);
411
412                 if (th->tid == tid) {
413                         machine->last_match = th;
414                         machine__update_thread_pid(machine, th, pid);
415                         return thread__get(th);
416                 }
417
418                 if (tid < th->tid)
419                         p = &(*p)->rb_left;
420                 else
421                         p = &(*p)->rb_right;
422         }
423
424         if (!create)
425                 return NULL;
426
427         th = thread__new(pid, tid);
428         if (th != NULL) {
429                 rb_link_node(&th->rb_node, parent, p);
430                 rb_insert_color(&th->rb_node, &machine->threads);
431
432                 /*
433                  * We have to initialize map_groups separately
434                  * after rb tree is updated.
435                  *
436                  * The reason is that we call machine__findnew_thread
437                  * within thread__init_map_groups to find the thread
438                  * leader and that would corrupt the rb tree.
439                  */
440                 if (thread__init_map_groups(th, machine)) {
441                         rb_erase_init(&th->rb_node, &machine->threads);
442                         RB_CLEAR_NODE(&th->rb_node);
443                         thread__put(th);
444                         return NULL;
445                 }
446                 /*
447                  * It is now in the rbtree, get a ref
448                  */
449                 thread__get(th);
450                 machine->last_match = th;
451                 ++machine->nr_threads;
452         }
453
454         return th;
455 }
456
457 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
458 {
459         return ____machine__findnew_thread(machine, pid, tid, true);
460 }
461
462 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
463                                        pid_t tid)
464 {
465         struct thread *th;
466
467         pthread_rwlock_wrlock(&machine->threads_lock);
468         th = __machine__findnew_thread(machine, pid, tid);
469         pthread_rwlock_unlock(&machine->threads_lock);
470         return th;
471 }
472
473 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
474                                     pid_t tid)
475 {
476         struct thread *th;
477         pthread_rwlock_rdlock(&machine->threads_lock);
478         th = ____machine__findnew_thread(machine, pid, tid, false);
479         pthread_rwlock_unlock(&machine->threads_lock);
480         return th;
481 }
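
/*
 * Example: reference counting discipline for thread lookups. Both helpers
 * above return a reference that the caller must drop with thread__put();
 * machine__find_thread() is the variant that never creates a new thread.
 * Illustrative sketch, pid/tid are hypothetical.
 *
 *	struct thread *thread = machine__findnew_thread(machine, pid, tid);
 *
 *	if (thread == NULL)
 *		return -1;
 *	... use the thread ...
 *	thread__put(thread);
 */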
482
483 struct comm *machine__thread_exec_comm(struct machine *machine,
484                                        struct thread *thread)
485 {
486         if (machine->comm_exec)
487                 return thread__exec_comm(thread);
488         else
489                 return thread__comm(thread);
490 }
491
492 int machine__process_comm_event(struct machine *machine, union perf_event *event,
493                                 struct perf_sample *sample)
494 {
495         struct thread *thread = machine__findnew_thread(machine,
496                                                         event->comm.pid,
497                                                         event->comm.tid);
498         bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
499         int err = 0;
500
501         if (exec)
502                 machine->comm_exec = true;
503
504         if (dump_trace)
505                 perf_event__fprintf_comm(event, stdout);
506
507         if (thread == NULL ||
508             __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
509                 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
510                 err = -1;
511         }
512
513         thread__put(thread);
514
515         return err;
516 }
517
518 int machine__process_namespaces_event(struct machine *machine,
519                                       union perf_event *event,
520                                       struct perf_sample *sample)
521 {
522         struct thread *thread = machine__findnew_thread(machine,
523                                                         event->namespaces.pid,
524                                                         event->namespaces.tid);
525         int err = 0;
526
527         WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
528                   "\nWARNING: kernel seems to support more namespaces than perf"
529                   " tool.\nTry updating the perf tool.\n\n");
530
531         WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
532                   "\nWARNING: perf tool seems to support more namespaces than"
533                   " the kernel.\nTry updating the kernel.\n\n");
534
535         if (dump_trace)
536                 perf_event__fprintf_namespaces(event, stdout);
537
538         if (thread == NULL ||
539             thread__set_namespaces(thread, sample->time, &event->namespaces)) {
540                 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
541                 err = -1;
542         }
543
544         thread__put(thread);
545
546         return err;
547 }
548
549 int machine__process_lost_event(struct machine *machine __maybe_unused,
550                                 union perf_event *event, struct perf_sample *sample __maybe_unused)
551 {
552         dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
553                     event->lost.id, event->lost.lost);
554         return 0;
555 }
556
557 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
558                                         union perf_event *event, struct perf_sample *sample)
559 {
560         dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
561                     sample->id, event->lost_samples.lost);
562         return 0;
563 }
564
565 static struct dso *machine__findnew_module_dso(struct machine *machine,
566                                                struct kmod_path *m,
567                                                const char *filename)
568 {
569         struct dso *dso;
570
571         pthread_rwlock_wrlock(&machine->dsos.lock);
572
573         dso = __dsos__find(&machine->dsos, m->name, true);
574         if (!dso) {
575                 dso = __dsos__addnew(&machine->dsos, m->name);
576                 if (dso == NULL)
577                         goto out_unlock;
578
579                 dso__set_module_info(dso, m, machine);
580                 dso__set_long_name(dso, strdup(filename), true);
581         }
582
583         dso__get(dso);
584 out_unlock:
585         pthread_rwlock_unlock(&machine->dsos.lock);
586         return dso;
587 }
588
589 int machine__process_aux_event(struct machine *machine __maybe_unused,
590                                union perf_event *event)
591 {
592         if (dump_trace)
593                 perf_event__fprintf_aux(event, stdout);
594         return 0;
595 }
596
597 int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
598                                         union perf_event *event)
599 {
600         if (dump_trace)
601                 perf_event__fprintf_itrace_start(event, stdout);
602         return 0;
603 }
604
605 int machine__process_switch_event(struct machine *machine __maybe_unused,
606                                   union perf_event *event)
607 {
608         if (dump_trace)
609                 perf_event__fprintf_switch(event, stdout);
610         return 0;
611 }
612
613 static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
614 {
615         const char *dup_filename;
616
617         if (!filename || !dso || !dso->long_name)
618                 return;
619         if (dso->long_name[0] != '[')
620                 return;
621         if (!strchr(filename, '/'))
622                 return;
623
624         dup_filename = strdup(filename);
625         if (!dup_filename)
626                 return;
627
628         dso__set_long_name(dso, dup_filename, true);
629 }
630
631 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
632                                         const char *filename)
633 {
634         struct map *map = NULL;
635         struct dso *dso = NULL;
636         struct kmod_path m;
637
638         if (kmod_path__parse_name(&m, filename))
639                 return NULL;
640
641         map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
642                                        m.name);
643         if (map) {
644                 /*
645                  * If the map's dso is an offline module, give dso__load()
646                  * a chance to find the file path of that module by fixing
647                  * long_name.
648                  */
649                 dso__adjust_kmod_long_name(map->dso, filename);
650                 goto out;
651         }
652
653         dso = machine__findnew_module_dso(machine, &m, filename);
654         if (dso == NULL)
655                 goto out;
656
657         map = map__new2(start, dso, MAP__FUNCTION);
658         if (map == NULL)
659                 goto out;
660
661         map_groups__insert(&machine->kmaps, map);
662
663         /* Put the map here because map_groups__insert already got it */
664         map__put(map);
665 out:
666         /* put the dso here, corresponding to machine__findnew_module_dso */
667         dso__put(dso);
668         free(m.name);
669         return map;
670 }
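
/*
 * Example: registering a module map, mirroring what machine__create_module()
 * does further down. Illustrative sketch; the load address, size and .ko
 * path are hypothetical.
 *
 *	struct map *map;
 *
 *	map = machine__findnew_module_map(machine, 0xffffffffa0000000ULL,
 *					  "/lib/modules/4.14.290-gnu1/kernel/fs/ext4/ext4.ko");
 *	if (map == NULL)
 *		return -1;
 *	map->end = map->start + size;
 */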
671
672 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
673 {
674         struct rb_node *nd;
675         size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
676
677         for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
678                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
679                 ret += __dsos__fprintf(&pos->dsos.head, fp);
680         }
681
682         return ret;
683 }
684
685 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
686                                      bool (skip)(struct dso *dso, int parm), int parm)
687 {
688         return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
689 }
690
691 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
692                                      bool (skip)(struct dso *dso, int parm), int parm)
693 {
694         struct rb_node *nd;
695         size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
696
697         for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
698                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
699                 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
700         }
701         return ret;
702 }
703
704 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
705 {
706         int i;
707         size_t printed = 0;
708         struct dso *kdso = machine__kernel_map(machine)->dso;
709
710         if (kdso->has_build_id) {
711                 char filename[PATH_MAX];
712                 if (dso__build_id_filename(kdso, filename, sizeof(filename),
713                                            false))
714                         printed += fprintf(fp, "[0] %s\n", filename);
715         }
716
717         for (i = 0; i < vmlinux_path__nr_entries; ++i)
718                 printed += fprintf(fp, "[%d] %s\n",
719                                    i + kdso->has_build_id, vmlinux_path[i]);
720
721         return printed;
722 }
723
724 size_t machine__fprintf(struct machine *machine, FILE *fp)
725 {
726         size_t ret;
727         struct rb_node *nd;
728
729         pthread_rwlock_rdlock(&machine->threads_lock);
730
731         ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);
732
733         for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
734                 struct thread *pos = rb_entry(nd, struct thread, rb_node);
735
736                 ret += thread__fprintf(pos, fp);
737         }
738
739         pthread_rwlock_unlock(&machine->threads_lock);
740
741         return ret;
742 }
743
744 static struct dso *machine__get_kernel(struct machine *machine)
745 {
746         const char *vmlinux_name = NULL;
747         struct dso *kernel;
748
749         if (machine__is_host(machine)) {
750                 vmlinux_name = symbol_conf.vmlinux_name;
751                 if (!vmlinux_name)
752                         vmlinux_name = DSO__NAME_KALLSYMS;
753
754                 kernel = machine__findnew_kernel(machine, vmlinux_name,
755                                                  "[kernel]", DSO_TYPE_KERNEL);
756         } else {
757                 char bf[PATH_MAX];
758
759                 if (machine__is_default_guest(machine))
760                         vmlinux_name = symbol_conf.default_guest_vmlinux_name;
761                 if (!vmlinux_name)
762                         vmlinux_name = machine__mmap_name(machine, bf,
763                                                           sizeof(bf));
764
765                 kernel = machine__findnew_kernel(machine, vmlinux_name,
766                                                  "[guest.kernel]",
767                                                  DSO_TYPE_GUEST_KERNEL);
768         }
769
770         if (kernel != NULL && (!kernel->has_build_id))
771                 dso__read_running_kernel_build_id(kernel, machine);
772
773         return kernel;
774 }
775
776 struct process_args {
777         u64 start;
778 };
779
780 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
781                                            size_t bufsz)
782 {
783         if (machine__is_default_guest(machine))
784                 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
785         else
786                 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
787 }
788
789 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
790
791 /* Figure out the start address of kernel map from /proc/kallsyms.
792  * Returns the name of the start symbol in *symbol_name. Pass in NULL as
793  * symbol_name if it's not that important.
794  */
795 static int machine__get_running_kernel_start(struct machine *machine,
796                                              const char **symbol_name, u64 *start)
797 {
798         char filename[PATH_MAX];
799         int i, err = -1;
800         const char *name;
801         u64 addr = 0;
802
803         machine__get_kallsyms_filename(machine, filename, PATH_MAX);
804
805         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
806                 return 0;
807
808         for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
809                 err = kallsyms__get_function_start(filename, name, &addr);
810                 if (!err)
811                         break;
812         }
813
814         if (err)
815                 return -1;
816
817         if (symbol_name)
818                 *symbol_name = name;
819
820         *start = addr;
821         return 0;
822 }
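
/*
 * Example call pattern, mirroring machine__create_kernel_maps() further
 * down. Illustrative sketch only.
 *
 *	const char *name = NULL;
 *	u64 addr = 0;
 *
 *	if (!machine__get_running_kernel_start(machine, &name, &addr) && name)
 *		... "name" is one of ref_reloc_sym_names and "addr" is its
 *		    address as read from kallsyms ...
 */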
823
824 /* Kernel-space maps for symbols that are outside the main kernel map and module maps */
825 struct extra_kernel_map {
826         u64 start;
827         u64 end;
828         u64 pgoff;
829 };
830
831 static int machine__create_extra_kernel_map(struct machine *machine,
832                                             struct dso *kernel,
833                                             struct extra_kernel_map *xm)
834 {
835         struct kmap *kmap;
836         struct map *map;
837
838         map = map__new2(xm->start, kernel, MAP__FUNCTION);
839         if (!map)
840                 return -1;
841
842         map->end   = xm->end;
843         map->pgoff = xm->pgoff;
844
845         kmap = map__kmap(map);
846
847         kmap->kmaps = &machine->kmaps;
848
849         map_groups__insert(&machine->kmaps, map);
850
851         pr_debug2("Added extra kernel map %" PRIx64 "-%" PRIx64 "\n",
852                   map->start, map->end);
853
854         map__put(map);
855
856         return 0;
857 }
858
859 static u64 find_entry_trampoline(struct dso *dso)
860 {
861         /* Duplicates are removed so lookup all aliases */
862         const char *syms[] = {
863                 "_entry_trampoline",
864                 "__entry_trampoline_start",
865                 "entry_SYSCALL_64_trampoline",
866         };
867         struct symbol *sym = dso__first_symbol(dso, MAP__FUNCTION);
868         unsigned int i;
869
870         for (; sym; sym = dso__next_symbol(sym)) {
871                 if (sym->binding != STB_GLOBAL)
872                         continue;
873                 for (i = 0; i < ARRAY_SIZE(syms); i++) {
874                         if (!strcmp(sym->name, syms[i]))
875                                 return sym->start;
876                 }
877         }
878
879         return 0;
880 }
881
882 /*
883  * These values can be used for kernels that do not have symbols for the entry
884  * trampolines in kallsyms.
885  */
886 #define X86_64_CPU_ENTRY_AREA_PER_CPU   0xfffffe0000000000ULL
887 #define X86_64_CPU_ENTRY_AREA_SIZE      0x2c000
888 #define X86_64_ENTRY_TRAMPOLINE         0x6000
889
890 /* Map x86_64 PTI entry trampolines */
891 int machine__map_x86_64_entry_trampolines(struct machine *machine,
892                                           struct dso *kernel)
893 {
894         u64 pgoff = find_entry_trampoline(kernel);
895         int nr_cpus_avail, cpu;
896
897         if (!pgoff)
898                 return 0;
899
900         nr_cpus_avail = machine__nr_cpus_avail(machine);
901
902         /* Add a 1 page map for each CPU's entry trampoline */
903         for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
904                 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
905                          cpu * X86_64_CPU_ENTRY_AREA_SIZE +
906                          X86_64_ENTRY_TRAMPOLINE;
907                 struct extra_kernel_map xm = {
908                         .start = va,
909                         .end   = va + page_size,
910                         .pgoff = pgoff,
911                 };
912
913                 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
914                         return -1;
915         }
916
917         return 0;
918 }
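
/*
 * Worked example of the address computation above, assuming the usual
 * 4KiB page_size:
 *
 *	cpu 0: 0xfffffe0000000000 + 0 * 0x2c000 + 0x6000 = 0xfffffe0000006000
 *	cpu 1: 0xfffffe0000000000 + 1 * 0x2c000 + 0x6000 = 0xfffffe0000032000
 *
 * Each map is one page long and all of them share the same pgoff returned
 * by find_entry_trampoline(), since they all alias the same trampoline text.
 */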
919
920 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
921 {
922         int type;
923         u64 start = 0;
924
925         if (machine__get_running_kernel_start(machine, NULL, &start))
926                 return -1;
927
928         /* If the kernel map is being renewed, destroy the previous one */
929         machine__destroy_kernel_maps(machine);
930
931         for (type = 0; type < MAP__NR_TYPES; ++type) {
932                 struct kmap *kmap;
933                 struct map *map;
934
935                 machine->vmlinux_maps[type] = map__new2(start, kernel, type);
936                 if (machine->vmlinux_maps[type] == NULL)
937                         return -1;
938
939                 machine->vmlinux_maps[type]->map_ip =
940                         machine->vmlinux_maps[type]->unmap_ip =
941                                 identity__map_ip;
942                 map = __machine__kernel_map(machine, type);
943                 kmap = map__kmap(map);
944                 if (!kmap)
945                         return -1;
946
947                 kmap->kmaps = &machine->kmaps;
948                 map_groups__insert(&machine->kmaps, map);
949         }
950
951         return 0;
952 }
953
954 void machine__destroy_kernel_maps(struct machine *machine)
955 {
956         int type;
957
958         for (type = 0; type < MAP__NR_TYPES; ++type) {
959                 struct kmap *kmap;
960                 struct map *map = __machine__kernel_map(machine, type);
961
962                 if (map == NULL)
963                         continue;
964
965                 kmap = map__kmap(map);
966                 map_groups__remove(&machine->kmaps, map);
967                 if (kmap && kmap->ref_reloc_sym) {
968                         /*
969                          * ref_reloc_sym is shared among all maps, so free it
970                          * just once, on one of them.
971                          */
972                         if (type == MAP__FUNCTION) {
973                                 zfree((char **)&kmap->ref_reloc_sym->name);
974                                 zfree(&kmap->ref_reloc_sym);
975                         } else
976                                 kmap->ref_reloc_sym = NULL;
977                 }
978
979                 map__put(machine->vmlinux_maps[type]);
980                 machine->vmlinux_maps[type] = NULL;
981         }
982 }
983
984 int machines__create_guest_kernel_maps(struct machines *machines)
985 {
986         int ret = 0;
987         struct dirent **namelist = NULL;
988         int i, items = 0;
989         char path[PATH_MAX];
990         pid_t pid;
991         char *endp;
992
993         if (symbol_conf.default_guest_vmlinux_name ||
994             symbol_conf.default_guest_modules ||
995             symbol_conf.default_guest_kallsyms) {
996                 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
997         }
998
999         if (symbol_conf.guestmount) {
1000                 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1001                 if (items <= 0)
1002                         return -ENOENT;
1003                 for (i = 0; i < items; i++) {
1004                         if (!isdigit(namelist[i]->d_name[0])) {
1005                                 /* Filter out . and .. */
1006                                 continue;
1007                         }
1008                         pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1009                         if ((*endp != '\0') ||
1010                             (endp == namelist[i]->d_name) ||
1011                             (errno == ERANGE)) {
1012                                 pr_debug("invalid directory (%s). Skipping.\n",
1013                                          namelist[i]->d_name);
1014                                 continue;
1015                         }
1016                         sprintf(path, "%s/%s/proc/kallsyms",
1017                                 symbol_conf.guestmount,
1018                                 namelist[i]->d_name);
1019                         ret = access(path, R_OK);
1020                         if (ret) {
1021                                 pr_debug("Can't access file %s\n", path);
1022                                 goto failure;
1023                         }
1024                         machines__create_kernel_maps(machines, pid);
1025                 }
1026 failure:
1027                 free(namelist);
1028         }
1029
1030         return ret;
1031 }
1032
1033 void machines__destroy_kernel_maps(struct machines *machines)
1034 {
1035         struct rb_node *next = rb_first(&machines->guests);
1036
1037         machine__destroy_kernel_maps(&machines->host);
1038
1039         while (next) {
1040                 struct machine *pos = rb_entry(next, struct machine, rb_node);
1041
1042                 next = rb_next(&pos->rb_node);
1043                 rb_erase(&pos->rb_node, &machines->guests);
1044                 machine__delete(pos);
1045         }
1046 }
1047
1048 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1049 {
1050         struct machine *machine = machines__findnew(machines, pid);
1051
1052         if (machine == NULL)
1053                 return -1;
1054
1055         return machine__create_kernel_maps(machine);
1056 }
1057
1058 int __machine__load_kallsyms(struct machine *machine, const char *filename,
1059                              enum map_type type, bool no_kcore)
1060 {
1061         struct map *map = machine__kernel_map(machine);
1062         int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);
1063
1064         if (ret > 0) {
1065                 dso__set_loaded(map->dso, type);
1066                 /*
1067                  * Since /proc/kallsyms will have multiple sections for the
1068                  * kernel, with modules between them, fixup the end of all
1069                  * sections.
1070                  */
1071                 __map_groups__fixup_end(&machine->kmaps, type);
1072         }
1073
1074         return ret;
1075 }
1076
1077 int machine__load_kallsyms(struct machine *machine, const char *filename,
1078                            enum map_type type)
1079 {
1080         return __machine__load_kallsyms(machine, filename, type, false);
1081 }
1082
1083 int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
1084 {
1085         struct map *map = machine__kernel_map(machine);
1086         int ret = dso__load_vmlinux_path(map->dso, map);
1087
1088         if (ret > 0)
1089                 dso__set_loaded(map->dso, type);
1090
1091         return ret;
1092 }
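
/*
 * Example: a typical sequence for getting kernel symbols resolved.
 * Illustrative sketch; which of kallsyms or a vmlinux image is tried first
 * depends on the tool's configuration.
 *
 *	if (machine__create_kernel_maps(machine) < 0)
 *		return -1;
 *	if (machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0 &&
 *	    machine__load_vmlinux_path(machine, MAP__FUNCTION) <= 0)
 *		pr_debug("no kernel symbols found\n");
 */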
1093
1094 static void map_groups__fixup_end(struct map_groups *mg)
1095 {
1096         int i;
1097         for (i = 0; i < MAP__NR_TYPES; ++i)
1098                 __map_groups__fixup_end(mg, i);
1099 }
1100
1101 static char *get_kernel_version(const char *root_dir)
1102 {
1103         char version[PATH_MAX];
1104         FILE *file;
1105         char *name, *tmp;
1106         const char *prefix = "Linux version ";
1107
1108         sprintf(version, "%s/proc/version", root_dir);
1109         file = fopen(version, "r");
1110         if (!file)
1111                 return NULL;
1112
1113         version[0] = '\0';
1114         tmp = fgets(version, sizeof(version), file);
1115         fclose(file);
1116
1117         name = strstr(version, prefix);
1118         if (!name)
1119                 return NULL;
1120         name += strlen(prefix);
1121         tmp = strchr(name, ' ');
1122         if (tmp)
1123                 *tmp = '\0';
1124
1125         return strdup(name);
1126 }
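
/*
 * Example: given a /proc/version line such as
 *
 *	"Linux version 4.14.290-gnu1 (user@host) (gcc ...) #1 SMP ..."
 *
 * this returns a strdup()ed "4.14.290-gnu1", i.e. the token between the
 * "Linux version " prefix and the next space. The sample line above is
 * illustrative only.
 */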
1127
1128 static bool is_kmod_dso(struct dso *dso)
1129 {
1130         return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1131                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1132 }
1133
1134 static int map_groups__set_module_path(struct map_groups *mg, const char *path,
1135                                        struct kmod_path *m)
1136 {
1137         struct map *map;
1138         char *long_name;
1139
1140         map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
1141         if (map == NULL)
1142                 return 0;
1143
1144         long_name = strdup(path);
1145         if (long_name == NULL)
1146                 return -ENOMEM;
1147
1148         dso__set_long_name(map->dso, long_name, true);
1149         dso__kernel_module_get_build_id(map->dso, "");
1150
1151         /*
1152          * The full name may reveal that the kmod is compressed, so
1153          * update the symtab_type if needed.
1154          */
1155         if (m->comp && is_kmod_dso(map->dso))
1156                 map->dso->symtab_type++;
1157
1158         return 0;
1159 }
1160
1161 static int map_groups__set_modules_path_dir(struct map_groups *mg,
1162                                 const char *dir_name, int depth)
1163 {
1164         struct dirent *dent;
1165         DIR *dir = opendir(dir_name);
1166         int ret = 0;
1167
1168         if (!dir) {
1169                 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1170                 return -1;
1171         }
1172
1173         while ((dent = readdir(dir)) != NULL) {
1174                 char path[PATH_MAX];
1175                 struct stat st;
1176
1177                 /* sshfs might return bad dent->d_type, so we have to stat */
1178                 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1179                 if (stat(path, &st))
1180                         continue;
1181
1182                 if (S_ISDIR(st.st_mode)) {
1183                         if (!strcmp(dent->d_name, ".") ||
1184                             !strcmp(dent->d_name, ".."))
1185                                 continue;
1186
1187                         /* Do not follow top-level source and build symlinks */
1188                         if (depth == 0) {
1189                                 if (!strcmp(dent->d_name, "source") ||
1190                                     !strcmp(dent->d_name, "build"))
1191                                         continue;
1192                         }
1193
1194                         ret = map_groups__set_modules_path_dir(mg, path,
1195                                                                depth + 1);
1196                         if (ret < 0)
1197                                 goto out;
1198                 } else {
1199                         struct kmod_path m;
1200
1201                         ret = kmod_path__parse_name(&m, dent->d_name);
1202                         if (ret)
1203                                 goto out;
1204
1205                         if (m.kmod)
1206                                 ret = map_groups__set_module_path(mg, path, &m);
1207
1208                         free(m.name);
1209
1210                         if (ret)
1211                                 goto out;
1212                 }
1213         }
1214
1215 out:
1216         closedir(dir);
1217         return ret;
1218 }
1219
1220 static int machine__set_modules_path(struct machine *machine)
1221 {
1222         char *version;
1223         char modules_path[PATH_MAX];
1224
1225         version = get_kernel_version(machine->root_dir);
1226         if (!version)
1227                 return -1;
1228
1229         snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1230                  machine->root_dir, version);
1231         free(version);
1232
1233         return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1234 }
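
/*
 * Example: with an empty root_dir and the version string from
 * get_kernel_version() this ends up scanning e.g.
 *
 *	/lib/modules/4.14.290-gnu1
 *
 * recursively, pointing each module dso's long_name at its .ko path.
 * The version shown is illustrative.
 */
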
1235 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1236                                 u64 *size __maybe_unused,
1237                                 const char *name __maybe_unused)
1238 {
1239         return 0;
1240 }
1241
1242 static int machine__create_module(void *arg, const char *name, u64 start,
1243                                   u64 size)
1244 {
1245         struct machine *machine = arg;
1246         struct map *map;
1247
1248         if (arch__fix_module_text_start(&start, &size, name) < 0)
1249                 return -1;
1250
1251         map = machine__findnew_module_map(machine, start, name);
1252         if (map == NULL)
1253                 return -1;
1254         map->end = start + size;
1255
1256         dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1257
1258         return 0;
1259 }
1260
1261 static int machine__create_modules(struct machine *machine)
1262 {
1263         const char *modules;
1264         char path[PATH_MAX];
1265
1266         if (machine__is_default_guest(machine)) {
1267                 modules = symbol_conf.default_guest_modules;
1268         } else {
1269                 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1270                 modules = path;
1271         }
1272
1273         if (symbol__restricted_filename(modules, "/proc/modules"))
1274                 return -1;
1275
1276         if (modules__parse(modules, machine, machine__create_module))
1277                 return -1;
1278
1279         if (!machine__set_modules_path(machine))
1280                 return 0;
1281
1282         pr_debug("Problems setting modules path maps, continuing anyway...\n");
1283
1284         return 0;
1285 }
1286
1287 int machine__create_kernel_maps(struct machine *machine)
1288 {
1289         struct dso *kernel = machine__get_kernel(machine);
1290         const char *name = NULL;
1291         u64 addr = 0;
1292         int ret;
1293
1294         if (kernel == NULL)
1295                 return -1;
1296
1297         ret = __machine__create_kernel_maps(machine, kernel);
1298         dso__put(kernel);
1299         if (ret < 0)
1300                 return -1;
1301
1302         if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1303                 if (machine__is_host(machine))
1304                         pr_debug("Problems creating module maps, "
1305                                  "continuing anyway...\n");
1306                 else
1307                         pr_debug("Problems creating module maps for guest %d, "
1308                                  "continuing anyway...\n", machine->pid);
1309         }
1310
1311         /*
1312          * Now that we have all the maps created, just set the ->end of them:
1313          */
1314         map_groups__fixup_end(&machine->kmaps);
1315
1316         if (!machine__get_running_kernel_start(machine, &name, &addr)) {
1317                 if (name &&
1318                     maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
1319                         machine__destroy_kernel_maps(machine);
1320                         return -1;
1321                 }
1322         }
1323
1324         return 0;
1325 }
1326
1327 static void machine__set_kernel_mmap_len(struct machine *machine,
1328                                          union perf_event *event)
1329 {
1330         int i;
1331
1332         for (i = 0; i < MAP__NR_TYPES; i++) {
1333                 machine->vmlinux_maps[i]->start = event->mmap.start;
1334                 machine->vmlinux_maps[i]->end   = (event->mmap.start +
1335                                                    event->mmap.len);
1336                 /*
1337                  * Be a bit paranoid here, some perf.data files came with
1338                  * a zero-sized synthesized MMAP event for the kernel.
1339                  */
1340                 if (machine->vmlinux_maps[i]->end == 0)
1341                         machine->vmlinux_maps[i]->end = ~0ULL;
1342         }
1343 }
1344
1345 static bool machine__uses_kcore(struct machine *machine)
1346 {
1347         struct dso *dso;
1348
1349         list_for_each_entry(dso, &machine->dsos.head, node) {
1350                 if (dso__is_kcore(dso))
1351                         return true;
1352         }
1353
1354         return false;
1355 }
1356
1357 static int machine__process_kernel_mmap_event(struct machine *machine,
1358                                               union perf_event *event)
1359 {
1360         struct map *map;
1361         char kmmap_prefix[PATH_MAX];
1362         enum dso_kernel_type kernel_type;
1363         bool is_kernel_mmap;
1364
1365         /* If we have maps from kcore then we do not need or want any others */
1366         if (machine__uses_kcore(machine))
1367                 return 0;
1368
1369         machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
1370         if (machine__is_host(machine))
1371                 kernel_type = DSO_TYPE_KERNEL;
1372         else
1373                 kernel_type = DSO_TYPE_GUEST_KERNEL;
1374
1375         is_kernel_mmap = memcmp(event->mmap.filename,
1376                                 kmmap_prefix,
1377                                 strlen(kmmap_prefix) - 1) == 0;
1378         if (event->mmap.filename[0] == '/' ||
1379             (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1380                 map = machine__findnew_module_map(machine, event->mmap.start,
1381                                                   event->mmap.filename);
1382                 if (map == NULL)
1383                         goto out_problem;
1384
1385                 map->end = map->start + event->mmap.len;
1386         } else if (is_kernel_mmap) {
1387                 const char *symbol_name = (event->mmap.filename +
1388                                 strlen(kmmap_prefix));
1389                 /*
1390                  * Should be there already, from the build-id table in
1391                  * the header.
1392                  */
1393                 struct dso *kernel = NULL;
1394                 struct dso *dso;
1395
1396                 pthread_rwlock_rdlock(&machine->dsos.lock);
1397
1398                 list_for_each_entry(dso, &machine->dsos.head, node) {
1399
1400                         /*
1401                          * The cpumode passed to is_kernel_module is not the
1402                          * cpumode of *this* event. If we insist on passing
1403                          * correct cpumode to is_kernel_module, we should
1404                          * record the cpumode when adding this dso to the
1405                          * linked list.
1406                          *
1407                          * However, we don't really need to pass the correct
1408                          * cpumode.  We know the correct cpumode must be kernel
1409                          * mode (if not, we should not link it onto kernel_dsos
1410                          * list).
1411                          *
1412                          * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1413                          * is_kernel_module() treats it as a kernel cpumode.
1414                          */
1415
1416                         if (!dso->kernel ||
1417                             is_kernel_module(dso->long_name,
1418                                              PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1419                                 continue;
1420
1421
1422                         kernel = dso;
1423                         break;
1424                 }
1425
1426                 pthread_rwlock_unlock(&machine->dsos.lock);
1427
1428                 if (kernel == NULL)
1429                         kernel = machine__findnew_dso(machine, kmmap_prefix);
1430                 if (kernel == NULL)
1431                         goto out_problem;
1432
1433                 kernel->kernel = kernel_type;
1434                 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1435                         dso__put(kernel);
1436                         goto out_problem;
1437                 }
1438
1439                 if (strstr(kernel->long_name, "vmlinux"))
1440                         dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1441
1442                 machine__set_kernel_mmap_len(machine, event);
1443
1444                 /*
1445                  * Avoid using a zero address (kptr_restrict) for the ref reloc
1446                  * symbol. Effectively having zero here means that at record
1447                  * time /proc/sys/kernel/kptr_restrict was non zero.
1448                  * time /proc/sys/kernel/kptr_restrict was non-zero.
1449                 if (event->mmap.pgoff != 0) {
1450                         maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
1451                                                          symbol_name,
1452                                                          event->mmap.pgoff);
1453                 }
1454
1455                 if (machine__is_default_guest(machine)) {
1456                         /*
1457                          * preload dso of guest kernel and modules
1458                          */
1459                         dso__load(kernel, machine__kernel_map(machine));
1460                 }
1461         }
1462         return 0;
1463 out_problem:
1464         return -1;
1465 }
1466
1467 int machine__process_mmap2_event(struct machine *machine,
1468                                  union perf_event *event,
1469                                  struct perf_sample *sample)
1470 {
1471         struct thread *thread;
1472         struct map *map;
1473         enum map_type type;
1474         int ret = 0;
1475
1476         if (dump_trace)
1477                 perf_event__fprintf_mmap2(event, stdout);
1478
1479         if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1480             sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1481                 ret = machine__process_kernel_mmap_event(machine, event);
1482                 if (ret < 0)
1483                         goto out_problem;
1484                 return 0;
1485         }
1486
1487         thread = machine__findnew_thread(machine, event->mmap2.pid,
1488                                         event->mmap2.tid);
1489         if (thread == NULL)
1490                 goto out_problem;
1491
1492         if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1493                 type = MAP__VARIABLE;
1494         else
1495                 type = MAP__FUNCTION;
1496
1497         map = map__new(machine, event->mmap2.start,
1498                         event->mmap2.len, event->mmap2.pgoff,
1499                         event->mmap2.maj,
1500                         event->mmap2.min, event->mmap2.ino,
1501                         event->mmap2.ino_generation,
1502                         event->mmap2.prot,
1503                         event->mmap2.flags,
1504                         event->mmap2.filename, type, thread);
1505
1506         if (map == NULL)
1507                 goto out_problem_map;
1508
1509         ret = thread__insert_map(thread, map);
1510         if (ret)
1511                 goto out_problem_insert;
1512
1513         thread__put(thread);
1514         map__put(map);
1515         return 0;
1516
1517 out_problem_insert:
1518         map__put(map);
1519 out_problem_map:
1520         thread__put(thread);
1521 out_problem:
1522         dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1523         return 0;
1524 }
1525
1526 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1527                                 struct perf_sample *sample)
1528 {
1529         struct thread *thread;
1530         struct map *map;
1531         enum map_type type;
1532         int ret = 0;
1533
1534         if (dump_trace)
1535                 perf_event__fprintf_mmap(event, stdout);
1536
1537         if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1538             sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1539                 ret = machine__process_kernel_mmap_event(machine, event);
1540                 if (ret < 0)
1541                         goto out_problem;
1542                 return 0;
1543         }
1544
1545         thread = machine__findnew_thread(machine, event->mmap.pid,
1546                                          event->mmap.tid);
1547         if (thread == NULL)
1548                 goto out_problem;
1549
1550         if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1551                 type = MAP__VARIABLE;
1552         else
1553                 type = MAP__FUNCTION;
1554
1555         map = map__new(machine, event->mmap.start,
1556                         event->mmap.len, event->mmap.pgoff,
1557                         0, 0, 0, 0, 0, 0,
1558                         event->mmap.filename,
1559                         type, thread);
1560
1561         if (map == NULL)
1562                 goto out_problem_map;
1563
1564         ret = thread__insert_map(thread, map);
1565         if (ret)
1566                 goto out_problem_insert;
1567
1568         thread__put(thread);
1569         map__put(map);
1570         return 0;
1571
1572 out_problem_insert:
1573         map__put(map);
1574 out_problem_map:
1575         thread__put(thread);
1576 out_problem:
1577         dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1578         return 0;
1579 }
1580
1581 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1582 {
1583         if (machine->last_match == th)
1584                 machine->last_match = NULL;
1585
1586         BUG_ON(refcount_read(&th->refcnt) == 0);
1587         if (lock)
1588                 pthread_rwlock_wrlock(&machine->threads_lock);
1589         rb_erase_init(&th->rb_node, &machine->threads);
1590         RB_CLEAR_NODE(&th->rb_node);
1591         --machine->nr_threads;
1592         /*
1593          * Move it first to the dead_threads list, then drop the reference;
1594          * if this is the last reference, then the thread__delete destructor
1595          * will be called and we will remove it from the dead_threads list.
1596          */
1597         list_add_tail(&th->node, &machine->dead_threads);
1598         if (lock)
1599                 pthread_rwlock_unlock(&machine->threads_lock);
1600         thread__put(th);
1601 }
1602
1603 void machine__remove_thread(struct machine *machine, struct thread *th)
1604 {
1605         return __machine__remove_thread(machine, th, true);
1606 }
1607
1608 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1609                                 struct perf_sample *sample)
1610 {
1611         struct thread *thread = machine__find_thread(machine,
1612                                                      event->fork.pid,
1613                                                      event->fork.tid);
1614         struct thread *parent = machine__findnew_thread(machine,
1615                                                         event->fork.ppid,
1616                                                         event->fork.ptid);
1617         int err = 0;
1618
1619         if (dump_trace)
1620                 perf_event__fprintf_task(event, stdout);
1621
1622         /*
1623          * There may be an existing thread that is not actually the parent,
1624          * either because we are processing events out of order, or because the
1625          * (fork) event that would have removed the thread was lost. Assume the
1626          * latter case and continue on as best we can.
1627          */
1628         if (parent->pid_ != (pid_t)event->fork.ppid) {
1629                 dump_printf("removing erroneous parent thread %d/%d\n",
1630                             parent->pid_, parent->tid);
1631                 machine__remove_thread(machine, parent);
1632                 thread__put(parent);
1633                 parent = machine__findnew_thread(machine, event->fork.ppid,
1634                                                  event->fork.ptid);
1635         }
1636
1637         /* if a thread currently exists for the thread id, remove it */
1638         if (thread != NULL) {
1639                 machine__remove_thread(machine, thread);
1640                 thread__put(thread);
1641         }
1642
1643         thread = machine__findnew_thread(machine, event->fork.pid,
1644                                          event->fork.tid);
1645
1646         if (thread == NULL || parent == NULL ||
1647             thread__fork(thread, parent, sample->time) < 0) {
1648                 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1649                 err = -1;
1650         }
1651         thread__put(thread);
1652         thread__put(parent);
1653
1654         return err;
1655 }
1656
1657 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1658                                 struct perf_sample *sample __maybe_unused)
1659 {
1660         struct thread *thread = machine__find_thread(machine,
1661                                                      event->fork.pid,
1662                                                      event->fork.tid);
1663
1664         if (dump_trace)
1665                 perf_event__fprintf_task(event, stdout);
1666
1667         if (thread != NULL) {
1668                 thread__exited(thread);
1669                 thread__put(thread);
1670         }
1671
1672         return 0;
1673 }
1674
1675 int machine__process_event(struct machine *machine, union perf_event *event,
1676                            struct perf_sample *sample)
1677 {
1678         int ret;
1679
1680         switch (event->header.type) {
1681         case PERF_RECORD_COMM:
1682                 ret = machine__process_comm_event(machine, event, sample); break;
1683         case PERF_RECORD_MMAP:
1684                 ret = machine__process_mmap_event(machine, event, sample); break;
1685         case PERF_RECORD_NAMESPACES:
1686                 ret = machine__process_namespaces_event(machine, event, sample); break;
1687         case PERF_RECORD_MMAP2:
1688                 ret = machine__process_mmap2_event(machine, event, sample); break;
1689         case PERF_RECORD_FORK:
1690                 ret = machine__process_fork_event(machine, event, sample); break;
1691         case PERF_RECORD_EXIT:
1692                 ret = machine__process_exit_event(machine, event, sample); break;
1693         case PERF_RECORD_LOST:
1694                 ret = machine__process_lost_event(machine, event, sample); break;
1695         case PERF_RECORD_AUX:
1696                 ret = machine__process_aux_event(machine, event); break;
1697         case PERF_RECORD_ITRACE_START:
1698                 ret = machine__process_itrace_start_event(machine, event); break;
1699         case PERF_RECORD_LOST_SAMPLES:
1700                 ret = machine__process_lost_samples_event(machine, event, sample); break;
1701         case PERF_RECORD_SWITCH:
1702         case PERF_RECORD_SWITCH_CPU_WIDE:
1703                 ret = machine__process_switch_event(machine, event); break;
1704         default:
1705                 ret = -1;
1706                 break;
1707         }
1708
1709         return ret;
1710 }
1711
1712 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1713 {
1714         if (!regexec(regex, sym->name, 0, NULL, 0))
1715                 return 1;
1716         return 0;
1717 }
1718
1719 static void ip__resolve_ams(struct thread *thread,
1720                             struct addr_map_symbol *ams,
1721                             u64 ip)
1722 {
1723         struct addr_location al;
1724
1725         memset(&al, 0, sizeof(al));
1726         /*
1727          * We cannot use the header.misc hint to determine whether a
1728          * branch stack address is user, kernel, guest, hypervisor.
1729          * Branches may straddle the kernel/user/hypervisor boundaries.
1730          * Thus, we have to try each cpumode in turn until we find a match,
1731          * or else the symbol is unknown.
1732          */
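        /*
         * A sketch of the lookup done below (expected behaviour, not the
         * exact implementation): the helper walks the possible cpumodes
         * (kernel, user, hypervisor, guest) and keeps the first map/symbol
         * that resolves the address.
         */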
1733         thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);
1734
1735         ams->addr = ip;
1736         ams->al_addr = al.addr;
1737         ams->sym = al.sym;
1738         ams->map = al.map;
1739         ams->phys_addr = 0;
1740 }
1741
1742 static void ip__resolve_data(struct thread *thread,
1743                              u8 m, struct addr_map_symbol *ams,
1744                              u64 addr, u64 phys_addr)
1745 {
1746         struct addr_location al;
1747
1748         memset(&al, 0, sizeof(al));
1749
1750         thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
1751         if (al.map == NULL) {
1752                 /*
1753                  * Some shared data regions have the execute bit set, which puts
1754                  * their mapping in the MAP__FUNCTION type array.
1755                  * Check there as a fallback option before dropping the sample.
1756                  */
1757                 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
1758         }
1759
1760         ams->addr = addr;
1761         ams->al_addr = al.addr;
1762         ams->sym = al.sym;
1763         ams->map = al.map;
1764         ams->phys_addr = phys_addr;
1765 }
1766
1767 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1768                                      struct addr_location *al)
1769 {
1770         struct mem_info *mi = zalloc(sizeof(*mi));
1771
1772         if (!mi)
1773                 return NULL;
1774
1775         ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1776         ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
1777                          sample->addr, sample->phys_addr);
1778         mi->data_src.val = sample->data_src;
1779
1780         return mi;
1781 }
1782
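/*
 * Loop information recovered from a branch stack: how many branch entries
 * a detected loop body spanned and the cycles accumulated over those
 * entries (filled in by remove_loops()/save_iterations() below and
 * consumed by add_callchain_ip()).
 */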
1783 struct iterations {
1784         int nr_loop_iter;
1785         u64 cycles;
1786 };
1787
1788 static int add_callchain_ip(struct thread *thread,
1789                             struct callchain_cursor *cursor,
1790                             struct symbol **parent,
1791                             struct addr_location *root_al,
1792                             u8 *cpumode,
1793                             u64 ip,
1794                             bool branch,
1795                             struct branch_flags *flags,
1796                             struct iterations *iter,
1797                             u64 branch_from)
1798 {
1799         struct addr_location al;
1800         int nr_loop_iter = 0;
1801         u64 iter_cycles = 0;
1802
1803         al.filtered = 0;
1804         al.sym = NULL;
1805         if (!cpumode) {
1806                 thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
1807                                                    ip, &al);
1808         } else {
1809                 if (ip >= PERF_CONTEXT_MAX) {
1810                         switch (ip) {
1811                         case PERF_CONTEXT_HV:
1812                                 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
1813                                 break;
1814                         case PERF_CONTEXT_KERNEL:
1815                                 *cpumode = PERF_RECORD_MISC_KERNEL;
1816                                 break;
1817                         case PERF_CONTEXT_USER:
1818                                 *cpumode = PERF_RECORD_MISC_USER;
1819                                 break;
1820                         default:
1821                                 pr_debug("invalid callchain context: "
1822                                          "%"PRId64"\n", (s64) ip);
1823                                 /*
1824                                  * It seems the callchain is corrupted.
1825                                  * Discard all.
1826                                  */
1827                                 callchain_cursor_reset(cursor);
1828                                 return 1;
1829                         }
1830                         return 0;
1831                 }
1832                 thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
1833                                            ip, &al);
1834         }
1835
1836         if (al.sym != NULL) {
1837                 if (perf_hpp_list.parent && !*parent &&
1838                     symbol__match_regex(al.sym, &parent_regex))
1839                         *parent = al.sym;
1840                 else if (have_ignore_callees && root_al &&
1841                   symbol__match_regex(al.sym, &ignore_callees_regex)) {
1842                         /* Treat this symbol as the root,
1843                            forgetting its callees. */
1844                         *root_al = al;
1845                         callchain_cursor_reset(cursor);
1846                 }
1847         }
1848
1849         if (symbol_conf.hide_unresolved && al.sym == NULL)
1850                 return 0;
1851
1852         if (iter) {
1853                 nr_loop_iter = iter->nr_loop_iter;
1854                 iter_cycles = iter->cycles;
1855         }
1856
1857         return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
1858                                        branch, flags, nr_loop_iter,
1859                                        iter_cycles, branch_from);
1860 }
1861
1862 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1863                                            struct addr_location *al)
1864 {
1865         unsigned int i;
1866         const struct branch_stack *bs = sample->branch_stack;
1867         struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
1868
1869         if (!bi)
1870                 return NULL;
1871
1872         for (i = 0; i < bs->nr; i++) {
1873                 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
1874                 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
1875                 bi[i].flags = bs->entries[i].flags;
1876         }
1877         return bi;
1878 }
1879
1880 static void save_iterations(struct iterations *iter,
1881                             struct branch_entry *be, int nr)
1882 {
1883         int i;
1884
1885         iter->nr_loop_iter = nr;
1886         iter->cycles = 0;
1887
1888         for (i = 0; i < nr; i++)
1889                 iter->cycles += be[i].flags.cycles;
1890 }
1891
1892 #define CHASHSZ 127
1893 #define CHASHBITS 7
1894 #define NO_ENTRY 0xff
1895
1896 #define PERF_MAX_BRANCH_DEPTH 127
1897
1898 /* Remove loops. */
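/*
 * Worked example (a sketch with hypothetical addresses): if the captured
 * 'from' addresses read A B C A B C D (nr == 7), the second A B C run
 * repeats the first, so those three entries are dropped.  The result is
 * A B C D (nr == 4), with the iterations slot that ends up at D recording
 * the removed span (off == 3) and the cycles accumulated over the removed
 * entries.
 */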
1899 static int remove_loops(struct branch_entry *l, int nr,
1900                         struct iterations *iter)
1901 {
1902         int i, j, off;
1903         unsigned char chash[CHASHSZ];
1904
1905         memset(chash, NO_ENTRY, sizeof(chash));
1906
1907         BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
1908
1909         for (i = 0; i < nr; i++) {
1910                 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
1911
1912                 /* no collision handling for now */
1913                 if (chash[h] == NO_ENTRY) {
1914                         chash[h] = i;
1915                 } else if (l[chash[h]].from == l[i].from) {
1916                         bool is_loop = true;
1917                         /* check if it is a real loop */
1918                         off = 0;
1919                         for (j = chash[h]; j < i && i + off < nr; j++, off++)
1920                                 if (l[j].from != l[i + off].from) {
1921                                         is_loop = false;
1922                                         break;
1923                                 }
1924                         if (is_loop) {
1925                                 j = nr - (i + off);
1926                                 if (j > 0) {
1927                                         save_iterations(iter + i + off,
1928                                                 l + i, off);
1929
1930                                         memmove(iter + i, iter + i + off,
1931                                                 j * sizeof(*iter));
1932
1933                                         memmove(l + i, l + i + off,
1934                                                 j * sizeof(*l));
1935                                 }
1936
1937                                 nr -= off;
1938                         }
1939                 }
1940         }
1941         return nr;
1942 }
1943
1944 /*
1945  * Resolve LBR callstack chain sample
1946  * Return:
1947  * 1 on success: LBR callchain information was resolved
1948  * 0 if no LBR callchain information is available; the caller should try fp
1949  * negative error code on other errors.
1950  */
1951 static int resolve_lbr_callchain_sample(struct thread *thread,
1952                                         struct callchain_cursor *cursor,
1953                                         struct perf_sample *sample,
1954                                         struct symbol **parent,
1955                                         struct addr_location *root_al,
1956                                         int max_stack)
1957 {
1958         struct ip_callchain *chain = sample->callchain;
1959         int chain_nr = min(max_stack, (int)chain->nr), i;
1960         u8 cpumode = PERF_RECORD_MISC_USER;
1961         u64 ip, branch_from = 0;
1962
1963         for (i = 0; i < chain_nr; i++) {
1964                 if (chain->ips[i] == PERF_CONTEXT_USER)
1965                         break;
1966         }
1967
1968         /* LBR only affects the user callchain */
1969         if (i != chain_nr) {
1970                 struct branch_stack *lbr_stack = sample->branch_stack;
1971                 int lbr_nr = lbr_stack->nr, j, k;
1972                 bool branch;
1973                 struct branch_flags *flags;
1974                 /*
1975                  * The LBR call stack only captures the user call chain.
1976                  * mix_chain_nr is the kernel call chain length plus
1977                  * the LBR user call chain length:
1978                  * i is the number of kernel call chain entries,
1979                  * 1 accounts for PERF_CONTEXT_USER,
1980                  * lbr_nr + 1 is the number of user call chain entries.
1981                  * For details, please refer to the comments
1982                  * in callchain__printf().
1983                  */
1984                 int mix_chain_nr = i + 1 + lbr_nr + 1;
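                /*
                 * Illustrative numbers (hypothetical): with i == 2 kernel
                 * entries before PERF_CONTEXT_USER and lbr_nr == 4 LBR
                 * entries, mix_chain_nr == 2 + 1 + 4 + 1 == 8.
                 */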
1985
1986                 for (j = 0; j < mix_chain_nr; j++) {
1987                         int err;
1988                         branch = false;
1989                         flags = NULL;
1990
1991                         if (callchain_param.order == ORDER_CALLEE) {
1992                                 if (j < i + 1)
1993                                         ip = chain->ips[j];
1994                                 else if (j > i + 1) {
1995                                         k = j - i - 2;
1996                                         ip = lbr_stack->entries[k].from;
1997                                         branch = true;
1998                                         flags = &lbr_stack->entries[k].flags;
1999                                 } else {
2000                                         ip = lbr_stack->entries[0].to;
2001                                         branch = true;
2002                                         flags = &lbr_stack->entries[0].flags;
2003                                         branch_from =
2004                                                 lbr_stack->entries[0].from;
2005                                 }
2006                         } else {
2007                                 if (j < lbr_nr) {
2008                                         k = lbr_nr - j - 1;
2009                                         ip = lbr_stack->entries[k].from;
2010                                         branch = true;
2011                                         flags = &lbr_stack->entries[k].flags;
2012                                 }
2013                                 else if (j > lbr_nr)
2014                                         ip = chain->ips[i + 1 - (j - lbr_nr)];
2015                                 else {
2016                                         ip = lbr_stack->entries[0].to;
2017                                         branch = true;
2018                                         flags = &lbr_stack->entries[0].flags;
2019                                         branch_from =
2020                                                 lbr_stack->entries[0].from;
2021                                 }
2022                         }
2023
2024                         err = add_callchain_ip(thread, cursor, parent,
2025                                                root_al, &cpumode, ip,
2026                                                branch, flags, NULL,
2027                                                branch_from);
2028                         if (err)
2029                                 return (err < 0) ? err : 0;
2030                 }
2031                 return 1;
2032         }
2033
2034         return 0;
2035 }
2036
2037 static int thread__resolve_callchain_sample(struct thread *thread,
2038                                             struct callchain_cursor *cursor,
2039                                             struct perf_evsel *evsel,
2040                                             struct perf_sample *sample,
2041                                             struct symbol **parent,
2042                                             struct addr_location *root_al,
2043                                             int max_stack)
2044 {
2045         struct branch_stack *branch = sample->branch_stack;
2046         struct ip_callchain *chain = sample->callchain;
2047         int chain_nr = 0;
2048         u8 cpumode = PERF_RECORD_MISC_USER;
2049         int i, j, err, nr_entries;
2050         int skip_idx = -1;
2051         int first_call = 0;
2052
2053         if (chain)
2054                 chain_nr = chain->nr;
2055
2056         if (perf_evsel__has_branch_callstack(evsel)) {
2057                 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2058                                                    root_al, max_stack);
2059                 if (err)
2060                         return (err < 0) ? err : 0;
2061         }
2062
2063         /*
2064          * Based on DWARF debug information, some architectures skip
2065          * a callchain entry saved by the kernel.
2066          */
2067         skip_idx = arch_skip_callchain_idx(thread, chain);
2068
2069         /*
2070          * Add branches to call stack for easier browsing. This gives
2071          * more context for a sample than just the callers.
2072          *
2073          * This uses individual histograms of paths compared to the
2074          * aggregated histograms the normal LBR mode uses.
2075          *
2076          * Limitations for now:
2077          * - No extra filters
2078          * - No annotations (should annotate somehow)
2079          */
2080
2081         if (branch && callchain_param.branch_callstack) {
2082                 int nr = min(max_stack, (int)branch->nr);
2083                 struct branch_entry be[nr];
2084                 struct iterations iter[nr];
2085
2086                 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2087                         pr_warning("corrupted branch chain. skipping...\n");
2088                         goto check_calls;
2089                 }
2090
2091                 for (i = 0; i < nr; i++) {
2092                         if (callchain_param.order == ORDER_CALLEE) {
2093                                 be[i] = branch->entries[i];
2094
2095                                 if (chain == NULL)
2096                                         continue;
2097
2098                                 /*
2099                                  * Check for overlap into the callchain.
2100                                  * The return address is one off compared to
2101                  * the branch entry. To adjust for this,
2102                  * assume the calling instruction is no longer
2103                                  * than 8 bytes.
2104                                  */
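                                /*
                                 * Hypothetical numbers: if be[i].from is
                                 * 0x4005f8 and chain->ips[first_call] is
                                 * 0x4005fd, the branch source lies within
                                 * the 8 bytes below the recorded return
                                 * address, so first_call is advanced and
                                 * that callchain entry is not added again.
                                 */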
2105                                 if (i == skip_idx ||
2106                                     chain->ips[first_call] >= PERF_CONTEXT_MAX)
2107                                         first_call++;
2108                                 else if (be[i].from < chain->ips[first_call] &&
2109                                     be[i].from >= chain->ips[first_call] - 8)
2110                                         first_call++;
2111                         } else
2112                                 be[i] = branch->entries[branch->nr - i - 1];
2113                 }
2114
2115                 memset(iter, 0, sizeof(struct iterations) * nr);
2116                 nr = remove_loops(be, nr, iter);
2117
2118                 for (i = 0; i < nr; i++) {
2119                         err = add_callchain_ip(thread, cursor, parent,
2120                                                root_al,
2121                                                NULL, be[i].to,
2122                                                true, &be[i].flags,
2123                                                NULL, be[i].from);
2124
2125                         if (!err)
2126                                 err = add_callchain_ip(thread, cursor, parent, root_al,
2127                                                        NULL, be[i].from,
2128                                                        true, &be[i].flags,
2129                                                        &iter[i], 0);
2130                         if (err == -EINVAL)
2131                                 break;
2132                         if (err)
2133                                 return err;
2134                 }
2135
2136                 if (chain_nr == 0)
2137                         return 0;
2138
2139                 chain_nr -= nr;
2140         }
2141
2142 check_calls:
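        /*
         * Walk the remaining callchain entries in the configured order;
         * context markers (ip >= PERF_CONTEXT_MAX) are not counted towards
         * max_stack, they only update the cpumode in add_callchain_ip().
         */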
2143         for (i = first_call, nr_entries = 0;
2144              i < chain_nr && nr_entries < max_stack; i++) {
2145                 u64 ip;
2146
2147                 if (callchain_param.order == ORDER_CALLEE)
2148                         j = i;
2149                 else
2150                         j = chain->nr - i - 1;
2151
2152 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2153                 if (j == skip_idx)
2154                         continue;
2155 #endif
2156                 ip = chain->ips[j];
2157
2158                 if (ip < PERF_CONTEXT_MAX)
2159                        ++nr_entries;
2160
2161                 err = add_callchain_ip(thread, cursor, parent,
2162                                        root_al, &cpumode, ip,
2163                                        false, NULL, NULL, 0);
2164
2165                 if (err)
2166                         return (err < 0) ? err : 0;
2167         }
2168
2169         return 0;
2170 }
2171
2172 static int unwind_entry(struct unwind_entry *entry, void *arg)
2173 {
2174         struct callchain_cursor *cursor = arg;
2175
2176         if (symbol_conf.hide_unresolved && entry->sym == NULL)
2177                 return 0;
2178         return callchain_cursor_append(cursor, entry->ip,
2179                                        entry->map, entry->sym,
2180                                        false, NULL, 0, 0, 0);
2181 }
2182
2183 static int thread__resolve_callchain_unwind(struct thread *thread,
2184                                             struct callchain_cursor *cursor,
2185                                             struct perf_evsel *evsel,
2186                                             struct perf_sample *sample,
2187                                             int max_stack)
2188 {
2189         /* Can we do DWARF post unwind? */
2190         if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2191               (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
2192                 return 0;
2193
2194         /* Bail out if nothing was captured. */
2195         if ((!sample->user_regs.regs) ||
2196             (!sample->user_stack.size))
2197                 return 0;
2198
2199         return unwind__get_entries(unwind_entry, cursor,
2200                                    thread, sample, max_stack);
2201 }
2202
2203 int thread__resolve_callchain(struct thread *thread,
2204                               struct callchain_cursor *cursor,
2205                               struct perf_evsel *evsel,
2206                               struct perf_sample *sample,
2207                               struct symbol **parent,
2208                               struct addr_location *root_al,
2209                               int max_stack)
2210 {
2211         int ret = 0;
2212
2213         callchain_cursor_reset(&callchain_cursor);
2214
2215         if (callchain_param.order == ORDER_CALLEE) {
2216                 ret = thread__resolve_callchain_sample(thread, cursor,
2217                                                        evsel, sample,
2218                                                        parent, root_al,
2219                                                        max_stack);
2220                 if (ret)
2221                         return ret;
2222                 ret = thread__resolve_callchain_unwind(thread, cursor,
2223                                                        evsel, sample,
2224                                                        max_stack);
2225         } else {
2226                 ret = thread__resolve_callchain_unwind(thread, cursor,
2227                                                        evsel, sample,
2228                                                        max_stack);
2229                 if (ret)
2230                         return ret;
2231                 ret = thread__resolve_callchain_sample(thread, cursor,
2232                                                        evsel, sample,
2233                                                        parent, root_al,
2234                                                        max_stack);
2235         }
2236
2237         return ret;
2238 }
2239
2240 int machine__for_each_thread(struct machine *machine,
2241                              int (*fn)(struct thread *thread, void *p),
2242                              void *priv)
2243 {
2244         struct rb_node *nd;
2245         struct thread *thread;
2246         int rc = 0;
2247
2248         for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
2249                 thread = rb_entry(nd, struct thread, rb_node);
2250                 rc = fn(thread, priv);
2251                 if (rc != 0)
2252                         return rc;
2253         }
2254
2255         list_for_each_entry(thread, &machine->dead_threads, node) {
2256                 rc = fn(thread, priv);
2257                 if (rc != 0)
2258                         return rc;
2259         }
2260         return rc;
2261 }
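
/*
 * Example use (a minimal sketch; count_thread() and the counter are
 * hypothetical, not part of this file):
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */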
2262
2263 int machines__for_each_thread(struct machines *machines,
2264                               int (*fn)(struct thread *thread, void *p),
2265                               void *priv)
2266 {
2267         struct rb_node *nd;
2268         int rc = 0;
2269
2270         rc = machine__for_each_thread(&machines->host, fn, priv);
2271         if (rc != 0)
2272                 return rc;
2273
2274         for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
2275                 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2276
2277                 rc = machine__for_each_thread(machine, fn, priv);
2278                 if (rc != 0)
2279                         return rc;
2280         }
2281         return rc;
2282 }
2283
2284 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2285                                   struct target *target, struct thread_map *threads,
2286                                   perf_event__handler_t process, bool data_mmap,
2287                                   unsigned int proc_map_timeout)
2288 {
2289         if (target__has_task(target))
2290                 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
2291         else if (target__has_cpu(target))
2292                 return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
2293         /* command specified */
2294         return 0;
2295 }
2296
2297 pid_t machine__get_current_tid(struct machine *machine, int cpu)
2298 {
2299         if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
2300                 return -1;
2301
2302         return machine->current_tid[cpu];
2303 }
2304
2305 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2306                              pid_t tid)
2307 {
2308         struct thread *thread;
2309
2310         if (cpu < 0)
2311                 return -EINVAL;
2312
2313         if (!machine->current_tid) {
2314                 int i;
2315
2316                 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
2317                 if (!machine->current_tid)
2318                         return -ENOMEM;
2319                 for (i = 0; i < MAX_NR_CPUS; i++)
2320                         machine->current_tid[i] = -1;
2321         }
2322
2323         if (cpu >= MAX_NR_CPUS) {
2324                 pr_err("Requested CPU %d too large. ", cpu);
2325                 pr_err("Consider raising MAX_NR_CPUS\n");
2326                 return -EINVAL;
2327         }
2328
2329         machine->current_tid[cpu] = tid;
2330
2331         thread = machine__findnew_thread(machine, pid, tid);
2332         if (!thread)
2333                 return -ENOMEM;
2334
2335         thread->cpu = cpu;
2336         thread__put(thread);
2337
2338         return 0;
2339 }
2340
2341 /*
2342  * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2343  * normalized arch is needed.
2344  */
2345 bool machine__is(struct machine *machine, const char *arch)
2346 {
2347         return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2348 }
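/* e.g. machine__is(machine, "x86_64"), as used in machine__get_kernel_start() below. */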
2349
2350 int machine__nr_cpus_avail(struct machine *machine)
2351 {
2352         return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2353 }
2354
2355 int machine__get_kernel_start(struct machine *machine)
2356 {
2357         struct map *map = machine__kernel_map(machine);
2358         int err = 0;
2359
2360         /*
2361          * The only addresses above 2^63 are kernel addresses of a 64-bit
2362          * kernel.  Note that addresses are unsigned so that on a 32-bit system
2363          * all addresses including kernel addresses are less than 2^32.  In
2364          * that case (32-bit system), if the kernel mapping is unknown, all
2365          * addresses will be assumed to be in user space - see
2366          * machine__kernel_ip().
2367          */
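        /*
         * For illustration (hypothetical value): a typical 64-bit kernel
         * text address such as 0xffffffff81000000 is above 1ULL << 63 and
         * is therefore treated as a kernel address by machine__kernel_ip().
         */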
2368         machine->kernel_start = 1ULL << 63;
2369         if (map) {
2370                 err = map__load(map);
2371                 /*
2372                  * On x86_64, PTI entry trampolines are less than the
2373                  * start of kernel text, but still above 2^63. So leave
2374                  * kernel_start = 1ULL << 63 for x86_64.
2375                  */
2376                 if (!err && !machine__is(machine, "x86_64"))
2377                         machine->kernel_start = map->start;
2378         }
2379         return err;
2380 }
2381
2382 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
2383 {
2384         return dsos__findnew(&machine->dsos, filename);
2385 }
2386
2387 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
2388 {
2389         struct machine *machine = vmachine;
2390         struct map *map;
2391         struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);
2392
2393         if (sym == NULL)
2394                 return NULL;
2395
2396         *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
2397         *addrp = map->unmap_ip(map, sym->start);
2398         return sym->name;
2399 }