/* tools/perf/util/session.c (GNU Linux-libre 4.9.309-gnu1) */
#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"
#include "stat.h"

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_sample sample;
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);
        int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        return perf_session__deliver_event(session, event->event, &sample,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!file->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so perf_evlist__sample_id_all is not meaningful here.
         */
        if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
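
/*
 * Illustrative sketch (not part of the original file): a typical reader
 * builds a session around an existing perf.data file, processes it, then
 * tears it down. The field values and the example function name are
 * assumptions made for illustration.
 */
#if 0
static int example__read_perf_data(struct perf_tool *tool)
{
        struct perf_data_file file = {
                .path = "perf.data",
                .mode = PERF_DATA_MODE_READ,
        };
        struct perf_session *session = perf_session__new(&file, false, tool);
        int err;

        if (session == NULL)
                return -1;

        err = perf_session__process_events(session);
        perf_session__delete(session);
        return err;
}
#endif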

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session
                                       __maybe_unused)
{
        dump_printf(": unhandled!\n");
        if (perf_data_file__is_pipe(session->file))
                skipn(perf_data_file__fd(session->file), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
                               union perf_event *event __maybe_unused,
                               struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
                             union perf_event *event __maybe_unused,
                             struct perf_session *perf_session
                             __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *perf_session
                                   __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
}
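
/*
 * Sketch of how a tool is typically wired up (assumed usage, for
 * illustration): a consumer fills in only the callbacks it cares about and
 * lets perf_tool__fill_defaults() plug stubs into the rest. my_sample() is
 * a hypothetical handler, not a function in perf.
 */
#if 0
static struct perf_tool example_tool = {
        .sample         = my_sample,    /* hypothetical sample handler */
        .ordered_events = true,
};
/* ... later: perf_tool__fill_defaults(&example_tool); */
#endif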

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);
        event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}
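
/*
 * Worked example (illustrative): revbyte(0x2c) reverses the bit order of
 * 00101100. The nibble swap yields 0xc2, the bit-pair swap yields 0x38, and
 * the final adjacent-bit swap yields 0x34 (00110100), i.e. 0x2c read back
 * to front.
 */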

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
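
/*
 * Note (illustrative): bswap_safe() keeps the swap within the attr size
 * actually written by the producing perf. For example, a perf.data file
 * written before sample_max_stack existed carries a smaller attr->size, so
 * bswap_field_16(sample_max_stack) above becomes a no-op instead of
 * swapping bytes past the recorded attribute.
 */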

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct cpu_map_mask *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct cpu_map_mask *) data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              struct perf_sample *sample, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}
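
/*
 * Sketch of the assumed reading flow (for illustration only): with
 * ordered_events enabled, each event read from the file is queued rather
 * than delivered, and delivery happens when a PERF_RECORD_FINISHED_ROUND
 * triggers the round flush described above.
 */
#if 0
        /* inside the event reading loop: */
        if (tool->ordered_events)
                err = perf_session__queue_event(session, event, &sample,
                                                file_offset);
        else
                err = perf_session__deliver_event(session, event, &sample,
                                                  tool, file_offset);
        /*
         * On PERF_RECORD_FINISHED_ROUND, process_finished_round() runs
         * ordered_events__flush(oe, OE_FLUSH__ROUND).
         */
#endif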

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack only captures the user call chain:
                 * i is the number of kernel call chain entries,
                 * 1 is for the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored
                 * in the "from" register, while the callee is stored
                 * in the "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union  perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union  perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}
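
/*
 * Example (illustrative): with PERF_SAMPLE_READ and PERF_FORMAT_GROUP, a
 * single PERF_RECORD_SAMPLE carries one sample_read_value per group
 * member, so deliver_sample_group() above fans out one tool->sample()
 * call per value, each resolved to its own evsel via the value id.
 */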

static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux &&
                    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
                        evlist->stats.total_aux_lost += 1;
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        int ret;

        ret = auxtrace__process_event(session, event, sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        int err;

        dump_event(session->evlist, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_EVENT_UPDATE:
                return tool->event_update(tool, event, &session->evlist);
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
                return 0;
1333         case PERF_RECORD_HEADER_TRACING_DATA:
1334                 /* setup for reading amidst mmap */
1335                 lseek(fd, file_offset, SEEK_SET);
1336                 return tool->tracing_data(tool, event, session);
1337         case PERF_RECORD_HEADER_BUILD_ID:
1338                 return tool->build_id(tool, event, session);
1339         case PERF_RECORD_FINISHED_ROUND:
1340                 return tool->finished_round(tool, event, oe);
1341         case PERF_RECORD_ID_INDEX:
1342                 return tool->id_index(tool, event, session);
1343         case PERF_RECORD_AUXTRACE_INFO:
1344                 return tool->auxtrace_info(tool, event, session);
1345         case PERF_RECORD_AUXTRACE:
1346                 /* setup for reading amidst mmap */
1347                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1348                 return tool->auxtrace(tool, event, session);
1349         case PERF_RECORD_AUXTRACE_ERROR:
1350                 perf_session__auxtrace_error_inc(session, event);
1351                 return tool->auxtrace_error(tool, event, session);
1352         case PERF_RECORD_THREAD_MAP:
1353                 return tool->thread_map(tool, event, session);
1354         case PERF_RECORD_CPU_MAP:
1355                 return tool->cpu_map(tool, event, session);
1356         case PERF_RECORD_STAT_CONFIG:
1357                 return tool->stat_config(tool, event, session);
1358         case PERF_RECORD_STAT:
1359                 return tool->stat(tool, event, session);
1360         case PERF_RECORD_STAT_ROUND:
1361                 return tool->stat_round(tool, event, session);
1362         case PERF_RECORD_TIME_CONV:
1363                 session->time_conv = event->time_conv;
1364                 return tool->time_conv(tool, event, session);
1365         default:
1366                 return -EINVAL;
1367         }
1368 }
1369
1370 int perf_session__deliver_synth_event(struct perf_session *session,
1371                                       union perf_event *event,
1372                                       struct perf_sample *sample)
1373 {
1374         struct perf_evlist *evlist = session->evlist;
1375         struct perf_tool *tool = session->tool;
1376
1377         events_stats__inc(&evlist->stats, event->header.type);
1378
1379         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1380                 return perf_session__process_user_event(session, event, 0);
1381
1382         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1383 }
1384
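/* Byte-swap an event recorded with the opposite endianness. */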
1385 static void event_swap(union perf_event *event, bool sample_id_all)
1386 {
1387         perf_event__swap_op swap;
1388
1389         swap = perf_event__swap_ops[event->header.type];
1390         if (swap)
1391                 swap(event, sample_id_all);
1392 }
1393
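/*
 * Fetch the event at 'file_offset' without going through the normal
 * processing loop: use the single mmap directly when possible,
 * otherwise seek and read the header and payload into the caller's
 * buffer, byte-swapping and optionally parsing the sample.
 */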
1394 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1395                              void *buf, size_t buf_sz,
1396                              union perf_event **event_ptr,
1397                              struct perf_sample *sample)
1398 {
1399         union perf_event *event;
1400         size_t hdr_sz, rest;
1401         int fd;
1402
1403         if (session->one_mmap && !session->header.needs_swap) {
1404                 event = file_offset - session->one_mmap_offset +
1405                         session->one_mmap_addr;
1406                 goto out_parse_sample;
1407         }
1408
1409         if (perf_data_file__is_pipe(session->file))
1410                 return -1;
1411
1412         fd = perf_data_file__fd(session->file);
1413         hdr_sz = sizeof(struct perf_event_header);
1414
1415         if (buf_sz < hdr_sz)
1416                 return -1;
1417
1418         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1419             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1420                 return -1;
1421
1422         event = (union perf_event *)buf;
1423
1424         if (session->header.needs_swap)
1425                 perf_event_header__bswap(&event->header);
1426
1427         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1428                 return -1;
1429
1430         buf += hdr_sz;
1431         rest = event->header.size - hdr_sz;
1432
1433         if (readn(fd, buf, rest) != (ssize_t)rest)
1434                 return -1;
1435
1436         if (session->header.needs_swap)
1437                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1438
1439 out_parse_sample:
1440
1441         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1442             perf_evlist__parse_sample(session->evlist, event, sample))
1443                 return -1;
1444
1445         *event_ptr = event;
1446
1447         return 0;
1448 }
1449
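/*
 * Central dispatch for one event: byte-swap if needed, bump the
 * statistics, hand user events to the immediate path, then either
 * queue kernel events for time ordering or deliver them directly.
 */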
1450 static s64 perf_session__process_event(struct perf_session *session,
1451                                        union perf_event *event, u64 file_offset)
1452 {
1453         struct perf_evlist *evlist = session->evlist;
1454         struct perf_tool *tool = session->tool;
1455         struct perf_sample sample;
1456         int ret;
1457
1458         if (session->header.needs_swap)
1459                 event_swap(event, perf_evlist__sample_id_all(evlist));
1460
1461         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1462                 return -EINVAL;
1463
1464         events_stats__inc(&evlist->stats, event->header.type);
1465
1466         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1467                 return perf_session__process_user_event(session, event, file_offset);
1468
1469         /*
1470          * For all kernel events we get the sample data
1471          */
1472         ret = perf_evlist__parse_sample(evlist, event, &sample);
1473         if (ret)
1474                 return ret;
1475
1476         if (tool->ordered_events) {
1477                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1478                 if (ret != -ETIME)
1479                         return ret;
1480         }
1481
1482         return perf_session__deliver_event(session, event, &sample, tool,
1483                                            file_offset);
1484 }
1485
1486 void perf_event_header__bswap(struct perf_event_header *hdr)
1487 {
1488         hdr->type = bswap_32(hdr->type);
1489         hdr->misc = bswap_16(hdr->misc);
1490         hdr->size = bswap_16(hdr->size);
1491 }
1492
1493 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1494 {
1495         return machine__findnew_thread(&session->machines.host, -1, pid);
1496 }
1497
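/*
 * Pre-register the pid 0 "swapper" thread so that samples hitting the
 * idle task can be resolved.
 */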
1498 int perf_session__register_idle_thread(struct perf_session *session)
1499 {
1500         struct thread *thread;
1501         int err = 0;
1502
1503         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1504         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1505                 pr_err("problem inserting idle task.\n");
1506                 err = -1;
1507         }
1508
1509         /* machine__findnew_thread() got the thread, so put it */
1510         thread__put(thread);
1511         return err;
1512 }
1513
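/*
 * Warn about out-of-order events, unless some evsel wrote its ring
 * buffer backwards, in which case unordered events are expected.
 */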
1514 static void
1515 perf_session__warn_order(const struct perf_session *session)
1516 {
1517         const struct ordered_events *oe = &session->ordered_events;
1518         struct perf_evsel *evsel;
1519         bool should_warn = true;
1520
1521         evlist__for_each_entry(session->evlist, evsel) {
1522                 if (evsel->attr.write_backward)
1523                         should_warn = false;
1524         }
1525
1526         if (!should_warn)
1527                 return;
1528         if (oe->nr_unordered_events != 0)
1529                 ui__warning("%u out-of-order events recorded.\n", oe->nr_unordered_events);
1530 }
1531
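/*
 * Summarize, after processing, everything that may have degraded the
 * data: lost chunks and samples, unknown or unprocessable records,
 * invalid callchains, out-of-order events and proc map timeouts.
 */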
1532 static void perf_session__warn_about_errors(const struct perf_session *session)
1533 {
1534         const struct events_stats *stats = &session->evlist->stats;
1535
1536         if (session->tool->lost == perf_event__process_lost &&
1537             stats->nr_events[PERF_RECORD_LOST] != 0) {
1538                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1539                             "Check IO/CPU overload!\n\n",
1540                             stats->nr_events[0],
1541                             stats->nr_events[PERF_RECORD_LOST]);
1542         }
1543
1544         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1545                 double drop_rate;
1546
1547                 drop_rate = (double)stats->total_lost_samples /
1548                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1549                 if (drop_rate > 0.05) {
1550                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1551                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1552                                     drop_rate * 100.0);
1553                 }
1554         }
1555
1556         if (session->tool->aux == perf_event__process_aux &&
1557             stats->total_aux_lost != 0) {
1558                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1559                             stats->total_aux_lost,
1560                             stats->nr_events[PERF_RECORD_AUX]);
1561         }
1562
1563         if (stats->nr_unknown_events != 0) {
1564                 ui__warning("Found %u unknown events!\n\n"
1565                             "Is this an older tool processing a perf.data "
1566                             "file generated by a more recent tool?\n\n"
1567                             "If that is not the case, consider "
1568                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1569                             stats->nr_unknown_events);
1570         }
1571
1572         if (stats->nr_unknown_id != 0) {
1573                 ui__warning("%u samples with id not present in the header\n",
1574                             stats->nr_unknown_id);
1575         }
1576
1577         if (stats->nr_invalid_chains != 0) {
1578                 ui__warning("Found invalid callchains!\n\n"
1579                             "%u out of %u events were discarded for this reason.\n\n"
1580                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1581                             stats->nr_invalid_chains,
1582                             stats->nr_events[PERF_RECORD_SAMPLE]);
1583         }
1584
1585         if (stats->nr_unprocessable_samples != 0) {
1586                 ui__warning("%u unprocessable samples recorded.\n"
1587                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1588                             stats->nr_unprocessable_samples);
1589         }
1590
1591         perf_session__warn_order(session);
1592
1593         events_stats__auxtrace_error_warn(stats);
1594
1595         if (stats->nr_proc_map_timeout != 0) {
1596                 ui__warning("%d map information files for pre-existing threads were\n"
1597                             "not processed; if there are samples for those addresses,\n"
1598                             "they will not be resolved. You may find out which threads\n"
1599                             "these are by running with -v and redirecting the output\n"
1600                             "to a file.\n"
1601                             "Is the time limit to process the proc maps too short?\n"
1602                             "Increase it with --proc-map-timeout.\n",
1603                             stats->nr_proc_map_timeout);
1604         }
1605 }
1606
1607 static int perf_session__flush_thread_stack(struct thread *thread,
1608                                             void *p __maybe_unused)
1609 {
1610         return thread_stack__flush(thread);
1611 }
1612
1613 static int perf_session__flush_thread_stacks(struct perf_session *session)
1614 {
1615         return machines__for_each_thread(&session->machines,
1616                                          perf_session__flush_thread_stack,
1617                                          NULL);
1618 }
1619
1620 volatile int session_done;
1621
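/*
 * A pipe cannot be mmapped or seeked, so read one event at a time into
 * a heap buffer, growing it whenever a larger event shows up.
 */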
1622 static int __perf_session__process_pipe_events(struct perf_session *session)
1623 {
1624         struct ordered_events *oe = &session->ordered_events;
1625         struct perf_tool *tool = session->tool;
1626         int fd = perf_data_file__fd(session->file);
1627         union perf_event *event;
1628         uint32_t size, cur_size = 0;
1629         void *buf = NULL;
1630         s64 skip = 0;
1631         u64 head;
1632         ssize_t err;
1633         void *p;
1634
1635         perf_tool__fill_defaults(tool);
1636
1637         head = 0;
1638         cur_size = sizeof(union perf_event);
1639
1640         buf = malloc(cur_size);
1641         if (!buf)
1642                 return -errno;
1643         ordered_events__set_copy_on_queue(oe, true);
1644 more:
1645         event = buf;
1646         err = readn(fd, event, sizeof(struct perf_event_header));
1647         if (err <= 0) {
1648                 if (err == 0)
1649                         goto done;
1650
1651                 pr_err("failed to read event header\n");
1652                 goto out_err;
1653         }
1654
1655         if (session->header.needs_swap)
1656                 perf_event_header__bswap(&event->header);
1657
1658         size = event->header.size;
1659         if (size < sizeof(struct perf_event_header)) {
1660                 pr_err("bad event header size\n");
1661                 goto out_err;
1662         }
1663
1664         if (size > cur_size) {
1665                 void *new = realloc(buf, size);
1666                 if (!new) {
1667                         pr_err("failed to allocate memory to read event\n");
1668                         goto out_err;
1669                 }
1670                 buf = new;
1671                 cur_size = size;
1672                 event = buf;
1673         }
1674         p = event;
1675         p += sizeof(struct perf_event_header);
1676
1677         if (size > sizeof(struct perf_event_header)) {
1678                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1679                 if (err <= 0) {
1680                         if (err == 0) {
1681                                 pr_err("unexpected end of event stream\n");
1682                                 goto done;
1683                         }
1684
1685                         pr_err("failed to read event data\n");
1686                         goto out_err;
1687                 }
1688         }
1689
1690         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1691                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1692                        head, event->header.size, event->header.type);
1693                 err = -EINVAL;
1694                 goto out_err;
1695         }
1696
1697         head += size;
1698
1699         if (skip > 0)
1700                 head += skip;
1701
1702         if (!session_done())
1703                 goto more;
1704 done:
1705         /* do the final flush for ordered samples */
1706         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1707         if (err)
1708                 goto out_err;
1709         err = auxtrace__flush_events(session, tool);
1710         if (err)
1711                 goto out_err;
1712         err = perf_session__flush_thread_stacks(session);
1713 out_err:
1714         free(buf);
1715         perf_session__warn_about_errors(session);
1716         ordered_events__free(&session->ordered_events);
1717         auxtrace__free_events(session);
1718         return err;
1719 }
1720
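/*
 * Return a pointer to the event at 'head' within the current mmap
 * window, or NULL when the header or payload would cross the end of
 * the window, in which case the caller remaps further into the file.
 */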
1721 static union perf_event *
1722 fetch_mmaped_event(struct perf_session *session,
1723                    u64 head, size_t mmap_size, char *buf)
1724 {
1725         union perf_event *event;
1726
1727         /*
1728          * Ensure we have enough space remaining to read
1729          * the size of the event in the headers.
1730          */
1731         if (head + sizeof(event->header) > mmap_size)
1732                 return NULL;
1733
1734         event = (union perf_event *)(buf + head);
1735
1736         if (session->header.needs_swap)
1737                 perf_event_header__bswap(&event->header);
1738
1739         if (head + event->header.size > mmap_size) {
1740                 /* We're not fetching the event, so swap back again */
1741                 if (session->header.needs_swap)
1742                         perf_event_header__bswap(&event->header);
1743                 return NULL;
1744         }
1745
1746         return event;
1747 }
1748
1749 /*
1750  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1751  * slices. On 32bit we use 32MB.
1752  */
1753 #if BITS_PER_LONG == 64
1754 #define MMAP_SIZE ULLONG_MAX
1755 #define NUM_MMAPS 1
1756 #else
1757 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1758 #define NUM_MMAPS 128
1759 #endif
1760
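/*
 * Process a seekable perf.data file by mmapping it (in one go on
 * 64-bit, in 32MB windows on 32-bit) and walking the events, remapping
 * whenever an event straddles the end of the current window.
 */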
1761 static int __perf_session__process_events(struct perf_session *session,
1762                                           u64 data_offset, u64 data_size,
1763                                           u64 file_size)
1764 {
1765         struct ordered_events *oe = &session->ordered_events;
1766         struct perf_tool *tool = session->tool;
1767         int fd = perf_data_file__fd(session->file);
1768         u64 head, page_offset, file_offset, file_pos, size;
1769         int err, mmap_prot, mmap_flags, map_idx = 0;
1770         size_t  mmap_size;
1771         char *buf, *mmaps[NUM_MMAPS];
1772         union perf_event *event;
1773         struct ui_progress prog;
1774         s64 skip;
1775
1776         perf_tool__fill_defaults(tool);
1777
1778         page_offset = page_size * (data_offset / page_size);
1779         file_offset = page_offset;
1780         head = data_offset - page_offset;
1781
1782         if (data_size == 0)
1783                 goto out;
1784
1785         if (data_offset + data_size < file_size)
1786                 file_size = data_offset + data_size;
1787
1788         ui_progress__init(&prog, file_size, "Processing events...");
1789
1790         mmap_size = MMAP_SIZE;
1791         if (mmap_size > file_size) {
1792                 mmap_size = file_size;
1793                 session->one_mmap = true;
1794         }
1795
1796         memset(mmaps, 0, sizeof(mmaps));
1797
1798         mmap_prot  = PROT_READ;
1799         mmap_flags = MAP_SHARED;
1800
1801         if (session->header.needs_swap) {
1802                 mmap_prot  |= PROT_WRITE;
1803                 mmap_flags = MAP_PRIVATE;
1804         }
1805 remap:
1806         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1807                    file_offset);
1808         if (buf == MAP_FAILED) {
1809                 pr_err("failed to mmap file\n");
1810                 err = -errno;
1811                 goto out_err;
1812         }
1813         mmaps[map_idx] = buf;
1814         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1815         file_pos = file_offset + head;
1816         if (session->one_mmap) {
1817                 session->one_mmap_addr = buf;
1818                 session->one_mmap_offset = file_offset;
1819         }
1820
1821 more:
1822         event = fetch_mmaped_event(session, head, mmap_size, buf);
1823         if (!event) {
1824                 if (mmaps[map_idx]) {
1825                         munmap(mmaps[map_idx], mmap_size);
1826                         mmaps[map_idx] = NULL;
1827                 }
1828
1829                 page_offset = page_size * (head / page_size);
1830                 file_offset += page_offset;
1831                 head -= page_offset;
1832                 goto remap;
1833         }
1834
1835         size = event->header.size;
1836
1837         if (size < sizeof(struct perf_event_header) ||
1838             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1839                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1840                        file_offset + head, event->header.size,
1841                        event->header.type);
1842                 err = -EINVAL;
1843                 goto out_err;
1844         }
1845
1846         if (skip)
1847                 size += skip;
1848
1849         head += size;
1850         file_pos += size;
1851
1852         ui_progress__update(&prog, size);
1853
1854         if (session_done())
1855                 goto out;
1856
1857         if (file_pos < file_size)
1858                 goto more;
1859
1860 out:
1861         /* do the final flush for ordered samples */
1862         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1863         if (err)
1864                 goto out_err;
1865         err = auxtrace__flush_events(session, tool);
1866         if (err)
1867                 goto out_err;
1868         err = perf_session__flush_thread_stacks(session);
1869 out_err:
1870         ui_progress__finish();
1871         perf_session__warn_about_errors(session);
1872         /*
1873          * We may be switching perf.data outputs, so make
1874          * ordered_events reusable.
1875          */
1876         ordered_events__reinit(&session->ordered_events);
1877         auxtrace__free_events(session);
1878         session->one_mmap = false;
1879         return err;
1880 }
1881
1882 int perf_session__process_events(struct perf_session *session)
1883 {
1884         u64 size = perf_data_file__size(session->file);
1885         int err;
1886
1887         if (perf_session__register_idle_thread(session) < 0)
1888                 return -ENOMEM;
1889
1890         if (!perf_data_file__is_pipe(session->file))
1891                 err = __perf_session__process_events(session,
1892                                                      session->header.data_offset,
1893                                                      session->header.data_size, size);
1894         else
1895                 err = __perf_session__process_pipe_events(session);
1896
1897         return err;
1898 }
1899
1900 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1901 {
1902         struct perf_evsel *evsel;
1903
1904         evlist__for_each_entry(session->evlist, evsel) {
1905                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1906                         return true;
1907         }
1908
1909         pr_err("No trace samples to read. Did you call 'perf %s'?\n", msg);
1910         return false;
1911 }
1912
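/*
 * Store the kallsyms reference symbol and its address in every kernel
 * map type so that symbol addresses can later be adjusted for
 * relocated kernels. Anything from a ']' onwards is trimmed from the
 * copied name.
 */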
1913 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1914                                      const char *symbol_name, u64 addr)
1915 {
1916         char *bracket;
1917         enum map_type i;
1918         struct ref_reloc_sym *ref;
1919
1920         ref = zalloc(sizeof(struct ref_reloc_sym));
1921         if (ref == NULL)
1922                 return -ENOMEM;
1923
1924         ref->name = strdup(symbol_name);
1925         if (ref->name == NULL) {
1926                 free(ref);
1927                 return -ENOMEM;
1928         }
1929
1930         bracket = strchr(ref->name, ']');
1931         if (bracket)
1932                 *bracket = '\0';
1933
1934         ref->addr = addr;
1935
1936         for (i = 0; i < MAP__NR_TYPES; ++i) {
1937                 struct kmap *kmap = map__kmap(maps[i]);
1938
1939                 if (!kmap)
1940                         continue;
1941                 kmap->ref_reloc_sym = ref;
1942         }
1943
1944         return 0;
1945 }
1946
1947 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1948 {
1949         return machines__fprintf_dsos(&session->machines, fp);
1950 }
1951
1952 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1953                                           bool (skip)(struct dso *dso, int parm), int parm)
1954 {
1955         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1956 }
1957
1958 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1959 {
1960         size_t ret;
1961         const char *msg = "";
1962
1963         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
1964                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
1965
1966         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
1967
1968         ret += events_stats__fprintf(&session->evlist->stats, fp);
1969         return ret;
1970 }
1971
1972 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1973 {
1974         /*
1975          * FIXME: Here we have to actually print all the machines in this
1976          * session, not just the host...
1977          */
1978         return machine__fprintf(&session->machines.host, fp);
1979 }
1980
1981 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1982                                               unsigned int type)
1983 {
1984         struct perf_evsel *pos;
1985
1986         evlist__for_each_entry(session->evlist, pos) {
1987                 if (pos->attr.type == type)
1988                         return pos;
1989         }
1990         return NULL;
1991 }
1992
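/*
 * Turn a user-supplied CPU list string into a bitmap, first checking
 * that the session's events actually recorded PERF_SAMPLE_CPU and that
 * no requested CPU exceeds MAX_NR_CPUS.
 */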
1993 int perf_session__cpu_bitmap(struct perf_session *session,
1994                              const char *cpu_list, unsigned long *cpu_bitmap)
1995 {
1996         int i, err = -1;
1997         struct cpu_map *map;
1998
1999         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2000                 struct perf_evsel *evsel;
2001
2002                 evsel = perf_session__find_first_evtype(session, i);
2003                 if (!evsel)
2004                         continue;
2005
2006                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2007                         pr_err("File does not contain CPU events. "
2008                                "Remove -c option to proceed.\n");
2009                         return -1;
2010                 }
2011         }
2012
2013         map = cpu_map__new(cpu_list);
2014         if (map == NULL) {
2015                 pr_err("Invalid cpu_list\n");
2016                 return -1;
2017         }
2018
2019         for (i = 0; i < map->nr; i++) {
2020                 int cpu = map->map[i];
2021
2022                 if (cpu >= MAX_NR_CPUS) {
2023                         pr_err("Requested CPU %d too large. "
2024                                "Consider raising MAX_NR_CPUS\n", cpu);
2025                         goto out_delete_map;
2026                 }
2027
2028                 set_bit(cpu, cpu_bitmap);
2029         }
2030
2031         err = 0;
2032
2033 out_delete_map:
2034         cpu_map__put(map);
2035         return err;
2036 }
2037
2038 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2039                                 bool full)
2040 {
2041         struct stat st;
2042         int fd, ret;
2043
2044         if (session == NULL || fp == NULL)
2045                 return;
2046
2047         fd = perf_data_file__fd(session->file);
2048
2049         ret = fstat(fd, &st);
2050         if (ret == -1)
2051                 return;
2052
2053         fprintf(fp, "# ========\n");
2054         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2055         perf_header__fprintf_info(session, fp, full);
2056         fprintf(fp, "# ========\n#\n");
2057 }
2058
2059
2060 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2061                                              const struct perf_evsel_str_handler *assocs,
2062                                              size_t nr_assocs)
2063 {
2064         struct perf_evsel *evsel;
2065         size_t i;
2066         int err;
2067
2068         for (i = 0; i < nr_assocs; i++) {
2069                 /*
2070                  * If we are adding a handler for an event that is
2071                  * not in this session, just ignore it.
2072                  */
2073                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2074                 if (evsel == NULL)
2075                         continue;
2076
2077                 err = -EEXIST;
2078                 if (evsel->handler != NULL)
2079                         goto out;
2080                 evsel->handler = assocs[i].handler;
2081         }
2082
2083         err = 0;
2084 out:
2085         return err;
2086 }
2087
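/*
 * Apply a PERF_RECORD_ID_INDEX event: validate the entry count against
 * the event size, then store each entry's idx, cpu and tid in the
 * perf_sample_id looked up by id.
 */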
2088 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2089                                  union perf_event *event,
2090                                  struct perf_session *session)
2091 {
2092         struct perf_evlist *evlist = session->evlist;
2093         struct id_index_event *ie = &event->id_index;
2094         size_t i, nr, max_nr;
2095
2096         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2097                  sizeof(struct id_index_entry);
2098         nr = ie->nr;
2099         if (nr > max_nr)
2100                 return -EINVAL;
2101
2102         if (dump_trace)
2103                 fprintf(stdout, " nr: %zu\n", nr);
2104
2105         for (i = 0; i < nr; i++) {
2106                 struct id_index_entry *e = &ie->entries[i];
2107                 struct perf_sample_id *sid;
2108
2109                 if (dump_trace) {
2110                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2111                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2112                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2113                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2114                 }
2115
2116                 sid = perf_evlist__id2sid(evlist, e->id);
2117                 if (!sid)
2118                         return -ENOENT;
2119                 sid->idx = e->idx;
2120                 sid->cpu = e->cpu;
2121                 sid->tid = e->tid;
2122         }
2123         return 0;
2124 }
2125
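/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every sample id in
 * the evlist, flushing a full-sized event to 'process' whenever the
 * entry array (bounded by the u16 header size) fills up.
 */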
2126 int perf_event__synthesize_id_index(struct perf_tool *tool,
2127                                     perf_event__handler_t process,
2128                                     struct perf_evlist *evlist,
2129                                     struct machine *machine)
2130 {
2131         union perf_event *ev;
2132         struct perf_evsel *evsel;
2133         size_t nr = 0, i = 0, sz, max_nr, n;
2134         int err;
2135
2136         pr_debug2("Synthesizing id index\n");
2137
2138         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2139                  sizeof(struct id_index_entry);
2140
2141         evlist__for_each_entry(evlist, evsel)
2142                 nr += evsel->ids;
2143
2144         n = nr > max_nr ? max_nr : nr;
2145         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2146         ev = zalloc(sz);
2147         if (!ev)
2148                 return -ENOMEM;
2149
2150         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2151         ev->id_index.header.size = sz;
2152         ev->id_index.nr = n;
2153
2154         evlist__for_each_entry(evlist, evsel) {
2155                 u32 j;
2156
2157                 for (j = 0; j < evsel->ids; j++) {
2158                         struct id_index_entry *e;
2159                         struct perf_sample_id *sid;
2160
2161                         if (i >= n) {
2162                                 err = process(tool, ev, NULL, machine);
2163                                 if (err)
2164                                         goto out_err;
2165                                 nr -= n;
2166                                 i = 0;
2167                         }
2168
2169                         e = &ev->id_index.entries[i++];
2170
2171                         e->id = evsel->id[j];
2172
2173                         sid = perf_evlist__id2sid(evlist, e->id);
2174                         if (!sid) {
2175                                 free(ev);
2176                                 return -ENOENT;
2177                         }
2178
2179                         e->idx = sid->idx;
2180                         e->cpu = sid->cpu;
2181                         e->tid = sid->tid;
2182                 }
2183         }
2184
2185         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2186         ev->id_index.header.size = sz;
2187         ev->id_index.nr = nr;
2188
2189         err = process(tool, ev, NULL, machine);
2190 out_err:
2191         free(ev);
2192
2193         return err;
2194 }