3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
/*
 * Print the gutter that precedes a callchain line: one space plus
 * @left_margin additional spaces.  Returns the number of bytes written.
 * NOTE(review): some lines of this function are elided in this excerpt
 * (declarations, return, braces).
 */
9 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
12 int ret = fprintf(fp, " ");
14 for (i = 0; i < left_margin; i++)
15 ret += fprintf(fp, " ");
/*
 * Print one separator line of the ASCII callchain graph: a '|' column for
 * every bit set in @depth_mask (branches still open at that depth), padding
 * with spaces where the bit is clear, then a trailing newline.
 * Returns the number of bytes written.
 * NOTE(review): some lines are elided in this excerpt.
 */
20 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
24 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
26 for (i = 0; i < depth; i++)
27 if (depth_mask & (1 << i))
28 ret += fprintf(fp, "| ");
/* this column's branch has ended: pad instead of drawing a pipe */
30 ret += fprintf(fp, " ");
32 ret += fprintf(fp, "\n");
/*
 * Print a single entry line of the callchain graph: the '|' columns for the
 * still-open ancestor branches, then - only when this entry heads a new
 * branch (@period == 0) and we are at the innermost column - an inline
 * "--<value>--" marker rendered by callchain_node__fprintf_value(), and
 * finally the resolved symbol name of @chain.
 * NOTE(review): some lines are elided in this excerpt.
 */
37 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
38 struct callchain_list *chain,
39 int depth, int depth_mask, int period,
40 u64 total_samples, int left_margin)
46 ret += callchain__fprintf_left_margin(fp, left_margin);
47 for (i = 0; i < depth; i++) {
48 if (depth_mask & (1 << i))
49 ret += fprintf(fp, "|");
51 ret += fprintf(fp, " ");
/* branch head: print the node's value between "--" markers */
52 if (!period && i == depth - 1) {
53 ret += fprintf(fp, "--");
54 ret += callchain_node__fprintf_value(node, fp, total_samples);
55 ret += fprintf(fp, "--");
57 ret += fprintf(fp, "%s", " ");
/* symbol name (or raw address) formatted into the local buffer bf */
59 fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
/*
 * Fake "[...]" symbol and callchain entry used by the graph output to
 * represent children hits that were filtered out (see init_rem_hits()).
 */
64 static struct symbol *rem_sq_bracket;
65 static struct callchain_list rem_hits;
/*
 * Build the fake "[...]" symbol used for remaining (filtered-out) hits.
 * The "+ 6" sizes space for the string "[...]" plus its NUL terminator,
 * written into rem_sq_bracket->name - presumably the symbol's name storage
 * trails the struct (TODO confirm against struct symbol's definition).
 * On allocation failure only a message is printed; callers presumably
 * tolerate rem_sq_bracket == NULL (elided lines - confirm).
 */
67 static void init_rem_hits(void)
69 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
70 if (!rem_sq_bracket) {
71 fprintf(stderr, "Not enough memory to display remaining hits\n");
75 strcpy(rem_sq_bracket->name, "[...]");
76 rem_hits.ms.sym = rem_sq_bracket;
/*
 * Recursively print the children of @root as an ASCII callchain graph.
 * @depth_mask tracks which ancestor columns still need a '|' drawn.  In
 * CHAIN_GRAPH_REL mode, hits of children that are not printed are
 * accumulated in @remaining and summarized at the end as a "[...]" pseudo
 * entry (rem_hits).  Output is capped by callchain_param.print_limit.
 * NOTE(review): a number of lines (loop header, blank lines, closing
 * braces) are elided in this excerpt.
 */
79 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
80 u64 total_samples, int depth,
81 int depth_mask, int left_margin)
83 struct rb_node *node, *next;
84 struct callchain_node *child = NULL;
85 struct callchain_list *chain;
86 int new_depth_mask = depth_mask;
90 uint entries_printed = 0;
93 remaining = total_samples;
95 node = rb_first(root);
100 child = rb_entry(node, struct callchain_node, rb_node);
101 cumul = callchain_cumul_hits(child);
/* track cumulative event counts too, for CCVAL_COUNT remainder math below */
103 cumul_count += callchain_cumul_counts(child);
106 * The depth mask manages the output of pipes that show
107 * the depth. We don't want to keep the pipes of the current
108 * level for the last child of this depth.
109 * Except if we have remaining filtered hits. They will
110 * supersede the last child.
112 next = rb_next(node);
113 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
114 new_depth_mask &= ~(1 << (depth - 1));
117 * But we keep the older depth mask for the line separator
118 * to keep the level link until we reach the last child.
120 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
123 list_for_each_entry(chain, &child->val, list) {
124 ret += ipchain__fprintf_graph(fp, child, chain, depth,
/* in relative mode, children percentages are taken against this subtree */
130 if (callchain_param.mode == CHAIN_GRAPH_REL)
131 new_total = child->children_hit;
133 new_total = total_samples;
135 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
137 new_depth_mask | (1 << depth),
140 if (++entries_printed == callchain_param.print_limit)
/* summarize hits not shown above as a trailing "[...]" entry */
144 if (callchain_param.mode == CHAIN_GRAPH_REL &&
145 remaining && remaining != total_samples) {
146 struct callchain_node rem_node = {
153 if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
154 rem_node.count = child->parent->children_count - cumul_count;
155 if (rem_node.count <= 0)
159 new_depth_mask &= ~(1 << (depth - 1));
160 ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
161 new_depth_mask, 0, total_samples,
169 * If there is one single callchain root, don't bother printing
170 * its percentage (100 % in fractal mode and the same percentage
171 * as the hist in graph mode). This also avoids one level of column.
173 * However, when a percent limit is applied, it's possible that a single
174 * callchain node has a different (non-100% in fractal mode) percentage.
/*
 * Return true when the (single) root's percentage must still be printed,
 * i.e. its cumulated hits differ from the parent's samples.
 * NOTE(review): guard lines (e.g. the rb_next() sibling check) appear to
 * be elided in this excerpt.
 */
176 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
178 struct callchain_node *cnode;
183 cnode = rb_entry(node, struct callchain_node, rb_node);
184 return callchain_cumul_hits(cnode) != parent_samples;
/*
 * Top-level graph printer for one hist entry's sorted callchain.  When the
 * single root needs no percentage line (need_percent_display() false), its
 * own entries are printed inline - connected with "|" / "---" markers - and
 * the recursion continues from that root's children; otherwise the whole
 * tree is handed to __callchain__fprintf_graph().
 * NOTE(review): some lines are elided in this excerpt.
 */
187 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
188 u64 total_samples, u64 parent_samples,
191 struct callchain_node *cnode;
192 struct callchain_list *chain;
193 u32 entries_printed = 0;
194 bool printed = false;
195 struct rb_node *node;
200 node = rb_first(root);
201 if (node && !need_percent_display(node, parent_samples)) {
202 cnode = rb_entry(node, struct callchain_node, rb_node);
203 list_for_each_entry(chain, &cnode->val, list) {
205 * If we sort by symbol, the first entry is the same as
206 * the symbol. No need to print it otherwise it appears as
209 if (!i++ && field_order == NULL &&
210 sort_order && !prefixcmp(sort_order, "sym"))
/* first printed entry gets the "|" / "---" connector prefix */
213 ret += callchain__fprintf_left_margin(fp, left_margin);
214 ret += fprintf(fp, "|\n");
215 ret += callchain__fprintf_left_margin(fp, left_margin);
216 ret += fprintf(fp, "---");
220 ret += callchain__fprintf_left_margin(fp, left_margin);
222 ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
225 if (++entries_printed == callchain_param.print_limit)
/* the inlined root was consumed: recurse from its children */
228 root = &cnode->rb_root;
231 if (callchain_param.mode == CHAIN_GRAPH_REL)
232 total_samples = parent_samples;
234 ret += __callchain__fprintf_graph(fp, root, total_samples,
237 /* do not add a blank line if it printed nothing */
238 ret += fprintf(fp, "\n");
/*
 * Flat-mode helper: recurse to @node's parent first so ancestors print
 * before descendants, then print each entry of @node itself, skipping
 * PERF_CONTEXT_MAX context-marker pseudo addresses.
 * NOTE(review): some lines are elided in this excerpt.
 */
244 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
247 struct callchain_list *chain;
254 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
257 list_for_each_entry(chain, &node->val, list) {
258 if (chain->ip >= PERF_CONTEXT_MAX)
260 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
261 bf, sizeof(bf), false));
/*
 * Flat-mode callchain printer: walk the rb tree of chains, printing each
 * chain's value followed by its full (root-first) path, one symbol per
 * line.  Output is capped by callchain_param.print_limit.
 * NOTE(review): some lines are elided in this excerpt.
 */
267 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
271 u32 entries_printed = 0;
272 struct callchain_node *chain;
273 struct rb_node *rb_node = rb_first(tree);
276 chain = rb_entry(rb_node, struct callchain_node, rb_node);
278 ret += fprintf(fp, " ");
279 ret += callchain_node__fprintf_value(chain, fp, total_samples);
280 ret += fprintf(fp, "\n");
281 ret += __callchain__fprintf_flat(fp, chain, total_samples);
282 ret += fprintf(fp, "\n");
283 if (++entries_printed == callchain_param.print_limit)
286 rb_node = rb_next(rb_node);
/*
 * Folded-mode helper: print @node's ancestry first (recursion on ->parent),
 * then @node's own entries on the same line, joined by the configured field
 * separator (defaults to ";").  PERF_CONTEXT_MAX markers are skipped.
 * NOTE(review): some lines (including the "first" flag handling) are elided
 * in this excerpt.
 */
292 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
294 const char *sep = symbol_conf.field_sep ?: ";";
295 struct callchain_list *chain;
303 ret += __callchain__fprintf_folded(fp, node->parent);
306 list_for_each_entry(chain, &node->val, list) {
307 if (chain->ip >= PERF_CONTEXT_MAX)
309 ret += fprintf(fp, "%s%s", first ? "" : sep,
310 callchain_list__sym_name(chain,
311 bf, sizeof(bf), false));
/*
 * Folded-mode callchain printer: one line per chain, "value path" where the
 * path is the separator-joined symbol list.  Output is capped by
 * callchain_param.print_limit.
 * NOTE(review): some lines are elided in this excerpt.
 */
318 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
322 u32 entries_printed = 0;
323 struct callchain_node *chain;
324 struct rb_node *rb_node = rb_first(tree);
328 chain = rb_entry(rb_node, struct callchain_node, rb_node);
330 ret += callchain_node__fprintf_value(chain, fp, total_samples);
331 ret += fprintf(fp, " ");
332 ret += __callchain__fprintf_folded(fp, chain);
333 ret += fprintf(fp, "\n");
334 if (++entries_printed == callchain_param.print_limit)
337 rb_node = rb_next(rb_node);
/*
 * Dispatch printing of @he's sorted callchain according to the configured
 * callchain mode (graph-relative, graph-absolute, flat, folded).  The
 * parent samples baseline is the entry's period, or its accumulated period
 * when children cumulation is enabled.
 * NOTE(review): the switch's CHAIN_FLAT/CHAIN_FOLDED/CHAIN_NONE case labels
 * are elided in this excerpt; only their return statements are visible.
 */
343 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
344 u64 total_samples, int left_margin,
347 u64 parent_samples = he->stat.period;
349 if (symbol_conf.cumulate_callchain)
350 parent_samples = he->stat_acc->period;
352 switch (callchain_param.mode) {
353 case CHAIN_GRAPH_REL:
354 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
355 parent_samples, left_margin);
357 case CHAIN_GRAPH_ABS:
358 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
359 parent_samples, left_margin);
362 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
365 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
370 pr_err("Bad callchain mode\n");
/*
 * Format one hist entry's columns into hpp->buf using the formats of
 * @hpp_list, separated by symbol_conf.field_sep (or a space).  Colored
 * output is used when supported and the format provides it.  Returns the
 * number of bytes written into the buffer.
 * NOTE(review): some lines (the "first" flag, loop closers, blank lines)
 * are elided in this excerpt.
 */
376 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
377 struct perf_hpp_list *hpp_list)
379 const char *sep = symbol_conf.field_sep;
380 struct perf_hpp_fmt *fmt;
381 char *start = hpp->buf;
/* honor --exclude-other: entries without a parent produce no output */
385 if (symbol_conf.exclude_other && !he->parent)
388 perf_hpp_list__for_each_format(hpp_list, fmt) {
389 if (perf_hpp__should_skip(fmt, he->hists))
393 * If there's no field_sep, we still need
394 * to display initial ' '.
396 if (!sep || !first) {
397 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
398 advance_hpp(hpp, ret);
402 if (perf_hpp__use_color() && fmt->color)
403 ret = fmt->color(fmt, hpp, he);
405 ret = fmt->entry(fmt, hpp, he);
407 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
408 advance_hpp(hpp, ret);
/* bytes written == distance the hpp cursor advanced */
411 return hpp->buf - start;
/* Convenience wrapper: format @he with its own hists' hpp format list. */
414 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
416 return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: indent by the
 * entry's depth, emit the overhead columns from the first hpp_list_node,
 * pad out the remaining hierarchy levels, then print the entry's own sort
 * column left-aligned, followed by its callchain for leaf entries.
 * Returns the number of characters printed to @fp.
 * NOTE(review): some lines (blank lines, loop/if closers, the "first" flag)
 * are elided in this excerpt.
 */
419 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
420 struct perf_hpp *hpp,
424 const char *sep = symbol_conf.field_sep;
425 struct perf_hpp_fmt *fmt;
426 struct perf_hpp_list_node *fmt_node;
427 char *buf = hpp->buf;
428 size_t size = hpp->size;
429 int ret, printed = 0;
/* honor --exclude-other: entries without a parent produce no output */
432 if (symbol_conf.exclude_other && !he->parent)
435 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
436 advance_hpp(hpp, ret);
438 /* the first hpp_list_node is for overhead columns */
439 fmt_node = list_first_entry(&hists->hpp_formats,
440 struct perf_hpp_list_node, list);
441 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
443 * If there's no field_sep, we still need
444 * to display initial ' '.
446 if (!sep || !first) {
447 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
448 advance_hpp(hpp, ret);
452 if (perf_hpp__use_color() && fmt->color)
453 ret = fmt->color(fmt, hpp, he);
455 ret = fmt->entry(fmt, hpp, he);
457 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
458 advance_hpp(hpp, ret);
/* pad across the hierarchy levels this entry does not occupy */
462 ret = scnprintf(hpp->buf, hpp->size, "%*s",
463 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
464 advance_hpp(hpp, ret);
466 printed += fprintf(fp, "%s", buf);
468 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
473 * No need to call hist_entry__snprintf_alignment() since this
474 * fmt is always the last column in the hierarchy mode.
476 if (perf_hpp__use_color() && fmt->color)
477 fmt->color(fmt, hpp, he);
479 fmt->entry(fmt, hpp, he);
482 * dynamic entries are right-aligned but we want left-aligned
483 * in the hierarchy mode
485 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
487 printed += putc('\n', fp);
489 if (symbol_conf.use_callchain && he->leaf) {
490 u64 total = hists__total_period(hists);
492 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
/*
 * Print one hist entry to @fp: format it into the caller-provided buffer
 * @bf (clamped to min(size, bfsz)) and emit it, appending the callchain
 * when enabled.  Hierarchy mode is delegated to
 * hist_entry__hierarchy_fprintf().
 * NOTE(review): some lines (hpp initializer fields, the use_callchain
 * check guarding the callchain print) are elided in this excerpt.
 */
500 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
501 char *bf, size_t bfsz, FILE *fp,
505 struct perf_hpp hpp = {
509 struct hists *hists = he->hists;
510 u64 total_period = hists->stats.total_period;
/* size == 0 means "use the whole buffer"; never exceed bfsz */
512 if (size == 0 || size > bfsz)
513 size = hpp.size = bfsz;
515 if (symbol_conf.report_hierarchy)
516 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
518 hist_entry__snprintf(he, &hpp);
520 ret = fprintf(fp, "%s\n", bf);
523 ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
/*
 * Print (indent - 2) levels worth of @line characters as hierarchy indent.
 * Does nothing when a field separator is in use or the indent is too small
 * (the elided branch presumably returns 0 - TODO confirm).
 */
528 static int print_hierarchy_indent(const char *sep, int indent,
529 const char *line, FILE *fp)
531 if (sep != NULL || indent < 2)
534 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
/*
 * Print the column headers for hierarchy (--hierarchy) mode: the overhead
 * column headers first, then all sort-key headers combined on one line
 * joined with " / " (multiple formats within a level joined with "+"),
 * followed by a line of dots underlining the headers.
 * NOTE(review): many lines (loop closers, "first_*" flag handling, blank
 * lines, the return) are elided in this excerpt.
 */
537 static int hists__fprintf_hierarchy_headers(struct hists *hists,
538 struct perf_hpp *hpp, FILE *fp)
540 bool first_node, first_col;
544 unsigned header_width = 0;
545 struct perf_hpp_fmt *fmt;
546 struct perf_hpp_list_node *fmt_node;
547 const char *sep = symbol_conf.field_sep;
549 indent = hists->nr_hpp_node;
551 /* preserve max indent depth for column headers */
552 print_hierarchy_indent(sep, indent, spaces, fp);
554 /* the first hpp_list_node is for overhead columns */
555 fmt_node = list_first_entry(&hists->hpp_formats,
556 struct perf_hpp_list_node, list);
558 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
559 fmt->header(fmt, hpp, hists, 0, NULL);
560 fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
563 /* combine sort headers with ' / ' */
565 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
567 header_width += fprintf(fp, " / ");
571 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
572 if (perf_hpp__should_skip(fmt, hists))
576 header_width += fprintf(fp, "+");
579 fmt->header(fmt, hpp, hists, 0, NULL);
581 header_width += fprintf(fp, "%s", trim(hpp->buf));
587 /* preserve max indent depth for initial dots */
588 print_hierarchy_indent(sep, indent, dots, fp);
590 /* the first hpp_list_node is for overhead columns */
591 fmt_node = list_first_entry(&hists->hpp_formats,
592 struct perf_hpp_list_node, list);
595 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
597 fprintf(fp, "%s", sep ?: "..");
/* underline each overhead column with dots of its own width */
600 width = fmt->width(fmt, hpp, hists);
601 fprintf(fp, "%.*s", width, dots);
/* compute the widest combined sort-header to size the final dot run */
605 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
607 width = depth * HIERARCHY_INDENT;
609 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
610 if (perf_hpp__should_skip(fmt, hists))
614 width++; /* for '+' sign between column header */
617 width += fmt->width(fmt, hpp, hists);
620 if (width > header_width)
621 header_width = width;
626 fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
628 fprintf(fp, "\n#\n");
/*
 * Print header line number @line (elided parameter - confirm) for the
 * standard (non-hierarchy) output: each format's header separated by the
 * field separator or a space.
 * NOTE(review): some lines (skip handling, "first" flag, closers) are
 * elided in this excerpt.
 */
633 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
636 struct perf_hpp_fmt *fmt;
637 const char *sep = symbol_conf.field_sep;
641 hists__for_each_format(hists, fmt) {
642 if (perf_hpp__should_skip(fmt, hists))
646 fprintf(fp, "%s", sep ?: " ");
650 fmt->header(fmt, hpp, hists, line, &span);
653 fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard (non-hierarchy) column headers: one fprintf_line() per
 * configured header line, then - unless a field separator is set (early
 * return of nr_header_lines) - a '.'-underline row sized to each column's
 * width.  Returns the number of header rows printed.
 * NOTE(review): several lines (the '#' prefixes, the dot-printing loop
 * body, closers) are elided in this excerpt.
 */
658 hists__fprintf_standard_headers(struct hists *hists,
659 struct perf_hpp *hpp,
662 struct perf_hpp_list *hpp_list = hists->hpp_list;
663 struct perf_hpp_fmt *fmt;
665 const char *sep = symbol_conf.field_sep;
669 for (line = 0; line < hpp_list->nr_header_lines; line++) {
670 /* first # is displayed one level up */
673 fprintf_line(hists, hpp, line, fp);
/* with an explicit separator there is no underline row to draw */
678 return hpp_list->nr_header_lines;
684 hists__for_each_format(hists, fmt) {
687 if (perf_hpp__should_skip(fmt, hists))
691 fprintf(fp, "%s", sep ?: " ");
695 width = fmt->width(fmt, hpp, hists);
696 for (i = 0; i < width; i++)
/* header lines plus the underline row and trailing blank row */
702 return hpp_list->nr_header_lines + 2;
/*
 * Print column headers for @hists, choosing the hierarchy or standard
 * layout based on symbol_conf.report_hierarchy.  Returns the number of
 * header rows printed.
 */
705 int hists__fprintf_headers(struct hists *hists, FILE *fp)
708 struct perf_hpp dummy_hpp = {
715 if (symbol_conf.report_hierarchy)
716 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
718 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
/*
 * Print the whole histogram to @fp: optional headers, then every entry in
 * sort order (filtered/percent-limited entries skipped), stopping after
 * @max_rows rows when set.  A heap line buffer is sized from the sort list
 * width plus color-escape overhead.  Returns the number of characters
 * printed.
 * NOTE(review): many lines (filter checks, error paths, closers, the
 * final return and free of the line buffer) are elided in this excerpt.
 */
722 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
723 int max_cols, float min_pcnt, FILE *fp,
728 const char *sep = symbol_conf.field_sep;
736 hists__reset_column_width(hists);
738 if (symbol_conf.col_width_list_str)
739 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
742 nr_rows += hists__fprintf_headers(hists, fp);
744 if (max_rows && nr_rows >= max_rows)
/* +3 for the "---" callchain prefix, +1 for the NUL terminator */
747 linesz = hists__sort_list_width(hists) + 3 + 1;
748 linesz += perf_hpp__color_overhead();
749 line = malloc(linesz);
755 indent = hists__overhead_width(hists) + 4;
757 for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
758 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
764 percent = hist_entry__get_percent_limit(h);
765 if (percent < min_pcnt)
768 ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
770 if (max_rows && ++nr_rows >= max_rows)
774 * If all children are filtered out or percent-limited,
775 * display "no entry >= x.xx%" message.
777 if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
778 int depth = hists->nr_hpp_node + h->depth + 1;
780 print_hierarchy_indent(sep, depth, spaces, fp);
781 fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
783 if (max_rows && ++nr_rows >= max_rows)
/* verbose debugging aid when an entry resolved to no map */
787 if (h->ms.map == NULL && verbose > 1) {
788 __map_groups__fprintf_maps(h->thread->mg,
790 fprintf(fp, "%.10s end\n", graph_dotted_line);
796 zfree(&rem_sq_bracket);
801 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
806 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
809 if (stats->nr_events[i] == 0)
812 name = perf_event__name(i);
813 if (!strcmp(name, "UNKNOWN"))
816 ret += fprintf(fp, "%16s events: %10d\n", name,
817 stats->nr_events[i]);