#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

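/*
 * Online (Welford) update of the running mean and the sum of squared
 * deviations (M2).  avg_stats() and stddev_stats() below derive the
 * average and the standard deviation from these fields.
 */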
void update_stats(struct stats *stats, u64 val)
{
        double delta;

        stats->n++;
        delta = val - stats->mean;
        stats->mean += delta / stats->n;
        stats->M2 += delta*(val - stats->mean);

        if (val > stats->max)
                stats->max = val;

        if (val < stats->min)
                stats->min = val;
}

double avg_stats(struct stats *stats)
{
        return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
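/*
 * stats->M2 accumulates the sum of squared deviations from the running
 * mean, so variance = M2 / (n - 1) and the std dev of the mean is
 * sqrt(variance / n), as computed below.
 */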
double stddev_stats(struct stats *stats)
{
        double variance, variance_mean;

        if (stats->n < 2)
                return 0.0;

        variance = stats->M2 / (stats->n - 1);
        variance_mean = variance / stats->n;

        return sqrt(variance_mean);
}

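/*
 * Relative standard deviation: the stddev expressed as a percentage of
 * the average, 0.0 when the average itself is zero.
 */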
double rel_stddev_stats(double stddev, double avg)
{
        double pct = 0.0;

        if (avg)
                pct = 100.0 * stddev/avg;

        return pct;
}

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
                           enum perf_stat_evsel_id id)
{
        struct perf_stat_evsel *ps = evsel->priv;

        return ps->id == id;
}

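/*
 * Map evsel names to PERF_STAT_EVSEL_ID__* values so that the special
 * transaction and topdown events can later be recognized by id.
 */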
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
        ID(NONE,                x),
        ID(CYCLES_IN_TX,        cpu/cycles-t/),
        ID(TRANSACTION_START,   cpu/tx-start/),
        ID(ELISION_START,       cpu/el-start/),
        ID(CYCLES_IN_TX_CP,     cpu/cycles-ct/),
        ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
        ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
        ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
        ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
        ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
};
#undef ID

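/* Match the evsel name against the id_str table above and record the id. */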
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
        struct perf_stat_evsel *ps = evsel->priv;
        int i;

        /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

        for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
                if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
                        ps->id = i;
                        break;
                }
        }
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
        int i;
        struct perf_stat_evsel *ps = evsel->priv;

        for (i = 0; i < 3; i++)
                init_stats(&ps->res_stats[i]);

        perf_stat_evsel_id_init(evsel);
}

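/* Allocate the per-evsel stat area (evsel->priv) and reset it. */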
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
        evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
        if (evsel->priv == NULL)
                return -ENOMEM;
        perf_evsel__reset_stat_priv(evsel);
        return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
        zfree(&evsel->priv);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
                                             int ncpus, int nthreads)
{
        struct perf_counts *counts;

        counts = perf_counts__new(ncpus, nthreads);
        if (counts)
                evsel->prev_raw_counts = counts;

        return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
        perf_counts__delete(evsel->prev_raw_counts);
        evsel->prev_raw_counts = NULL;
}

static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
{
        if (evsel->prev_raw_counts) {
                evsel->prev_raw_counts->aggr.val = 0;
                evsel->prev_raw_counts->aggr.ena = 0;
                evsel->prev_raw_counts->aggr.run = 0;
        }
}

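/*
 * Allocate everything needed to process a counter: the private stat area,
 * the counts and, optionally, the previous raw counts used when computing
 * deltas between reads.
 */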
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
        int ncpus = perf_evsel__nr_cpus(evsel);
        int nthreads = thread_map__nr(evsel->threads);

        if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
            perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
            (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
                return -ENOMEM;

        return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (perf_evsel__alloc_stats(evsel, alloc_raw))
                        goto out_free;
        }

        return 0;

out_free:
        perf_evlist__free_stats(evlist);
        return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                perf_evsel__free_stat_priv(evsel);
                perf_evsel__free_counts(evsel);
                perf_evsel__free_prev_raw_counts(evsel);
        }
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                perf_evsel__reset_stat_priv(evsel);
                perf_evsel__reset_counts(evsel);
        }
}

void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel)
                perf_evsel__reset_prev_raw_counts(evsel);
}

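/*
 * Per-package events (counter->per_pkg) must be counted only once per
 * package: per_pkg_mask tracks which packages already contributed a value
 * and check_per_pkg() tells the caller to skip duplicates.
 */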
static void zero_per_pkg(struct perf_evsel *counter)
{
        if (counter->per_pkg_mask)
                memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

static int check_per_pkg(struct perf_evsel *counter,
                         struct perf_counts_values *vals, int cpu, bool *skip)
{
        unsigned long *mask = counter->per_pkg_mask;
        struct cpu_map *cpus = perf_evsel__cpus(counter);
        int s;

        *skip = false;

        if (!counter->per_pkg)
                return 0;

        if (cpu_map__empty(cpus))
                return 0;

        if (!mask) {
                mask = zalloc(MAX_NR_CPUS);
                if (!mask)
                        return -ENOMEM;

                counter->per_pkg_mask = mask;
        }

        /*
         * We do not consider an event that has not run as a good
         * instance to mark a package as used (skip=1). Otherwise
         * we may run into a situation where the first CPU in a package
         * is not running anything, yet the second is, and this function
         * would mark the package as used after the first CPU and would
         * not read the values from the second CPU.
         */
        if (!(vals->run && vals->ena))
                return 0;

        s = cpu_map__get_socket(cpus, cpu, NULL);
        if (s < 0)
                return -1;

        *skip = test_and_set_bit(s, mask) == 1;
        return 0;
}

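/*
 * Process one (cpu, thread) value: apply the per-package filter, then
 * either compute deltas and scale the value in place (per-cpu aggregation
 * modes) or fold it into the global aggregate.
 */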
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
                       int cpu, int thread,
                       struct perf_counts_values *count)
{
        struct perf_counts_values *aggr = &evsel->counts->aggr;
        static struct perf_counts_values zero;
        bool skip = false;

        if (check_per_pkg(evsel, count, cpu, &skip)) {
                pr_err("failed to read per-pkg counter\n");
                return -1;
        }

        if (skip)
                count = &zero;

        switch (config->aggr_mode) {
        case AGGR_THREAD:
        case AGGR_CORE:
        case AGGR_SOCKET:
        case AGGR_NONE:
                if (!evsel->snapshot)
                        perf_evsel__compute_deltas(evsel, cpu, thread, count);
                perf_counts_values__scale(count, config->scale, NULL);
                if (config->aggr_mode == AGGR_NONE)
                        perf_stat__update_shadow_stats(evsel, count->values, cpu);
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
                if (config->scale) {
                        aggr->ena += count->ena;
                        aggr->run += count->run;
                }
        case AGGR_UNSET:
        default:
                break;
        }

        return 0;
}

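/* Walk every (thread, cpu) slot of the counter and process its value. */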
static int process_counter_maps(struct perf_stat_config *config,
                                struct perf_evsel *counter)
{
        int nthreads = thread_map__nr(counter->threads);
        int ncpus = perf_evsel__nr_cpus(counter);
        int cpu, thread;

        if (counter->system_wide)
                nthreads = 1;

        for (thread = 0; thread < nthreads; thread++) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        if (process_counter_values(config, counter, cpu, thread,
                                                   perf_counts(counter->counts, cpu, thread)))
                                return -1;
                }
        }

        return 0;
}

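/*
 * Main per-counter entry point: reset the aggregate, process all raw
 * values and, for AGGR_GLOBAL, scale the aggregate and update res_stats
 * and the shadow stats used for derived metrics.
 */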
int perf_stat_process_counter(struct perf_stat_config *config,
                              struct perf_evsel *counter)
{
        struct perf_counts_values *aggr = &counter->counts->aggr;
        struct perf_stat_evsel *ps = counter->priv;
        u64 *count = counter->counts->aggr.values;
        double val;
        int i, ret;

        aggr->val = aggr->ena = aggr->run = 0;

        /*
         * We calculate the counter's data every interval, and the display
         * code shows ps->res_stats avg value. We need to zero the stats
         * for interval mode, otherwise an overall running average would
         * be shown for each interval.
         */
        if (config->interval) {
                for (i = 0; i < 3; i++)
                        init_stats(&ps->res_stats[i]);
        }

        if (counter->per_pkg)
                zero_per_pkg(counter);

        ret = process_counter_maps(config, counter);
        if (ret)
                return ret;

        if (config->aggr_mode != AGGR_GLOBAL)
                return 0;

        if (!counter->snapshot)
                perf_evsel__compute_deltas(counter, -1, -1, aggr);
        perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

        for (i = 0; i < 3; i++)
                update_stats(&ps->res_stats[i], count[i]);

        if (verbose) {
                fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                        perf_evsel__name(counter), count[0], count[1], count[2]);
        }

        /*
         * Save the full runtime - to allow normalization during printout:
         */
        val = counter->scale * *count;
        perf_stat__update_shadow_stats(counter, &val, 0);

        return 0;
}

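/*
 * Handle a stat event read back from a perf.data stream (perf stat
 * record), storing the reported value into the corresponding counter.
 */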
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event,
                                   struct perf_session *session)
{
        struct perf_counts_values count;
        struct stat_event *st = &event->stat;
        struct perf_evsel *counter;

        count.val = st->val;
        count.ena = st->ena;
        count.run = st->run;

        counter = perf_evlist__id2evsel(session->evlist, st->id);
        if (!counter) {
                pr_err("Failed to resolve counter for stat event.\n");
                return -EINVAL;
        }

        *perf_counts(counter->counts, st->cpu, st->thread) = count;
        counter->supported = true;
        return 0;
}

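/* Debug printout helpers for stat, stat round and stat config events. */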
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
        struct stat_event *st = (struct stat_event *) event;
        size_t ret;

        ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
                      st->id, st->cpu, st->thread);
        ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
                       st->val, st->ena, st->run);

        return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
        struct stat_round_event *rd = (struct stat_round_event *)event;
        size_t ret;

        ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
                      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

        return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
        struct perf_stat_config sc;
        size_t ret;

        perf_event__read_stat_config(&sc, &event->stat_config);

        ret = fprintf(fp, "\n");
        ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
        ret += fprintf(fp, "... scale %d\n", sc.scale);
        ret += fprintf(fp, "... interval %u\n", sc.interval);

        return ret;
}