1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6 /* Manage metrics and groups of metrics from JSON files */
7
8 #include "metricgroup.h"
9 #include "debug.h"
10 #include "evlist.h"
11 #include "evsel.h"
12 #include "strbuf.h"
13 #include "pmu.h"
14 #include "pmu-hybrid.h"
15 #include "expr.h"
16 #include "rblist.h"
17 #include <string.h>
18 #include <errno.h>
19 #include "strlist.h"
20 #include <assert.h>
21 #include <linux/ctype.h>
22 #include <linux/list_sort.h>
23 #include <linux/string.h>
24 #include <linux/zalloc.h>
25 #include <perf/cpumap.h>
26 #include <subcmd/parse-options.h>
27 #include <api/fs/fs.h>
28 #include "util.h"
29 #include <asm/bug.h>
30 #include "cgroup.h"
31
32 struct metric_event *metricgroup__lookup(struct rblist *metric_events,
33 struct evsel *evsel,
34 bool create)
35 {
36 struct rb_node *nd;
37 struct metric_event me = {
38 .evsel = evsel
39 };
40
41 if (!metric_events)
42 return NULL;
43
44 nd = rblist__find(metric_events, &me);
45 if (nd)
46 return container_of(nd, struct metric_event, nd);
47 if (create) {
48 rblist__add_node(metric_events, &me);
49 nd = rblist__find(metric_events, &me);
50 if (nd)
51 return container_of(nd, struct metric_event, nd);
52 }
53 return NULL;
54 }
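/*
 * Illustrative use (not part of this file, names assumed): callers such as
 * the stat-shadow code look up an evsel's metric_event with create=false when
 * printing, e.g. metricgroup__lookup(&stat_config.metric_events, counter, false),
 * while parse_groups() below passes create=true to allocate the node on demand.
 */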
55
56 static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
57 {
58 struct metric_event *a = container_of(rb_node,
59 struct metric_event,
60 nd);
61 const struct metric_event *b = entry;
62
63 if (a->evsel == b->evsel)
64 return 0;
65 if ((char *)a->evsel < (char *)b->evsel)
66 return -1;
67 return +1;
68 }
69
70 static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
71 const void *entry)
72 {
73 struct metric_event *me = malloc(sizeof(struct metric_event));
74
75 if (!me)
76 return NULL;
77 memcpy(me, entry, sizeof(struct metric_event));
78 me->evsel = ((struct metric_event *)entry)->evsel;
79 INIT_LIST_HEAD(&me->head);
80 return &me->nd;
81 }
82
83 static void metric_event_delete(struct rblist *rblist __maybe_unused,
84 struct rb_node *rb_node)
85 {
86 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
87 struct metric_expr *expr, *tmp;
88
89 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
90 free((char *)expr->metric_name);
91 free(expr->metric_refs);
92 free(expr->metric_events);
93 free(expr);
94 }
95
96 free(me);
97 }
98
99 static void metricgroup__rblist_init(struct rblist *metric_events)
100 {
101 rblist__init(metric_events);
102 metric_events->node_cmp = metric_event_cmp;
103 metric_events->node_new = metric_event_new;
104 metric_events->node_delete = metric_event_delete;
105 }
106
107 void metricgroup__rblist_exit(struct rblist *metric_events)
108 {
109 rblist__exit(metric_events);
110 }
111
112 /**
113 * The metric under construction. The data held here will be placed in a
114 * metric_expr.
115 */
116 struct metric {
117 struct list_head nd;
118 /**
119 * The expression parse context importantly holding the IDs contained
120 * within the expression.
121 */
122 struct expr_parse_ctx *pctx;
123 /** The name of the metric such as "IPC". */
124 const char *metric_name;
125 /** Modifier on the metric such as "u" or NULL for none. */
126 const char *modifier;
127 /** The expression to parse, for example, "instructions/cycles". */
128 const char *metric_expr;
129 /**
130 * The "ScaleUnit" that scales and adds a unit to the metric during
131 * output.
132 */
133 const char *metric_unit;
134 /** Optional null terminated array of referenced metrics. */
135 struct metric_ref *metric_refs;
136 /**
137 * Is there a constraint on the group of events? If so, the events
138 * won't be grouped.
139 */
140 bool has_constraint;
141 /**
142 * Parsed events for the metric. Optional as events may be taken from a
143 * different metric whose group contains all the IDs necessary for this
144 * one.
145 */
146 struct evlist *evlist;
147 };
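/*
 * Example (illustrative only): for a metric such as "IPC" the JSON typically
 * supplies MetricName "IPC" and a MetricExpr like
 * "inst_retired.any / cpu_clk_unhalted.thread"; metric_name and metric_expr
 * point at those strings while pctx holds the event IDs parsed from the
 * expression.
 */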
148
149 static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
150 {
151 static bool violate_nmi_constraint;
152
153 if (!foot) {
154 pr_warning("Splitting metric group %s into standalone metrics.\n", name);
155 violate_nmi_constraint = true;
156 return;
157 }
158
159 if (!violate_nmi_constraint)
160 return;
161
162 pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
163 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
164 " perf stat ...\n"
165 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
166 }
167
168 static bool metricgroup__has_constraint(const struct pmu_event *pe)
169 {
170 if (!pe->metric_constraint)
171 return false;
172
173 if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
174 sysctl__nmi_watchdog_enabled()) {
175 metricgroup___watchdog_constraint_hint(pe->metric_name, false);
176 return true;
177 }
178
179 return false;
180 }
181
182 static void metric__free(struct metric *m)
183 {
184 if (!m)
185 return;
186
187 free(m->metric_refs);
188 expr__ctx_free(m->pctx);
189 free((char *)m->modifier);
190 evlist__delete(m->evlist);
191 free(m);
192 }
193
194 static struct metric *metric__new(const struct pmu_event *pe,
195 const char *modifier,
196 bool metric_no_group,
197 int runtime,
198 const char *user_requested_cpu_list,
199 bool system_wide)
200 {
201 struct metric *m;
202
203 m = zalloc(sizeof(*m));
204 if (!m)
205 return NULL;
206
207 m->pctx = expr__ctx_new();
208 if (!m->pctx)
209 goto out_err;
210
211 m->metric_name = pe->metric_name;
212 m->modifier = NULL;
213 if (modifier) {
214 m->modifier = strdup(modifier);
215 if (!m->modifier)
216 goto out_err;
217 }
218 m->metric_expr = pe->metric_expr;
219 m->metric_unit = pe->unit;
220 m->pctx->sctx.user_requested_cpu_list = NULL;
221 if (user_requested_cpu_list) {
222 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
223 if (!m->pctx->sctx.user_requested_cpu_list)
224 goto out_err;
225 }
226 m->pctx->sctx.runtime = runtime;
227 m->pctx->sctx.system_wide = system_wide;
228 m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
229 m->metric_refs = NULL;
230 m->evlist = NULL;
231
232 return m;
233 out_err:
234 metric__free(m);
235 return NULL;
236 }
237
238 static bool contains_metric_id(struct evsel **metric_events, int num_events,
239 const char *metric_id)
240 {
241 int i;
242
243 for (i = 0; i < num_events; i++) {
244 if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
245 return true;
246 }
247 return false;
248 }
249
250 /**
251 * setup_metric_events - Find a group of events in metric_evlist that correspond
252 * to the IDs from a parsed metric expression.
253 * @ids: the metric IDs to match.
254 * @metric_evlist: the list of perf events.
255 * @out_metric_events: holds the created metric events array.
256 */
257 static int setup_metric_events(struct hashmap *ids,
258 struct evlist *metric_evlist,
259 struct evsel ***out_metric_events)
260 {
261 struct evsel **metric_events;
262 const char *metric_id;
263 struct evsel *ev;
264 size_t ids_size, matched_events, i;
265
266 *out_metric_events = NULL;
267 ids_size = hashmap__size(ids);
268
269 metric_events = calloc(sizeof(void *), ids_size + 1);
270 if (!metric_events)
271 return -ENOMEM;
272
273 matched_events = 0;
274 evlist__for_each_entry(metric_evlist, ev) {
275 struct expr_id_data *val_ptr;
276
277 /*
278 * Check for duplicate events with the same name. For
279 * example, uncore_imc/cas_count_read/ will turn into 6
280 * events per socket on skylakex. Only the first such
281 * event is placed in metric_events.
282 */
283 metric_id = evsel__metric_id(ev);
284 if (contains_metric_id(metric_events, matched_events, metric_id))
285 continue;
286 /*
287 * Does this event belong to the parse context? For
288 * combined or shared groups, this metric may not care
289 * about this event.
290 */
291 if (hashmap__find(ids, metric_id, (void **)&val_ptr)) {
292 metric_events[matched_events++] = ev;
293
294 if (matched_events >= ids_size)
295 break;
296 }
297 }
298 if (matched_events < ids_size) {
299 free(metric_events);
300 return -EINVAL;
301 }
302 for (i = 0; i < ids_size; i++) {
303 ev = metric_events[i];
304 ev->collect_stat = true;
305
306 /*
307 * The metric leader points to the identically named
308 * event in metric_events.
309 */
310 ev->metric_leader = ev;
311 /*
312 * Mark two events with identical names in the same
313 * group (or globally) as being in use as uncore events
314 * may be duplicated for each pmu. Set the metric leader
315 * of such events to be the event that appears in
316 * metric_events.
317 */
318 metric_id = evsel__metric_id(ev);
319 evlist__for_each_entry_continue(metric_evlist, ev) {
320 if (!strcmp(evsel__metric_id(ev), metric_id))
321 ev->metric_leader = metric_events[i];
322 }
323 }
324 *out_metric_events = metric_events;
325 return 0;
326 }
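/*
 * On success *out_metric_events is a NULL-terminated array of ids_size evsel
 * pointers, in the order the events appear in metric_evlist; parse_groups()
 * below stores it in metric_expr->metric_events for use when the metric is
 * computed.
 */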
327
328 static bool match_metric(const char *n, const char *list)
329 {
330 int len;
331 char *m;
332
333 if (!list)
334 return false;
335 if (!strcmp(list, "all"))
336 return true;
337 if (!n)
338 return !strcasecmp(list, "No_group");
339 len = strlen(list);
340 m = strcasestr(n, list);
341 if (!m)
342 return false;
343 if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
344 (m[len] == 0 || m[len] == ';'))
345 return true;
346 return false;
347 }
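/*
 * For example (illustrative): match_metric("TopdownL1;Default", "TopdownL1")
 * returns true because the requested name matches a whole ';'-separated
 * element, while match_metric("TopdownL1_SMT", "TopdownL1") returns false.
 */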
348
349 static bool match_pe_metric(const struct pmu_event *pe, const char *metric)
350 {
351 return match_metric(pe->metric_group, metric) ||
352 match_metric(pe->metric_name, metric);
353 }
354
355 struct mep {
356 struct rb_node nd;
357 const char *name;
358 struct strlist *metrics;
359 };
360
361 static int mep_cmp(struct rb_node *rb_node, const void *entry)
362 {
363 struct mep *a = container_of(rb_node, struct mep, nd);
364 struct mep *b = (struct mep *)entry;
365
366 return strcmp(a->name, b->name);
367 }
368
369 static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
370 const void *entry)
371 {
372 struct mep *me = malloc(sizeof(struct mep));
373
374 if (!me)
375 return NULL;
376 memcpy(me, entry, sizeof(struct mep));
377 me->name = strdup(me->name);
378 if (!me->name)
379 goto out_me;
380 me->metrics = strlist__new(NULL, NULL);
381 if (!me->metrics)
382 goto out_name;
383 return &me->nd;
384 out_name:
385 zfree(&me->name);
386 out_me:
387 free(me);
388 return NULL;
389 }
390
391 static struct mep *mep_lookup(struct rblist *groups, const char *name)
392 {
393 struct rb_node *nd;
394 struct mep me = {
395 .name = name
396 };
397 nd = rblist__find(groups, &me);
398 if (nd)
399 return container_of(nd, struct mep, nd);
400 rblist__add_node(groups, &me);
401 nd = rblist__find(groups, &me);
402 if (nd)
403 return container_of(nd, struct mep, nd);
404 return NULL;
405 }
406
407 static void mep_delete(struct rblist *rl __maybe_unused,
408 struct rb_node *nd)
409 {
410 struct mep *me = container_of(nd, struct mep, nd);
411
412 strlist__delete(me->metrics);
413 zfree(&me->name);
414 free(me);
415 }
416
417 static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
418 {
419 struct str_node *sn;
420 int n = 0;
421
422 strlist__for_each_entry (sn, metrics) {
423 if (raw)
424 printf("%s%s", n > 0 ? " " : "", sn->s);
425 else
426 printf(" %s\n", sn->s);
427 n++;
428 }
429 if (raw)
430 putchar('\n');
431 }
432
433 static int metricgroup__print_pmu_event(const struct pmu_event *pe,
434 bool metricgroups, char *filter,
435 bool raw, bool details,
436 struct rblist *groups,
437 struct strlist *metriclist)
438 {
439 const char *g;
440 char *omg, *mg;
441
442 g = pe->metric_group;
443 if (!g && pe->metric_name) {
444 if (pe->name)
445 return 0;
446 g = "No_group";
447 }
448
449 if (!g)
450 return 0;
451
452 mg = strdup(g);
453
454 if (!mg)
455 return -ENOMEM;
456 omg = mg;
457 while ((g = strsep(&mg, ";")) != NULL) {
458 struct mep *me;
459 char *s;
460
461 g = skip_spaces(g);
462 if (*g == 0)
463 g = "No_group";
464 if (filter && !strstr(g, filter))
465 continue;
466 if (raw)
467 s = (char *)pe->metric_name;
468 else {
469 if (asprintf(&s, "%s\n%*s%s]",
470 pe->metric_name, 8, "[", pe->desc) < 0)
471 return -1;
472 if (details) {
473 if (asprintf(&s, "%s\n%*s%s]",
474 s, 8, "[", pe->metric_expr) < 0)
475 return -1;
476 }
477 }
478
479 if (!s)
480 continue;
481
482 if (!metricgroups) {
483 strlist__add(metriclist, s);
484 } else {
485 me = mep_lookup(groups, g);
486 if (!me)
487 continue;
488 strlist__add(me->metrics, s);
489 }
490
491 if (!raw)
492 free(s);
493 }
494 free(omg);
495
496 return 0;
497 }
498
499 struct metricgroup_print_sys_idata {
500 struct strlist *metriclist;
501 char *filter;
502 struct rblist *groups;
503 bool metricgroups;
504 bool raw;
505 bool details;
506 };
507
508 struct metricgroup_iter_data {
509 pmu_event_iter_fn fn;
510 void *data;
511 };
512
513 static int metricgroup__sys_event_iter(const struct pmu_event *pe,
514 const struct pmu_events_table *table,
515 void *data)
516 {
517 struct metricgroup_iter_data *d = data;
518 struct perf_pmu *pmu = NULL;
519
520 if (!pe->metric_expr || !pe->compat)
521 return 0;
522
523 while ((pmu = perf_pmu__scan(pmu))) {
524
525 if (!pmu->id || strcmp(pmu->id, pe->compat))
526 continue;
527
528 return d->fn(pe, table, d->data);
529 }
530
531 return 0;
532 }
533
534 static int metricgroup__print_sys_event_iter(const struct pmu_event *pe,
535 const struct pmu_events_table *table __maybe_unused,
536 void *data)
537 {
538 struct metricgroup_print_sys_idata *d = data;
539
540 return metricgroup__print_pmu_event(pe, d->metricgroups, d->filter, d->raw,
541 d->details, d->groups, d->metriclist);
542 }
543
544 struct metricgroup_print_data {
545 const char *pmu_name;
546 struct strlist *metriclist;
547 char *filter;
548 struct rblist *groups;
549 bool metricgroups;
550 bool raw;
551 bool details;
552 };
553
554 static int metricgroup__print_callback(const struct pmu_event *pe,
555 const struct pmu_events_table *table __maybe_unused,
556 void *vdata)
557 {
558 struct metricgroup_print_data *data = vdata;
559
560 if (!pe->metric_expr)
561 return 0;
562
563 if (data->pmu_name && perf_pmu__is_hybrid(pe->pmu) && strcmp(data->pmu_name, pe->pmu))
564 return 0;
565
566 return metricgroup__print_pmu_event(pe, data->metricgroups, data->filter,
567 data->raw, data->details, data->groups,
568 data->metriclist);
569 }
570
571 void metricgroup__print(bool metrics, bool metricgroups, char *filter,
572 bool raw, bool details, const char *pmu_name)
573 {
574 struct rblist groups;
575 struct rb_node *node, *next;
576 struct strlist *metriclist = NULL;
577 const struct pmu_events_table *table;
578
579 if (!metricgroups) {
580 metriclist = strlist__new(NULL, NULL);
581 if (!metriclist)
582 return;
583 }
584
585 rblist__init(&groups);
586 groups.node_new = mep_new;
587 groups.node_cmp = mep_cmp;
588 groups.node_delete = mep_delete;
589 table = pmu_events_table__find();
590 if (table) {
591 struct metricgroup_print_data data = {
592 .pmu_name = pmu_name,
593 .metriclist = metriclist,
594 .metricgroups = metricgroups,
595 .filter = filter,
596 .raw = raw,
597 .details = details,
598 .groups = &groups,
599 };
600
601 pmu_events_table_for_each_event(table,
602 metricgroup__print_callback,
603 &data);
604 }
605 {
606 struct metricgroup_iter_data data = {
607 .fn = metricgroup__print_sys_event_iter,
608 .data = (void *) &(struct metricgroup_print_sys_idata){
609 .metriclist = metriclist,
610 .metricgroups = metricgroups,
611 .filter = filter,
612 .raw = raw,
613 .details = details,
614 .groups = &groups,
615 },
616 };
617
618 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
619 }
620
621 if (!filter || !rblist__empty(&groups)) {
622 if (metricgroups && !raw)
623 printf("\nMetric Groups:\n\n");
624 else if (metrics && !raw)
625 printf("\nMetrics:\n\n");
626 }
627
628 for (node = rb_first_cached(&groups.entries); node; node = next) {
629 struct mep *me = container_of(node, struct mep, nd);
630
631 if (metricgroups)
632 printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
633 if (metrics)
634 metricgroup__print_strlist(me->metrics, raw);
635 next = rb_next(node);
636 rblist__remove_node(&groups, node);
637 }
638 if (!metricgroups)
639 metricgroup__print_strlist(metriclist, raw);
640 strlist__delete(metriclist);
641 }
642
643 static const char *code_characters = ",-=@";
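/*
 * Characters that parse-events cannot accept inside a metric-id term are
 * escaped as '!' plus their index in code_characters, so ',' becomes "!0",
 * '-' becomes "!1", '=' becomes "!2" and '@' becomes "!3";
 * decode_metric_id() reverses the mapping.
 */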
644
645 static int encode_metric_id(struct strbuf *sb, const char *x)
646 {
647 char *c;
648 int ret = 0;
649
650 for (; *x; x++) {
651 c = strchr(code_characters, *x);
652 if (c) {
653 ret = strbuf_addch(sb, '!');
654 if (ret)
655 break;
656
657 ret = strbuf_addch(sb, '0' + (c - code_characters));
658 if (ret)
659 break;
660 } else {
661 ret = strbuf_addch(sb, *x);
662 if (ret)
663 break;
664 }
665 }
666 return ret;
667 }
668
669 static int decode_metric_id(struct strbuf *sb, const char *x)
670 {
671 const char *orig = x;
672 size_t i;
673 char c;
674 int ret;
675
676 for (; *x; x++) {
677 c = *x;
678 if (*x == '!') {
679 x++;
680 i = *x - '0';
681 if (i > strlen(code_characters)) {
682 pr_err("Bad metric-id encoding in: '%s'", orig);
683 return -1;
684 }
685 c = code_characters[i];
686 }
687 ret = strbuf_addch(sb, c);
688 if (ret)
689 return ret;
690 }
691 return 0;
692 }
693
694 static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
695 {
696 struct evsel *ev;
697 struct strbuf sb = STRBUF_INIT;
698 char *cur;
699 int ret = 0;
700
701 evlist__for_each_entry(perf_evlist, ev) {
702 if (!ev->metric_id)
703 continue;
704
705 ret = strbuf_setlen(&sb, 0);
706 if (ret)
707 break;
708
709 ret = decode_metric_id(&sb, ev->metric_id);
710 if (ret)
711 break;
712
713 free((char *)ev->metric_id);
714 ev->metric_id = strdup(sb.buf);
715 if (!ev->metric_id) {
716 ret = -ENOMEM;
717 break;
718 }
719 /*
720 * If the name is just the parsed event, use the metric-id to
721 * give a more friendly display version.
722 */
723 if (strstr(ev->name, "metric-id=")) {
724 bool has_slash = false;
725
726 free(ev->name);
727 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
728 *cur = '/';
729 has_slash = true;
730 }
731
732 if (modifier) {
733 if (!has_slash && !strchr(sb.buf, ':')) {
734 ret = strbuf_addch(&sb, ':');
735 if (ret)
736 break;
737 }
738 ret = strbuf_addstr(&sb, modifier);
739 if (ret)
740 break;
741 }
742 ev->name = strdup(sb.buf);
743 if (!ev->name) {
744 ret = -ENOMEM;
745 break;
746 }
747 }
748 }
749 strbuf_release(&sb);
750 return ret;
751 }
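/*
 * For example (illustrative): an evsel parsed from
 * "msr/tsc,metric-id=msr!3tsc!3/" gets its metric_id decoded back to
 * "msr@tsc@" and, because its name still contains "metric-id=", the name is
 * rewritten to the friendlier "msr/tsc/" plus any modifier.
 */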
752
753 static int metricgroup__build_event_string(struct strbuf *events,
754 const struct expr_parse_ctx *ctx,
755 const char *modifier,
756 bool has_constraint)
757 {
758 struct hashmap_entry *cur;
759 size_t bkt;
760 bool no_group = true, has_tool_events = false;
761 bool tool_events[PERF_TOOL_MAX] = {false};
762 int ret = 0;
763
764 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
765
766 hashmap__for_each_entry(ctx->ids, cur, bkt) {
767 const char *sep, *rsep, *id = cur->key;
768 enum perf_tool_event ev;
769
770 pr_debug("found event %s\n", id);
771
772 /* Always move tool events outside of the group. */
773 ev = perf_tool_event__from_str(id);
774 if (ev != PERF_TOOL_NONE) {
775 has_tool_events = true;
776 tool_events[ev] = true;
777 continue;
778 }
779 /* Separate events with commas and open the group if necessary. */
780 if (no_group) {
781 if (!has_constraint) {
782 ret = strbuf_addch(events, '{');
783 RETURN_IF_NON_ZERO(ret);
784 }
785
786 no_group = false;
787 } else {
788 ret = strbuf_addch(events, ',');
789 RETURN_IF_NON_ZERO(ret);
790 }
791 /*
792 * Encode the ID as an event string. Add a qualifier for
793 * metric_id that is the original name except with characters
794 * that parse-events can't parse replaced. For example,
795 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
796 */
797 sep = strchr(id, '@');
798 if (sep != NULL) {
799 ret = strbuf_add(events, id, sep - id);
800 RETURN_IF_NON_ZERO(ret);
801 ret = strbuf_addch(events, '/');
802 RETURN_IF_NON_ZERO(ret);
803 rsep = strrchr(sep, '@');
804 ret = strbuf_add(events, sep + 1, rsep - sep - 1);
805 RETURN_IF_NON_ZERO(ret);
806 ret = strbuf_addstr(events, ",metric-id=");
807 RETURN_IF_NON_ZERO(ret);
808 sep = rsep;
809 } else {
810 sep = strchr(id, ':');
811 if (sep != NULL) {
812 ret = strbuf_add(events, id, sep - id);
813 RETURN_IF_NON_ZERO(ret);
814 } else {
815 ret = strbuf_addstr(events, id);
816 RETURN_IF_NON_ZERO(ret);
817 }
818 ret = strbuf_addstr(events, "/metric-id=");
819 RETURN_IF_NON_ZERO(ret);
820 }
821 ret = encode_metric_id(events, id);
822 RETURN_IF_NON_ZERO(ret);
823 ret = strbuf_addstr(events, "/");
824 RETURN_IF_NON_ZERO(ret);
825
826 if (sep != NULL) {
827 ret = strbuf_addstr(events, sep + 1);
828 RETURN_IF_NON_ZERO(ret);
829 }
830 if (modifier) {
831 ret = strbuf_addstr(events, modifier);
832 RETURN_IF_NON_ZERO(ret);
833 }
834 }
835 if (!no_group && !has_constraint) {
836 ret = strbuf_addf(events, "}:W");
837 RETURN_IF_NON_ZERO(ret);
838 }
839 if (has_tool_events) {
840 int i;
841
842 perf_tool_event__for_each_event(i) {
843 if (tool_events[i]) {
844 if (!no_group) {
845 ret = strbuf_addch(events, ',');
846 RETURN_IF_NON_ZERO(ret);
847 }
848 no_group = false;
849 ret = strbuf_addstr(events, perf_tool_event__to_str(i));
850 RETURN_IF_NON_ZERO(ret);
851 }
852 }
853 }
854
855 return ret;
856 #undef RETURN_IF_NON_ZERO
857 }
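/*
 * For example (illustrative): a metric over "instructions" and "cycles" with
 * no constraint builds a string such as
 * "{instructions/metric-id=instructions/,cycles/metric-id=cycles/}:W",
 * which parse_ids() below hands to __parse_events().
 */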
858
859 int __weak arch_get_runtimeparam(const struct pmu_event *pe __maybe_unused)
860 {
861 return 1;
862 }
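/*
 * Weak default for metrics whose expression contains '?'. Architectures may
 * override this (powerpc does for hv_24x7 metrics, for instance) to return
 * how many instances of such a metric should be created.
 */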
863
864 /*
865 * A singly linked list on the stack of the names of metrics being
866 * processed. Used to identify recursion.
867 */
868 struct visited_metric {
869 const char *name;
870 const struct visited_metric *parent;
871 };
872
873 struct metricgroup_add_iter_data {
874 struct list_head *metric_list;
875 const char *metric_name;
876 const char *modifier;
877 int *ret;
878 bool *has_match;
879 bool metric_no_group;
880 const char *user_requested_cpu_list;
881 bool system_wide;
882 struct metric *root_metric;
883 const struct visited_metric *visited;
884 const struct pmu_events_table *table;
885 };
886
887 static bool metricgroup__find_metric(const char *metric,
888 const struct pmu_events_table *table,
889 struct pmu_event *pe);
890
891 static int add_metric(struct list_head *metric_list,
892 const struct pmu_event *pe,
893 const char *modifier,
894 bool metric_no_group,
895 const char *user_requested_cpu_list,
896 bool system_wide,
897 struct metric *root_metric,
898 const struct visited_metric *visited,
899 const struct pmu_events_table *table);
900
901 /**
902 * resolve_metric - Locate metrics within the root metric and recursively add
903 * references to them.
904 * @metric_list: The list the metric is added to.
905 * @modifier: if non-null event modifiers like "u".
906 * @metric_no_group: Should events written to events be grouped "{}" or
907 * global. Grouping is the default but due to multiplexing the
908 * user may override.
909 * @user_requested_cpu_list: Command line specified CPUs to record on.
910 * @system_wide: Are events for all processes recorded.
911 * @root_metric: Metrics may reference other metrics to form a tree. In this
912 * case the root_metric holds all the IDs and a list of referenced
913 * metrics. When adding a root this argument is NULL.
914 * @visited: A singly linked list of metric names being added that is used to
915 * detect recursion.
916 * @table: The table that is searched for metrics, most commonly the table for the
917 * architecture perf is running upon.
918 */
919 static int resolve_metric(struct list_head *metric_list,
920 const char *modifier,
921 bool metric_no_group,
922 const char *user_requested_cpu_list,
923 bool system_wide,
924 struct metric *root_metric,
925 const struct visited_metric *visited,
926 const struct pmu_events_table *table)
927 {
928 struct hashmap_entry *cur;
929 size_t bkt;
930 struct to_resolve {
931 /* The metric to resolve. */
932 struct pmu_event pe;
933 /*
934 * The key in the IDs map; this may differ (for example, in case)
935 * from pe->metric_name.
936 */
937 const char *key;
938 } *pending = NULL;
939 int i, ret = 0, pending_cnt = 0;
940
941 /*
942 * Iterate over all the parsed IDs and, if there's a matching metric,
943 * add it to the pending array.
944 */
945 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
946 struct pmu_event pe;
947
948 if (metricgroup__find_metric(cur->key, table, &pe)) {
949 pending = realloc(pending,
950 (pending_cnt + 1) * sizeof(struct to_resolve));
951 if (!pending)
952 return -ENOMEM;
953
954 memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe));
955 pending[pending_cnt].key = cur->key;
956 pending_cnt++;
957 }
958 }
959
960 /* Remove the metric IDs from the context. */
961 for (i = 0; i < pending_cnt; i++)
962 expr__del_id(root_metric->pctx, pending[i].key);
963
964 /*
965 * Recursively add all the metrics, IDs are added to the root metric's
966 * context.
967 */
968 for (i = 0; i < pending_cnt; i++) {
969 ret = add_metric(metric_list, &pending[i].pe, modifier, metric_no_group,
970 user_requested_cpu_list, system_wide, root_metric, visited,
971 table);
972 if (ret)
973 break;
974 }
975
976 free(pending);
977 return ret;
978 }
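/*
 * Illustrative example: if a root metric's expression references other
 * metrics by name, say "Frontend_Bound + Bad_Speculation", resolve_metric()
 * removes those names from the root's IDs, records them in metric_refs and
 * re-enters add_metric() so that the referenced expressions contribute their
 * own event IDs to the root context.
 */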
979
980 /**
981 * __add_metric - Add a metric to metric_list.
982 * @metric_list: The list the metric is added to.
983 * @pe: The pmu_event containing the metric to be added.
984 * @modifier: if non-null event modifiers like "u".
985 * @metric_no_group: Should events written to events be grouped "{}" or
986 * global. Grouping is the default but due to multiplexing the
987 * user may override.
988 * @runtime: A special argument for the parser only known at runtime.
989 * @user_requested_cpu_list: Command line specified CPUs to record on.
990 * @system_wide: Are events for all processes recorded.
991 * @root_metric: Metrics may reference other metrics to form a tree. In this
992 * case the root_metric holds all the IDs and a list of referenced
993 * metrics. When adding a root this argument is NULL.
994 * @visited: A singly linked list of metric names being added that is used to
995 * detect recursion.
996 * @table: The table that is searched for metrics, most commonly the table for the
997 * architecture perf is running upon.
998 */
999 static int __add_metric(struct list_head *metric_list,
1000 const struct pmu_event *pe,
1001 const char *modifier,
1002 bool metric_no_group,
1003 int runtime,
1004 const char *user_requested_cpu_list,
1005 bool system_wide,
1006 struct metric *root_metric,
1007 const struct visited_metric *visited,
1008 const struct pmu_events_table *table)
1009 {
1010 const struct visited_metric *vm;
1011 int ret;
1012 bool is_root = !root_metric;
1013 struct visited_metric visited_node = {
1014 .name = pe->metric_name,
1015 .parent = visited,
1016 };
1017
1018 for (vm = visited; vm; vm = vm->parent) {
1019 if (!strcmp(pe->metric_name, vm->name)) {
1020 pr_err("failed: recursion detected for %s\n", pe->metric_name);
1021 return -1;
1022 }
1023 }
1024
1025 if (is_root) {
1026 /*
1027 * This metric is the root of a tree and may reference other
1028 * metrics that are added recursively.
1029 */
1030 root_metric = metric__new(pe, modifier, metric_no_group, runtime,
1031 user_requested_cpu_list, system_wide);
1032 if (!root_metric)
1033 return -ENOMEM;
1034
1035 } else {
1036 int cnt = 0;
1037
1038 /*
1039 * This metric was referenced in a metric higher in the
1040 * tree. Check if the same metric is already resolved in the
1041 * metric_refs list.
1042 */
1043 if (root_metric->metric_refs) {
1044 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
1045 if (!strcmp(pe->metric_name,
1046 root_metric->metric_refs[cnt].metric_name))
1047 return 0;
1048 }
1049 }
1050
1051 /* Create reference. Need space for the entry and the terminator. */
1052 root_metric->metric_refs = realloc(root_metric->metric_refs,
1053 (cnt + 2) * sizeof(struct metric_ref));
1054 if (!root_metric->metric_refs)
1055 return -ENOMEM;
1056
1057 /*
1058 * Intentionally passing just const char pointers,
1059 * from 'pe' object, so they never go away. We don't
1060 * need to change them, so there's no need to create
1061 * our own copy.
1062 */
1063 root_metric->metric_refs[cnt].metric_name = pe->metric_name;
1064 root_metric->metric_refs[cnt].metric_expr = pe->metric_expr;
1065
1066 /* Null terminate array. */
1067 root_metric->metric_refs[cnt+1].metric_name = NULL;
1068 root_metric->metric_refs[cnt+1].metric_expr = NULL;
1069 }
1070
1071 /*
1072 * For both the parent and referenced metrics, we parse
1073 * all the metric's IDs and add them to the root context.
1074 */
1075 if (expr__find_ids(pe->metric_expr, NULL, root_metric->pctx) < 0) {
1076 /* Broken metric. */
1077 ret = -EINVAL;
1078 } else {
1079 /* Resolve referenced metrics. */
1080 ret = resolve_metric(metric_list, modifier, metric_no_group,
1081 user_requested_cpu_list, system_wide,
1082 root_metric, &visited_node, table);
1083 }
1084
1085 if (ret) {
1086 if (is_root)
1087 metric__free(root_metric);
1088
1089 } else if (is_root)
1090 list_add(&root_metric->nd, metric_list);
1091
1092 return ret;
1093 }
1094
1095 struct metricgroup__find_metric_data {
1096 const char *metric;
1097 struct pmu_event *pe;
1098 };
1099
1100 static int metricgroup__find_metric_callback(const struct pmu_event *pe,
1101 const struct pmu_events_table *table __maybe_unused,
1102 void *vdata)
1103 {
1104 struct metricgroup__find_metric_data *data = vdata;
1105
1106 if (!match_metric(pe->metric_name, data->metric))
1107 return 0;
1108
1109 memcpy(data->pe, pe, sizeof(*pe));
1110 return 1;
1111 }
1112
1113 static bool metricgroup__find_metric(const char *metric,
1114 const struct pmu_events_table *table,
1115 struct pmu_event *pe)
1116 {
1117 struct metricgroup__find_metric_data data = {
1118 .metric = metric,
1119 .pe = pe,
1120 };
1121
1122 return pmu_events_table_for_each_event(table, metricgroup__find_metric_callback, &data)
1123 ? true : false;
1124 }
1125
1126 static int add_metric(struct list_head *metric_list,
1127 const struct pmu_event *pe,
1128 const char *modifier,
1129 bool metric_no_group,
1130 const char *user_requested_cpu_list,
1131 bool system_wide,
1132 struct metric *root_metric,
1133 const struct visited_metric *visited,
1134 const struct pmu_events_table *table)
1135 {
1136 int ret = 0;
1137
1138 pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
1139
1140 if (!strstr(pe->metric_expr, "?")) {
1141 ret = __add_metric(metric_list, pe, modifier, metric_no_group, 0,
1142 user_requested_cpu_list, system_wide, root_metric,
1143 visited, table);
1144 } else {
1145 int j, count;
1146
1147 count = arch_get_runtimeparam(pe);
1148
1149 /*
1150 * This loop creates multiple events, depending on the count
1151 * value, and adds those events to metric_list.
1152 */
1153
1154 for (j = 0; j < count && !ret; j++)
1155 ret = __add_metric(metric_list, pe, modifier, metric_no_group, j,
1156 user_requested_cpu_list, system_wide,
1157 root_metric, visited, table);
1158 }
1159
1160 return ret;
1161 }
1162
1163 static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
1164 const struct pmu_events_table *table __maybe_unused,
1165 void *data)
1166 {
1167 struct metricgroup_add_iter_data *d = data;
1168 int ret;
1169
1170 if (!match_pe_metric(pe, d->metric_name))
1171 return 0;
1172
1173 ret = add_metric(d->metric_list, pe, d->modifier, d->metric_no_group,
1174 d->user_requested_cpu_list, d->system_wide,
1175 d->root_metric, d->visited, d->table);
1176 if (ret)
1177 goto out;
1178
1179 *(d->has_match) = true;
1180
1181 out:
1182 *(d->ret) = ret;
1183 return ret;
1184 }
1185
1186 /**
1187 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1188 * the front. Tool events are excluded from the count.
1189 */
1190 static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1191 const struct list_head *r)
1192 {
1193 const struct metric *left = container_of(l, struct metric, nd);
1194 const struct metric *right = container_of(r, struct metric, nd);
1195 struct expr_id_data *data;
1196 int i, left_count, right_count;
1197
1198 left_count = hashmap__size(left->pctx->ids);
1199 perf_tool_event__for_each_event(i) {
1200 if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1201 left_count--;
1202 }
1203
1204 right_count = hashmap__size(right->pctx->ids);
1205 perf_tool_event__for_each_event(i) {
1206 if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1207 right_count--;
1208 }
1209
1210 return right_count - left_count;
1211 }
1212
1213 struct metricgroup__add_metric_data {
1214 struct list_head *list;
1215 const char *metric_name;
1216 const char *modifier;
1217 const char *user_requested_cpu_list;
1218 bool metric_no_group;
1219 bool system_wide;
1220 bool has_match;
1221 };
1222
1223 static int metricgroup__add_metric_callback(const struct pmu_event *pe,
1224 const struct pmu_events_table *table,
1225 void *vdata)
1226 {
1227 struct metricgroup__add_metric_data *data = vdata;
1228 int ret = 0;
1229
1230 if (pe->metric_expr &&
1231 (match_metric(pe->metric_group, data->metric_name) ||
1232 match_metric(pe->metric_name, data->metric_name))) {
1233
1234 data->has_match = true;
1235 ret = add_metric(data->list, pe, data->modifier, data->metric_no_group,
1236 data->user_requested_cpu_list, data->system_wide,
1237 /*root_metric=*/NULL, /*visited_metrics=*/NULL, table);
1238 }
1239 return ret;
1240 }
1241
1242 /**
1243 * metricgroup__add_metric - Find and add a metric, or a metric group.
1244 * @metric_name: The name of the metric or metric group. For example, "IPC"
1245 * could be the name of a metric and "TopDownL1" the name of a
1246 * metric group.
1247 * @modifier: if non-null event modifiers like "u".
1248 * @metric_no_group: Should events written to events be grouped "{}" or
1249 * global. Grouping is the default but due to multiplexing the
1250 * user may override.
1251 * @user_requested_cpu_list: Command line specified CPUs to record on.
1252 * @system_wide: Are events for all processes recorded.
1253 * @metric_list: The list that the metric or metric group are added to.
1254 * @table: The table that is searched for metrics, most commonly the table for the
1255 * architecture perf is running upon.
1256 */
1257 static int metricgroup__add_metric(const char *metric_name, const char *modifier,
1258 bool metric_no_group,
1259 const char *user_requested_cpu_list,
1260 bool system_wide,
1261 struct list_head *metric_list,
1262 const struct pmu_events_table *table)
1263 {
1264 LIST_HEAD(list);
1265 int ret;
1266 bool has_match = false;
1267
1268 {
1269 struct metricgroup__add_metric_data data = {
1270 .list = &list,
1271 .metric_name = metric_name,
1272 .modifier = modifier,
1273 .metric_no_group = metric_no_group,
1274 .user_requested_cpu_list = user_requested_cpu_list,
1275 .system_wide = system_wide,
1276 .has_match = false,
1277 };
1278 /*
1279 * Iterate over all metrics, seeing if the metric matches either
1280 * the name or group. When it does, add the metric to the list.
1281 */
1282 ret = pmu_events_table_for_each_event(table, metricgroup__add_metric_callback,
1283 &data);
1284 if (ret)
1285 goto out;
1286
1287 has_match = data.has_match;
1288 }
1289 {
1290 struct metricgroup_iter_data data = {
1291 .fn = metricgroup__add_metric_sys_event_iter,
1292 .data = (void *) &(struct metricgroup_add_iter_data) {
1293 .metric_list = &list,
1294 .metric_name = metric_name,
1295 .modifier = modifier,
1296 .metric_no_group = metric_no_group,
1297 .user_requested_cpu_list = user_requested_cpu_list,
1298 .system_wide = system_wide,
1299 .has_match = &has_match,
1300 .ret = &ret,
1301 .table = table,
1302 },
1303 };
1304
1305 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
1306 }
1307 /* End of pmu events. */
1308 if (!has_match)
1309 ret = -EINVAL;
1310
1311 out:
1312 /*
1313 * Add to metric_list so that the metrics can be released
1314 * even if adding failed.
1315 */
1316 list_splice(&list, metric_list);
1317 return ret;
1318 }
1319
1320 /**
1321 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1322 * specified in a list.
1323 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1324 * would match the IPC and CPI metrics, and TopDownL1 would match all
1325 * the metrics in the TopDownL1 group.
1326 * @metric_no_group: Should events written to events be grouped "{}" or
1327 * global. Grouping is the default but due to multiplexing the
1328 * user may override.
1329 * @user_requested_cpu_list: Command line specified CPUs to record on.
1330 * @system_wide: Are events for all processes recorded.
1331 * @metric_list: The list that metrics are added to.
1332 * @table: The table that is searched for metrics, most commonly the table for the
1333 * architecture perf is running upon.
1334 */
1335 static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1336 const char *user_requested_cpu_list,
1337 bool system_wide, struct list_head *metric_list,
1338 const struct pmu_events_table *table)
1339 {
1340 char *list_itr, *list_copy, *metric_name, *modifier;
1341 int ret, count = 0;
1342
1343 list_copy = strdup(list);
1344 if (!list_copy)
1345 return -ENOMEM;
1346 list_itr = list_copy;
1347
1348 while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1349 modifier = strchr(metric_name, ':');
1350 if (modifier)
1351 *modifier++ = '\0';
1352
1353 ret = metricgroup__add_metric(metric_name, modifier,
1354 metric_no_group, user_requested_cpu_list,
1355 system_wide, metric_list, table);
1356 if (ret == -EINVAL)
1357 pr_err("Cannot find metric or group `%s'\n", metric_name);
1358
1359 if (ret)
1360 break;
1361
1362 count++;
1363 }
1364 free(list_copy);
1365
1366 if (!ret) {
1367 /*
1368 * Warn about nmi_watchdog if any parsed metrics had the
1369 * NO_NMI_WATCHDOG constraint.
1370 */
1371 metricgroup___watchdog_constraint_hint(NULL, true);
1372 /* No metrics. */
1373 if (count == 0)
1374 return -EINVAL;
1375 }
1376 return ret;
1377 }
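/*
 * Illustrative usage: "perf stat -M IPC,TopDownL1" reaches this function with
 * list = "IPC,TopDownL1"; each comma-separated element may also carry a
 * modifier, e.g. "IPC:u", which is split off above and applied to the events.
 */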
1378
1379 static void metricgroup__free_metrics(struct list_head *metric_list)
1380 {
1381 struct metric *m, *tmp;
1382
1383 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1384 list_del_init(&m->nd);
1385 metric__free(m);
1386 }
1387 }
1388
1389 /**
1390 * find_tool_events - Search for the presence of tool events in metric_list.
1391 * @metric_list: List to take metrics from.
1392 * @tool_events: Array of false values, indices corresponding to tool events set
1393 * to true if tool event is found.
1394 */
1395 static void find_tool_events(const struct list_head *metric_list,
1396 bool tool_events[PERF_TOOL_MAX])
1397 {
1398 struct metric *m;
1399
1400 list_for_each_entry(m, metric_list, nd) {
1401 int i;
1402
1403 perf_tool_event__for_each_event(i) {
1404 struct expr_id_data *data;
1405
1406 if (!tool_events[i] &&
1407 !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1408 tool_events[i] = true;
1409 }
1410 }
1411 }
1412
1413 /**
1414 * build_combined_expr_ctx - Make an expr_parse_ctx with all has_constraint
1415 * metric IDs. As the IDs are held in a set,
1416 * duplicates will be removed.
1417 * @metric_list: List to take metrics from.
1418 * @combined: Out argument for result.
1419 */
1420 static int build_combined_expr_ctx(const struct list_head *metric_list,
1421 struct expr_parse_ctx **combined)
1422 {
1423 struct hashmap_entry *cur;
1424 size_t bkt;
1425 struct metric *m;
1426 char *dup;
1427 int ret;
1428
1429 *combined = expr__ctx_new();
1430 if (!*combined)
1431 return -ENOMEM;
1432
1433 list_for_each_entry(m, metric_list, nd) {
1434 if (m->has_constraint && !m->modifier) {
1435 hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1436 dup = strdup(cur->key);
1437 if (!dup) {
1438 ret = -ENOMEM;
1439 goto err_out;
1440 }
1441 ret = expr__add_id(*combined, dup);
1442 if (ret)
1443 goto err_out;
1444 }
1445 }
1446 }
1447 return 0;
1448 err_out:
1449 expr__ctx_free(*combined);
1450 *combined = NULL;
1451 return ret;
1452 }
1453
1454 /**
1455 * parse_ids - Build the event string for the ids and parse them creating an
1456 * evlist. The encoded metric_ids are decoded.
1457 * @metric_no_merge: is metric sharing explicitly disabled.
1458 * @fake_pmu: used when testing metrics not supported by the current CPU.
1459 * @ids: the event identifiers parsed from a metric.
1460 * @modifier: any modifiers added to the events.
1461 * @has_constraint: false if events should be placed in a weak group.
1462 * @tool_events: entries set true if the tool event of index could be present in
1463 * the overall list of metrics.
1464 * @out_evlist: the created list of events.
1465 */
1466 static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1467 struct expr_parse_ctx *ids, const char *modifier,
1468 bool has_constraint, const bool tool_events[PERF_TOOL_MAX],
1469 struct evlist **out_evlist)
1470 {
1471 struct parse_events_error parse_error;
1472 struct evlist *parsed_evlist;
1473 struct strbuf events = STRBUF_INIT;
1474 int ret;
1475
1476 *out_evlist = NULL;
1477 if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1478 bool added_event = false;
1479 int i;
1480 /*
1481 * We may fail to share events between metrics because a tool
1482 * event isn't present in one metric. For example, a ratio of
1483 * cache misses doesn't need duration_time but the same events
1484 * may be used for a misses per second. Not sharing events
1485 * implies multiplexing, which is best avoided, so place
1486 * all tool events in every group.
1487 *
1488 * Also, there may be no ids/events in the expression parsing
1489 * context because of constant evaluation, e.g.:
1490 * event1 if #smt_on else 0
1491 * Add a tool event to avoid a parse error on an empty string.
1492 */
1493 perf_tool_event__for_each_event(i) {
1494 if (tool_events[i]) {
1495 char *tmp = strdup(perf_tool_event__to_str(i));
1496
1497 if (!tmp)
1498 return -ENOMEM;
1499 ids__insert(ids->ids, tmp);
1500 added_event = true;
1501 }
1502 }
1503 if (!added_event && hashmap__size(ids->ids) == 0) {
1504 char *tmp = strdup("duration_time");
1505
1506 if (!tmp)
1507 return -ENOMEM;
1508 ids__insert(ids->ids, tmp);
1509 }
1510 }
1511 ret = metricgroup__build_event_string(&events, ids, modifier,
1512 has_constraint);
1513 if (ret)
1514 return ret;
1515
1516 parsed_evlist = evlist__new();
1517 if (!parsed_evlist) {
1518 ret = -ENOMEM;
1519 goto err_out;
1520 }
1521 pr_debug("Parsing metric events '%s'\n", events.buf);
1522 parse_events_error__init(&parse_error);
1523 ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu);
1524 if (ret) {
1525 parse_events_error__print(&parse_error, events.buf);
1526 goto err_out;
1527 }
1528 ret = decode_all_metric_ids(parsed_evlist, modifier);
1529 if (ret)
1530 goto err_out;
1531
1532 *out_evlist = parsed_evlist;
1533 parsed_evlist = NULL;
1534 err_out:
1535 parse_events_error__exit(&parse_error);
1536 evlist__delete(parsed_evlist);
1537 strbuf_release(&events);
1538 return ret;
1539 }
1540
1541 static int parse_groups(struct evlist *perf_evlist, const char *str,
1542 bool metric_no_group,
1543 bool metric_no_merge,
1544 const char *user_requested_cpu_list,
1545 bool system_wide,
1546 struct perf_pmu *fake_pmu,
1547 struct rblist *metric_events_list,
1548 const struct pmu_events_table *table)
1549 {
1550 struct evlist *combined_evlist = NULL;
1551 LIST_HEAD(metric_list);
1552 struct metric *m;
1553 bool tool_events[PERF_TOOL_MAX] = {false};
1554 int ret;
1555
1556 if (metric_events_list->nr_entries == 0)
1557 metricgroup__rblist_init(metric_events_list);
1558 ret = metricgroup__add_metric_list(str, metric_no_group,
1559 user_requested_cpu_list,
1560 system_wide, &metric_list, table);
1561 if (ret)
1562 goto out;
1563
1564 /* Sort metrics from largest to smallest. */
1565 list_sort(NULL, &metric_list, metric_list_cmp);
1566
1567 if (!metric_no_merge) {
1568 struct expr_parse_ctx *combined = NULL;
1569
1570 find_tool_events(&metric_list, tool_events);
1571
1572 ret = build_combined_expr_ctx(&metric_list, &combined);
1573
1574 if (!ret && combined && hashmap__size(combined->ids)) {
1575 ret = parse_ids(metric_no_merge, fake_pmu, combined,
1576 /*modifier=*/NULL,
1577 /*has_constraint=*/true,
1578 tool_events,
1579 &combined_evlist);
1580 }
1581 if (combined)
1582 expr__ctx_free(combined);
1583
1584 if (ret)
1585 goto out;
1586 }
1587
1588 list_for_each_entry(m, &metric_list, nd) {
1589 struct metric_event *me;
1590 struct evsel **metric_events;
1591 struct evlist *metric_evlist = NULL;
1592 struct metric *n;
1593 struct metric_expr *expr;
1594
1595 if (combined_evlist && m->has_constraint) {
1596 metric_evlist = combined_evlist;
1597 } else if (!metric_no_merge) {
1598 /*
1599 * See if the IDs for this metric are a subset of an
1600 * earlier metric.
1601 */
1602 list_for_each_entry(n, &metric_list, nd) {
1603 if (m == n)
1604 break;
1605
1606 if (n->evlist == NULL)
1607 continue;
1608
1609 if ((!m->modifier && n->modifier) ||
1610 (m->modifier && !n->modifier) ||
1611 (m->modifier && n->modifier &&
1612 strcmp(m->modifier, n->modifier)))
1613 continue;
1614
1615 if (expr__subset_of_ids(n->pctx, m->pctx)) {
1616 pr_debug("Events in '%s' fully contained within '%s'\n",
1617 m->metric_name, n->metric_name);
1618 metric_evlist = n->evlist;
1619 break;
1620 }
1621
1622 }
1623 }
1624 if (!metric_evlist) {
1625 ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1626 m->has_constraint, tool_events, &m->evlist);
1627 if (ret)
1628 goto out;
1629
1630 metric_evlist = m->evlist;
1631 }
1632 ret = setup_metric_events(m->pctx->ids, metric_evlist, &metric_events);
1633 if (ret) {
1634 pr_debug("Cannot resolve IDs for %s: %s\n",
1635 m->metric_name, m->metric_expr);
1636 goto out;
1637 }
1638
1639 me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1640
1641 expr = malloc(sizeof(struct metric_expr));
1642 if (!expr) {
1643 ret = -ENOMEM;
1644 free(metric_events);
1645 goto out;
1646 }
1647
1648 expr->metric_refs = m->metric_refs;
1649 m->metric_refs = NULL;
1650 expr->metric_expr = m->metric_expr;
1651 if (m->modifier) {
1652 char *tmp;
1653
1654 if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1655 expr->metric_name = NULL;
1656 else
1657 expr->metric_name = tmp;
1658 } else
1659 expr->metric_name = strdup(m->metric_name);
1660
1661 if (!expr->metric_name) {
1662 ret = -ENOMEM;
1663 free(metric_events);
1664 goto out;
1665 }
1666 expr->metric_unit = m->metric_unit;
1667 expr->metric_events = metric_events;
1668 expr->runtime = m->pctx->sctx.runtime;
1669 list_add(&expr->nd, &me->head);
1670 }
1671
1672
1673 if (combined_evlist) {
1674 evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1675 evlist__delete(combined_evlist);
1676 }
1677
1678 list_for_each_entry(m, &metric_list, nd) {
1679 if (m->evlist)
1680 evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1681 }
1682
1683 out:
1684 metricgroup__free_metrics(&metric_list);
1685 return ret;
1686 }
1687
1688 int metricgroup__parse_groups(struct evlist *perf_evlist,
1689 const char *str,
1690 bool metric_no_group,
1691 bool metric_no_merge,
1692 const char *user_requested_cpu_list,
1693 bool system_wide,
1694 struct rblist *metric_events)
1695 {
1696 const struct pmu_events_table *table = pmu_events_table__find();
1697
1698 if (!table)
1699 return -EINVAL;
1700
1701 return parse_groups(perf_evlist, str, metric_no_group, metric_no_merge,
1702 user_requested_cpu_list, system_wide,
1703 /*fake_pmu=*/NULL, metric_events, table);
1704 }
1705
1706 int metricgroup__parse_groups_test(struct evlist *evlist,
1707 const struct pmu_events_table *table,
1708 const char *str,
1709 bool metric_no_group,
1710 bool metric_no_merge,
1711 struct rblist *metric_events)
1712 {
1713 return parse_groups(evlist, str, metric_no_group, metric_no_merge,
1714 /*user_requested_cpu_list=*/NULL,
1715 /*system_wide=*/false,
1716 &perf_pmu__fake, metric_events, table);
1717 }
1718
1719 static int metricgroup__has_metric_callback(const struct pmu_event *pe,
1720 const struct pmu_events_table *table __maybe_unused,
1721 void *vdata)
1722 {
1723 const char *metric = vdata;
1724
1725 if (!pe->metric_expr)
1726 return 0;
1727
1728 if (match_metric(pe->metric_name, metric))
1729 return 1;
1730
1731 return 0;
1732 }
1733
1734 bool metricgroup__has_metric(const char *metric)
1735 {
1736 const struct pmu_events_table *table = pmu_events_table__find();
1737
1738 if (!table)
1739 return false;
1740
1741 return pmu_events_table_for_each_event(table, metricgroup__has_metric_callback,
1742 (void *)metric) ? true : false;
1743 }
1744
1745 int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1746 struct rblist *new_metric_events,
1747 struct rblist *old_metric_events)
1748 {
1749 unsigned int i;
1750
1751 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1752 struct rb_node *nd;
1753 struct metric_event *old_me, *new_me;
1754 struct metric_expr *old_expr, *new_expr;
1755 struct evsel *evsel;
1756 size_t alloc_size;
1757 int idx, nr;
1758
1759 nd = rblist__entry(old_metric_events, i);
1760 old_me = container_of(nd, struct metric_event, nd);
1761
1762 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1763 if (!evsel)
1764 return -EINVAL;
1765 new_me = metricgroup__lookup(new_metric_events, evsel, true);
1766 if (!new_me)
1767 return -ENOMEM;
1768
1769 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1770 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1771
1772 list_for_each_entry(old_expr, &old_me->head, nd) {
1773 new_expr = malloc(sizeof(*new_expr));
1774 if (!new_expr)
1775 return -ENOMEM;
1776
1777 new_expr->metric_expr = old_expr->metric_expr;
1778 new_expr->metric_name = strdup(old_expr->metric_name);
1779 if (!new_expr->metric_name)
1780 return -ENOMEM;
1781
1782 new_expr->metric_unit = old_expr->metric_unit;
1783 new_expr->runtime = old_expr->runtime;
1784
1785 if (old_expr->metric_refs) {
1786 /* calculate number of metric_refs */
1787 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1788 continue;
1789 alloc_size = sizeof(*new_expr->metric_refs);
1790 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1791 if (!new_expr->metric_refs) {
1792 free(new_expr);
1793 return -ENOMEM;
1794 }
1795
1796 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1797 nr * alloc_size);
1798 } else {
1799 new_expr->metric_refs = NULL;
1800 }
1801
1802 /* calculate number of metric_events */
1803 for (nr = 0; old_expr->metric_events[nr]; nr++)
1804 continue;
1805 alloc_size = sizeof(*new_expr->metric_events);
1806 new_expr->metric_events = calloc(nr + 1, alloc_size);
1807 if (!new_expr->metric_events) {
1808 free(new_expr->metric_refs);
1809 free(new_expr);
1810 return -ENOMEM;
1811 }
1812
1813 /* copy evsel in the same position */
1814 for (idx = 0; idx < nr; idx++) {
1815 evsel = old_expr->metric_events[idx];
1816 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1817 if (evsel == NULL) {
1818 free(new_expr->metric_events);
1819 free(new_expr->metric_refs);
1820 free(new_expr);
1821 return -EINVAL;
1822 }
1823 new_expr->metric_events[idx] = evsel;
1824 }
1825
1826 list_add(&new_expr->nd, &new_me->head);
1827 }
1828 }
1829 return 0;
1830 }
1831