// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/**
 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 * CPU number.
 */
static int *cpunode_map;

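/*
 * Build a perf_cpu_map from the PERF_CPU_MAP__CPUS form of a cpu_map event:
 * an array of u16 CPU numbers, where (u16) -1 is mapped back to the special
 * value -1 rather than 65535.
 */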
static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(cpus->nr);
	if (map) {
		unsigned i;

		for (i = 0; i < cpus->nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number: we must use (int) -1 to initialize map[i],
			 * otherwise it would become 65535.
			 */
			if (cpus->cpu[i] == (u16) -1)
				map->map[i].cpu = -1;
			else
				map->map[i].cpu = (int) cpus->cpu[i];
		}
	}

	return map;
}

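/*
 * Build a perf_cpu_map from the mask form of a cpu_map event: every bit set
 * in the bitmap becomes one entry in the resulting map.
 */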
static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
{
	struct perf_cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = perf_cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++].cpu = cpu;
	}
	return map;
}

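/*
 * Decode the payload of a synthesized PERF_RECORD_CPU_MAP event into a
 * perf_cpu_map, dispatching on whether the CPUs were encoded as a list of
 * u16 entries or as a bitmap.
 */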
struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
	else
		return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

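/*
 * Allocate a perf_cpu_map with room for @nr entries, each initialized to the
 * "empty" CPU value -1, and a reference count of one.
 */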
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i].cpu = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

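/*
 * Allocate a cpu_aggr_map with room for @nr entries, each initialized to
 * aggr_cpu_id__empty(), and a reference count of one.
 */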
struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

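/* Read an integer topology attribute for @cpu from sysfs, e.g. "core_id". */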
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

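/*
 * qsort() comparator ordering aggr_cpu_ids by node, then socket, die, core
 * and thread, so aggregated IDs come out in increasing topological order.
 */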
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread - b->thread;
}

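/*
 * Build an aggregation map for @cpus: apply @get_id to every CPU, keep only
 * the unique IDs, trim the allocation to the number actually used and sort
 * the result with aggr_cpu_id__cmp().
 */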
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr);

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim. */
	if (c->nr != cpus->nr) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}
	/* ensure we process id in increasing order */
	qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}

int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to the socket, so start with the socket ID and
	 * then add the die to make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__die() returns a struct with socket and die set. */
	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to the socket and die, but we need a global ID,
	 * so combine the result from aggr_cpu_id__die() with the core ID.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core() returns a struct with socket, die and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

/* setup simple routines to easily access node numbers given a cpu number */
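/*
 * Parse a sysfs range-list file such as "0-7" or "0,2-5,7" and return, in
 * @max, the highest listed number plus one (a count rather than an ID).
 */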
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num.cpu);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num.cpu);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}

int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}

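/*
 * Populate cpunode_map by walking /sys/devices/system/node/nodeX/: every
 * cpuY symlink found under a node directory records that CPU Y belongs to
 * NUMA node X. Missing sysfs or node directories are not treated as errors.
 */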
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

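/*
 * Format @map into @buf as a cpulist string, collapsing consecutive CPU
 * numbers into ranges, e.g. "0-3,6,8-11".
 */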
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT_MAX };
		bool last = i == map->nr;

		if (!last)
			cpu = map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i].cpu);
			}
		} else if (((i - start) != (cpu.cpu - map->map[start].cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start].cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start].cpu, map->map[end].cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

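/*
 * Format @map into @buf as a hexadecimal CPU mask, most significant nibble
 * first, with a ',' every 32 CPUs, matching the sysfs cpumask format
 * (e.g. "f,ffffffff").
 */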
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu last_cpu = perf_cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = perf_cpu_map__cpu(map, i).cpu;
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

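/*
 * Return the map of online CPUs, created on first use from
 * /sys/devices/system/cpu/online and cached for the lifetime of the process.
 */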
const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}

bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread == b->thread &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}

struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}