/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "debug.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

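/*
 * Per-event file descriptors and sample ids live in xyarrays indexed by
 * (cpu, thread); FD() and SID() are shorthands for those lookups.
 */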
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

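/*
 * Teardown is split in two: perf_evlist__purge() deletes the evsels on the
 * list, perf_evlist__exit() releases the evlist's own resources, and
 * perf_evlist__delete() combines both and frees the evlist itself.
 */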
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

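/* Add the default event: a hardware CPU cycles counter. */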
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

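/*
 * One pollfd slot is needed per (cpu, thread, evsel) combination, i.e. one
 * per file descriptor that perf_event_open() may hand back for this evlist.
 */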
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

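/*
 * Hash a sample id into evlist->heads so that perf_evlist__id2evsel() can
 * later map an id found in a sample back to the evsel that produced it.
 */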
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

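/*
 * Obtain the event id for a counter by read()ing it: with PERF_FORMAT_ID set
 * the kernel returns { value, [time_enabled], [time_running], id }, so the
 * id's position depends on which other read_format bits are enabled.
 */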
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

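/*
 * Map a sample id back to its evsel. With a single event there is nothing to
 * look up; otherwise walk the hash bucket the id falls into.
 */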
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

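/*
 * Read the next event from the mmap ring buffer for map idx. The first page
 * of the mapping is the control page (head/tail); the data area of size
 * mask + 1 follows it. Returns NULL when the buffer has been drained.
 */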
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

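/*
 * There is one ring buffer per CPU when monitoring CPUs, or one per thread
 * when monitoring per-task (the dummy cpu map has a single -1 entry).
 */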
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

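/*
 * mmap the ring buffer for one fd and register the fd with the pollfd array.
 * The prot flags are chosen by perf_evlist__mmap() below: PROT_WRITE is left
 * out in overwrite mode, since the tail is not written back then.
 */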
static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
			ui__warning("Inherit is not allowed on per-task "
				    "events using mmap.\n");
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

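/*
 * Only the first fd per CPU gets its own mmap; every other event on that CPU
 * is redirected into it with PERF_EVENT_IOC_SET_OUTPUT, so all events share
 * one ring buffer per CPU.
 */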
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, evsel, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

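/*
 * Same scheme as per-cpu above, but with one shared ring buffer per thread,
 * used when monitoring tasks rather than CPUs.
 */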
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, evsel, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

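/*
 * Build the thread and cpu maps for this evlist from the target pid/tid and
 * cpu list. A per-task target with no cpu list gets the dummy cpu map.
 */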
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

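/*
 * Apply each evsel's filter string to every one of its fds via
 * PERF_EVENT_IOC_SET_FILTER; events without a filter are skipped.
 */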
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}