/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
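
/*
 * FD() yields the perf_event_open() file descriptor and SID() the
 * struct perf_sample_id slot that an evsel keeps for a given
 * (cpu, thread) pair; both live in per-evsel xyarrays indexed the
 * same way.
 */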

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

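/*
 * Typical lifecycle (an illustrative sketch, not code from this file;
 * cpus and threads are assumed to be pre-built maps, e.g. from
 * cpu_map__new() and thread_map__new_str()):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *	if (evlist == NULL ||
 *	    perf_evlist__add_default(evlist) < 0 ||
 *	    perf_evlist__open(evlist, false) < 0 ||
 *	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_error;
 *
 *	... consume events with perf_evlist__mmap_read() ...
 *
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete(evlist);
 */
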
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

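/*
 * Look up the numeric id of a tracepoint by reading
 * <tracing_events_path>/<sys>/<name>/id.  The event name may be given
 * as "sys:name"; the last ':' is rewritten to '/' so that it matches
 * the debugfs events directory layout.  Returns the id, or -1 on
 * failure.
 */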
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}
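
/*
 * Illustrative use (a sketch, the tracepoint names are only examples):
 *
 *	const char *tps[] = { "sched:sched_switch", "sched:sched_wakeup" };
 *
 *	if (perf_evlist__add_tracepoints(evlist, tps, ARRAY_SIZE(tps)) < 0)
 *		handle_error();
 */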

static struct perf_evsel *
	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

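/*
 * Read the event id back from an already open fd.  With PERF_FORMAT_ID
 * set, the read() layout is: value, then time_enabled and time_running
 * if the corresponding format bits are set, then the id; id_idx
 * therefore starts at 1 and is bumped once for each enabled time field.
 */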
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

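/*
 * Map a sample id back to the evsel that produced it, using the hash
 * of struct perf_sample_id entries filled in by perf_evlist__id_add().
 * With a single evsel, or when ids are not recorded at all
 * (!sample_id_all), the first evsel is returned as the best guess.
 */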
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	return NULL;
}

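/*
 * Pull the next event out of the idx-th mmap'ed ring buffer, or return
 * NULL when it is drained.  Callers are expected to keep calling until
 * NULL is returned; in non-overwrite mode the tail pointer is advanced
 * here, which tells the kernel that the consumed space may be reused.
 */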
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
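/*
 * Illustrative consumption loop (a sketch, not code from this file),
 * assuming a single mmap'ed buffer and overwrite == false:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
 *		... process *event ...
 *	}
 *
 * perf_evlist__mmap_read() already advances the tail in non-overwrite
 * mode, so no explicit perf_mmap__write_tail() call is needed there.
 */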
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, const char *target_pid,
			     const char *target_tid, uid_t uid, const char *cpu_list)
{
	evlist->threads = thread_map__new_str(target_pid, target_tid, uid);

	if (evlist->threads == NULL)
		return -1;

	if (uid != UINT_MAX || (cpu_list == NULL && target_tid))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

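/*
 * Apply each evsel's ->filter string (if any) to every open fd via
 * PERF_EVENT_IOC_SET_FILTER.  Illustrative use, with a hypothetical
 * tracepoint evsel and filter expression:
 *
 *	evsel->filter = strdup("common_pid != 0");
 *	err = perf_evlist__set_filters(evlist);
 */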
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

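/*
 * Open a counter for every evsel on every cpu/thread in the evlist's
 * maps.  When group is true, all evsels after the first are opened
 * with the first evsel's fds as their group leaders.  On failure every
 * counter opened so far is closed and errno is set from the error
 * returned by perf_evsel__open().
 */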
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

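/*
 * Fork the workload but keep it "corked": the child readies itself,
 * signals the parent over child_ready_pipe and then blocks on a read
 * of go_pipe until perf_evlist__start_workload() closes the write end
 * (evlist->workload.cork_fd).  Only then does the real execvp() run,
 * so the counters can be set up on the child's pid before it starts.
 */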
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (!opts->system_wide && !opts->target_tid && !opts->target_pid)
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}