// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/*
 * An ETM context for a running event includes the perf aux handle
 * and aux_data. For ETM, the aux_data (etm_event_data) consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler, and if the sink then fails to "begin"
 * another session (e.g. due to lack of space in the buffer), the handle
 * will be cleared. Thus, the event_data may no longer be accessible from
 * the handle when we get to etm_event_stop(), which is required for
 * stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM
 * context to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset - if sink ID is used as a configuration selector */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID - set if a system configuration is selected */
PMU_FORMAT_ATTR(configid, "config2:32-63");
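
/*
 * Illustrative usage (not part of the driver): from user space, a sink and
 * a configuration are normally selected by name on the perf command line;
 * the perf tool turns the name into the hash carried in attr.config2
 * (sinkid in bits 0-31, configid in bits 32-63). The sink and config
 * names below are examples and depend on the platform:
 *
 *   perf record -e cs_etm/@tmc_etr0/u --per-thread -- ls
 *   perf record -e cs_etm/autofdo/u --per-thread -- ls
 */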


/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	/* clear any configuration we were using */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;


	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* check if user wants a coresight configuration selected */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU, clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the ETMs
		 * where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if the sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink is compatible with the last sink */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it; it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are guaranteed to be of the
	 * same type. Thus the same sink configuration can be used by
	 * all of them.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 *
	 * As such we pretend that everything is fine, and let
	 * it continue without actually tracing. The event could
	 * resume tracing when it moves to a CPU from which a
	 * sink is reachable.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/*
	 * Check if the handle is still associated with the event,
	 * to handle the case where the sink failed to start the
	 * trace and has already TRUNCATED the handle.
	 */
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	/*
	 * Check if this ETM was allowed to trace, as decided at
	 * etm_setup_aux(). If it wasn't allowed to trace, then
	 * nothing needs to be torn down other than outputting a
	 * zero sized record.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g. the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		/*
		 * Make sure the handle is still valid as the
		 * sink could have closed it from an IRQ.
		 * The sink driver must handle the race between
		 * update_buffer() and the IRQ. Thus we should
		 * get either a valid handle with a valid size
		 * (which may be 0), or an invalid handle.
		 *
		 * But we should never get a non-zero size with
		 * an invalid handle.
		 */
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size==0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to coexist; they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}
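
/*
 * Illustrative usage (not part of the driver): the address filters
 * validated above and programmed in etm_addr_filters_sync() below come
 * from perf's generic --filter option. The addresses and the binary path
 * are examples only:
 *
 *   perf record -e cs_etm//u --filter 'filter 0x72c/0x40@/bin/ls' -- ls
 *   perf record -e cs_etm//u --filter 'start 0x72c@/bin/ls, stop 0x76c@/bin/ls' -- ls
 *
 * The first form requests an address range, the second a start/stop pair;
 * as checked above, the two kinds cannot be mixed in one event.
 */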

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * If this function is called adding a sink then the hash is used for
	 * sink selection - see function coresight_get_sink_by_id().
	 * If adding a configuration then the hash is used for selection in
	 * cscfg_activate_config()
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}
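
/*
 * Illustrative sketch (not part of the driver): the hash computed above is
 * the value user space reads back from the "sinks" and "events" groups of
 * the cs_etm PMU. The sink name is an example and platform dependent:
 *
 *   $ cat /sys/bus/event_source/devices/cs_etm/sinks/tmc_etr0
 *   0x... (hash of the name)
 *
 * The perf tool puts that value into attr.config2, where it is matched by
 * coresight_get_sink_by_id() or cscfg_activate_config().
 */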

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}

int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}