// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Extension
 * Facility
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT  "pai_ext"
#define pr_fmt(fmt)     KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>

#include <asm/cpu_mcf.h>
#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>

#define PAIE1_CB_SZ             0x200   /* Size of PAIE1 control block */
#define PAIE1_CTRBLOCK_SZ       0x400   /* Size of PAIE1 counter blocks */

static debug_info_t *paiext_dbg;
static unsigned int paiext_cnt; /* Extracted with QPACI instruction */
enum paiext_mode {
        PAI_MODE_NONE,
        PAI_MODE_SAMPLING,
        PAI_MODE_COUNTER,
};

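/* Layout of one entry in the raw-data sample buffer: a counter number
 * followed by its 8-byte value. Only nonzero counters are recorded;
 * see paiext_copy() below.
 */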
struct pai_userdata {
        u16 num;
        u64 value;
} __packed;

/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {              /* PAI extension 1 control block */
        u64 header;             /* Not used */
        u64 reserved1;
        u64 acc;                /* Addr to analytics counter control block */
        u8 reserved2[488];
} __packed;
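/* Note (informational): kzalloc() of a 512 byte block returns naturally
 * aligned, that is 512 byte aligned, memory for power-of-two sizes,
 * which satisfies the alignment requirement stated above. See also the
 * reference to commit 59bb47985c1d in paiext_alloc() below.
 */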

struct paiext_map {
        unsigned long *area;            /* Area for CPU to store counters */
        struct pai_userdata *save;      /* Area to store non-zero counters */
        enum paiext_mode mode;          /* Type of event */
        unsigned int active_events;     /* # of PAI Extension users */
        unsigned int refcnt;
        struct perf_event *event;       /* Perf event for sampling */
        struct paiext_cb *paiext_cb;    /* PAI extension control block area */
};

struct paiext_mapptr {
        struct paiext_map *mapptr;
};

static struct paiext_root {             /* Anchor to per CPU data */
        int refcnt;                     /* Overall active events */
        struct paiext_mapptr __percpu *mapptr;
} paiext_root;

/* Free per CPU data when the last event is removed. */
static void paiext_root_free(void)
{
        if (!--paiext_root.refcnt) {
                free_percpu(paiext_root.mapptr);
                paiext_root.mapptr = NULL;
        }
}

/* On initialization of the first event also allocate per CPU data
 * dynamically. Start with an array of pointers, the array size is the
 * maximum number of CPUs possible, which might be larger than the
 * number of CPUs currently online.
 */
static int paiext_root_alloc(void)
{
        if (++paiext_root.refcnt == 1) {
                /* The memory is already zeroed. */
                paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
                if (!paiext_root.mapptr) {
                        /* Returning without refcnt adjustment is ok. The
                         * error code is handled by paiext_alloc() which
                         * decrements refcnt when an event can not be
                         * created.
                         */
                        return -ENOMEM;
                }
        }
        return 0;
}

/* Protects against concurrent increments of the sampler and counter
 * reference counts and prohibits concurrent execution of counting and
 * sampling events.
 * Ensures that the analytics counter block is deallocated only when
 * the number of sampling and counting events on that CPU is zero.
 * For details see paiext_alloc().
 */
static DEFINE_MUTEX(paiext_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup */
static void paiext_free(struct paiext_mapptr *mp)
{
        kfree(mp->mapptr->area);
        kfree(mp->mapptr->paiext_cb);
        kvfree(mp->mapptr->save);
        kfree(mp->mapptr);
        mp->mapptr = NULL;
}

/* Release the PMU if event is the last perf event */
static void paiext_event_destroy(struct perf_event *event)
{
        struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
        struct paiext_map *cpump = mp->mapptr;

        mutex_lock(&paiext_reserve_mutex);
        cpump->event = NULL;
        if (!--cpump->refcnt)           /* Last reference gone */
                paiext_free(mp);
        paiext_root_free();
        mutex_unlock(&paiext_reserve_mutex);
        debug_sprintf_event(paiext_dbg, 4, "%s cpu %d mapptr %p\n", __func__,
                            event->cpu, mp->mapptr);
}

/* Used to avoid races in checking concurrent access of counting and
 * sampling for pai_extension events.
 *
 * Only one instance of event pai_ext/NNPA_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
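/* Admission rules enforced in paiext_alloc() below:
 *
 *   current mode        new counting event    new sampling event
 *   PAI_MODE_NONE       allowed               allowed
 *   PAI_MODE_COUNTER    allowed               -EBUSY
 *   PAI_MODE_SAMPLING   -EBUSY                -EBUSY
 */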
static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
{
        struct paiext_mapptr *mp;
        struct paiext_map *cpump;
        int rc;

        mutex_lock(&paiext_reserve_mutex);

        rc = paiext_root_alloc();
        if (rc)
                goto unlock;

        mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
        cpump = mp->mapptr;
        if (!cpump) {                   /* Paiext_map allocated? */
                rc = -ENOMEM;
                cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
                if (!cpump)
                        goto unlock;

                /* Allocate memory for counter area and counter extraction.
                 * These are
                 * - a 512 byte block and requires 512 byte boundary alignment.
                 * - a 1 KB block and requires 1 KB boundary alignment.
                 * Only the first counting event has to allocate the area.
                 *
                 * Note: This works with commit 59bb47985c1d by default.
                 * Backporting this to kernels without this commit might
                 * need adjustment.
                 */
                mp->mapptr = cpump;
                cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
                cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
                cpump->save = kvmalloc_array(paiext_cnt + 1,
                                             sizeof(struct pai_userdata),
                                             GFP_KERNEL);
                if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
                        paiext_free(mp);
                        goto unlock;
                }
                cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
                                               : PAI_MODE_COUNTER;
        } else {
                /* Multiple invocations, check what is active.
                 * Supported are multiple counter events or only one sampling
                 * event concurrently at any one time.
                 */
                if (cpump->mode == PAI_MODE_SAMPLING ||
                    (cpump->mode == PAI_MODE_COUNTER && a->sample_period)) {
                        rc = -EBUSY;
                        goto unlock;
                }
        }

        rc = 0;
        cpump->event = event;
        ++cpump->refcnt;

unlock:
        if (rc) {
                /* Error in allocation of event, decrement anchor. Since
                 * the event is not created, its destroy() function is never
                 * invoked. Adjust the reference counter for the anchor.
                 */
                paiext_root_free();
        }
        mutex_unlock(&paiext_reserve_mutex);
        /* If rc is non-zero, no increment of counter/sampler was done. */
        return rc;
}

/* The PAI extension 1 control block supports up to 128 entries. Return
 * the index within PAIE1_CB given the event number. Also validate event
 * number.
 */
static int paiext_event_valid(struct perf_event *event)
{
        u64 cfg = event->attr.config;

        if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
                /* Offset NNPA in paiext_cb */
                event->hw.config_base = offsetof(struct paiext_cb, acc);
                return 0;
        }
        return -EINVAL;
}

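/* Example (derived from the numbering scheme described before
 * PMU_FORMAT_ATTR() below): with PAI_NNPA_BASE 0x1800, config 0x1800
 * selects NNPA_ALL and config 0x1801 selects NNPA_ADD; see
 * paiext_ctrnames[] for the full mapping.
 */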
/* Might be called on different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
        struct perf_event_attr *a = &event->attr;
        int rc;

        /* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
        if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
                return -ENOENT;
        /* PAI extension event must be valid and in supported range */
        rc = paiext_event_valid(event);
        if (rc)
                return rc;
        /* Allow only CPU wide operation, no process context for now. */
        if (event->hw.target || event->cpu == -1)
                return -ENOENT;
        /* Allow only event NNPA_ALL for sampling. */
        if (a->sample_period && a->config != PAI_NNPA_BASE)
                return -EINVAL;
        /* Prohibit exclude_user event selection */
        if (a->exclude_user)
                return -EINVAL;

        rc = paiext_alloc(a, event);
        if (rc)
                return rc;
        event->hw.last_tag = 0;
        event->destroy = paiext_event_destroy;

        if (a->sample_period) {
                a->sample_period = 1;
                a->freq = 0;
                /* Register for paiext_sched_task() to be called */
                event->attach_state |= PERF_ATTACH_SCHED_CB;
                /* Add raw data which are the memory mapped counters */
                a->sample_type |= PERF_SAMPLE_RAW;
                /* Turn off inheritance */
                a->inherit = 0;
        }

        return 0;
}

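/* Usage sketch (assumes the perf tool and its standard PMU event
 * syntax): because only CPU wide events are accepted, one event is
 * opened per CPU, for example
 *   perf stat -e pai_ext/NNPA_ALL/ -C 0 -- sleep 1     (counting)
 *   perf record -e pai_ext/NNPA_ALL/ -C 0 -- sleep 1   (sampling)
 * The event names are the sysfs attributes created in attr_event_init().
 */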
static u64 paiext_getctr(struct paiext_map *cpump, int nr)
{
        return cpump->area[nr];
}

/* Read the counter values. Return the value at the event's location in
 * the counter buffer. For event NNPA_ALL sum up all NNPA counters.
 */
static u64 paiext_getdata(struct perf_event *event)
{
        struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
        struct paiext_map *cpump = mp->mapptr;
        u64 sum = 0;
        int i;

        if (event->attr.config != PAI_NNPA_BASE)
                return paiext_getctr(cpump, event->attr.config - PAI_NNPA_BASE);

        for (i = 1; i <= paiext_cnt; i++)
                sum += paiext_getctr(cpump, i);

        return sum;
}

static u64 paiext_getall(struct perf_event *event)
{
        return paiext_getdata(event);
}

static void paiext_read(struct perf_event *event)
{
        u64 prev, new, delta;

        prev = local64_read(&event->hw.prev_count);
        new = paiext_getall(event);
        local64_set(&event->hw.prev_count, new);
        delta = new - prev;
        local64_add(delta, &event->count);
}

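/* Set the initial counter value on the first start of the event, so
 * that subsequent paiext_read() calls report deltas relative to this
 * point. hw.last_tag guards against re-initialization on restart.
 */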
static void paiext_start(struct perf_event *event, int flags)
{
        u64 sum;

        if (event->hw.last_tag)
                return;
        event->hw.last_tag = 1;
        sum = paiext_getall(event);             /* Get current value */
        local64_set(&event->hw.prev_count, sum);
        local64_set(&event->count, 0);
}

static int paiext_add(struct perf_event *event, int flags)
{
        struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
        struct paiext_map *cpump = mp->mapptr;
        struct paiext_cb *pcb = cpump->paiext_cb;

        if (++cpump->active_events == 1) {
                S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
                pcb->acc = virt_to_phys(cpump->area) | 0x1;
                /* Enable CPU instruction lookup for PAIE1 control block */
                __ctl_set_bit(0, 49);
                debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
                                    __func__, S390_lowcore.aicd, pcb->acc);
        }
        if (flags & PERF_EF_START && !event->attr.sample_period) {
                /* Only counting needs initial counter value */
                paiext_start(event, PERF_EF_RELOAD);
        }
        event->hw.state = 0;
        if (event->attr.sample_period) {
                cpump->event = event;
                perf_sched_cb_inc(event->pmu);
        }
        return 0;
}

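/* Update the event count on stop. The counter lookup itself stays
 * enabled until the last active event on this CPU is removed in
 * paiext_del().
 */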
static void paiext_stop(struct perf_event *event, int flags)
{
        paiext_read(event);
        event->hw.state = PERF_HES_STOPPED;
}

static void paiext_del(struct perf_event *event, int flags)
{
        struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
        struct paiext_map *cpump = mp->mapptr;
        struct paiext_cb *pcb = cpump->paiext_cb;

        if (event->attr.sample_period)
                perf_sched_cb_dec(event->pmu);
        if (!event->attr.sample_period) {
                /* Only counting needs to read counter */
                paiext_stop(event, PERF_EF_UPDATE);
        }
        if (--cpump->active_events == 0) {
                /* Disable CPU instruction lookup for PAIE1 control block */
                __ctl_clear_bit(0, 49);
                pcb->acc = 0;
                S390_lowcore.aicd = 0;
                debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
                                    __func__, S390_lowcore.aicd, pcb->acc);
        }
}

/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only nonzero counter entries of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paiext_copy(struct paiext_map *cpump)
{
        struct pai_userdata *userdata = cpump->save;
        int i, outidx = 0;

        for (i = 1; i <= paiext_cnt; i++) {
                u64 val = paiext_getctr(cpump, i);

                if (val) {
                        userdata[outidx].num = i;
                        userdata[outidx].value = val;
                        outidx++;
                }
        }
        return outidx * sizeof(*userdata);
}

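/* Example (informational): if only NNPA_ADD (number 1) and NNPA_MUL
 * (number 3) incremented, the save buffer holds the two packed entries
 *   { .num = 1, .value = <adds> } { .num = 3, .value = <muls> }
 * and the function returns 2 * sizeof(struct pai_userdata) == 20.
 */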
/* Write sample when one or more counter values are nonzero.
 *
 * Note: The functions paiext_sched_task() and paiext_push_sample() are
 * not invoked after function paiext_del() has been called because of
 * function perf_sched_cb_dec().
 * The functions paiext_sched_task() and paiext_push_sample() are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paiext_sched_task() as the
 * callback to run at context switch time (see paiext_add()).
 *
 * This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
 * sched_task() callback. That callback is not active after paiext_del()
 * returns and has deleted the event on that CPU.
 */
static int paiext_push_sample(void)
{
        struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
        struct paiext_map *cpump = mp->mapptr;
        struct perf_event *event = cpump->event;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        size_t rawsize;
        int overflow;

        rawsize = paiext_copy(cpump);
        if (!rawsize)                   /* No incremented counters */
                return 0;

        /* Setup perf sample */
        memset(&regs, 0, sizeof(regs));
        memset(&raw, 0, sizeof(raw));
        memset(&data, 0, sizeof(data));
        perf_sample_data_init(&data, 0, event->hw.last_period);
        if (event->attr.sample_type & PERF_SAMPLE_TID) {
                data.tid_entry.pid = task_tgid_nr(current);
                data.tid_entry.tid = task_pid_nr(current);
        }
        if (event->attr.sample_type & PERF_SAMPLE_TIME)
                data.time = event->clock();
        if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
                data.id = event->id;
        if (event->attr.sample_type & PERF_SAMPLE_CPU)
                data.cpu_entry.cpu = smp_processor_id();
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw.frag.size = rawsize;
                raw.frag.data = cpump->save;
                raw.size = raw.frag.size;
                data.raw = &raw;
                data.sample_flags |= PERF_SAMPLE_RAW;
        }

        overflow = perf_event_overflow(event, &data, &regs);
        perf_event_update_userpage(event);
        /* Clear lowcore area after read */
        memset(cpump->area, 0, PAIE1_CTRBLOCK_SZ);
        return overflow;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        /* We started with a clean page on event installation. So read out
         * results on schedule_out and if page was dirty, clear values.
         */
        if (!sched_in)
                paiext_push_sample();
}

/* Attribute definitions for pai extension1 interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1800 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0x1000 to 0x103e are used for PAI
 * cryptography counters.
 * Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paiext_format_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group paiext_events_group = {
        .name = "events",
        .attrs = NULL,                  /* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
        .name = "format",
        .attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
        &paiext_events_group,
        &paiext_format_group,
        NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
        .task_ctx_nr  = perf_invalid_context,
        .event_init   = paiext_event_init,
        .add          = paiext_add,
        .del          = paiext_del,
        .start        = paiext_start,
        .stop         = paiext_stop,
        .read         = paiext_read,
        .sched_task   = paiext_sched_task,
        .attr_groups  = paiext_attr_groups,
};

/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
        [0] = "NNPA_ALL",
        [1] = "NNPA_ADD",
        [2] = "NNPA_SUB",
        [3] = "NNPA_MUL",
        [4] = "NNPA_DIV",
        [5] = "NNPA_MIN",
        [6] = "NNPA_MAX",
        [7] = "NNPA_LOG",
        [8] = "NNPA_EXP",
        [9] = "NNPA_IBM_RESERVED_9",
        [10] = "NNPA_RELU",
        [11] = "NNPA_TANH",
        [12] = "NNPA_SIGMOID",
        [13] = "NNPA_SOFTMAX",
        [14] = "NNPA_BATCHNORM",
        [15] = "NNPA_MAXPOOL2D",
        [16] = "NNPA_AVGPOOL2D",
        [17] = "NNPA_LSTMACT",
        [18] = "NNPA_GRUACT",
        [19] = "NNPA_CONVOLUTION",
        [20] = "NNPA_MATMUL_OP",
        [21] = "NNPA_MATMUL_OP_BCAST23",
        [22] = "NNPA_SMALLBATCH",
        [23] = "NNPA_LARGEDIM",
        [24] = "NNPA_SMALLTENSOR",
        [25] = "NNPA_1MFRAME",
        [26] = "NNPA_2GFRAME",
        [27] = "NNPA_ACCESSEXCEPT",
};
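/* These names become sysfs event attributes, e.g.
 *   /sys/bus/event_source/devices/pai_ext/events/NNPA_ALL
 * (standard perf event_source layout; path shown for illustration).
 */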

static void __init attr_event_free(struct attribute **attrs, int num)
{
        struct perf_pmu_events_attr *pa;
        struct device_attribute *dap;
        int i;

        for (i = 0; i < num; i++) {
                dap = container_of(attrs[i], struct device_attribute, attr);
                pa = container_of(dap, struct perf_pmu_events_attr, attr);
                kfree(pa);
        }
        kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
        struct perf_pmu_events_attr *pa;

        pa = kzalloc(sizeof(*pa), GFP_KERNEL);
        if (!pa)
                return -ENOMEM;

        sysfs_attr_init(&pa->attr.attr);
        pa->id = PAI_NNPA_BASE + num;
        pa->attr.attr.name = paiext_ctrnames[num];
        pa->attr.attr.mode = 0444;
        pa->attr.show = cpumf_events_sysfs_show;
        pa->attr.store = NULL;
        attrs[num] = &pa->attr.attr;
        return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
        struct attribute **attrs;
        int ret, i;

        attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs),
                              GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;
        for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
                ret = attr_event_init_one(attrs, i);
                if (ret) {
                        attr_event_free(attrs, i);
                        return ret;
                }
        }
        attrs[i] = NULL;
        paiext_events_group.attrs = attrs;
        return 0;
}

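/* Driver setup: test_facility(197) checks for the processor activity
 * instrumentation extension facility; the QPACI instruction then
 * reports the number of NNPA counters available on this machine.
 */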
static int __init paiext_init(void)
{
        struct qpaci_info_block ib;
        int rc = -ENOMEM;

        if (!test_facility(197))
                return 0;

        qpaci(&ib);
        paiext_cnt = ib.num_nnpa;
        if (paiext_cnt >= PAI_NNPA_MAXCTR)
                paiext_cnt = PAI_NNPA_MAXCTR;
        if (!paiext_cnt)
                return 0;

        rc = attr_event_init();
        if (rc) {
                pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
                return rc;
        }

        /* Setup s390dbf facility */
        paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
        if (!paiext_dbg) {
                pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
                rc = -ENOMEM;
                goto out_init;
        }
        debug_register_view(paiext_dbg, &debug_sprintf_view);

        rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
        if (rc) {
                pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
                       "rc=%i\n", rc);
                goto out_pmu;
        }

        return 0;

out_pmu:
        debug_unregister_view(paiext_dbg, &debug_sprintf_view);
        debug_unregister(paiext_dbg);
out_init:
        attr_event_free(paiext_events_group.attrs,
                        ARRAY_SIZE(paiext_ctrnames) + 1);
        return rc;
}

device_initcall(paiext_init);