/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Processor Activity Instrumentation support for cryptography counters
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#ifndef _ASM_S390_PAI_H
#define _ASM_S390_PAI_H

#include <linux/jump_label.h>
#include <asm/lowcore.h>
#include <asm/ptrace.h>

struct qpaci_info_block {
	u64 header;
	struct {
		u64 : 8;
		u64 num_cc : 8;	/* # of supported crypto counters */
		u64 : 48;
	};
};

/* QPACI: Query Processor Activity Counter Information */
static inline int qpaci(struct qpaci_info_block *info)
{
	/* Size of info (in double words minus one) */
	size_t size = sizeof(*info) / sizeof(u64) - 1;
	int cc;

	asm volatile(
		"	lgr	0,%[size]\n"	/* QPACI takes the size in GPR 0 */
		"	.insn	s,0xb28f0000,%[info]\n"
		"	lgr	%[size],0\n"	/* GPR 0: # of double words stored, minus one */
		"	ipm	%[cc]\n"	/* insert condition code and program mask */
		"	srl	%[cc],28\n"	/* shift condition code into the low bits */
		: [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size)
		:
		: "0", "cc", "memory");
	return cc ? (size + 1) * sizeof(u64) : 0;
}
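
/*
 * Usage sketch (hypothetical caller, not part of this header): query the
 * machine and read the number of supported crypto counters; qpaci()
 * returns the size of the stored information block in bytes, or 0.
 *
 *	struct qpaci_info_block ib = { };
 *
 *	if (qpaci(&ib))
 *		pr_info("PAI: %u crypto counters\n", (unsigned int)ib.num_cc);
 */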

#define PAI_CRYPTO_BASE			0x1000	/* First event number */
#define PAI_CRYPTO_MAXCTR		256	/* Max # of event counters */
#define PAI_CRYPTO_KERNEL_OFFSET	2048
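/*
 * Note: 2048 equals PAI_CRYPTO_MAXCTR * sizeof(u64), presumably the byte
 * size of one full counter set, so OR-ing it into the crypto counter
 * designation below shifts counting into the kernel counter set.
 */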

DECLARE_STATIC_KEY_FALSE(pai_key);
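/*
 * pai_key is presumably enabled by the PAI perf driver (e.g. via
 * static_branch_inc()) while at least one crypto counter event is
 * active, keeping the entry/exit helpers below nearly free otherwise.
 */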

/*
 * On kernel entry from user space, direct crypto counter updates to the
 * kernel counter set.
 */
static __always_inline void pai_kernel_enter(struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return;
	if (!static_branch_unlikely(&pai_key))
		return;
	if (!S390_lowcore.ccd)
		return;
	if (!user_mode(regs))
		return;
	WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET);
}

/*
 * On return to user space, revert to counting into the user counter set.
 */
static __always_inline void pai_kernel_exit(struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return;
	if (!static_branch_unlikely(&pai_key))
		return;
	if (!S390_lowcore.ccd)
		return;
	if (!user_mode(regs))
		return;
	WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET);
}
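
/*
 * Usage sketch (hypothetical entry-code caller; handle_work() is made up
 * for illustration): bracket kernel work done on behalf of an interrupted
 * user context so it is accounted to the kernel counter set.
 *
 *	static void irq_handler(struct pt_regs *regs)
 *	{
 *		pai_kernel_enter(regs);
 *		handle_work();
 *		pai_kernel_exit(regs);
 *	}
 */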

#endif