1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Copyright IBM Corp. 2001, 2019
4 * Author(s): Robert Burroughs
5 * Eric Rossman (edrossma@us.ibm.com)
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 */
13
14 #ifndef _ZCRYPT_API_H_
15 #define _ZCRYPT_API_H_
16
17 #include <linux/atomic.h>
18 #include <asm/debug.h>
19 #include <asm/zcrypt.h>
20 #include "ap_bus.h"
21
22 /**
23 * Supported device types
24 */
25 #define ZCRYPT_CEX2C 5
26 #define ZCRYPT_CEX2A 6
27 #define ZCRYPT_CEX3C 7
28 #define ZCRYPT_CEX3A 8
29 #define ZCRYPT_CEX4 10
30 #define ZCRYPT_CEX5 11
31 #define ZCRYPT_CEX6 12
32 #define ZCRYPT_CEX7 13
33
34 /**
35 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
36 * and stored in a page. Be careful when increasing this buffer due to size
37 * limitations for AP requests.
38 */
39 #define ZCRYPT_RNG_BUFFER_SIZE 4096
40
41 /*
42 * Identifier for Crypto Request Performance Index
43 */
44 enum crypto_ops {
45 MEX_1K,
46 MEX_2K,
47 MEX_4K,
48 CRT_1K,
49 CRT_2K,
50 CRT_4K,
51 HWRNG,
52 SECKEY,
53 NUM_OPS
54 };
55
56 struct zcrypt_queue;
57
/*
 * struct to hold tracking information for a userspace request/response:
 * retry bookkeeping kept while a request may be resubmitted (see the
 * TRACK_AGAIN_* defines below).
 */
struct zcrypt_track {
	int again_counter;		/* retry attempts counter */
	int last_qid;			/* last qid used */
	int last_rc;			/* last return code */
#ifdef CONFIG_ZCRYPT_DEBUG
	struct ap_fi fi;		/* failure injection cmd */
#endif
};
67
68 /* defines related to message tracking */
69 #define TRACK_AGAIN_MAX 10
70 #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000
71 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
72
/*
 * Per message type operations vector.  An instance is registered via
 * zcrypt_msgtype_register() and looked up by name and variant through
 * zcrypt_msgtype().  The callbacks build the request message and send
 * it over the given zcrypt queue.
 */
struct zcrypt_ops {
	/* Build and send an RSA mod-expo request. */
	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
			    struct ap_message *);
	/* Build and send an RSA mod-expo request in CRT format. */
	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
				struct ica_rsa_modexpo_crt *,
				struct ap_message *);
	/*
	 * Build and send a CPRB request; @userspace tells whether the
	 * xcRB payload pointers are user space or kernel pointers.
	 */
	long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
			  struct ap_message *);
	/* Same as send_cprb, for an EP11 CPRB request. */
	long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
			       struct ap_message *);
	/* Fetch random bytes from the card (see HWRNG in enum crypto_ops). */
	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
	struct list_head list;		/* zcrypt ops list. */
	struct module *owner;		/* module providing these ops */
	int variant;			/* variant, matched by zcrypt_msgtype() */
	char name[128];			/* message type name, lookup key */
};
89
/*
 * zcrypt view of an AP card device.  Reference counted via refcount
 * (zcrypt_card_get()/zcrypt_card_put()); owns the list of zcrypt
 * queues belonging to this card.
 */
struct zcrypt_card {
	struct list_head list;		/* Device list (zcrypt_card_list). */
	struct list_head zqueues;	/* List of zcrypt queues */
	struct kref refcount;		/* device refcounting */
	struct ap_card *card;		/* The "real" ap card device. */
	int online;			/* User online/offline */

	int user_space_type;		/* User space device id (ZCRYPT_CEX*). */
	char *type_string;		/* User space device name. */
	int min_mod_size;		/* Min number of bits. */
	int max_mod_size;		/* Max number of bits. */
	int max_exp_bit_length;		/* Max exponent bit length. */
	const int *speed_rating;	/* Speed idx of crypto ops, indexed by enum crypto_ops. */
	atomic_t load;			/* Utilization of the crypto device */

	int request_count;		/* # current requests. */
};
107
/*
 * zcrypt view of an AP queue device.  Linked into its owning
 * zcrypt_card's zqueues list; reference counted via refcount
 * (zcrypt_queue_get()/zcrypt_queue_put()).
 */
struct zcrypt_queue {
	struct list_head list;		/* Device list (zcrypt_card->zqueues). */
	struct kref refcount;		/* device refcounting */
	struct zcrypt_card *zcard;	/* Card this queue belongs to. */
	struct zcrypt_ops *ops;		/* Crypto operations. */
	struct ap_queue *queue;		/* The "real" ap queue device. */
	int online;			/* User online/offline */

	atomic_t load;			/* Utilization of the crypto device */

	int request_count;		/* # current requests. */

	struct ap_message reply;	/* Per-device reply structure. */
};
122
123 /* transport layer rescanning */
124 extern atomic_t zcrypt_rescan_req;
125
126 extern spinlock_t zcrypt_list_lock;
127 extern struct list_head zcrypt_card_list;
128
/*
 * Iterate over all registered zcrypt cards.  The card list is guarded
 * by zcrypt_list_lock; callers presumably hold it - verify at call sites.
 */
#define for_each_zcrypt_card(_zc) \
	list_for_each_entry(_zc, &zcrypt_card_list, list)

/* Iterate over all zcrypt queues belonging to the card _zc. */
#define for_each_zcrypt_queue(_zq, _zc) \
	list_for_each_entry(_zq, &(_zc)->zqueues, list)
134
135 struct zcrypt_card *zcrypt_card_alloc(void);
136 void zcrypt_card_free(struct zcrypt_card *);
137 void zcrypt_card_get(struct zcrypt_card *);
138 int zcrypt_card_put(struct zcrypt_card *);
139 int zcrypt_card_register(struct zcrypt_card *);
140 void zcrypt_card_unregister(struct zcrypt_card *);
141
142 struct zcrypt_queue *zcrypt_queue_alloc(size_t);
143 void zcrypt_queue_free(struct zcrypt_queue *);
144 void zcrypt_queue_get(struct zcrypt_queue *);
145 int zcrypt_queue_put(struct zcrypt_queue *);
146 int zcrypt_queue_register(struct zcrypt_queue *);
147 void zcrypt_queue_unregister(struct zcrypt_queue *);
148 bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online);
149
150 int zcrypt_rng_device_add(void);
151 void zcrypt_rng_device_remove(void);
152
153 void zcrypt_msgtype_register(struct zcrypt_ops *);
154 void zcrypt_msgtype_unregister(struct zcrypt_ops *);
155 struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
156 int zcrypt_api_init(void);
157 void zcrypt_api_exit(void);
158 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
159 long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
160 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
161 int zcrypt_device_status_ext(int card, int queue,
162 struct zcrypt_device_status_ext *devstatus);
163
164 int zcrypt_wait_api_operational(void);
165
/*
 * Copy a request buffer from the caller into kernel memory.
 *
 * @userspace true means @from is a real user space pointer and the
 * copy goes through copy_from_user(); the return value is the number
 * of bytes that could not be copied.  @userspace false means the
 * caller is in-kernel, @from actually points to kernel memory and a
 * plain memcpy() is done, which cannot fail, hence the 0 return.
 */
static inline unsigned long z_copy_from_user(bool userspace,
					     void *to,
					     const void __user *from,
					     unsigned long n)
{
	if (unlikely(!userspace)) {
		memcpy(to, (void __force *)from, n);
		return 0;
	}
	return copy_from_user(to, from, n);
}
176
/*
 * Copy a reply buffer from kernel memory back to the caller.
 *
 * @userspace true means @to is a real user space pointer and the copy
 * goes through copy_to_user(); the return value is the number of
 * bytes that could not be copied.  @userspace false means the caller
 * is in-kernel, @to actually points to kernel memory and a plain
 * memcpy() is done, which cannot fail, hence the 0 return.
 */
static inline unsigned long z_copy_to_user(bool userspace,
					   void __user *to,
					   const void *from,
					   unsigned long n)
{
	if (unlikely(!userspace)) {
		memcpy((void __force *)to, from, n);
		return 0;
	}
	return copy_to_user(to, from, n);
}
187
188 #endif /* _ZCRYPT_API_H_ */
189