// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_gen2_hw_data.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

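/* Shorthand for the kernel configuration section name used in this file */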
#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

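/*
 * Drop the caller's reference on a crypto instance and on the
 * acceleration device that backs it.
 */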
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

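/*
 * Tear down all crypto instances on a device: release any outstanding
 * references, remove the transport rings and free the instance memory.
 */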
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

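/*
 * Select the least loaded crypto instance, preferring devices local to
 * the given NUMA node. If no started device with crypto instances exists
 * on that node, fall back to any started device. On success the device
 * and instance reference counters are incremented; callers release them
 * with qat_crypto_put_instance().
 */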
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

/**
 * qat_crypto_vf_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates device configuration required to create
 * asym, sym or crypto instances
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_vf_dev_config(struct adf_accel_dev *accel_dev)
{
	u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;

	if (ring_to_svc_map != ADF_GEN2_DEFAULT_RING_TO_SRV_MAP) {
		dev_err(&GET_DEV(accel_dev),
			"Unsupported ring/service mapping present on PF\n");
		return -EFAULT;
	}

	return qat_crypto_dev_config(accel_dev);
}

/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates device configuration required to create crypto instances
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int banks = GET_MAX_BANKS(accel_dev);
	int cpus = num_online_cpus();
	unsigned long val;
	int instances;
	int ret;
	int i;

	if (adf_hw_dev_has_crypto(accel_dev))
		instances = min(cpus, banks);
	else
		instances = 0;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		goto err;

	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
	if (ret)
		goto err;

	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

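		/* Configured ring sizes: 128 for asym, 512 for sym */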
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

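		/*
		 * Ring numbers within the bank are fixed: 0/8 for the asym
		 * TX/RX pair and 2/10 for the sym TX/RX pair.
		 */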
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  key, &val, ADF_DEC);
		if (ret)
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						  key, &val, ADF_DEC);
		if (ret)
			goto err;
	}

	val = i;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return ret;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);

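/*
 * Read the configuration back and create one crypto instance, with its
 * sym and asym TX/RX ring pairs, for each of the ADF_NUM_CY instances.
 */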
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long sym_bank, asym_bank;
	struct qat_crypto_instance *inst;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	ret = adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 0, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &sym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &asym_bank);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_sym);
		if (ret)
			goto err;

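		/* Each ring of the TX/RX pair gets half of the configured size */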
		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			goto err;

		ret = kstrtoul(val, 10, &num_msg_asym);
		if (ret)
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, NULL, 0, &inst->sym_tx);
		if (ret)
			goto err;

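		/* Asym (PKE) requests are half the default firmware request size */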
		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, NULL, 0, &inst->pke_tx);
		if (ret)
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, sym_bank, num_msg_sym,
				      msg_size, key, qat_alg_callback, 0,
				      &inst->sym_rx);
		if (ret)
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		ret = adf_create_ring(accel_dev, SEC, asym_bank, num_msg_asym,
				      msg_size, key, qat_alg_asym_callback, 0,
				      &inst->pke_rx);
		if (ret)
			goto err;

		INIT_LIST_HEAD(&inst->backlog.list);
		spin_lock_init(&inst->backlog.lock);
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return ret;
}

static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

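/* Dispatch device lifecycle events delivered by the ADF service framework */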
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

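/*
 * Register the crypto service with the ADF framework so that
 * qat_crypto_event_handler() receives device lifecycle events.
 */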
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}