// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests with a crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200

struct crypto_engine_alg {
	struct crypto_alg base;
	struct crypto_engine_op op;
};

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If the hardware cannot enqueue more requests and the retry
	 * mechanism is not supported, make sure we are completing the
	 * current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_1;
		}
	}

	if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
		alg = container_of(async_req->tfm->__crt_alg,
				   struct crypto_engine_alg, base);
		op = &alg->op;
	} else {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = op->do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the engine supports the retry mechanism and the
		 * hardware queue is full (-ENOSPC), requeue the request
		 * regardless of the backlog flag.
		 * Otherwise, complete the request with the error code.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute the request, enqueue it
		 * back in front of the crypto-engine queue, to preserve the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the request pump work on the
 * kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

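/*
 * Example: a driver's skcipher ->encrypt() callback typically does no
 * work itself and just hands the request off to the engine. This is an
 * illustrative sketch only; "foo_ctx" and its "engine" member are
 * hypothetical driver state, not part of this API:
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 *
 * The return value of crypto_enqueue_request() (usually -EINPROGRESS,
 * or -EBUSY for a backlogged request) is propagated as-is to the caller
 * of the crypto API.
 */
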
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

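/*
 * Example: once the hardware signals completion, the driver hands the
 * result back through the matching crypto_finalize_*_request() helper.
 * A minimal sketch, assuming a hypothetical "foo_dev" structure that
 * remembers the in-flight request; the lockdep_assert_in_softirq() in
 * crypto_finalize_request() expects this to run from BH context (e.g.
 * a tasklet), not from hard IRQ context:
 *
 *	static void foo_done_tasklet(struct tasklet_struct *t)
 *	{
 *		struct foo_dev *foo = from_tasklet(foo, t, done_task);
 *
 *		crypto_finalize_skcipher_request(foo->engine, foo->req,
 *						 foo->err);
 *	}
 */
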
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so that the queued requests can be pumped out.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

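/*
 * Example: a driver whose hardware queue can hold several requests
 * enables the retry mechanism and, optionally, batching. An
 * illustrative sketch with hypothetical names ("foo", FOO_QLEN,
 * foo_do_batch):
 *
 *	foo->engine = crypto_engine_alloc_init_and_set(dev, true,
 *						       foo_do_batch, false,
 *						       FOO_QLEN);
 *	if (!foo->engine)
 *		return -ENOMEM;
 *
 * With @retry_support true, do_one_request() may return -ENOSPC when
 * the hardware queue is full; crypto_pump_requests() then requeues the
 * request at the head of the engine queue instead of failing it.
 */
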
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

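/*
 * Example: typical engine lifecycle in a driver. A sketch assuming a
 * hypothetical probe function; the engine structure is devm-allocated,
 * but crypto_engine_exit() must still be called to stop the pump
 * thread:
 *
 *	foo->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *	if (!foo->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(foo->engine);
 *	if (ret)
 *		return ret;
 *
 *	// ... and in the driver's ->remove() callback:
 *	crypto_engine_exit(foo->engine);
 */
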
int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
	crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_aead(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_aeads(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
	crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_ahashes(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
	crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
	crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);

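/*
 * Example: registering an engine-aware skcipher. A sketch with
 * hypothetical foo_* callbacks; struct skcipher_engine_alg pairs the
 * usual skcipher_alg with the engine's do_one_request() hook, and the
 * register helper above sets CRYPTO_ALG_ENGINE on it:
 *
 *	static struct skcipher_engine_alg foo_cbc_alg = {
 *		.base = {
 *			.base.cra_name		= "cbc(aes)",
 *			.base.cra_driver_name	= "cbc-aes-foo",
 *			.base.cra_blocksize	= AES_BLOCK_SIZE,
 *			.setkey			= foo_aes_setkey,
 *			.encrypt		= foo_skcipher_encrypt,
 *			.decrypt		= foo_skcipher_decrypt,
 *		},
 *		.op = {
 *			.do_one_request = foo_do_one_request,
 *		},
 *	};
 *
 *	ret = crypto_engine_register_skcipher(&foo_cbc_alg);
 */
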
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_skciphers(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");