// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit keysizes
 * in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

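/*
 * sun8i_ce_cipher_need_fallback() - check whether a request can be handled by
 * the Crypto Engine or must go to the software fallback.
 *
 * The hardware cannot process requests with too many scatterlist entries, a
 * length shorter than the IV, an empty or non-16-byte-multiple length, or
 * source/destination segments whose offset or length is not 4-byte aligned.
 * Each reason is recorded in its own fallback statistic counter.
 */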
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	unsigned int todo, len;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
	    sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
		algt->stat_fb_maxsg++;
		return true;
	}

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) {
		algt->stat_fb_leniv++;
		return true;
	}

	if (areq->cryptlen == 0) {
		algt->stat_fb_len0++;
		return true;
	}

	if (areq->cryptlen % 16) {
		algt->stat_fb_mod16++;
		return true;
	}

	len = areq->cryptlen;
	sg = areq->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}

	len = areq->cryptlen;
	sg = areq->dst;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_dstali++;
			return true;
		}
		todo = min(len, sg->length);
		if (todo % 4) {
			algt->stat_fb_dstlen++;
			return true;
		}
		len -= todo;
		sg = sg_next(sg);
	}
	return false;
}

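/*
 * sun8i_ce_cipher_fallback() - handle the request entirely in software by
 * forwarding it to the allocated fallback skcipher.
 */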
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

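/*
 * sun8i_ce_cipher_prepare() - build the CE task descriptor for a request.
 *
 * Maps the key, IV and source/destination scatterlists for DMA and fills the
 * task structure of the selected flow so that sun8i_ce_cipher_run() only has
 * to kick the hardware. On error, everything mapped so far is unmapped again.
 */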
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int ns = sg_nents_for_len(areq->src, areq->cryptlen);
	int nd = sg_nents_for_len(areq->dst, areq->cryptlen);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);

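	/*
	 * For CBC decryption the chained IV is the last ciphertext block, so
	 * save it from the source buffer before the operation (it may be
	 * overwritten for in-place requests). The current IV is handed to the
	 * hardware through a DMA-safe bounce buffer.
	 */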
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(chan->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

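/*
 * sun8i_ce_cipher_run() - run the prepared task on its flow, wait for
 * completion and report the result back to the crypto engine.
 */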
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
	return 0;
}

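/*
 * sun8i_ce_cipher_unprepare() - undo the DMA mappings made in the prepare
 * step and copy the next IV back into the request (the saved last ciphertext
 * block for decryption, the last block of the result for encryption).
 */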
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, chan->backup_iv, ivsize);
			memzero_explicit(chan->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		memzero_explicit(chan->bounce_iv, ivsize);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}

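/*
 * sun8i_ce_skdecrypt()/sun8i_ce_skencrypt() - skcipher entry points. Requests
 * the hardware cannot handle are processed by the software fallback, all
 * others are queued on one of the CE flows through the crypto engine.
 */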
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

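/*
 * sun8i_ce_cipher_init() - tfm init: allocate the software fallback, set up
 * the crypto engine callbacks and take a runtime PM reference on the device.
 */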
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	memcpy(algt->fbname,
	       crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
	       CRYPTO_MAX_ALG_NAME);

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

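/*
 * sun8i_ce_aes_setkey() - validate the AES key length, keep a DMA-able copy
 * of the key for the hardware and forward the key to the fallback tfm.
 */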
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

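/*
 * sun8i_ce_des3_setkey() - verify the DES3 key, keep a DMA-able copy for the
 * hardware and forward the key to the fallback tfm.
 */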
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}