// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * The datasheet can be found in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/hmac.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"

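/*
 * sun8i_ss_hashkey() - hash an over-long HMAC key down to digest size.
 *
 * Per RFC 2104, an HMAC key longer than the block size must first be
 * hashed with the underlying hash. A synchronous software "sha1" shash
 * is used here (this helper serves the hmac(sha1) setkey path, hence
 * the hard-coded "sha1"), and the digest is written into tfmctx->key.
 */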
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	struct shash_desc *sdesc;
	size_t len;
	int ret = 0;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
	sdesc = kmalloc(len, GFP_KERNEL);
	if (!sdesc) {
		ret = -ENOMEM;
		goto err_hashkey_sdesc;
	}
	sdesc->tfm = xtfm;

	ret = crypto_shash_init(sdesc);
	if (ret) {
		dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
		goto err_hashkey;
	}
	ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash finup error\n");
err_hashkey:
	kfree(sdesc);
err_hashkey_sdesc:
	crypto_free_shash(xtfm);
	return ret;
}

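/*
 * sun8i_ss_hmac_setkey() - store the HMAC key and precompute the pads.
 *
 * The key is saved (hashed down first if longer than the block size),
 * then the ipad/opad blocks are derived by XORing the zero-padded key
 * with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE. Both pads are allocated with
 * GFP_DMA since they are later DMA-mapped as the first block of each
 * HMAC pass. The key is also given to the fallback tfm so requests
 * that cannot run on the SS produce the same result.
 */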
int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	digestsize = algt->alg.hash.halg.digestsize;

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->ipad)
		return -ENOMEM;
	tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	kfree_sensitive(tfmctx->opad);
err_opad:
	kfree_sensitive(tfmctx->ipad);
	return ret;
}

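/*
 * sun8i_ss_hash_crainit() - per-tfm initialization.
 *
 * Allocates a software fallback ahash with the same algorithm name,
 * wires up the crypto_engine callbacks, grows statesize/reqsize to
 * cover the fallback, and takes a runtime-PM reference so the device
 * stays powered while the tfm exists.
 */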
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	op->enginectx.op.do_one_request = sun8i_ss_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

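/*
 * sun8i_ss_hash_craexit() - per-tfm teardown: release the HMAC pads,
 * the fallback tfm and the runtime-PM reference taken in crainit.
 */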
void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

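/*
 * This driver only runs whole requests on the hardware; partial-update
 * state lives in the software fallback. init/update/final/finup and
 * export/import therefore always delegate to the fallback tfm, and
 * only complete digest() requests are offloaded (see
 * sun8i_ss_hash_digest() below).
 */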
int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

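/*
 * sun8i_ss_run_hash_task() - program one hash task into the SS.
 *
 * Each source SG is written to the engine in turn under ss->mlock.
 * For every SG after the first, BIT(17) is set and the previous
 * destination address is loaded through the KEY/IV address registers,
 * so the engine continues from the digest produced by the previous
 * chunk instead of restarting from the algorithm's initial constants.
 */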
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

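/*
 * sun8i_ss_hash_need_fallback() - decide whether a request can run on
 * the hardware. Falls back for: empty messages, messages too large for
 * the pad buffer, too many SG entries (one slot is reserved for the
 * padding SG), non-final SGs whose length is not block-aligned, and
 * SGs whose offset or length is not 32-bit aligned. Each reason bumps
 * its own statistics counter.
 */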
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/* The SS can only hash full blocks; since it supports only
		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
		 * Only the last block may be bounced to the pad buffer.
		 */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

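/*
 * sun8i_ss_hash_digest() - entry point for one-shot digests. Requests
 * the hardware cannot handle go straight to the fallback; otherwise
 * the request is queued on one of the two flow engines and later
 * executed by sun8i_ss_hash_run().
 */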
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

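/*
 * hash_pad() - append Merkle-Damgard padding at index @padi of @buf.
 *
 * Appends the 0x80 marker, enough zero words to align the total stream
 * (@byte_count plus the padding itself) to the block size @bs, then the
 * message length in bits: little-endian for MD5 (@le set), big-endian
 * for the SHA family, 128 bits wide when @bs is 128. The padding is
 * always exactly @bs - (@byte_count % @bs) bytes (plus one extra block
 * when the length field would not fit). Returns the new total length
 * of @buf in 32-bit words, or 0 if @bufsize would be exceeded.
 */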
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	/* SHA224 runs on the SHA256 engine, which writes a full 32-byte
	 * digest; map the larger size here and truncate when copying out.
	 */
	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a size that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

	/* a trailing partial block was copied into the pad buffer and will
	 * share the last task slot with the padding
	 */
	if (j > 0)
		i--;

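	/*
	 * HMAC is computed as H(opad || H(ipad || msg)) in two hardware
	 * passes: pass one (hmac == 1) prepends the DMA-mapped ipad block
	 * to the message SGs and produces the inner digest in 'result';
	 * pass two (hmac == 2) rebuilds the task with opad as the first
	 * block followed by the inner digest (copied into the pad buffer)
	 * and overwrites 'result' with the final HMAC.
	 */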
retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

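	/*
	 * Note that byte_count includes the ipad/opad block on HMAC
	 * passes, so the bit length encoded in the padding covers the
	 * virtual first block, exactly as a software HMAC would.
	 */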
	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * Quick reference for the DMA map/unmap pairing below.
	 * Flow (starting with hmac == 0):
	 *   MAP src
	 *   MAP res
	 *
	 * retry:
	 *   if HMAC key set and hmac == 0: hmac = 1
	 *     MAP xpad (ipad)
	 *   if hmac == 2:
	 *     MAP res
	 *     MAP xpad (opad)
	 *   MAP pad
	 *   run the task
	 *   UNMAP pad
	 *   if hmac > 0:
	 *     UNMAP xpad
	 *   UNMAP res
	 *   if hmac < 2:
	 *     UNMAP src
	 *
	 *   if hmac == 1: hmac = 2, goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}