1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 2021 Aspeed Technology Inc.
4  */
5 
6 #include "aspeed-hace.h"
7 
8 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
9 #define AHASH_DBG(h, fmt, ...)	\
10 	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
11 #else
12 #define AHASH_DBG(h, fmt, ...)	\
13 	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
14 #endif
15 
16 /* Initialization Vectors for SHA-family */
17 static const __be32 sha1_iv[8] = {
18 	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
19 	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
20 	cpu_to_be32(SHA1_H4), 0, 0, 0
21 };
22 
23 static const __be32 sha224_iv[8] = {
24 	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
25 	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
26 	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
27 	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
28 };
29 
30 static const __be32 sha256_iv[8] = {
31 	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
32 	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
33 	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
34 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
35 };
36 
37 static const __be64 sha384_iv[8] = {
38 	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
39 	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
40 	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
41 	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
42 };
43 
44 static const __be64 sha512_iv[8] = {
45 	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
46 	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
47 	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
48 	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
49 };
50 
51 static const __be32 sha512_224_iv[16] = {
52 	cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
53 	cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
54 	cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
55 	cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
56 	cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
57 	cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
58 	cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
59 	cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
60 };
61 
62 static const __be32 sha512_256_iv[16] = {
63 	cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
64 	cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
65 	cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
66 	cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
67 	cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
68 	cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
69 	cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
70 	cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
71 };
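
/*
 * Note: sha512_224_iv and sha512_256_iv above appear to be the FIPS 180-4
 * initial hash values for SHA-512/224 and SHA-512/256 (e.g. SHA-512/224
 * H0 = 0x8C3D37C819544DA2), stored with each 32-bit half byte-swapped so
 * that the cpu_to_be32() wrapping yields the layout the engine expects.
 */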
72 
73 /* The purpose of this padding is to ensure that the padded message is a
74  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
75  * The byte 0x80 (a "1" bit followed by seven zero bits) is appended at the
76  * end of the message, followed by "padlen-1" zero bytes. Then a 64-bit
77  * block (SHA1/SHA224/SHA256) or a 128-bit block (SHA384/SHA512) equal to
78  * the message length in bits is appended.
79  *
80  * For SHA1/SHA224/SHA256, padlen is calculated as follows:
81  *  - if message length < 56 bytes then padlen = 56 - message length
82  *  - else padlen = 64 + 56 - message length
83  *
84  * For SHA384/SHA512, padlen is calculated as follows:
85  *  - if message length < 112 bytes then padlen = 112 - message length
86  *  - else padlen = 128 + 112 - message length
87  */
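/*
 * Worked example (illustrative): with 3 bytes buffered for SHA-256,
 * index = 3 and padlen = 56 - 3 = 53, so the padded buffer holds
 * 3 + 53 + 8 = 64 bytes, i.e. exactly one 512-bit block. With 60 bytes
 * buffered, index = 60 and padlen = (64 + 56) - 60 = 60, giving
 * 60 + 60 + 8 = 128 bytes, i.e. two blocks.
 */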
88 static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
89 				      struct aspeed_sham_reqctx *rctx)
90 {
91 	unsigned int index, padlen;
92 	__be64 bits[2];
93 
94 	AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
95 
96 	switch (rctx->flags & SHA_FLAGS_MASK) {
97 	case SHA_FLAGS_SHA1:
98 	case SHA_FLAGS_SHA224:
99 	case SHA_FLAGS_SHA256:
100 		bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
101 		index = rctx->bufcnt & 0x3f;
102 		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
103 		*(rctx->buffer + rctx->bufcnt) = 0x80;
104 		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
105 		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
106 		rctx->bufcnt += padlen + 8;
107 		break;
108 	default:
109 		bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
110 		bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
111 				      rctx->digcnt[0] >> 61);
112 		index = rctx->bufcnt & 0x7f;
113 		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
114 		*(rctx->buffer + rctx->bufcnt) = 0x80;
115 		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
116 		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
117 		rctx->bufcnt += padlen + 16;
118 		break;
119 	}
120 }
121 
122 /*
123  * Prepare DMA buffer before hardware engine
124  * processing.
125  */
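/*
 * On this path the input is linearized: the buffered bytes plus the new
 * scatterlist data are copied into one pre-allocated DMA bounce buffer
 * (ahash_src_addr), and any tail smaller than a block is kept back in
 * rctx->buffer for a later update()/final().
 */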
126 static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
127 {
128 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
129 	struct ahash_request *req = hash_engine->req;
130 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
131 	int length, remain;
132 
133 	length = rctx->total + rctx->bufcnt;
134 	remain = length % rctx->block_size;
135 
136 	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
137 
138 	if (rctx->bufcnt)
139 		memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
140 
141 	if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
142 		scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
143 					 rctx->bufcnt, rctx->src_sg,
144 					 rctx->offset, rctx->total - remain, 0);
145 		rctx->offset += rctx->total - remain;
146 
147 	} else {
148 		dev_warn(hace_dev->dev, "Hash data length is too large\n");
149 		return -EINVAL;
150 	}
151 
152 	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
153 				 rctx->offset, remain, 0);
154 
155 	rctx->bufcnt = remain;
156 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
157 					       SHA512_DIGEST_SIZE,
158 					       DMA_BIDIRECTIONAL);
159 	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
160 		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
161 		return -ENOMEM;
162 	}
163 
164 	hash_engine->src_length = length - remain;
165 	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
166 	hash_engine->digest_dma = rctx->digest_dma_addr;
167 
168 	return 0;
169 }
170 
171 /*
172  * Prepare DMA buffer as SG list buffer before
173  * hardware engine processing.
174  */
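/*
 * The engine consumes a flat array of struct aspeed_sg_list descriptors
 * ({ phy_addr, len } pairs) built in the command buffer: an optional
 * leading entry for the data carried over in rctx->buffer, then one entry
 * per mapped source scatterlist element. The entry whose len field has
 * HASH_SG_LAST_LIST set terminates the list.
 */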
175 static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
176 {
177 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
178 	struct ahash_request *req = hash_engine->req;
179 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
180 	struct aspeed_sg_list *src_list;
181 	struct scatterlist *s;
182 	int length, remain, sg_len, i;
183 	int rc = 0;
184 
185 	remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
186 	length = rctx->total + rctx->bufcnt - remain;
187 
188 	AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
189 		  "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
190 		  "length", length, "remain", remain);
191 
192 	sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
193 			    DMA_TO_DEVICE);
194 	if (!sg_len) {
195 		dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
196 		rc = -ENOMEM;
197 		goto end;
198 	}
199 
200 	src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
201 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
202 					       SHA512_DIGEST_SIZE,
203 					       DMA_BIDIRECTIONAL);
204 	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
205 		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
206 		rc = -ENOMEM;
207 		goto free_src_sg;
208 	}
209 
210 	if (rctx->bufcnt != 0) {
211 		u32 phy_addr;
212 		u32 len;
213 
214 		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
215 						       rctx->buffer,
216 						       rctx->block_size * 2,
217 						       DMA_TO_DEVICE);
218 		if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
219 			dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
220 			rc = -ENOMEM;
221 			goto free_rctx_digest;
222 		}
223 
224 		phy_addr = rctx->buffer_dma_addr;
225 		len = rctx->bufcnt;
226 		length -= len;
227 
228 		/* Last sg list */
229 		if (length == 0)
230 			len |= HASH_SG_LAST_LIST;
231 
232 		src_list[0].phy_addr = cpu_to_le32(phy_addr);
233 		src_list[0].len = cpu_to_le32(len);
234 		src_list++;
235 	}
236 
237 	if (length != 0) {
238 		for_each_sg(rctx->src_sg, s, sg_len, i) {
239 			u32 phy_addr = sg_dma_address(s);
240 			u32 len = sg_dma_len(s);
241 
242 			if (length > len)
243 				length -= len;
244 			else {
245 				/* Last sg list */
246 				len = length;
247 				len |= HASH_SG_LAST_LIST;
248 				length = 0;
249 			}
250 
251 			src_list[i].phy_addr = cpu_to_le32(phy_addr);
252 			src_list[i].len = cpu_to_le32(len);
253 		}
254 	}
255 
256 	if (length != 0) {
257 		rc = -EINVAL;
258 		goto free_rctx_buffer;
259 	}
260 
261 	rctx->offset = rctx->total - remain;
262 	hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
263 	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
264 	hash_engine->digest_dma = rctx->digest_dma_addr;
265 
266 	return 0;
267 
268 free_rctx_buffer:
269 	if (rctx->bufcnt != 0)
270 		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
271 				 rctx->block_size * 2, DMA_TO_DEVICE);
272 free_rctx_digest:
273 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
274 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
275 free_src_sg:
276 	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
277 		     DMA_TO_DEVICE);
278 end:
279 	return rc;
280 }
281 
282 static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
283 {
284 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
285 	struct ahash_request *req = hash_engine->req;
286 
287 	AHASH_DBG(hace_dev, "\n");
288 
289 	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
290 
291 	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
292 
293 	return 0;
294 }
295 
296 /*
297  * Copy digest to the corresponding request result.
298  * This function is called at the final() stage.
299  */
300 static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
301 {
302 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
303 	struct ahash_request *req = hash_engine->req;
304 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
305 
306 	AHASH_DBG(hace_dev, "\n");
307 
308 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
309 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
310 
311 	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
312 			 rctx->block_size * 2, DMA_TO_DEVICE);
313 
314 	memcpy(req->result, rctx->digest, rctx->digsize);
315 
316 	return aspeed_ahash_complete(hace_dev);
317 }
318 
319 /*
320  * Trigger hardware engines to do the math.
321  */
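/*
 * Programming sequence: set the source address, the digest buffer address
 * (also written to the key-buffer register, which presumably supplies the
 * initial/intermediate digest in accumulative mode) and the data length,
 * then start the operation by writing the command register.
 */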
322 static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
323 				     aspeed_hace_fn_t resume)
324 {
325 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
326 	struct ahash_request *req = hash_engine->req;
327 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
328 
329 	AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
330 		  &hash_engine->src_dma, &hash_engine->digest_dma,
331 		  hash_engine->src_length);
332 
333 	rctx->cmd |= HASH_CMD_INT_ENABLE;
334 	hash_engine->resume = resume;
335 
336 	ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
337 	ast_hace_write(hace_dev, hash_engine->digest_dma,
338 		       ASPEED_HACE_HASH_DIGEST_BUFF);
339 	ast_hace_write(hace_dev, hash_engine->digest_dma,
340 		       ASPEED_HACE_HASH_KEY_BUFF);
341 	ast_hace_write(hace_dev, hash_engine->src_length,
342 		       ASPEED_HACE_HASH_DATA_LEN);
343 
344 	/* Memory barrier to ensure all data setup before engine starts */
345 	mb();
346 
347 	ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
348 
349 	return -EINPROGRESS;
350 }
351 
352 /*
353  * HMAC resume performs the second pass, which produces
354  * the final HMAC code from the outer key pad and the
355  * inner hash result.
356  */
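/*
 * Standard HMAC construction (RFC 2104):
 *   HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 * The first pass over (K' ^ ipad) || m has already finished; this resume
 * step hashes (K' ^ opad) || inner_digest to produce the final MAC.
 */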
357 static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
358 {
359 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
360 	struct ahash_request *req = hash_engine->req;
361 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
362 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
363 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
364 	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
365 	int rc = 0;
366 
367 	AHASH_DBG(hace_dev, "\n");
368 
369 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
370 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
371 
372 	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
373 			 rctx->block_size * 2, DMA_TO_DEVICE);
374 
375 	/* o key pad + hash sum 1 */
376 	memcpy(rctx->buffer, bctx->opad, rctx->block_size);
377 	memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
378 
379 	rctx->bufcnt = rctx->block_size + rctx->digsize;
380 	rctx->digcnt[0] = rctx->block_size + rctx->digsize;
381 
382 	aspeed_ahash_fill_padding(hace_dev, rctx);
383 	memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
384 
385 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
386 					       SHA512_DIGEST_SIZE,
387 					       DMA_BIDIRECTIONAL);
388 	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
389 		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
390 		rc = -ENOMEM;
391 		goto end;
392 	}
393 
394 	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
395 					       rctx->block_size * 2,
396 					       DMA_TO_DEVICE);
397 	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
398 		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
399 		rc = -ENOMEM;
400 		goto free_rctx_digest;
401 	}
402 
403 	hash_engine->src_dma = rctx->buffer_dma_addr;
404 	hash_engine->src_length = rctx->bufcnt;
405 	hash_engine->digest_dma = rctx->digest_dma_addr;
406 
407 	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
408 
409 free_rctx_digest:
410 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
411 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
412 end:
413 	return rc;
414 }
415 
416 static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
417 {
418 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
419 	struct ahash_request *req = hash_engine->req;
420 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
421 	int rc = 0;
422 
423 	AHASH_DBG(hace_dev, "\n");
424 
425 	aspeed_ahash_fill_padding(hace_dev, rctx);
426 
427 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
428 					       rctx->digest,
429 					       SHA512_DIGEST_SIZE,
430 					       DMA_BIDIRECTIONAL);
431 	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
432 		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
433 		rc = -ENOMEM;
434 		goto end;
435 	}
436 
437 	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
438 					       rctx->buffer,
439 					       rctx->block_size * 2,
440 					       DMA_TO_DEVICE);
441 	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
442 		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
443 		rc = -ENOMEM;
444 		goto free_rctx_digest;
445 	}
446 
447 	hash_engine->src_dma = rctx->buffer_dma_addr;
448 	hash_engine->src_length = rctx->bufcnt;
449 	hash_engine->digest_dma = rctx->digest_dma_addr;
450 
451 	if (rctx->flags & SHA_FLAGS_HMAC)
452 		return aspeed_hace_ahash_trigger(hace_dev,
453 						 aspeed_ahash_hmac_resume);
454 
455 	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
456 
457 free_rctx_digest:
458 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
459 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
460 end:
461 	return rc;
462 }
463 
464 static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
465 {
466 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
467 	struct ahash_request *req = hash_engine->req;
468 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
469 
470 	AHASH_DBG(hace_dev, "\n");
471 
472 	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
473 		     DMA_TO_DEVICE);
474 
475 	if (rctx->bufcnt != 0)
476 		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
477 				 rctx->block_size * 2,
478 				 DMA_TO_DEVICE);
479 
480 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
481 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
482 
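	/*
	 * Copy the unprocessed tail (less than one full block) back into the
	 * request context buffer so a later update()/final() can pick it up.
	 */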
483 	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
484 				 rctx->total - rctx->offset, 0);
485 
486 	rctx->bufcnt = rctx->total - rctx->offset;
487 	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
488 
489 	if (rctx->flags & SHA_FLAGS_FINUP)
490 		return aspeed_ahash_req_final(hace_dev);
491 
492 	return aspeed_ahash_complete(hace_dev);
493 }
494 
495 static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
496 {
497 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
498 	struct ahash_request *req = hash_engine->req;
499 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
500 
501 	AHASH_DBG(hace_dev, "\n");
502 
503 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
504 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
505 
506 	if (rctx->flags & SHA_FLAGS_FINUP)
507 		return aspeed_ahash_req_final(hace_dev);
508 
509 	return aspeed_ahash_complete(hace_dev);
510 }
511 
512 static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
513 {
514 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
515 	struct ahash_request *req = hash_engine->req;
516 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
517 	aspeed_hace_fn_t resume;
518 	int ret;
519 
520 	AHASH_DBG(hace_dev, "\n");
521 
522 	if (hace_dev->version == AST2600_VERSION) {
523 		rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
524 		resume = aspeed_ahash_update_resume_sg;
525 
526 	} else {
527 		resume = aspeed_ahash_update_resume;
528 	}
529 
530 	ret = hash_engine->dma_prepare(hace_dev);
531 	if (ret)
532 		return ret;
533 
534 	return aspeed_hace_ahash_trigger(hace_dev, resume);
535 }
536 
537 static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
538 				  struct ahash_request *req)
539 {
540 	return crypto_transfer_hash_request_to_engine(
541 			hace_dev->crypt_engine_hash, req);
542 }
543 
544 static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
545 {
546 	struct ahash_request *req = ahash_request_cast(areq);
547 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
548 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
549 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
550 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
551 	struct aspeed_engine_hash *hash_engine;
552 	int ret = 0;
553 
554 	hash_engine = &hace_dev->hash_engine;
555 	hash_engine->flags |= CRYPTO_FLAGS_BUSY;
556 
557 	if (rctx->op == SHA_OP_UPDATE)
558 		ret = aspeed_ahash_req_update(hace_dev);
559 	else if (rctx->op == SHA_OP_FINAL)
560 		ret = aspeed_ahash_req_final(hace_dev);
561 
562 	if (ret != -EINPROGRESS)
563 		return ret;
564 
565 	return 0;
566 }
567 
568 static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
569 					void *areq)
570 {
571 	struct ahash_request *req = ahash_request_cast(areq);
572 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
573 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
574 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
575 	struct aspeed_engine_hash *hash_engine;
576 
577 	hash_engine = &hace_dev->hash_engine;
578 	hash_engine->req = req;
579 
580 	if (hace_dev->version == AST2600_VERSION)
581 		hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
582 	else
583 		hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
584 
585 	return 0;
586 }
587 
588 static int aspeed_sham_update(struct ahash_request *req)
589 {
590 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
591 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
592 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
593 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
594 
595 	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
596 
597 	rctx->total = req->nbytes;
598 	rctx->src_sg = req->src;
599 	rctx->offset = 0;
600 	rctx->src_nents = sg_nents(req->src);
601 	rctx->op = SHA_OP_UPDATE;
602 
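	/*
	 * Track the total message length in the 128-bit counter
	 * digcnt[1]:digcnt[0]; the carry below handles digcnt[0] wrapping.
	 * This count is what aspeed_ahash_fill_padding() encodes in the
	 * final length block.
	 */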
603 	rctx->digcnt[0] += rctx->total;
604 	if (rctx->digcnt[0] < rctx->total)
605 		rctx->digcnt[1]++;
606 
607 	if (rctx->bufcnt + rctx->total < rctx->block_size) {
608 		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
609 					 rctx->src_sg, rctx->offset,
610 					 rctx->total, 0);
611 		rctx->bufcnt += rctx->total;
612 
613 		return 0;
614 	}
615 
616 	return aspeed_hace_hash_handle_queue(hace_dev, req);
617 }
618 
619 static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
620 				    const u8 *data, unsigned int len, u8 *out)
621 {
622 	SHASH_DESC_ON_STACK(shash, tfm);
623 
624 	shash->tfm = tfm;
625 
626 	return crypto_shash_digest(shash, data, len, out);
627 }
628 
629 static int aspeed_sham_final(struct ahash_request *req)
630 {
631 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
632 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
633 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
634 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
635 
636 	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
637 		  req->nbytes, rctx->total);
638 	rctx->op = SHA_OP_FINAL;
639 
640 	return aspeed_hace_hash_handle_queue(hace_dev, req);
641 }
642 
643 static int aspeed_sham_finup(struct ahash_request *req)
644 {
645 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
646 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
647 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
648 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
649 	int rc1, rc2;
650 
651 	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
652 
653 	rctx->flags |= SHA_FLAGS_FINUP;
654 
655 	rc1 = aspeed_sham_update(req);
656 	if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
657 		return rc1;
658 
659 	/*
660 	 * final() always has to be called to clean up resources,
661 	 * even if update() failed, except for EINPROGRESS.
662 	 */
663 	rc2 = aspeed_sham_final(req);
664 
665 	return rc1 ? : rc2;
666 }
667 
668 static int aspeed_sham_init(struct ahash_request *req)
669 {
670 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
671 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
672 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
673 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
674 	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
675 
676 	AHASH_DBG(hace_dev, "%s: digest size:%d\n",
677 		  crypto_tfm_alg_name(&tfm->base),
678 		  crypto_ahash_digestsize(tfm));
679 
680 	rctx->cmd = HASH_CMD_ACC_MODE;
681 	rctx->flags = 0;
682 
683 	switch (crypto_ahash_digestsize(tfm)) {
684 	case SHA1_DIGEST_SIZE:
685 		rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
686 		rctx->flags |= SHA_FLAGS_SHA1;
687 		rctx->digsize = SHA1_DIGEST_SIZE;
688 		rctx->block_size = SHA1_BLOCK_SIZE;
689 		rctx->sha_iv = sha1_iv;
690 		rctx->ivsize = 32;
691 		memcpy(rctx->digest, sha1_iv, rctx->ivsize);
692 		break;
693 	case SHA224_DIGEST_SIZE:
694 		rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
695 		rctx->flags |= SHA_FLAGS_SHA224;
696 		rctx->digsize = SHA224_DIGEST_SIZE;
697 		rctx->block_size = SHA224_BLOCK_SIZE;
698 		rctx->sha_iv = sha224_iv;
699 		rctx->ivsize = 32;
700 		memcpy(rctx->digest, sha224_iv, rctx->ivsize);
701 		break;
702 	case SHA256_DIGEST_SIZE:
703 		rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
704 		rctx->flags |= SHA_FLAGS_SHA256;
705 		rctx->digsize = SHA256_DIGEST_SIZE;
706 		rctx->block_size = SHA256_BLOCK_SIZE;
707 		rctx->sha_iv = sha256_iv;
708 		rctx->ivsize = 32;
709 		memcpy(rctx->digest, sha256_iv, rctx->ivsize);
710 		break;
711 	case SHA384_DIGEST_SIZE:
712 		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
713 			     HASH_CMD_SHA_SWAP;
714 		rctx->flags |= SHA_FLAGS_SHA384;
715 		rctx->digsize = SHA384_DIGEST_SIZE;
716 		rctx->block_size = SHA384_BLOCK_SIZE;
717 		rctx->sha_iv = (const __be32 *)sha384_iv;
718 		rctx->ivsize = 64;
719 		memcpy(rctx->digest, sha384_iv, rctx->ivsize);
720 		break;
721 	case SHA512_DIGEST_SIZE:
722 		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
723 			     HASH_CMD_SHA_SWAP;
724 		rctx->flags |= SHA_FLAGS_SHA512;
725 		rctx->digsize = SHA512_DIGEST_SIZE;
726 		rctx->block_size = SHA512_BLOCK_SIZE;
727 		rctx->sha_iv = (const __be32 *)sha512_iv;
728 		rctx->ivsize = 64;
729 		memcpy(rctx->digest, sha512_iv, rctx->ivsize);
730 		break;
731 	default:
732 		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
733 			 crypto_ahash_digestsize(tfm));
734 		return -EINVAL;
735 	}
736 
737 	rctx->bufcnt = 0;
738 	rctx->total = 0;
739 	rctx->digcnt[0] = 0;
740 	rctx->digcnt[1] = 0;
741 
742 	/* HMAC init */
743 	if (tctx->flags & SHA_FLAGS_HMAC) {
744 		rctx->digcnt[0] = rctx->block_size;
745 		rctx->bufcnt = rctx->block_size;
746 		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
747 		rctx->flags |= SHA_FLAGS_HMAC;
748 	}
749 
750 	return 0;
751 }
752 
753 static int aspeed_sha512s_init(struct ahash_request *req)
754 {
755 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
756 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
757 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
758 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
759 	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
760 
761 	AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
762 
763 	rctx->cmd = HASH_CMD_ACC_MODE;
764 	rctx->flags = 0;
765 
766 	switch (crypto_ahash_digestsize(tfm)) {
767 	case SHA224_DIGEST_SIZE:
768 		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
769 			     HASH_CMD_SHA_SWAP;
770 		rctx->flags |= SHA_FLAGS_SHA512_224;
771 		rctx->digsize = SHA224_DIGEST_SIZE;
772 		rctx->block_size = SHA512_BLOCK_SIZE;
773 		rctx->sha_iv = sha512_224_iv;
774 		rctx->ivsize = 64;
775 		memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
776 		break;
777 	case SHA256_DIGEST_SIZE:
778 		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
779 			     HASH_CMD_SHA_SWAP;
780 		rctx->flags |= SHA_FLAGS_SHA512_256;
781 		rctx->digsize = SHA256_DIGEST_SIZE;
782 		rctx->block_size = SHA512_BLOCK_SIZE;
783 		rctx->sha_iv = sha512_256_iv;
784 		rctx->ivsize = 64;
785 		memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
786 		break;
787 	default:
788 		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
789 			 crypto_ahash_digestsize(tfm));
790 		return -EINVAL;
791 	}
792 
793 	rctx->bufcnt = 0;
794 	rctx->total = 0;
795 	rctx->digcnt[0] = 0;
796 	rctx->digcnt[1] = 0;
797 
798 	/* HMAC init */
799 	if (tctx->flags & SHA_FLAGS_HMAC) {
800 		rctx->digcnt[0] = rctx->block_size;
801 		rctx->bufcnt = rctx->block_size;
802 		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
803 		rctx->flags |= SHA_FLAGS_HMAC;
804 	}
805 
806 	return 0;
807 }
808 
809 static int aspeed_sham_digest(struct ahash_request *req)
810 {
811 	return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
812 }
813 
814 static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
815 			      unsigned int keylen)
816 {
817 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
818 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
819 	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
820 	int ds = crypto_shash_digestsize(bctx->shash);
821 	int bs = crypto_shash_blocksize(bctx->shash);
822 	int err = 0;
823 	int i;
824 
825 	AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
826 		  keylen);
827 
828 	if (keylen > bs) {
829 		err = aspeed_sham_shash_digest(bctx->shash,
830 					       crypto_shash_get_flags(bctx->shash),
831 					       key, keylen, bctx->ipad);
832 		if (err)
833 			return err;
834 		keylen = ds;
835 
836 	} else {
837 		memcpy(bctx->ipad, key, keylen);
838 	}
839 
840 	memset(bctx->ipad + keylen, 0, bs - keylen);
841 	memcpy(bctx->opad, bctx->ipad, bs);
842 
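	/*
	 * Derive the HMAC pads from the zero-padded (or pre-hashed) key:
	 * ipad[i] = K'[i] ^ 0x36, opad[i] = K'[i] ^ 0x5c.
	 */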
843 	for (i = 0; i < bs; i++) {
844 		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
845 		bctx->opad[i] ^= HMAC_OPAD_VALUE;
846 	}
847 
848 	return err;
849 }
850 
851 static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
852 {
853 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
854 	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
855 	struct aspeed_hace_alg *ast_alg;
856 
857 	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
858 	tctx->hace_dev = ast_alg->hace_dev;
859 	tctx->flags = 0;
860 
861 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
862 				 sizeof(struct aspeed_sham_reqctx));
863 
864 	if (ast_alg->alg_base) {
865 		/* hmac related */
866 		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
867 
868 		tctx->flags |= SHA_FLAGS_HMAC;
869 		bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
870 						 CRYPTO_ALG_NEED_FALLBACK);
871 		if (IS_ERR(bctx->shash)) {
872 			dev_warn(ast_alg->hace_dev->dev,
873 				 "base driver '%s' could not be loaded.\n",
874 				 ast_alg->alg_base);
875 			return PTR_ERR(bctx->shash);
876 		}
877 	}
878 
879 	tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
880 	tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
881 	tctx->enginectx.op.unprepare_request = NULL;
882 
883 	return 0;
884 }
885 
886 static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
887 {
888 	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
889 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
890 
891 	AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
892 
893 	if (tctx->flags & SHA_FLAGS_HMAC) {
894 		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
895 
896 		crypto_free_shash(bctx->shash);
897 	}
898 }
899 
900 static int aspeed_sham_export(struct ahash_request *req, void *out)
901 {
902 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
903 
904 	memcpy(out, rctx, sizeof(*rctx));
905 
906 	return 0;
907 }
908 
909 static int aspeed_sham_import(struct ahash_request *req, const void *in)
910 {
911 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
912 
913 	memcpy(rctx, in, sizeof(*rctx));
914 
915 	return 0;
916 }
917 
918 static struct aspeed_hace_alg aspeed_ahash_algs[] = {
919 	{
920 		.alg.ahash = {
921 			.init	= aspeed_sham_init,
922 			.update	= aspeed_sham_update,
923 			.final	= aspeed_sham_final,
924 			.finup	= aspeed_sham_finup,
925 			.digest	= aspeed_sham_digest,
926 			.export	= aspeed_sham_export,
927 			.import	= aspeed_sham_import,
928 			.halg = {
929 				.digestsize = SHA1_DIGEST_SIZE,
930 				.statesize = sizeof(struct aspeed_sham_reqctx),
931 				.base = {
932 					.cra_name		= "sha1",
933 					.cra_driver_name	= "aspeed-sha1",
934 					.cra_priority		= 300,
935 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
936 								  CRYPTO_ALG_ASYNC |
937 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
938 					.cra_blocksize		= SHA1_BLOCK_SIZE,
939 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
940 					.cra_alignmask		= 0,
941 					.cra_module		= THIS_MODULE,
942 					.cra_init		= aspeed_sham_cra_init,
943 					.cra_exit		= aspeed_sham_cra_exit,
944 				}
945 			}
946 		},
947 	},
948 	{
949 		.alg.ahash = {
950 			.init	= aspeed_sham_init,
951 			.update	= aspeed_sham_update,
952 			.final	= aspeed_sham_final,
953 			.finup	= aspeed_sham_finup,
954 			.digest	= aspeed_sham_digest,
955 			.export	= aspeed_sham_export,
956 			.import	= aspeed_sham_import,
957 			.halg = {
958 				.digestsize = SHA256_DIGEST_SIZE,
959 				.statesize = sizeof(struct aspeed_sham_reqctx),
960 				.base = {
961 					.cra_name		= "sha256",
962 					.cra_driver_name	= "aspeed-sha256",
963 					.cra_priority		= 300,
964 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
965 								  CRYPTO_ALG_ASYNC |
966 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
967 					.cra_blocksize		= SHA256_BLOCK_SIZE,
968 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
969 					.cra_alignmask		= 0,
970 					.cra_module		= THIS_MODULE,
971 					.cra_init		= aspeed_sham_cra_init,
972 					.cra_exit		= aspeed_sham_cra_exit,
973 				}
974 			}
975 		},
976 	},
977 	{
978 		.alg.ahash = {
979 			.init	= aspeed_sham_init,
980 			.update	= aspeed_sham_update,
981 			.final	= aspeed_sham_final,
982 			.finup	= aspeed_sham_finup,
983 			.digest	= aspeed_sham_digest,
984 			.export	= aspeed_sham_export,
985 			.import	= aspeed_sham_import,
986 			.halg = {
987 				.digestsize = SHA224_DIGEST_SIZE,
988 				.statesize = sizeof(struct aspeed_sham_reqctx),
989 				.base = {
990 					.cra_name		= "sha224",
991 					.cra_driver_name	= "aspeed-sha224",
992 					.cra_priority		= 300,
993 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
994 								  CRYPTO_ALG_ASYNC |
995 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
996 					.cra_blocksize		= SHA224_BLOCK_SIZE,
997 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
998 					.cra_alignmask		= 0,
999 					.cra_module		= THIS_MODULE,
1000 					.cra_init		= aspeed_sham_cra_init,
1001 					.cra_exit		= aspeed_sham_cra_exit,
1002 				}
1003 			}
1004 		},
1005 	},
1006 	{
1007 		.alg_base = "sha1",
1008 		.alg.ahash = {
1009 			.init	= aspeed_sham_init,
1010 			.update	= aspeed_sham_update,
1011 			.final	= aspeed_sham_final,
1012 			.finup	= aspeed_sham_finup,
1013 			.digest	= aspeed_sham_digest,
1014 			.setkey	= aspeed_sham_setkey,
1015 			.export	= aspeed_sham_export,
1016 			.import	= aspeed_sham_import,
1017 			.halg = {
1018 				.digestsize = SHA1_DIGEST_SIZE,
1019 				.statesize = sizeof(struct aspeed_sham_reqctx),
1020 				.base = {
1021 					.cra_name		= "hmac(sha1)",
1022 					.cra_driver_name	= "aspeed-hmac-sha1",
1023 					.cra_priority		= 300,
1024 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1025 								  CRYPTO_ALG_ASYNC |
1026 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1027 					.cra_blocksize		= SHA1_BLOCK_SIZE,
1028 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1029 								sizeof(struct aspeed_sha_hmac_ctx),
1030 					.cra_alignmask		= 0,
1031 					.cra_module		= THIS_MODULE,
1032 					.cra_init		= aspeed_sham_cra_init,
1033 					.cra_exit		= aspeed_sham_cra_exit,
1034 				}
1035 			}
1036 		},
1037 	},
1038 	{
1039 		.alg_base = "sha224",
1040 		.alg.ahash = {
1041 			.init	= aspeed_sham_init,
1042 			.update	= aspeed_sham_update,
1043 			.final	= aspeed_sham_final,
1044 			.finup	= aspeed_sham_finup,
1045 			.digest	= aspeed_sham_digest,
1046 			.setkey	= aspeed_sham_setkey,
1047 			.export	= aspeed_sham_export,
1048 			.import	= aspeed_sham_import,
1049 			.halg = {
1050 				.digestsize = SHA224_DIGEST_SIZE,
1051 				.statesize = sizeof(struct aspeed_sham_reqctx),
1052 				.base = {
1053 					.cra_name		= "hmac(sha224)",
1054 					.cra_driver_name	= "aspeed-hmac-sha224",
1055 					.cra_priority		= 300,
1056 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1057 								  CRYPTO_ALG_ASYNC |
1058 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1059 					.cra_blocksize		= SHA224_BLOCK_SIZE,
1060 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1061 								sizeof(struct aspeed_sha_hmac_ctx),
1062 					.cra_alignmask		= 0,
1063 					.cra_module		= THIS_MODULE,
1064 					.cra_init		= aspeed_sham_cra_init,
1065 					.cra_exit		= aspeed_sham_cra_exit,
1066 				}
1067 			}
1068 		},
1069 	},
1070 	{
1071 		.alg_base = "sha256",
1072 		.alg.ahash = {
1073 			.init	= aspeed_sham_init,
1074 			.update	= aspeed_sham_update,
1075 			.final	= aspeed_sham_final,
1076 			.finup	= aspeed_sham_finup,
1077 			.digest	= aspeed_sham_digest,
1078 			.setkey	= aspeed_sham_setkey,
1079 			.export	= aspeed_sham_export,
1080 			.import	= aspeed_sham_import,
1081 			.halg = {
1082 				.digestsize = SHA256_DIGEST_SIZE,
1083 				.statesize = sizeof(struct aspeed_sham_reqctx),
1084 				.base = {
1085 					.cra_name		= "hmac(sha256)",
1086 					.cra_driver_name	= "aspeed-hmac-sha256",
1087 					.cra_priority		= 300,
1088 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1089 								  CRYPTO_ALG_ASYNC |
1090 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1091 					.cra_blocksize		= SHA256_BLOCK_SIZE,
1092 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1093 								sizeof(struct aspeed_sha_hmac_ctx),
1094 					.cra_alignmask		= 0,
1095 					.cra_module		= THIS_MODULE,
1096 					.cra_init		= aspeed_sham_cra_init,
1097 					.cra_exit		= aspeed_sham_cra_exit,
1098 				}
1099 			}
1100 		},
1101 	},
1102 };
1103 
1104 static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
1105 	{
1106 		.alg.ahash = {
1107 			.init	= aspeed_sham_init,
1108 			.update	= aspeed_sham_update,
1109 			.final	= aspeed_sham_final,
1110 			.finup	= aspeed_sham_finup,
1111 			.digest	= aspeed_sham_digest,
1112 			.export	= aspeed_sham_export,
1113 			.import	= aspeed_sham_import,
1114 			.halg = {
1115 				.digestsize = SHA384_DIGEST_SIZE,
1116 				.statesize = sizeof(struct aspeed_sham_reqctx),
1117 				.base = {
1118 					.cra_name		= "sha384",
1119 					.cra_driver_name	= "aspeed-sha384",
1120 					.cra_priority		= 300,
1121 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1122 								  CRYPTO_ALG_ASYNC |
1123 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1124 					.cra_blocksize		= SHA384_BLOCK_SIZE,
1125 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1126 					.cra_alignmask		= 0,
1127 					.cra_module		= THIS_MODULE,
1128 					.cra_init		= aspeed_sham_cra_init,
1129 					.cra_exit		= aspeed_sham_cra_exit,
1130 				}
1131 			}
1132 		},
1133 	},
1134 	{
1135 		.alg.ahash = {
1136 			.init	= aspeed_sham_init,
1137 			.update	= aspeed_sham_update,
1138 			.final	= aspeed_sham_final,
1139 			.finup	= aspeed_sham_finup,
1140 			.digest	= aspeed_sham_digest,
1141 			.export	= aspeed_sham_export,
1142 			.import	= aspeed_sham_import,
1143 			.halg = {
1144 				.digestsize = SHA512_DIGEST_SIZE,
1145 				.statesize = sizeof(struct aspeed_sham_reqctx),
1146 				.base = {
1147 					.cra_name		= "sha512",
1148 					.cra_driver_name	= "aspeed-sha512",
1149 					.cra_priority		= 300,
1150 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1151 								  CRYPTO_ALG_ASYNC |
1152 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1153 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1154 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1155 					.cra_alignmask		= 0,
1156 					.cra_module		= THIS_MODULE,
1157 					.cra_init		= aspeed_sham_cra_init,
1158 					.cra_exit		= aspeed_sham_cra_exit,
1159 				}
1160 			}
1161 		},
1162 	},
1163 	{
1164 		.alg.ahash = {
1165 			.init	= aspeed_sha512s_init,
1166 			.update	= aspeed_sham_update,
1167 			.final	= aspeed_sham_final,
1168 			.finup	= aspeed_sham_finup,
1169 			.digest	= aspeed_sham_digest,
1170 			.export	= aspeed_sham_export,
1171 			.import	= aspeed_sham_import,
1172 			.halg = {
1173 				.digestsize = SHA224_DIGEST_SIZE,
1174 				.statesize = sizeof(struct aspeed_sham_reqctx),
1175 				.base = {
1176 					.cra_name		= "sha512_224",
1177 					.cra_driver_name	= "aspeed-sha512_224",
1178 					.cra_priority		= 300,
1179 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1180 								  CRYPTO_ALG_ASYNC |
1181 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1182 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1183 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1184 					.cra_alignmask		= 0,
1185 					.cra_module		= THIS_MODULE,
1186 					.cra_init		= aspeed_sham_cra_init,
1187 					.cra_exit		= aspeed_sham_cra_exit,
1188 				}
1189 			}
1190 		},
1191 	},
1192 	{
1193 		.alg.ahash = {
1194 			.init	= aspeed_sha512s_init,
1195 			.update	= aspeed_sham_update,
1196 			.final	= aspeed_sham_final,
1197 			.finup	= aspeed_sham_finup,
1198 			.digest	= aspeed_sham_digest,
1199 			.export	= aspeed_sham_export,
1200 			.import	= aspeed_sham_import,
1201 			.halg = {
1202 				.digestsize = SHA256_DIGEST_SIZE,
1203 				.statesize = sizeof(struct aspeed_sham_reqctx),
1204 				.base = {
1205 					.cra_name		= "sha512_256",
1206 					.cra_driver_name	= "aspeed-sha512_256",
1207 					.cra_priority		= 300,
1208 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1209 								  CRYPTO_ALG_ASYNC |
1210 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1211 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1212 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1213 					.cra_alignmask		= 0,
1214 					.cra_module		= THIS_MODULE,
1215 					.cra_init		= aspeed_sham_cra_init,
1216 					.cra_exit		= aspeed_sham_cra_exit,
1217 				}
1218 			}
1219 		},
1220 	},
1221 	{
1222 		.alg_base = "sha384",
1223 		.alg.ahash = {
1224 			.init	= aspeed_sham_init,
1225 			.update	= aspeed_sham_update,
1226 			.final	= aspeed_sham_final,
1227 			.finup	= aspeed_sham_finup,
1228 			.digest	= aspeed_sham_digest,
1229 			.setkey	= aspeed_sham_setkey,
1230 			.export	= aspeed_sham_export,
1231 			.import	= aspeed_sham_import,
1232 			.halg = {
1233 				.digestsize = SHA384_DIGEST_SIZE,
1234 				.statesize = sizeof(struct aspeed_sham_reqctx),
1235 				.base = {
1236 					.cra_name		= "hmac(sha384)",
1237 					.cra_driver_name	= "aspeed-hmac-sha384",
1238 					.cra_priority		= 300,
1239 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1240 								  CRYPTO_ALG_ASYNC |
1241 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1242 					.cra_blocksize		= SHA384_BLOCK_SIZE,
1243 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1244 								sizeof(struct aspeed_sha_hmac_ctx),
1245 					.cra_alignmask		= 0,
1246 					.cra_module		= THIS_MODULE,
1247 					.cra_init		= aspeed_sham_cra_init,
1248 					.cra_exit		= aspeed_sham_cra_exit,
1249 				}
1250 			}
1251 		},
1252 	},
1253 	{
1254 		.alg_base = "sha512",
1255 		.alg.ahash = {
1256 			.init	= aspeed_sham_init,
1257 			.update	= aspeed_sham_update,
1258 			.final	= aspeed_sham_final,
1259 			.finup	= aspeed_sham_finup,
1260 			.digest	= aspeed_sham_digest,
1261 			.setkey	= aspeed_sham_setkey,
1262 			.export	= aspeed_sham_export,
1263 			.import	= aspeed_sham_import,
1264 			.halg = {
1265 				.digestsize = SHA512_DIGEST_SIZE,
1266 				.statesize = sizeof(struct aspeed_sham_reqctx),
1267 				.base = {
1268 					.cra_name		= "hmac(sha512)",
1269 					.cra_driver_name	= "aspeed-hmac-sha512",
1270 					.cra_priority		= 300,
1271 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1272 								  CRYPTO_ALG_ASYNC |
1273 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1274 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1275 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1276 								sizeof(struct aspeed_sha_hmac_ctx),
1277 					.cra_alignmask		= 0,
1278 					.cra_module		= THIS_MODULE,
1279 					.cra_init		= aspeed_sham_cra_init,
1280 					.cra_exit		= aspeed_sham_cra_exit,
1281 				}
1282 			}
1283 		},
1284 	},
1285 	{
1286 		.alg_base = "sha512_224",
1287 		.alg.ahash = {
1288 			.init	= aspeed_sha512s_init,
1289 			.update	= aspeed_sham_update,
1290 			.final	= aspeed_sham_final,
1291 			.finup	= aspeed_sham_finup,
1292 			.digest	= aspeed_sham_digest,
1293 			.setkey	= aspeed_sham_setkey,
1294 			.export	= aspeed_sham_export,
1295 			.import	= aspeed_sham_import,
1296 			.halg = {
1297 				.digestsize = SHA224_DIGEST_SIZE,
1298 				.statesize = sizeof(struct aspeed_sham_reqctx),
1299 				.base = {
1300 					.cra_name		= "hmac(sha512_224)",
1301 					.cra_driver_name	= "aspeed-hmac-sha512_224",
1302 					.cra_priority		= 300,
1303 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1304 								  CRYPTO_ALG_ASYNC |
1305 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1306 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1307 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1308 								sizeof(struct aspeed_sha_hmac_ctx),
1309 					.cra_alignmask		= 0,
1310 					.cra_module		= THIS_MODULE,
1311 					.cra_init		= aspeed_sham_cra_init,
1312 					.cra_exit		= aspeed_sham_cra_exit,
1313 				}
1314 			}
1315 		},
1316 	},
1317 	{
1318 		.alg_base = "sha512_256",
1319 		.alg.ahash = {
1320 			.init	= aspeed_sha512s_init,
1321 			.update	= aspeed_sham_update,
1322 			.final	= aspeed_sham_final,
1323 			.finup	= aspeed_sham_finup,
1324 			.digest	= aspeed_sham_digest,
1325 			.setkey	= aspeed_sham_setkey,
1326 			.export	= aspeed_sham_export,
1327 			.import	= aspeed_sham_import,
1328 			.halg = {
1329 				.digestsize = SHA256_DIGEST_SIZE,
1330 				.statesize = sizeof(struct aspeed_sham_reqctx),
1331 				.base = {
1332 					.cra_name		= "hmac(sha512_256)",
1333 					.cra_driver_name	= "aspeed-hmac-sha512_256",
1334 					.cra_priority		= 300,
1335 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1336 								  CRYPTO_ALG_ASYNC |
1337 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1338 					.cra_blocksize		= SHA512_BLOCK_SIZE,
1339 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1340 								sizeof(struct aspeed_sha_hmac_ctx),
1341 					.cra_alignmask		= 0,
1342 					.cra_module		= THIS_MODULE,
1343 					.cra_init		= aspeed_sham_cra_init,
1344 					.cra_exit		= aspeed_sham_cra_exit,
1345 				}
1346 			}
1347 		},
1348 	},
1349 };
1350 
1351 void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1352 {
1353 	int i;
1354 
1355 	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
1356 		crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
1357 
1358 	if (hace_dev->version != AST2600_VERSION)
1359 		return;
1360 
1361 	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
1362 		crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
1363 }
1364 
1365 void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1366 {
1367 	int rc, i;
1368 
1369 	AHASH_DBG(hace_dev, "\n");
1370 
1371 	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
1372 		aspeed_ahash_algs[i].hace_dev = hace_dev;
1373 		rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
1374 		if (rc) {
1375 			AHASH_DBG(hace_dev, "Failed to register %s\n",
1376 				  aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
1377 		}
1378 	}
1379 
1380 	if (hace_dev->version != AST2600_VERSION)
1381 		return;
1382 
1383 	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
1384 		aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
1385 		rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
1386 		if (rc) {
1387 			AHASH_DBG(hace_dev, "Failed to register %s\n",
1388 				  aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
1389 		}
1390 	}
1391 }
1392