/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

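/*
 * Byte cost of a scatter/gather list in the work request, indexed by the
 * number of entries: sgl_ent_len[] for the source-side ULPTX SGL and
 * dsgl_ent_len[] for the destination-side PHYS_DSGL.
 */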
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

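/*
 * Count the scatter/gather entries needed to cover @reqlen bytes of @sg,
 * after skipping the first @skip bytes, with each hardware entry limited
 * to @entlen bytes.
 */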
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}

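/*
 * Expand the AES key schedule (FIPS 197: RotWord, SubWord and the round
 * constants) and emit the last Nk round-key words in reverse order, i.e.
 * the "reversed round key" used for decryption.
 */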
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

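/*
 * Hash a single block (typically the HMAC ipad or opad) and export the
 * raw internal state of the transform into @result_hash, so it can be
 * loaded as a partial hash into the hardware key context.
 */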
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

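/*
 * Append @slen bytes of @sg (after skipping @skip bytes) to the
 * destination DSGL, splitting each mapping into entries of at most
 * CHCR_DST_SG_SIZE bytes. Eight address/length pairs share one
 * phys_sge_pairs block, hence the "% 8" bookkeeping.
 */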
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

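/*
 * Populate the key context for decryption: AES-CBC uses the reversed
 * round key directly, while double-length keys place the second key half
 * followed by the reversed round key of the first half.
 */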
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

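/*
 * Compute how many source bytes (starting at @srcskip into @src) fit in
 * one hash work request, given @space bytes left for the SGL and at
 * least @minsg entries already accounted for.
 */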
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

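/*
 * Like chcr_hash_ent_in_wr(), but walks the source and destination lists
 * in step: returns the number of bytes that fit in one work request
 * without exceeding @space or MAX_DSGL_ENT destination entries, i.e. the
 * smaller of the source and destination byte counts reached.
 */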
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}

static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);

		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		BUG();
		break;
	}
	return ret;
}

static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							  ablkctx->ciph_mode,
							  0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}

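/*
 * Add @add to the 128-bit big-endian counter in @srciv, writing the
 * result to @dstiv and propagating any carry into the higher words.
 */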
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

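/*
 * Clamp @bytes so that the low 32-bit word of the counter in @iv does
 * not wrap within this request; the remainder is handled in a follow-up
 * work request with an updated IV.
 */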
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	/* number of blocks that can be processed without overflow */
	c = (u64)temp + 1;
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

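/*
 * Recompute the XTS tweak for the next chunk of a multi-WR request:
 * encrypt the IV with the second (tweak) key, multiply by x^N in
 * GF(2^128) for the N blocks already processed and, unless this is the
 * final chunk, decrypt it back so the hardware receives a plain IV.
 */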
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which were
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* We need a separate function for the final IV because in RFC 3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes,
 * which remains constant for subsequent update requests.
 */

static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for Decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}

static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;
	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(req->iv, bytes);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:	atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		err = -ENOSPC;
		goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
	    CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    reqctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
		return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req: Hash request base
 * @param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					param->alg_prm.mk_size, 0,
					param->opad_needed,
					((param->kctx_len +
					 sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

chcr_ahash_update(struct ahash_request * req)1656 static int chcr_ahash_update(struct ahash_request *req)
1657 {
1658 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1659 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1660 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1661 struct chcr_context *ctx = h_ctx(rtfm);
1662 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663 struct sk_buff *skb;
1664 u8 remainder = 0, bs;
1665 unsigned int nbytes = req->nbytes;
1666 struct hash_wr_param params;
1667 int error;
1668 unsigned int cpu;
1669
1670 cpu = get_cpu();
1671 req_ctx->txqidx = cpu % ctx->ntxq;
1672 req_ctx->rxqidx = cpu % ctx->nrxq;
1673 put_cpu();
1674
1675 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676
1677 if (nbytes + req_ctx->reqlen >= bs) {
1678 remainder = (nbytes + req_ctx->reqlen) % bs;
1679 nbytes = nbytes + req_ctx->reqlen - remainder;
1680 } else {
1681 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1682 + req_ctx->reqlen, nbytes, 0);
1683 req_ctx->reqlen += nbytes;
1684 return 0;
1685 }
1686 error = chcr_inc_wrcount(dev);
1687 if (error)
1688 return -ENXIO;
1689 /* Detach state for CHCR means lldi or padap is freed. Increasing
1690 * inflight count for dev guarantees that lldi and padap is valid
1691 */
1692 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693 req_ctx->txqidx) &&
1694 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695 error = -ENOSPC;
1696 goto err;
1697 }
1698
1699 chcr_init_hctx_per_wr(req_ctx);
1700 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701 if (error) {
1702 error = -ENOMEM;
1703 goto err;
1704 }
1705 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm));
1706 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708 HASH_SPACE_LEFT(params.kctx_len), 0);
1709 if (params.sg_len > req->nbytes)
1710 params.sg_len = req->nbytes;
1711 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712 req_ctx->reqlen;
1713 params.opad_needed = 0;
1714 params.more = 1;
1715 params.last = 0;
1716 params.bfr_len = req_ctx->reqlen;
1717 params.scmd1 = 0;
1718 req_ctx->hctx_wr.srcsg = req->src;
1719
1720 params.hash_size = params.alg_prm.result_size;
1721 req_ctx->data_len += params.sg_len + params.bfr_len;
1722 	skb = create_hash_wr(req, &params);
1723 if (IS_ERR(skb)) {
1724 error = PTR_ERR(skb);
1725 goto unmap;
1726 }
1727
1728 req_ctx->hctx_wr.processed += params.sg_len;
1729 if (remainder) {
1730 /* Swap buffers */
1731 swap(req_ctx->reqbfr, req_ctx->skbfr);
1732 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733 req_ctx->reqbfr, remainder, req->nbytes -
1734 remainder);
1735 }
1736 req_ctx->reqlen = remainder;
1737 skb->dev = u_ctx->lldi.ports[0];
1738 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739 chcr_send_wr(skb);
1740 return -EINPROGRESS;
1741 unmap:
1742 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743 err:
1744 chcr_dec_wrcount(dev);
1745 return error;
1746 }
1747
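/*
 * create_last_hash_block - build the MD-style final padding block used
 * when the last work request would otherwise carry no data: a 0x80
 * marker, zero fill, and the message length in bits (scmd1 << 3) stored
 * big-endian in the last eight bytes. bs == 64 selects the SHA-1/SHA-256
 * family layout, otherwise the SHA-384/SHA-512 layout is used.
 */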
1748 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749 {
1750 memset(bfr_ptr, 0, bs);
1751 *bfr_ptr = 0x80;
1752 if (bs == 64)
1753 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1754 else
1755 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1756 }
1757
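/*
 * chcr_ahash_final - flush the buffered partial block and request the
 * final (or, for an empty buffer, a locally padded) digest from hardware.
 */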
1758 static int chcr_ahash_final(struct ahash_request *req)
1759 {
1760 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763 struct hash_wr_param params;
1764 struct sk_buff *skb;
1765 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766 struct chcr_context *ctx = h_ctx(rtfm);
1767 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768 int error;
1769 unsigned int cpu;
1770
1771 cpu = get_cpu();
1772 req_ctx->txqidx = cpu % ctx->ntxq;
1773 req_ctx->rxqidx = cpu % ctx->nrxq;
1774 put_cpu();
1775
1776 error = chcr_inc_wrcount(dev);
1777 if (error)
1778 return -ENXIO;
1779
1780 chcr_init_hctx_per_wr(req_ctx);
1781 if (is_hmac(crypto_ahash_tfm(rtfm)))
1782 params.opad_needed = 1;
1783 else
1784 params.opad_needed = 0;
1785 params.sg_len = 0;
1786 req_ctx->hctx_wr.isfinal = 1;
1787 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790 params.opad_needed = 1;
1791 params.kctx_len *= 2;
1792 } else {
1793 params.opad_needed = 0;
1794 }
1795
1796 req_ctx->hctx_wr.result = 1;
1797 params.bfr_len = req_ctx->reqlen;
1798 req_ctx->data_len += params.bfr_len + params.sg_len;
1799 req_ctx->hctx_wr.srcsg = req->src;
1800 if (req_ctx->reqlen == 0) {
1801 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802 params.last = 0;
1803 params.more = 1;
1804 params.scmd1 = 0;
1805 params.bfr_len = bs;
1806
1807 } else {
1808 params.scmd1 = req_ctx->data_len;
1809 params.last = 1;
1810 params.more = 0;
1811 }
1812 params.hash_size = crypto_ahash_digestsize(rtfm);
1813 	skb = create_hash_wr(req, &params);
1814 if (IS_ERR(skb)) {
1815 error = PTR_ERR(skb);
1816 goto err;
1817 }
1818 req_ctx->reqlen = 0;
1819 skb->dev = u_ctx->lldi.ports[0];
1820 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821 chcr_send_wr(skb);
1822 return -EINPROGRESS;
1823 err:
1824 chcr_dec_wrcount(dev);
1825 return error;
1826 }
1827
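/*
 * chcr_ahash_finup - combined update + final. When the remaining source
 * fits into one work request the digest is produced directly; otherwise
 * only a block-aligned prefix is sent and processing resumes from the
 * completion handler via chcr_ahash_continue().
 */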
1828 static int chcr_ahash_finup(struct ahash_request *req)
1829 {
1830 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834 struct chcr_context *ctx = h_ctx(rtfm);
1835 struct sk_buff *skb;
1836 struct hash_wr_param params;
1837 u8 bs;
1838 int error;
1839 unsigned int cpu;
1840
1841 cpu = get_cpu();
1842 req_ctx->txqidx = cpu % ctx->ntxq;
1843 req_ctx->rxqidx = cpu % ctx->nrxq;
1844 put_cpu();
1845
1846 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847 error = chcr_inc_wrcount(dev);
1848 if (error)
1849 return -ENXIO;
1850
1851 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852 req_ctx->txqidx) &&
1853 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854 error = -ENOSPC;
1855 goto err;
1856 }
1857 chcr_init_hctx_per_wr(req_ctx);
1858 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859 if (error) {
1860 error = -ENOMEM;
1861 goto err;
1862 }
1863
1864 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 params.kctx_len *= 2;
1868 params.opad_needed = 1;
1869 } else {
1870 params.opad_needed = 0;
1871 }
1872
1873 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874 HASH_SPACE_LEFT(params.kctx_len), 0);
1875 if (params.sg_len < req->nbytes) {
1876 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877 params.kctx_len /= 2;
1878 params.opad_needed = 0;
1879 }
1880 params.last = 0;
1881 params.more = 1;
1882 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883 - req_ctx->reqlen;
1884 params.hash_size = params.alg_prm.result_size;
1885 params.scmd1 = 0;
1886 } else {
1887 params.last = 1;
1888 params.more = 0;
1889 params.sg_len = req->nbytes;
1890 params.hash_size = crypto_ahash_digestsize(rtfm);
1891 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892 params.sg_len;
1893 }
1894 params.bfr_len = req_ctx->reqlen;
1895 req_ctx->data_len += params.bfr_len + params.sg_len;
1896 req_ctx->hctx_wr.result = 1;
1897 req_ctx->hctx_wr.srcsg = req->src;
1898 if ((req_ctx->reqlen + req->nbytes) == 0) {
1899 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900 params.last = 0;
1901 params.more = 1;
1902 params.scmd1 = 0;
1903 params.bfr_len = bs;
1904 }
1905 	skb = create_hash_wr(req, &params);
1906 if (IS_ERR(skb)) {
1907 error = PTR_ERR(skb);
1908 goto unmap;
1909 }
1910 req_ctx->reqlen = 0;
1911 req_ctx->hctx_wr.processed += params.sg_len;
1912 skb->dev = u_ctx->lldi.ports[0];
1913 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914 chcr_send_wr(skb);
1915 return -EINPROGRESS;
1916 unmap:
1917 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918 err:
1919 chcr_dec_wrcount(dev);
1920 return error;
1921 }
1922
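/*
 * chcr_ahash_digest - one-shot init + update + final over req->src,
 * split across several work requests when the scatterlist does not fit
 * into a single one.
 */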
1923 static int chcr_ahash_digest(struct ahash_request *req)
1924 {
1925 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1926 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1927 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1928 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1929 struct chcr_context *ctx = h_ctx(rtfm);
1930 struct sk_buff *skb;
1931 struct hash_wr_param params;
1932 u8 bs;
1933 int error;
1934 unsigned int cpu;
1935
1936 cpu = get_cpu();
1937 req_ctx->txqidx = cpu % ctx->ntxq;
1938 req_ctx->rxqidx = cpu % ctx->nrxq;
1939 put_cpu();
1940
1941 rtfm->init(req);
1942 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1943 error = chcr_inc_wrcount(dev);
1944 if (error)
1945 return -ENXIO;
1946
1947 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1948 req_ctx->txqidx) &&
1949 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1950 error = -ENOSPC;
1951 goto err;
1952 }
1953
1954 chcr_init_hctx_per_wr(req_ctx);
1955 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1956 if (error) {
1957 error = -ENOMEM;
1958 goto err;
1959 }
1960
1961 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1962 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1963 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1964 params.kctx_len *= 2;
1965 params.opad_needed = 1;
1966 } else {
1967 params.opad_needed = 0;
1968 }
1969 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1970 HASH_SPACE_LEFT(params.kctx_len), 0);
1971 if (params.sg_len < req->nbytes) {
1972 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1973 params.kctx_len /= 2;
1974 params.opad_needed = 0;
1975 }
1976 params.last = 0;
1977 params.more = 1;
1978 params.scmd1 = 0;
1979 params.sg_len = rounddown(params.sg_len, bs);
1980 params.hash_size = params.alg_prm.result_size;
1981 } else {
1982 params.sg_len = req->nbytes;
1983 params.hash_size = crypto_ahash_digestsize(rtfm);
1984 params.last = 1;
1985 params.more = 0;
1986 params.scmd1 = req->nbytes + req_ctx->data_len;
1987
1988 }
1989 params.bfr_len = 0;
1990 req_ctx->hctx_wr.result = 1;
1991 req_ctx->hctx_wr.srcsg = req->src;
1992 req_ctx->data_len += params.bfr_len + params.sg_len;
1993
1994 if (req->nbytes == 0) {
1995 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1996 params.more = 1;
1997 params.bfr_len = bs;
1998 }
1999
2000 	skb = create_hash_wr(req, &params);
2001 if (IS_ERR(skb)) {
2002 error = PTR_ERR(skb);
2003 goto unmap;
2004 }
2005 req_ctx->hctx_wr.processed += params.sg_len;
2006 skb->dev = u_ctx->lldi.ports[0];
2007 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2008 chcr_send_wr(skb);
2009 return -EINPROGRESS;
2010 unmap:
2011 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2012 err:
2013 chcr_dec_wrcount(dev);
2014 return error;
2015 }
2016
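/*
 * chcr_ahash_continue - send the next work request of a multi-WR hash,
 * resuming from hctx_wr->srcsg/src_ofst. Called from the completion path
 * until all of req->src has been consumed.
 */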
2017 static int chcr_ahash_continue(struct ahash_request *req)
2018 {
2019 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2020 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2021 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2022 struct chcr_context *ctx = h_ctx(rtfm);
2023 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2024 struct sk_buff *skb;
2025 struct hash_wr_param params;
2026 u8 bs;
2027 int error;
2028 unsigned int cpu;
2029
2030 cpu = get_cpu();
2031 reqctx->txqidx = cpu % ctx->ntxq;
2032 reqctx->rxqidx = cpu % ctx->nrxq;
2033 put_cpu();
2034
2035 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2036 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2037 params.kctx_len = roundup(params.alg_prm.result_size, 16);
2038 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2039 params.kctx_len *= 2;
2040 params.opad_needed = 1;
2041 } else {
2042 params.opad_needed = 0;
2043 }
2044 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2045 HASH_SPACE_LEFT(params.kctx_len),
2046 hctx_wr->src_ofst);
2047 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2048 params.sg_len = req->nbytes - hctx_wr->processed;
2049 if (!hctx_wr->result ||
2050 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2051 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2052 params.kctx_len /= 2;
2053 params.opad_needed = 0;
2054 }
2055 params.last = 0;
2056 params.more = 1;
2057 params.sg_len = rounddown(params.sg_len, bs);
2058 params.hash_size = params.alg_prm.result_size;
2059 params.scmd1 = 0;
2060 } else {
2061 params.last = 1;
2062 params.more = 0;
2063 params.hash_size = crypto_ahash_digestsize(rtfm);
2064 params.scmd1 = reqctx->data_len + params.sg_len;
2065 }
2066 params.bfr_len = 0;
2067 reqctx->data_len += params.sg_len;
2068 	skb = create_hash_wr(req, &params);
2069 if (IS_ERR(skb)) {
2070 error = PTR_ERR(skb);
2071 goto err;
2072 }
2073 hctx_wr->processed += params.sg_len;
2074 skb->dev = u_ctx->lldi.ports[0];
2075 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2076 chcr_send_wr(skb);
2077 return 0;
2078 err:
2079 return error;
2080 }
2081
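/*
 * chcr_handle_ahash_resp - completion handler for hash work requests.
 * Copies the final digest (or the intermediate partial hash) out of the
 * CPL payload, kicks off the next work request when source data is still
 * pending, and otherwise completes the crypto request.
 */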
2082 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2083 unsigned char *input,
2084 int err)
2085 {
2086 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2087 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2088 int digestsize, updated_digestsize;
2089 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2090 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2091 struct chcr_dev *dev = h_ctx(tfm)->dev;
2092
2093 if (input == NULL)
2094 goto out;
2095 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2096 updated_digestsize = digestsize;
2097 if (digestsize == SHA224_DIGEST_SIZE)
2098 updated_digestsize = SHA256_DIGEST_SIZE;
2099 else if (digestsize == SHA384_DIGEST_SIZE)
2100 updated_digestsize = SHA512_DIGEST_SIZE;
2101
2102 if (hctx_wr->dma_addr) {
2103 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2104 hctx_wr->dma_len, DMA_TO_DEVICE);
2105 hctx_wr->dma_addr = 0;
2106 }
2107 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2108 req->nbytes)) {
2109 if (hctx_wr->result == 1) {
2110 hctx_wr->result = 0;
2111 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2112 digestsize);
2113 } else {
2114 memcpy(reqctx->partial_hash,
2115 input + sizeof(struct cpl_fw6_pld),
2116 updated_digestsize);
2117
2118 }
2119 goto unmap;
2120 }
2121 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2122 updated_digestsize);
2123
2124 err = chcr_ahash_continue(req);
2125 if (err)
2126 goto unmap;
2127 return;
2128 unmap:
2129 if (hctx_wr->is_sg_map)
2130 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2131
2132
2133 out:
2134 chcr_dec_wrcount(dev);
2135 req->base.complete(&req->base, err);
2136 }
2137
2138 /*
2139  * chcr_handle_resp - Unmap the DMA buffers associated with the request
2140  * @req: crypto request
 * @input: CPL response payload from hardware, NULL on an error completion
 * @err: completion status
2141  */
2142 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2143 int err)
2144 {
2145 struct crypto_tfm *tfm = req->tfm;
2146 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2147 struct adapter *adap = padap(ctx->dev);
2148
2149 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2150 case CRYPTO_ALG_TYPE_AEAD:
2151 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2152 break;
2153
2154 case CRYPTO_ALG_TYPE_SKCIPHER:
2155 chcr_handle_cipher_resp(skcipher_request_cast(req),
2156 input, err);
2157 break;
2158 case CRYPTO_ALG_TYPE_AHASH:
2159 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2160 }
2161 atomic_inc(&adap->chcr_stats.complete);
2162 return err;
2163 }
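
/*
 * Export/import serialise only the software-visible state (pending
 * buffer, running length and partial hash); per-WR bookkeeping is reset
 * rather than carried across.
 */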
2164 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2165 {
2166 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2167 struct chcr_ahash_req_ctx *state = out;
2168
2169 state->reqlen = req_ctx->reqlen;
2170 state->data_len = req_ctx->data_len;
2171 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2172 memcpy(state->partial_hash, req_ctx->partial_hash,
2173 CHCR_HASH_MAX_DIGEST_SIZE);
2174 chcr_init_hctx_per_wr(state);
2175 return 0;
2176 }
2177
2178 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2179 {
2180 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2181 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2182
2183 req_ctx->reqlen = state->reqlen;
2184 req_ctx->data_len = state->data_len;
2185 req_ctx->reqbfr = req_ctx->bfr1;
2186 req_ctx->skbfr = req_ctx->bfr2;
2187 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2188 memcpy(req_ctx->partial_hash, state->partial_hash,
2189 CHCR_HASH_MAX_DIGEST_SIZE);
2190 chcr_init_hctx_per_wr(req_ctx);
2191 return 0;
2192 }
2193
2194 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2195 unsigned int keylen)
2196 {
2197 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2198 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2199 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2200 unsigned int i, err = 0, updated_digestsize;
2201
2202 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2203
2204 	/* Use the key to calculate the ipad and opad. The ipad will be sent
2205 	 * with the first request's data and the opad with the final hash
2206 	 * result. The ipad lives in hmacctx->ipad, the opad in hmacctx->opad.
2207 	 */
2208 shash->tfm = hmacctx->base_hash;
2209 if (keylen > bs) {
2210 err = crypto_shash_digest(shash, key, keylen,
2211 hmacctx->ipad);
2212 if (err)
2213 goto out;
2214 keylen = digestsize;
2215 } else {
2216 memcpy(hmacctx->ipad, key, keylen);
2217 }
2218 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2219 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2220
2221 for (i = 0; i < bs / sizeof(int); i++) {
2222 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2223 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2224 }
2225
2226 updated_digestsize = digestsize;
2227 if (digestsize == SHA224_DIGEST_SIZE)
2228 updated_digestsize = SHA256_DIGEST_SIZE;
2229 else if (digestsize == SHA384_DIGEST_SIZE)
2230 updated_digestsize = SHA512_DIGEST_SIZE;
2231 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2232 hmacctx->ipad, digestsize);
2233 if (err)
2234 goto out;
2235 chcr_change_order(hmacctx->ipad, updated_digestsize);
2236
2237 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2238 hmacctx->opad, digestsize);
2239 if (err)
2240 goto out;
2241 chcr_change_order(hmacctx->opad, updated_digestsize);
2242 out:
2243 return err;
2244 }
2245
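/*
 * chcr_aes_xts_setkey - install the two XTS half-keys. key_len is the
 * combined length, so the per-key width in bits is key_len << 2. 48 byte
 * (2 x AES-192) keys are zero-padded so both halves start on a 16 byte
 * boundary in the key context.
 */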
2246 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2247 unsigned int key_len)
2248 {
2249 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2250 unsigned short context_size = 0;
2251 int err;
2252
2253 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2254 if (err)
2255 goto badkey_err;
2256
2257 memcpy(ablkctx->key, key, key_len);
2258 ablkctx->enckey_len = key_len;
2259 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2260 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2261 	/* Both keys for xts must be aligned to a 16 byte boundary
2262 	 * by padding with zeros, so each 24 byte key gets 8 zero bytes.
2263 	 */
2264 if (key_len == 48) {
2265 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2266 + 16) >> 4;
2267 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2268 memset(ablkctx->key + 24, 0, 8);
2269 memset(ablkctx->key + 56, 0, 8);
2270 ablkctx->enckey_len = 64;
2271 ablkctx->key_ctx_hdr =
2272 FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2273 CHCR_KEYCTX_NO_KEY, 1,
2274 0, context_size);
2275 } else {
2276 ablkctx->key_ctx_hdr =
2277 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2278 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2279 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2280 CHCR_KEYCTX_NO_KEY, 1,
2281 0, context_size);
2282 }
2283 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2284 return 0;
2285 badkey_err:
2286 ablkctx->enckey_len = 0;
2287
2288 return err;
2289 }
2290
2291 static int chcr_sha_init(struct ahash_request *areq)
2292 {
2293 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2294 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2295 int digestsize = crypto_ahash_digestsize(tfm);
2296
2297 req_ctx->data_len = 0;
2298 req_ctx->reqlen = 0;
2299 req_ctx->reqbfr = req_ctx->bfr1;
2300 req_ctx->skbfr = req_ctx->bfr2;
2301 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2302
2303 return 0;
2304 }
2305
2306 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2307 {
2308 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2309 sizeof(struct chcr_ahash_req_ctx));
2310 return chcr_device_init(crypto_tfm_ctx(tfm));
2311 }
2312
2313 static int chcr_hmac_init(struct ahash_request *areq)
2314 {
2315 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2316 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2317 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2318 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2319 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2320
2321 chcr_sha_init(areq);
2322 req_ctx->data_len = bs;
2323 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2324 if (digestsize == SHA224_DIGEST_SIZE)
2325 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2326 SHA256_DIGEST_SIZE);
2327 else if (digestsize == SHA384_DIGEST_SIZE)
2328 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2329 SHA512_DIGEST_SIZE);
2330 else
2331 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2332 digestsize);
2333 }
2334 return 0;
2335 }
2336
2337 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2338 {
2339 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2340 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2341 unsigned int digestsize =
2342 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2343
2344 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2345 sizeof(struct chcr_ahash_req_ctx));
2346 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2347 if (IS_ERR(hmacctx->base_hash))
2348 return PTR_ERR(hmacctx->base_hash);
2349 return chcr_device_init(crypto_tfm_ctx(tfm));
2350 }
2351
2352 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2353 {
2354 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2355 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2356
2357 if (hmacctx->base_hash) {
2358 chcr_free_shash(hmacctx->base_hash);
2359 hmacctx->base_hash = NULL;
2360 }
2361 }
2362
2363 inline void chcr_aead_common_exit(struct aead_request *req)
2364 {
2365 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2366 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2367 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2368
2369 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2370 }
2371
2372 static int chcr_aead_common_init(struct aead_request *req)
2373 {
2374 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2375 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2376 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2377 unsigned int authsize = crypto_aead_authsize(tfm);
2378 int error = -EINVAL;
2379
2380 /* validate key size */
2381 if (aeadctx->enckey_len == 0)
2382 goto err;
2383 if (reqctx->op && req->cryptlen < authsize)
2384 goto err;
2385 if (reqctx->b0_len)
2386 reqctx->scratch_pad = reqctx->iv + IV;
2387 else
2388 reqctx->scratch_pad = NULL;
2389
2390 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2391 reqctx->op);
2392 if (error) {
2393 error = -ENOMEM;
2394 goto err;
2395 }
2396
2397 return 0;
2398 err:
2399 return error;
2400 }
2401
2402 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2403 int aadmax, int wrlen,
2404 unsigned short op_type)
2405 {
2406 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2407
2408 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2409 dst_nents > MAX_DSGL_ENT ||
2410 (req->assoclen > aadmax) ||
2411 (wrlen > SGE_MAX_WR_LEN))
2412 return 1;
2413 return 0;
2414 }
2415
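/*
 * chcr_aead_fallback - hand the request to the software aead when it
 * cannot be expressed as a single hardware work request (zero payload,
 * too many destination entries, oversized AAD or WR).
 */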
2416 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2417 {
2418 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2419 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2420 struct aead_request *subreq = aead_request_ctx(req);
2421
2422 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2423 aead_request_set_callback(subreq, req->base.flags,
2424 req->base.complete, req->base.data);
2425 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2426 req->iv);
2427 aead_request_set_ad(subreq, req->assoclen);
2428 return op_type ? crypto_aead_decrypt(subreq) :
2429 crypto_aead_encrypt(subreq);
2430 }
2431
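/*
 * create_authenc_wr - build the work request for the authenc modes
 * (CBC/CTR cipher plus SHA, or NULL cipher). The IV sits between AAD
 * and payload and is authenticated together with the AAD.
 */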
2432 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2433 unsigned short qid,
2434 int size)
2435 {
2436 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2437 struct chcr_context *ctx = a_ctx(tfm);
2438 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2439 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2440 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2441 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2442 struct sk_buff *skb = NULL;
2443 struct chcr_wr *chcr_req;
2444 struct cpl_rx_phys_dsgl *phys_cpl;
2445 struct ulptx_sgl *ulptx;
2446 unsigned int transhdr_len;
2447 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2448 unsigned int kctx_len = 0, dnents, snents;
2449 unsigned int authsize = crypto_aead_authsize(tfm);
2450 int error = -EINVAL;
2451 u8 *ivptr;
2452 int null = 0;
2453 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2454 GFP_ATOMIC;
2455 struct adapter *adap = padap(ctx->dev);
2456 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2457
2458 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2459 if (req->cryptlen == 0)
2460 return NULL;
2461
2462 reqctx->b0_len = 0;
2463 error = chcr_aead_common_init(req);
2464 if (error)
2465 return ERR_PTR(error);
2466
2467 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469 null = 1;
2470 }
2471 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473 dnents += MIN_AUTH_SG; // For IV
2474 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475 CHCR_SRC_SG_SIZE, 0);
2476 dst_size = get_space_for_phys_dsgl(dnents);
2477 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478 - sizeof(chcr_req->key_ctx);
2479 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481 SGE_MAX_WR_LEN;
2482 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483 : (sgl_len(snents) * 8);
2484 transhdr_len += temp;
2485 transhdr_len = roundup(transhdr_len, 16);
2486
2487 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488 transhdr_len, reqctx->op)) {
2489 atomic_inc(&adap->chcr_stats.fallback);
2490 chcr_aead_common_exit(req);
2491 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492 }
2493 skb = alloc_skb(transhdr_len, flags);
2494 if (!skb) {
2495 error = -ENOMEM;
2496 goto err;
2497 }
2498
2499 chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503 	/*
2504 	 * Input order is AAD, IV and payload, where the IV is included
2505 	 * as part of the authdata. All other fields are filled according
2506 	 * to the hardware spec.
2507 	 */
2508 chcr_req->sec_cpl.op_ivinsrtofst =
2509 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512 null ? 0 : 1 + IV,
2513 null ? 0 : IV + req->assoclen,
2514 req->assoclen + IV + 1,
2515 (temp & 0x1F0) >> 4);
2516 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517 temp & 0xF,
2518 null ? 0 : req->assoclen + IV + 1,
2519 temp, temp);
2520 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523 else
2524 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527 temp,
2528 actx->auth_mode, aeadctx->hmac_ctrl,
2529 IV >> 1);
2530 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531 0, 0, dst_size);
2532
2533 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534 if (reqctx->op == CHCR_ENCRYPT_OP ||
2535 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538 aeadctx->enckey_len);
2539 else
2540 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541 aeadctx->enckey_len);
2542
2543 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552 CTR_RFC3686_IV_SIZE);
2553 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555 } else {
2556 memcpy(ivptr, req->iv, IV);
2557 }
2558 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559 chcr_add_aead_src_ent(req, ulptx);
2560 atomic_inc(&adap->chcr_stats.cipher_rqst);
2561 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564 transhdr_len, temp, 0);
2565 reqctx->skb = skb;
2566
2567 return skb;
2568 err:
2569 chcr_aead_common_exit(req);
2570
2571 return ERR_PTR(error);
2572 }
2573
2574 int chcr_aead_dma_map(struct device *dev,
2575 struct aead_request *req,
2576 unsigned short op_type)
2577 {
2578 int error;
2579 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2580 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581 unsigned int authsize = crypto_aead_authsize(tfm);
2582 int src_len, dst_len;
2583
2584 	/* Calculate and handle src and dst sg lengths separately for
2585 	 * in-place and out-of-place operations.
2586 	 */
2587 if (req->src == req->dst) {
2588 src_len = req->assoclen + req->cryptlen + (op_type ?
2589 0 : authsize);
2590 dst_len = src_len;
2591 } else {
2592 src_len = req->assoclen + req->cryptlen;
2593 dst_len = req->assoclen + req->cryptlen + (op_type ?
2594 -authsize : authsize);
2595 }
2596
2597 if (!req->cryptlen || !src_len || !dst_len)
2598 return 0;
2599 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600 DMA_BIDIRECTIONAL);
2601 if (dma_mapping_error(dev, reqctx->iv_dma))
2602 return -ENOMEM;
2603 if (reqctx->b0_len)
2604 reqctx->b0_dma = reqctx->iv_dma + IV;
2605 else
2606 reqctx->b0_dma = 0;
2607 if (req->src == req->dst) {
2608 error = dma_map_sg(dev, req->src,
2609 sg_nents_for_len(req->src, src_len),
2610 DMA_BIDIRECTIONAL);
2611 if (!error)
2612 goto err;
2613 } else {
2614 error = dma_map_sg(dev, req->src,
2615 sg_nents_for_len(req->src, src_len),
2616 DMA_TO_DEVICE);
2617 if (!error)
2618 goto err;
2619 error = dma_map_sg(dev, req->dst,
2620 sg_nents_for_len(req->dst, dst_len),
2621 DMA_FROM_DEVICE);
2622 if (!error) {
2623 dma_unmap_sg(dev, req->src,
2624 sg_nents_for_len(req->src, src_len),
2625 DMA_TO_DEVICE);
2626 goto err;
2627 }
2628 }
2629
2630 return 0;
2631 err:
2632 	/* unmap length must match the (IV + b0_len) mapping above */
	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
2633 return -ENOMEM;
2634 }
2635
2636 void chcr_aead_dma_unmap(struct device *dev,
2637 struct aead_request *req,
2638 unsigned short op_type)
2639 {
2640 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2641 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642 unsigned int authsize = crypto_aead_authsize(tfm);
2643 int src_len, dst_len;
2644
2645 	/* Calculate and handle src and dst sg lengths separately for
2646 	 * in-place and out-of-place operations.
2647 	 */
2648 if (req->src == req->dst) {
2649 src_len = req->assoclen + req->cryptlen + (op_type ?
2650 0 : authsize);
2651 dst_len = src_len;
2652 } else {
2653 src_len = req->assoclen + req->cryptlen;
2654 dst_len = req->assoclen + req->cryptlen + (op_type ?
2655 -authsize : authsize);
2656 }
2657
2658 if (!req->cryptlen || !src_len || !dst_len)
2659 return;
2660
2661 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662 DMA_BIDIRECTIONAL);
2663 if (req->src == req->dst) {
2664 dma_unmap_sg(dev, req->src,
2665 sg_nents_for_len(req->src, src_len),
2666 DMA_BIDIRECTIONAL);
2667 } else {
2668 dma_unmap_sg(dev, req->src,
2669 sg_nents_for_len(req->src, src_len),
2670 DMA_TO_DEVICE);
2671 dma_unmap_sg(dev, req->dst,
2672 sg_nents_for_len(req->dst, dst_len),
2673 DMA_FROM_DEVICE);
2674 }
2675 }
2676
2677 void chcr_add_aead_src_ent(struct aead_request *req,
2678 struct ulptx_sgl *ulptx)
2679 {
2680 struct ulptx_walk ulp_walk;
2681 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2682
2683 if (reqctx->imm) {
2684 u8 *buf = (u8 *)ulptx;
2685
2686 if (reqctx->b0_len) {
2687 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688 buf += reqctx->b0_len;
2689 }
2690 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691 buf, req->cryptlen + req->assoclen, 0);
2692 } else {
2693 ulptx_walk_init(&ulp_walk, ulptx);
2694 if (reqctx->b0_len)
2695 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696 reqctx->b0_dma);
2697 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698 req->assoclen, 0);
2699 ulptx_walk_end(&ulp_walk);
2700 }
2701 }
2702
2703 void chcr_add_aead_dst_ent(struct aead_request *req,
2704 struct cpl_rx_phys_dsgl *phys_cpl,
2705 unsigned short qid)
2706 {
2707 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2708 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709 struct dsgl_walk dsgl_walk;
2710 unsigned int authsize = crypto_aead_authsize(tfm);
2711 struct chcr_context *ctx = a_ctx(tfm);
2712 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2713 u32 temp;
2714 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2715
2716 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2717 dsgl_walk_init(&dsgl_walk, phys_cpl);
2718 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2719 temp = req->assoclen + req->cryptlen +
2720 (reqctx->op ? -authsize : authsize);
2721 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2722 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2723 }
2724
2725 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2726 void *ulptx,
2727 struct cipher_wr_param *wrparam)
2728 {
2729 struct ulptx_walk ulp_walk;
2730 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2731 u8 *buf = ulptx;
2732
2733 memcpy(buf, reqctx->iv, IV);
2734 buf += IV;
2735 if (reqctx->imm) {
2736 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2737 buf, wrparam->bytes, reqctx->processed);
2738 } else {
2739 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2740 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2741 reqctx->src_ofst);
2742 reqctx->srcsg = ulp_walk.last_sg;
2743 reqctx->src_ofst = ulp_walk.last_sg_len;
2744 ulptx_walk_end(&ulp_walk);
2745 }
2746 }
2747
2748 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2749 struct cpl_rx_phys_dsgl *phys_cpl,
2750 struct cipher_wr_param *wrparam,
2751 unsigned short qid)
2752 {
2753 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2754 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2755 struct chcr_context *ctx = c_ctx(tfm);
2756 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2757 struct dsgl_walk dsgl_walk;
2758 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2759
2760 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2761 dsgl_walk_init(&dsgl_walk, phys_cpl);
2762 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2763 reqctx->dst_ofst);
2764 reqctx->dstsg = dsgl_walk.last_sg;
2765 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2766 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2767 }
2768
2769 void chcr_add_hash_src_ent(struct ahash_request *req,
2770 struct ulptx_sgl *ulptx,
2771 struct hash_wr_param *param)
2772 {
2773 struct ulptx_walk ulp_walk;
2774 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2775
2776 if (reqctx->hctx_wr.imm) {
2777 u8 *buf = (u8 *)ulptx;
2778
2779 if (param->bfr_len) {
2780 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2781 buf += param->bfr_len;
2782 }
2783
2784 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2785 sg_nents(reqctx->hctx_wr.srcsg), buf,
2786 param->sg_len, 0);
2787 } else {
2788 ulptx_walk_init(&ulp_walk, ulptx);
2789 if (param->bfr_len)
2790 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2791 reqctx->hctx_wr.dma_addr);
2792 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2793 param->sg_len, reqctx->hctx_wr.src_ofst);
2794 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2795 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2796 ulptx_walk_end(&ulp_walk);
2797 }
2798 }
2799
2800 int chcr_hash_dma_map(struct device *dev,
2801 struct ahash_request *req)
2802 {
2803 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2804 int error = 0;
2805
2806 if (!req->nbytes)
2807 return 0;
2808 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2809 DMA_TO_DEVICE);
2810 if (!error)
2811 return -ENOMEM;
2812 req_ctx->hctx_wr.is_sg_map = 1;
2813 return 0;
2814 }
2815
2816 void chcr_hash_dma_unmap(struct device *dev,
2817 struct ahash_request *req)
2818 {
2819 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2820
2821 if (!req->nbytes)
2822 return;
2823
2824 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2825 DMA_TO_DEVICE);
2826 req_ctx->hctx_wr.is_sg_map = 0;
2827
2828 }
2829
2830 int chcr_cipher_dma_map(struct device *dev,
2831 struct skcipher_request *req)
2832 {
2833 int error;
2834
2835 if (req->src == req->dst) {
2836 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2837 DMA_BIDIRECTIONAL);
2838 if (!error)
2839 goto err;
2840 } else {
2841 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2842 DMA_TO_DEVICE);
2843 if (!error)
2844 goto err;
2845 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2846 DMA_FROM_DEVICE);
2847 if (!error) {
2848 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2849 DMA_TO_DEVICE);
2850 goto err;
2851 }
2852 }
2853
2854 return 0;
2855 err:
2856 return -ENOMEM;
2857 }
2858
2859 void chcr_cipher_dma_unmap(struct device *dev,
2860 struct skcipher_request *req)
2861 {
2862 if (req->src == req->dst) {
2863 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2864 DMA_BIDIRECTIONAL);
2865 } else {
2866 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2867 DMA_TO_DEVICE);
2868 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2869 DMA_FROM_DEVICE);
2870 }
2871 }
2872
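/*
 * set_msg_len - write the CCM message length big-endian into the last
 * csize bytes of the B0 block, returning -EOVERFLOW when the length does
 * not fit the chosen field width.
 */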
2873 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2874 {
2875 __be32 data;
2876
2877 memset(block, 0, csize);
2878 block += csize;
2879
2880 if (csize >= 4)
2881 csize = 4;
2882 else if (msglen > (unsigned int)(1 << (8 * csize)))
2883 return -EOVERFLOW;
2884
2885 data = cpu_to_be32(msglen);
2886 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2887
2888 return 0;
2889 }
2890
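/*
 * generate_b0 - build the CCM B0 block per RFC 3610: the flags byte
 * (already holding L' from the IV) gains the encoded tag length m and
 * the Adata bit, and the trailing l = L' + 1 bytes carry the message
 * length, which for decrypt excludes the tag.
 */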
2891 static int generate_b0(struct aead_request *req, u8 *ivptr,
2892 unsigned short op_type)
2893 {
2894 unsigned int l, lp, m;
2895 int rc;
2896 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2897 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2898 u8 *b0 = reqctx->scratch_pad;
2899
2900 m = crypto_aead_authsize(aead);
2901
2902 memcpy(b0, ivptr, 16);
2903
2904 lp = b0[0];
2905 l = lp + 1;
2906
2907 /* set m, bits 3-5 */
2908 *b0 |= (8 * ((m - 2) / 2));
2909
2910 /* set adata, bit 6, if associated data is used */
2911 if (req->assoclen)
2912 *b0 |= 64;
2913 rc = set_msg_len(b0 + 16 - l,
2914 (op_type == CHCR_DECRYPT_OP) ?
2915 req->cryptlen - m : req->cryptlen, l);
2916
2917 return rc;
2918 }
2919
2920 static inline int crypto_ccm_check_iv(const u8 *iv)
2921 {
2922 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2923 if (iv[0] < 1 || iv[0] > 7)
2924 return -EINVAL;
2925
2926 return 0;
2927 }
2928
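/*
 * ccm_format_packet - lay out the 16 byte counter block (for RFC 4309 it
 * is rebuilt from the salt and the 8 byte per-request IV), store the AAD
 * length right after B0 in the scratch pad, and zero the counter bytes
 * so encryption starts at counter 0.
 */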
2929 static int ccm_format_packet(struct aead_request *req,
2930 u8 *ivptr,
2931 unsigned int sub_type,
2932 unsigned short op_type,
2933 unsigned int assoclen)
2934 {
2935 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2936 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2937 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2938 int rc = 0;
2939
2940 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2941 ivptr[0] = 3;
2942 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2943 memcpy(ivptr + 4, req->iv, 8);
2944 memset(ivptr + 12, 0, 4);
2945 } else {
2946 memcpy(ivptr, req->iv, 16);
2947 }
2948 if (assoclen)
2949 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2950
2951 rc = generate_b0(req, ivptr, op_type);
2952 /* zero the ctr value */
2953 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2954 return rc;
2955 }
2956
2957 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2958 unsigned int dst_size,
2959 struct aead_request *req,
2960 unsigned short op_type)
2961 {
2962 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2963 struct chcr_context *ctx = a_ctx(tfm);
2964 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2965 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2966 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2967 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2968 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2969 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2970 unsigned int ccm_xtra;
2971 unsigned int tag_offset = 0, auth_offset = 0;
2972 unsigned int assoclen;
2973
2974 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2975
2976 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2977 assoclen = req->assoclen - 8;
2978 else
2979 assoclen = req->assoclen;
2980 ccm_xtra = CCM_B0_SIZE +
2981 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2982
2983 auth_offset = req->cryptlen ?
2984 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2985 if (op_type == CHCR_DECRYPT_OP) {
2986 if (crypto_aead_authsize(tfm) != req->cryptlen)
2987 tag_offset = crypto_aead_authsize(tfm);
2988 else
2989 auth_offset = 0;
2990 }
2991
2992 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2993 sec_cpl->pldlen =
2994 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2995 	/* For CCM there will always be a B0 block, so AAD start is always 1 */
2996 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2997 1 + IV, IV + assoclen + ccm_xtra,
2998 req->assoclen + IV + 1 + ccm_xtra, 0);
2999
3000 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3001 auth_offset, tag_offset,
3002 (op_type == CHCR_ENCRYPT_OP) ? 0 :
3003 crypto_aead_authsize(tfm));
3004 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3005 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3006 cipher_mode, mac_mode,
3007 aeadctx->hmac_ctrl, IV >> 1);
3008
3009 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3010 0, dst_size);
3011 }
3012
3013 static int aead_ccm_validate_input(unsigned short op_type,
3014 struct aead_request *req,
3015 struct chcr_aead_ctx *aeadctx,
3016 unsigned int sub_type)
3017 {
3018 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3019 if (crypto_ccm_check_iv(req->iv)) {
3020 pr_err("CCM: IV check fails\n");
3021 return -EINVAL;
3022 }
3023 } else {
3024 if (req->assoclen != 16 && req->assoclen != 20) {
3025 pr_err("RFC4309: Invalid AAD length %d\n",
3026 req->assoclen);
3027 return -EINVAL;
3028 }
3029 }
3030 return 0;
3031 }
3032
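/*
 * create_aead_ccm_wr - build the work request for CCM and RFC 4309. The
 * B0 block and the encoded AAD length travel in the scratch pad ahead of
 * the payload, so all size computations include reqctx->b0_len.
 */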
3033 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3034 unsigned short qid,
3035 int size)
3036 {
3037 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3038 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3039 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3040 struct sk_buff *skb = NULL;
3041 struct chcr_wr *chcr_req;
3042 struct cpl_rx_phys_dsgl *phys_cpl;
3043 struct ulptx_sgl *ulptx;
3044 unsigned int transhdr_len;
3045 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3046 unsigned int sub_type, assoclen = req->assoclen;
3047 unsigned int authsize = crypto_aead_authsize(tfm);
3048 int error = -EINVAL;
3049 u8 *ivptr;
3050 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3051 GFP_ATOMIC;
3052 struct adapter *adap = padap(a_ctx(tfm)->dev);
3053
3054 sub_type = get_aead_subtype(tfm);
3055 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3056 assoclen -= 8;
3057 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3058 error = chcr_aead_common_init(req);
3059 if (error)
3060 return ERR_PTR(error);
3061
3062 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3063 if (error)
3064 goto err;
3065 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3066 + (reqctx->op ? -authsize : authsize),
3067 CHCR_DST_SG_SIZE, 0);
3068 dnents += MIN_CCM_SG; // For IV and B0
3069 dst_size = get_space_for_phys_dsgl(dnents);
3070 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3071 CHCR_SRC_SG_SIZE, 0);
3072 snents += MIN_CCM_SG; //For B0
3073 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3074 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3075 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3076 reqctx->b0_len) <= SGE_MAX_WR_LEN;
3077 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3078 reqctx->b0_len, 16) :
3079 (sgl_len(snents) * 8);
3080 transhdr_len += temp;
3081 transhdr_len = roundup(transhdr_len, 16);
3082
3083 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3084 reqctx->b0_len, transhdr_len, reqctx->op)) {
3085 atomic_inc(&adap->chcr_stats.fallback);
3086 chcr_aead_common_exit(req);
3087 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3088 }
3089 skb = alloc_skb(transhdr_len, flags);
3090
3091 if (!skb) {
3092 error = -ENOMEM;
3093 goto err;
3094 }
3095
3096 chcr_req = __skb_put_zero(skb, transhdr_len);
3097
3098 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3099
3100 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3101 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3102 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3103 aeadctx->key, aeadctx->enckey_len);
3104
3105 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3106 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3107 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3108 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3109 if (error)
3110 goto dstmap_fail;
3111 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3112 chcr_add_aead_src_ent(req, ulptx);
3113
3114 atomic_inc(&adap->chcr_stats.aead_rqst);
3115 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3116 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3117 reqctx->b0_len) : 0);
3118 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3119 transhdr_len, temp, 0);
3120 reqctx->skb = skb;
3121
3122 return skb;
3123 dstmap_fail:
3124 kfree_skb(skb);
3125 err:
3126 chcr_aead_common_exit(req);
3127 return ERR_PTR(error);
3128 }
3129
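/*
 * create_gcm_wr - build the work request for GCM and RFC 4106. The key
 * context carries the AES key followed by the GHASH subkey H, and the
 * 16 byte IV is laid out as SALT | IV | 0x00000001.
 */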
3130 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3131 unsigned short qid,
3132 int size)
3133 {
3134 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3135 struct chcr_context *ctx = a_ctx(tfm);
3136 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3137 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3138 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3139 struct sk_buff *skb = NULL;
3140 struct chcr_wr *chcr_req;
3141 struct cpl_rx_phys_dsgl *phys_cpl;
3142 struct ulptx_sgl *ulptx;
3143 unsigned int transhdr_len, dnents = 0, snents;
3144 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3145 unsigned int authsize = crypto_aead_authsize(tfm);
3146 int error = -EINVAL;
3147 u8 *ivptr;
3148 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3149 GFP_ATOMIC;
3150 struct adapter *adap = padap(ctx->dev);
3151 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3152
3153 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3154 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3155 assoclen = req->assoclen - 8;
3156
3157 reqctx->b0_len = 0;
3158 error = chcr_aead_common_init(req);
3159 if (error)
3160 return ERR_PTR(error);
3161 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3162 (reqctx->op ? -authsize : authsize),
3163 CHCR_DST_SG_SIZE, 0);
3164 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3165 CHCR_SRC_SG_SIZE, 0);
3166 dnents += MIN_GCM_SG; // For IV
3167 dst_size = get_space_for_phys_dsgl(dnents);
3168 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3169 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3170 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3171 SGE_MAX_WR_LEN;
3172 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3173 (sgl_len(snents) * 8);
3174 transhdr_len += temp;
3175 transhdr_len = roundup(transhdr_len, 16);
3176 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3177 transhdr_len, reqctx->op)) {
3178
3179 atomic_inc(&adap->chcr_stats.fallback);
3180 chcr_aead_common_exit(req);
3181 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3182 }
3183 skb = alloc_skb(transhdr_len, flags);
3184 if (!skb) {
3185 error = -ENOMEM;
3186 goto err;
3187 }
3188
3189 chcr_req = __skb_put_zero(skb, transhdr_len);
3190
3191 	// Offset of tag from end
3192 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3193 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3194 rx_channel_id, 2, 1);
3195 chcr_req->sec_cpl.pldlen =
3196 htonl(req->assoclen + IV + req->cryptlen);
3197 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3198 assoclen ? 1 + IV : 0,
3199 assoclen ? IV + assoclen : 0,
3200 req->assoclen + IV + 1, 0);
3201 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3202 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3203 temp, temp);
3204 chcr_req->sec_cpl.seqno_numivs =
3205 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3206 CHCR_ENCRYPT_OP) ? 1 : 0,
3207 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3208 CHCR_SCMD_AUTH_MODE_GHASH,
3209 aeadctx->hmac_ctrl, IV >> 1);
3210 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3211 0, 0, dst_size);
3212 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3213 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3214 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3215 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3216
3217 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3218 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3219 /* prepare a 16 byte iv */
3220 /* S A L T | IV | 0x00000001 */
3221 if (get_aead_subtype(tfm) ==
3222 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3223 memcpy(ivptr, aeadctx->salt, 4);
3224 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3225 } else {
3226 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3227 }
3228 put_unaligned_be32(0x01, &ivptr[12]);
3229 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3230
3231 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3232 chcr_add_aead_src_ent(req, ulptx);
3233 atomic_inc(&adap->chcr_stats.aead_rqst);
3234 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3235 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3236 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3237 transhdr_len, temp, reqctx->verify);
3238 reqctx->skb = skb;
3239 return skb;
3240
3241 err:
3242 chcr_aead_common_exit(req);
3243 return ERR_PTR(error);
3244 }
3245
3246
3247
3248 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3249 {
3250 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3251 struct aead_alg *alg = crypto_aead_alg(tfm);
3252
3253 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3254 CRYPTO_ALG_NEED_FALLBACK |
3255 CRYPTO_ALG_ASYNC);
3256 if (IS_ERR(aeadctx->sw_cipher))
3257 return PTR_ERR(aeadctx->sw_cipher);
3258 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3259 sizeof(struct aead_request) +
3260 crypto_aead_reqsize(aeadctx->sw_cipher)));
3261 return chcr_device_init(a_ctx(tfm));
3262 }
3263
3264 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3265 {
3266 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3267
3268 crypto_free_aead(aeadctx->sw_cipher);
3269 }
3270
3271 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3272 unsigned int authsize)
3273 {
3274 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275
3276 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3277 aeadctx->mayverify = VERIFY_HW;
3278 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3279 }
3280 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3281 unsigned int authsize)
3282 {
3283 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3284 u32 maxauth = crypto_aead_maxauthsize(tfm);
3285
3286 	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3287 	 * does not hold for SHA1. The authsize == 12 check must therefore
3288 	 * come before the authsize == (maxauth >> 1) check.
3289 	 */
3290 if (authsize == ICV_4) {
3291 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3292 aeadctx->mayverify = VERIFY_HW;
3293 } else if (authsize == ICV_6) {
3294 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3295 aeadctx->mayverify = VERIFY_HW;
3296 } else if (authsize == ICV_10) {
3297 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3298 aeadctx->mayverify = VERIFY_HW;
3299 } else if (authsize == ICV_12) {
3300 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3301 aeadctx->mayverify = VERIFY_HW;
3302 } else if (authsize == ICV_14) {
3303 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3304 aeadctx->mayverify = VERIFY_HW;
3305 } else if (authsize == (maxauth >> 1)) {
3306 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3307 aeadctx->mayverify = VERIFY_HW;
3308 } else if (authsize == maxauth) {
3309 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3310 aeadctx->mayverify = VERIFY_HW;
3311 } else {
3312 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3313 aeadctx->mayverify = VERIFY_SW;
3314 }
3315 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3316 }
3317
3318
3319 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3320 {
3321 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3322
3323 switch (authsize) {
3324 case ICV_4:
3325 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3326 aeadctx->mayverify = VERIFY_HW;
3327 break;
3328 case ICV_8:
3329 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3330 aeadctx->mayverify = VERIFY_HW;
3331 break;
3332 case ICV_12:
3333 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3334 aeadctx->mayverify = VERIFY_HW;
3335 break;
3336 case ICV_14:
3337 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3338 aeadctx->mayverify = VERIFY_HW;
3339 break;
3340 case ICV_16:
3341 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3342 aeadctx->mayverify = VERIFY_HW;
3343 break;
3344 case ICV_13:
3345 case ICV_15:
3346 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3347 aeadctx->mayverify = VERIFY_SW;
3348 break;
3349 default:
3350 return -EINVAL;
3351 }
3352 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3353 }
3354
3355 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3356 unsigned int authsize)
3357 {
3358 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3359
3360 switch (authsize) {
3361 case ICV_8:
3362 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3363 aeadctx->mayverify = VERIFY_HW;
3364 break;
3365 case ICV_12:
3366 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3367 aeadctx->mayverify = VERIFY_HW;
3368 break;
3369 case ICV_16:
3370 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3371 aeadctx->mayverify = VERIFY_HW;
3372 break;
3373 default:
3374 return -EINVAL;
3375 }
3376 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3377 }
3378
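/*
 * chcr_ccm_setauthsize - CCM tag lengths are the even values 4..16
 * (RFC 3610); each one maps to a hardware truncation mode, so no
 * software verification path is needed here.
 */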
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

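/*
 * chcr_ccm_common_setkey - stash the raw AES key and build the key
 * context header for CCM. The header records the cipher and MAC key
 * sizes plus the total context length in 16-byte units: e.g. a
 * 16-byte AES-128 key yields sizeof(struct _key_ctx) + 2 * 16 bytes,
 * which FILL_KEY_CTX_HDR() receives as key_ctx_size >> 4.
 */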
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

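/*
 * chcr_aead_ccm_setkey - propagate the request flags and key to the
 * software fallback tfm first (so a later fallback request sees the
 * same key), then program the hardware key context.
 */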
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

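/*
 * chcr_aead_rfc4309_setkey - RFC 4309 keys carry a 3-byte salt after
 * the AES key; split it off into aeadctx->salt and program the
 * remaining bytes as an ordinary CCM key.
 */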
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}

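/*
 * chcr_gcm_setkey - program an AES-GCM key. For RFC 4106 the last
 * four key bytes are the nonce salt and are split off first. Besides
 * the raw key, the key context needs the GHASH subkey
 * H = AES-ECB(K, 0^128), which is computed here in software (e.g. for
 * the all-zero 128-bit key, H = 66e94bd4ef8a2c3b884cfa59ca342b2e).
 */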
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;	/* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %u\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
	 * key context.
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));

out:
	return ret;
}

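/*
 * chcr_authenc_setkey - split an authenc() key into its cipher and
 * auth halves and precompute the HMAC state. Rather than storing the
 * auth key itself, the partial hashes of (key XOR ipad) and
 * (key XOR opad) are kept in the key context (IPAD_DATA/OPAD_DATA are
 * presumably the usual HMAC 0x36/0x5c fill words), so the hardware
 * can resume the HMAC from those states. For the CBC subtypes a
 * decryption round key is derived as well.
 */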
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* keys holds both the auth and the cipher key */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. The auth key is used just to
	 * generate h(ipad) and h(opad), so it is not stored again;
	 * authkeylen ends up equal to the hash digest size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("Base driver cannot be loaded\n");
		goto out;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("Hashing of the auth key failed\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digests to network byte order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}

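/*
 * chcr_aead_digest_null_setkey - setkey for the digest_null authenc
 * templates: only the cipher half of the authenc key is programmed,
 * the MAC key slot is marked CHCR_KEYCTX_NO_KEY and the auth mode is
 * set to NOP.
 */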
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* keys holds both the auth and the cipher key */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key %u\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

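/*
 * chcr_aead_op - common submission path for all AEAD requests: take a
 * work-request reference on the device (falling back to software if
 * the device is detaching), honour queue-full backpressure, build the
 * firmware work request via create_wr_fn and hand it to the LLD. On
 * success the request completes asynchronously, hence -EINPROGRESS.
 */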
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("%s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* The detached state for CHCR means lldi or padap has
		 * been freed; the WR count cannot be taken, so use the
		 * software fallback instead.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %u\n",
		       req->assoclen);
		chcr_dec_wrcount(cdev);	/* drop the WR ref taken above */
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

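/*
 * chcr_aead_encrypt - encrypt entry point. Tx/Rx queue indices are
 * derived from the current CPU so requests from different CPUs spread
 * across the queue sets; the request is then routed to the WR builder
 * matching the algorithm subtype.
 */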
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

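/*
 * chcr_aead_decrypt - decrypt entry point. When the configured tag
 * length has no hardware truncation mode (mayverify == VERIFY_SW),
 * the full digest is requested from the hardware (size = maxauthsize)
 * and the tag comparison is done on the host.
 */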
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

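/*
 * Table of everything this driver can offload: AES skciphers,
 * SHA/HMAC ahashes and the AEAD modes. Fields common to every entry
 * (module owner, flags, operation callbacks) are filled in at
 * registration time by chcr_register_alg().
 */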
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_cbc_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "xts(aes)",
			.base.cra_driver_name = "xts-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_xts_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-chcr",
			.base.cra_blocksize = 1,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_ctr_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "rfc3686(ctr(aes))",
			.base.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.base.cra_blocksize = 1,
			.init = chcr_rfc3686_init,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aes_rfc3686_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* AEAD algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms from the kernel
 * framework. An algorithm is unregistered only while its refcount is
 * one, i.e. no transform is still holding a reference to it.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
			    == 1) {
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
			    == 1) {
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
				driver_algs[i].is_registered = 0;
			}
			break;
		}
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 * chcr_register_alg - Register crypto algorithms with the kernel
 * framework. The fields common to every table entry (module owner,
 * priority, the ASYNC/NEED_FALLBACK/ALLOCATES_MEMORY flags and the
 * operation callbacks) are filled in here before each registration.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After
 * this the kernel will start calling driver APIs for crypto
 * operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms from the kernel.
 * This should be called once, when the last device goes down. After
 * this the kernel will no longer call the driver APIs for crypto
 * operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}