1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Support for Intel AES-NI instructions. This file contains glue
4  * code, the real AES implementation is in intel-aes_asm.S.
5  *
6  * Copyright (C) 2008, Intel Corp.
7  *    Author: Huang Ying <ying.huang@intel.com>
8  *
9  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
10  * interface for 64-bit kernels.
11  *    Authors: Adrian Hoban <adrian.hoban@intel.com>
12  *             Gabriele Paoloni <gabriele.paoloni@intel.com>
13  *             Tadeusz Struk (tadeusz.struk@intel.com)
14  *             Aidan O'Mahony (aidan.o.mahony@intel.com)
15  *    Copyright (c) 2010, Intel Corporation.
16  */
17 
18 #include <linux/hardirq.h>
19 #include <linux/types.h>
20 #include <linux/module.h>
21 #include <linux/err.h>
22 #include <crypto/algapi.h>
23 #include <crypto/aes.h>
24 #include <crypto/ctr.h>
25 #include <crypto/b128ops.h>
26 #include <crypto/gcm.h>
27 #include <crypto/xts.h>
28 #include <asm/cpu_device_id.h>
29 #include <asm/simd.h>
30 #include <crypto/scatterwalk.h>
31 #include <crypto/internal/aead.h>
32 #include <crypto/internal/simd.h>
33 #include <crypto/internal/skcipher.h>
34 #include <linux/jump_label.h>
35 #include <linux/workqueue.h>
36 #include <linux/spinlock.h>
37 #include <linux/static_call.h>
38 
39 
40 #define AESNI_ALIGN	16
41 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
42 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
43 #define RFC4106_HASH_SUBKEY_SIZE 16
44 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
45 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
46 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
47 
48 /* This data is stored at the end of the crypto_tfm struct.
49  * It's a type of per "session" data storage location.
50  * This needs to be 16 byte aligned.
51  */
struct aesni_rfc4106_gcm_ctx {
	/* GHASH key H = AES-encryption of the all-zero block (see
	 * rfc4106_set_hash_subkey()); must be 16-byte aligned for the asm. */
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	/* Expanded AES round keys, 16-byte aligned for the asm routines. */
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	/* Implicit salt: the last 4 bytes of the rfc4106 key material. */
	u8 nonce[4];
};
57 
/* Context for plain gcm(aes) (no rfc4106 salt), 16-byte aligned fields. */
struct generic_gcmaes_ctx {
	/* GHASH key H = AES-encryption of the all-zero block. */
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	/* Expanded AES round keys, aligned for the asm routines. */
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};
62 
/*
 * XTS context: two raw (possibly unaligned) AES contexts; they are
 * re-aligned via aes_ctx() before each use.  The tweak key comes from the
 * second half of the XTS key, the crypt key from the first half
 * (see xts_aesni_setkey()).
 */
struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
67 
68 #define GCM_BLOCK_LEN 16
69 
/*
 * Per-request GCM state carried across the init/update/finalize asm calls.
 * NOTE(review): the field order appears to be shared with the assembly
 * implementation referenced in the file header — confirm against the .S
 * file before reordering or resizing any member.
 */
struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};
82 
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 			     unsigned int key_len);
85 asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
86 asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
87 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
88 			      const u8 *in, unsigned int len);
89 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
90 			      const u8 *in, unsigned int len);
91 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
92 			      const u8 *in, unsigned int len, u8 *iv);
93 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
94 			      const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
96 				  const u8 *in, unsigned int len, u8 *iv);
97 asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
98 				  const u8 *in, unsigned int len, u8 *iv);
99 
100 #define AVX_GEN2_OPTSIZE 640
101 #define AVX_GEN4_OPTSIZE 4096
102 
103 asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
104 				  const u8 *in, unsigned int len, u8 *iv);
105 
106 asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
107 				  const u8 *in, unsigned int len, u8 *iv);
108 
109 #ifdef CONFIG_X86_64
110 
111 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
112 			      const u8 *in, unsigned int len, u8 *iv);
113 DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);
114 
115 /* Scatter / Gather routines, with args similar to above */
116 asmlinkage void aesni_gcm_init(void *ctx,
117 			       struct gcm_context_data *gdata,
118 			       u8 *iv,
119 			       u8 *hash_subkey, const u8 *aad,
120 			       unsigned long aad_len);
121 asmlinkage void aesni_gcm_enc_update(void *ctx,
122 				     struct gcm_context_data *gdata, u8 *out,
123 				     const u8 *in, unsigned long plaintext_len);
124 asmlinkage void aesni_gcm_dec_update(void *ctx,
125 				     struct gcm_context_data *gdata, u8 *out,
126 				     const u8 *in,
127 				     unsigned long ciphertext_len);
128 asmlinkage void aesni_gcm_finalize(void *ctx,
129 				   struct gcm_context_data *gdata,
130 				   u8 *auth_tag, unsigned long auth_tag_len);
131 
132 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
133 		void *keys, u8 *out, unsigned int num_bytes);
134 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
135 		void *keys, u8 *out, unsigned int num_bytes);
136 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
137 		void *keys, u8 *out, unsigned int num_bytes);
138 
139 
140 asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
141 	const void *keys, u8 *out, unsigned int num_bytes,
142 	unsigned int byte_ctr);
143 
144 asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv,
145 	const void *keys, u8 *out, unsigned int num_bytes,
146 	unsigned int byte_ctr);
147 
148 asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv,
149 	const void *keys, u8 *out, unsigned int num_bytes,
150 	unsigned int byte_ctr);
151 
152 /*
153  * asmlinkage void aesni_gcm_init_avx_gen2()
154  * gcm_data *my_ctx_data, context data
155  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
156  */
157 asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
158 					struct gcm_context_data *gdata,
159 					u8 *iv,
160 					u8 *hash_subkey,
161 					const u8 *aad,
162 					unsigned long aad_len);
163 
164 asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
165 				     struct gcm_context_data *gdata, u8 *out,
166 				     const u8 *in, unsigned long plaintext_len);
167 asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
168 				     struct gcm_context_data *gdata, u8 *out,
169 				     const u8 *in,
170 				     unsigned long ciphertext_len);
171 asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
172 				   struct gcm_context_data *gdata,
173 				   u8 *auth_tag, unsigned long auth_tag_len);
174 
175 /*
176  * asmlinkage void aesni_gcm_init_avx_gen4()
177  * gcm_data *my_ctx_data, context data
178  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
179  */
180 asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
181 					struct gcm_context_data *gdata,
182 					u8 *iv,
183 					u8 *hash_subkey,
184 					const u8 *aad,
185 					unsigned long aad_len);
186 
187 asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
188 				     struct gcm_context_data *gdata, u8 *out,
189 				     const u8 *in, unsigned long plaintext_len);
190 asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
191 				     struct gcm_context_data *gdata, u8 *out,
192 				     const u8 *in,
193 				     unsigned long ciphertext_len);
194 asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
195 				   struct gcm_context_data *gdata,
196 				   u8 *auth_tag, unsigned long auth_tag_len);
197 
198 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
199 static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
200 
201 static inline struct
aesni_rfc4106_gcm_ctx_get(struct crypto_aead * tfm)202 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
203 {
204 	unsigned long align = AESNI_ALIGN;
205 
206 	if (align <= crypto_tfm_ctx_alignment())
207 		align = 1;
208 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
209 }
210 
211 static inline struct
generic_gcmaes_ctx_get(struct crypto_aead * tfm)212 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
213 {
214 	unsigned long align = AESNI_ALIGN;
215 
216 	if (align <= crypto_tfm_ctx_alignment())
217 		align = 1;
218 	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
219 }
220 #endif
221 
aes_ctx(void * raw_ctx)222 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
223 {
224 	unsigned long addr = (unsigned long)raw_ctx;
225 	unsigned long align = AESNI_ALIGN;
226 
227 	if (align <= crypto_tfm_ctx_alignment())
228 		align = 1;
229 	return (struct crypto_aes_ctx *)ALIGN(addr, align);
230 }
231 
aes_set_key_common(struct crypto_tfm * tfm,void * raw_ctx,const u8 * in_key,unsigned int key_len)232 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
233 			      const u8 *in_key, unsigned int key_len)
234 {
235 	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
236 	int err;
237 
238 	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
239 	    key_len != AES_KEYSIZE_256)
240 		return -EINVAL;
241 
242 	if (!crypto_simd_usable())
243 		err = aes_expandkey(ctx, in_key, key_len);
244 	else {
245 		kernel_fpu_begin();
246 		err = aesni_set_key(ctx, in_key, key_len);
247 		kernel_fpu_end();
248 	}
249 
250 	return err;
251 }
252 
/* crypto_cipher ->setkey(): expand the key into the tfm's own context. */
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}
258 
/*
 * Single-block ECB encrypt for the bare "aes" cipher.  Takes the AES-NI
 * path inside a kernel FPU section when SIMD is usable, otherwise uses
 * the generic C implementation.
 */
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (crypto_simd_usable()) {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	} else {
		aes_encrypt(ctx, dst, src);
	}
}
271 
/*
 * Single-block ECB decrypt counterpart of aesni_encrypt(): AES-NI under
 * an FPU section when SIMD is usable, generic C otherwise.
 */
static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (crypto_simd_usable()) {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	} else {
		aes_decrypt(ctx, dst, src);
	}
}
284 
/* skcipher ->setkey(): thin wrapper around aes_set_key_common(). */
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			         unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}
291 
ecb_encrypt(struct skcipher_request * req)292 static int ecb_encrypt(struct skcipher_request *req)
293 {
294 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
295 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
296 	struct skcipher_walk walk;
297 	unsigned int nbytes;
298 	int err;
299 
300 	err = skcipher_walk_virt(&walk, req, false);
301 
302 	while ((nbytes = walk.nbytes)) {
303 		kernel_fpu_begin();
304 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
305 			      nbytes & AES_BLOCK_MASK);
306 		kernel_fpu_end();
307 		nbytes &= AES_BLOCK_SIZE - 1;
308 		err = skcipher_walk_done(&walk, nbytes);
309 	}
310 
311 	return err;
312 }
313 
ecb_decrypt(struct skcipher_request * req)314 static int ecb_decrypt(struct skcipher_request *req)
315 {
316 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
317 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
318 	struct skcipher_walk walk;
319 	unsigned int nbytes;
320 	int err;
321 
322 	err = skcipher_walk_virt(&walk, req, false);
323 
324 	while ((nbytes = walk.nbytes)) {
325 		kernel_fpu_begin();
326 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
327 			      nbytes & AES_BLOCK_MASK);
328 		kernel_fpu_end();
329 		nbytes &= AES_BLOCK_SIZE - 1;
330 		err = skcipher_walk_done(&walk, nbytes);
331 	}
332 
333 	return err;
334 }
335 
cbc_encrypt(struct skcipher_request * req)336 static int cbc_encrypt(struct skcipher_request *req)
337 {
338 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
339 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
340 	struct skcipher_walk walk;
341 	unsigned int nbytes;
342 	int err;
343 
344 	err = skcipher_walk_virt(&walk, req, false);
345 
346 	while ((nbytes = walk.nbytes)) {
347 		kernel_fpu_begin();
348 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
349 			      nbytes & AES_BLOCK_MASK, walk.iv);
350 		kernel_fpu_end();
351 		nbytes &= AES_BLOCK_SIZE - 1;
352 		err = skcipher_walk_done(&walk, nbytes);
353 	}
354 
355 	return err;
356 }
357 
cbc_decrypt(struct skcipher_request * req)358 static int cbc_decrypt(struct skcipher_request *req)
359 {
360 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
361 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
362 	struct skcipher_walk walk;
363 	unsigned int nbytes;
364 	int err;
365 
366 	err = skcipher_walk_virt(&walk, req, false);
367 
368 	while ((nbytes = walk.nbytes)) {
369 		kernel_fpu_begin();
370 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
371 			      nbytes & AES_BLOCK_MASK, walk.iv);
372 		kernel_fpu_end();
373 		nbytes &= AES_BLOCK_SIZE - 1;
374 		err = skcipher_walk_done(&walk, nbytes);
375 	}
376 
377 	return err;
378 }
379 
/*
 * CBC with ciphertext stealing: encrypt everything up to the last two
 * blocks with plain CBC, then let aesni_cts_cbc_enc() process the final
 * full block plus the trailing partial block in a single call.
 * Requires at least one full AES block of input.
 */
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	/* Less than one block is invalid; exactly one block is plain CBC. */
	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		/* Encrypt the leading whole blocks with regular CBC. */
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_encrypt(&subreq);
		if (err)
			return err;

		/* A single full block needed no stealing - done. */
		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		/* Fast-forward past the already-processed CBC portion. */
		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}
435 
/*
 * CBC ciphertext-stealing decryption: mirror of cts_cbc_encrypt() —
 * plain CBC for all but the last two blocks, then aesni_cts_cbc_dec()
 * for the final full block plus the trailing partial block.
 */
static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	/* Less than one block is invalid; exactly one block is plain CBC. */
	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		/* Decrypt the leading whole blocks with regular CBC. */
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_decrypt(&subreq);
		if (err)
			return err;

		/* A single full block needed no stealing - done. */
		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		/* Fast-forward past the already-processed CBC portion. */
		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}
491 
492 #ifdef CONFIG_X86_64
/*
 * CTR bulk encryption via the by8 AVX routines, dispatched on the
 * expanded key length.  aes_set_key_common() guarantees key_length is
 * one of {128,192,256} bits, so the default arm is the 256-bit case.
 */
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	switch (ctx->key_length) {
	case AES_KEYSIZE_128:
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
		break;
	case AES_KEYSIZE_192:
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
		break;
	default:
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
		break;
	}
}
509 
/*
 * CTR mode en/decryption.  Full blocks are processed by the asm routine
 * selected at module init via the static call (SSE or AVX by8); a
 * trailing partial block is XORed with one manually generated keystream
 * block.  The whole of each walk step runs inside one FPU section.
 */
static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	u8 keystream[AES_BLOCK_SIZE];
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		kernel_fpu_begin();
		/* At least one whole block present: bulk-encrypt those. */
		if (nbytes & AES_BLOCK_MASK)
			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
						       walk.src.virt.addr,
						       nbytes & AES_BLOCK_MASK,
						       walk.iv);
		nbytes &= ~AES_BLOCK_MASK;

		/* A sub-block tail can only occur on the final walk step. */
		if (walk.nbytes == walk.total && nbytes > 0) {
			aesni_enc(ctx, keystream, walk.iv);
			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
				       walk.src.virt.addr + walk.nbytes - nbytes,
				       keystream, nbytes);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			nbytes = 0;
		}
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
543 
/*
 * XCTR bulk encryption via the by8 AVX routines, dispatched on the
 * expanded key length ({128,192,256} guaranteed by aes_set_key_common()).
 * @byte_ctr is the number of bytes already processed before this call.
 */
static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
				   const u8 *in, unsigned int len, u8 *iv,
				   unsigned int byte_ctr)
{
	switch (ctx->key_length) {
	case AES_KEYSIZE_128:
		aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
		break;
	case AES_KEYSIZE_192:
		aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
		break;
	default:
		aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
		break;
	}
}
558 
/*
 * XCTR mode en/decryption.  Full blocks go through the AVX by8 asm; for
 * a trailing partial block the keystream is built by XORing the 1-based
 * little-endian block counter into the first word of the IV and
 * encrypting the result.  byte_ctr tracks total bytes processed so the
 * counter stays consistent across walk steps.
 */
static int xctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	u8 keystream[AES_BLOCK_SIZE];
	struct skcipher_walk walk;
	unsigned int nbytes;
	unsigned int byte_ctr = 0;
	int err;
	__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		kernel_fpu_begin();
		if (nbytes & AES_BLOCK_MASK)
			aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr,
				walk.src.virt.addr, nbytes & AES_BLOCK_MASK,
				walk.iv, byte_ctr);
		nbytes &= ~AES_BLOCK_MASK;
		byte_ctr += walk.nbytes - nbytes;

		/* Sub-block tail: only possible on the final walk step. */
		if (walk.nbytes == walk.total && nbytes > 0) {
			/* keystream = E_K(IV ^ ctr), ctr counted from 1 */
			memcpy(block, walk.iv, AES_BLOCK_SIZE);
			block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE);
			aesni_enc(ctx, keystream, (u8 *)block);
			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes -
				       nbytes, walk.src.virt.addr + walk.nbytes
				       - nbytes, keystream, nbytes);
			byte_ctr += nbytes;
			nbytes = 0;
		}
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
596 
597 static int
rfc4106_set_hash_subkey(u8 * hash_subkey,const u8 * key,unsigned int key_len)598 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
599 {
600 	struct crypto_aes_ctx ctx;
601 	int ret;
602 
603 	ret = aes_expandkey(&ctx, key, key_len);
604 	if (ret)
605 		return ret;
606 
607 	/* Clear the data in the hash sub key container to zero.*/
608 	/* We want to cipher all zeros to create the hash sub key. */
609 	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
610 
611 	aes_encrypt(&ctx, hash_subkey, hash_subkey);
612 
613 	memzero_explicit(&ctx, sizeof(ctx));
614 	return 0;
615 }
616 
common_rfc4106_set_key(struct crypto_aead * aead,const u8 * key,unsigned int key_len)617 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
618 				  unsigned int key_len)
619 {
620 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
621 
622 	if (key_len < 4)
623 		return -EINVAL;
624 
625 	/*Account for 4 byte nonce at the end.*/
626 	key_len -= 4;
627 
628 	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
629 
630 	return aes_set_key_common(crypto_aead_tfm(aead),
631 				  &ctx->aes_key_expanded, key, key_len) ?:
632 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
633 }
634 
635 /* This is the Integrity Check Value (aka the authentication tag) length and can
636  * be 8, 12 or 16 bytes long. */
common_rfc4106_set_authsize(struct crypto_aead * aead,unsigned int authsize)637 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
638 				       unsigned int authsize)
639 {
640 	switch (authsize) {
641 	case 8:
642 	case 12:
643 	case 16:
644 		break;
645 	default:
646 		return -EINVAL;
647 	}
648 
649 	return 0;
650 }
651 
generic_gcmaes_set_authsize(struct crypto_aead * tfm,unsigned int authsize)652 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
653 				       unsigned int authsize)
654 {
655 	switch (authsize) {
656 	case 4:
657 	case 8:
658 	case 12:
659 	case 13:
660 	case 14:
661 	case 15:
662 	case 16:
663 		break;
664 	default:
665 		return -EINVAL;
666 	}
667 
668 	return 0;
669 }
670 
/*
 * Core GCM en/decryption over the request's scatterlists.  Selects the
 * avx2 / avx / SSE asm variants from the static keys plus per-request
 * size thresholds, runs the three-phase init/update/finalize asm flow,
 * and writes the computed tag into @auth_tag.  The AAD is linearized
 * first because the init asm takes a flat buffer.
 * Returns 0 on success, -ENOMEM if the AAD copy cannot be allocated, or
 * an error from the skcipher AEAD walk.
 */
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx, u8 *auth_tag,
			      unsigned long auth_tag_len)
{
	/* Stack buffer over-allocated so data can be 16-byte aligned. */
	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
	unsigned long left = req->cryptlen;
	struct scatter_walk assoc_sg_walk;
	struct skcipher_walk walk;
	bool do_avx, do_avx2;
	u8 *assocmem = NULL;
	u8 *assoc;
	int err;

	/* For decryption the trailing tag is not payload. */
	if (!enc)
		left -= auth_tag_len;

	/* Variant choice must stay fixed for the whole request. */
	do_avx = (left >= AVX_GEN2_OPTSIZE);
	do_avx2 = (left >= AVX_GEN4_OPTSIZE);

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length) {
		/* First segment covers the whole AAD: map it directly. */
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	/* Phase 1: hash the AAD and set up the per-request GCM state. */
	kernel_fpu_begin();
	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
					assoclen);
	else if (static_branch_likely(&gcm_use_avx) && do_avx)
		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
					assoclen);
	else
		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
		  : skcipher_walk_aead_decrypt(&walk, req, false);

	/* Phase 2: en/decrypt the payload, one walk step per FPU section. */
	while (walk.nbytes > 0) {
		kernel_fpu_begin();
		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
			if (enc)
				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
			else
				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
			if (enc)
				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
			else
				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
		} else if (enc) {
			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
					     walk.src.virt.addr, walk.nbytes);
		} else {
			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
					     walk.src.virt.addr, walk.nbytes);
		}
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, 0);
	}

	if (err)
		return err;

	/* Phase 3: produce the authentication tag. */
	kernel_fpu_begin();
	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
					    auth_tag_len);
	else if (static_branch_likely(&gcm_use_avx) && do_avx)
		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
					    auth_tag_len);
	else
		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
	kernel_fpu_end();

	return 0;
}
780 
/*
 * GCM encryption: run the bulk en/decryption and then append the
 * computed authentication tag after the ciphertext in req->dst.
 */
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 auth_tag[16];
	int err;

	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
				 auth_tag, auth_tag_len);
	if (err)
		return err;

	/* Tag goes right after AAD + ciphertext in the destination. */
	scatterwalk_map_and_copy(auth_tag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);
	return 0;
}
799 
/*
 * GCM decryption: run the bulk decryption, then compare the computed tag
 * against the one at the end of req->src using a constant-time compare.
 * Returns -EBADMSG on authentication failure.
 */
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 auth_tag_msg[16];
	u8 auth_tag[16];
	int err;

	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
				 auth_tag, auth_tag_len);
	if (err)
		return err;

	/* Copy out original auth_tag */
	scatterwalk_map_and_copy(auth_tag_msg, req->src,
				 req->assoclen + req->cryptlen - auth_tag_len,
				 auth_tag_len, 0);

	/* Compare generated tag with passed in tag. */
	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
		/* Don't leak the computed tag on failure. */
		memzero_explicit(auth_tag, sizeof(auth_tag));
		return -EBADMSG;
	}
	return 0;
}
826 
helper_rfc4106_encrypt(struct aead_request * req)827 static int helper_rfc4106_encrypt(struct aead_request *req)
828 {
829 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
830 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
831 	void *aes_ctx = &(ctx->aes_key_expanded);
832 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
833 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
834 	unsigned int i;
835 	__be32 counter = cpu_to_be32(1);
836 
837 	/* Assuming we are supporting rfc4106 64-bit extended */
838 	/* sequence numbers We need to have the AAD length equal */
839 	/* to 16 or 20 bytes */
840 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
841 		return -EINVAL;
842 
843 	/* IV below built */
844 	for (i = 0; i < 4; i++)
845 		*(iv+i) = ctx->nonce[i];
846 	for (i = 0; i < 8; i++)
847 		*(iv+4+i) = req->iv[i];
848 	*((__be32 *)(iv+12)) = counter;
849 
850 	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
851 			      aes_ctx);
852 }
853 
helper_rfc4106_decrypt(struct aead_request * req)854 static int helper_rfc4106_decrypt(struct aead_request *req)
855 {
856 	__be32 counter = cpu_to_be32(1);
857 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
858 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
859 	void *aes_ctx = &(ctx->aes_key_expanded);
860 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
861 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
862 	unsigned int i;
863 
864 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
865 		return -EINVAL;
866 
867 	/* Assuming we are supporting rfc4106 64-bit extended */
868 	/* sequence numbers We need to have the AAD length */
869 	/* equal to 16 or 20 bytes */
870 
871 	/* IV below built */
872 	for (i = 0; i < 4; i++)
873 		*(iv+i) = ctx->nonce[i];
874 	for (i = 0; i < 8; i++)
875 		*(iv+4+i) = req->iv[i];
876 	*((__be32 *)(iv+12)) = counter;
877 
878 	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
879 			      aes_ctx);
880 }
881 #endif
882 
xts_aesni_setkey(struct crypto_skcipher * tfm,const u8 * key,unsigned int keylen)883 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
884 			    unsigned int keylen)
885 {
886 	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
887 	int err;
888 
889 	err = xts_verify_key(tfm, key, keylen);
890 	if (err)
891 		return err;
892 
893 	keylen /= 2;
894 
895 	/* first half of xts-key is for crypt */
896 	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
897 				 key, keylen);
898 	if (err)
899 		return err;
900 
901 	/* second half of xts-key is for tweak */
902 	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
903 				  key + keylen, keylen);
904 }
905 
/*
 * Common XTS en/decryption path.  All whole blocks are handled by the
 * AES-NI XTS assembler routines; a trailing partial block (ciphertext
 * stealing) is processed in a separate final pass together with the last
 * full block.
 */
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % AES_BLOCK_SIZE;	/* CTS remainder */
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	/* XTS requires at least one full block of input. */
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);
	if (!walk.nbytes)
		return err;

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		/*
		 * Message is not block-aligned and the walk cannot cover it
		 * in a single step: redo the walk over a subrequest that
		 * excludes the last full block and the tail, which are then
		 * handled together in the CTS pass at the bottom.
		 */
		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   blocks * AES_BLOCK_SIZE, req->iv);
		req = &subreq;

		err = skcipher_walk_virt(&walk, req, false);
		if (!walk.nbytes)
			return err;
	} else {
		tail = 0;	/* aligned or single-step walk: no CTS pass */
	}

	kernel_fpu_begin();

	/* calculate first value of T */
	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);

	while (walk.nbytes > 0) {
		int nbytes = walk.nbytes;

		/* Only the final step may process a partial block. */
		if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		/*
		 * Release the FPU around skcipher_walk_done(), which may
		 * sleep (the walk was started with atomic == false).
		 */
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);

		if (walk.nbytes > 0)
			kernel_fpu_begin();
	}

	if (unlikely(tail > 0 && !err)) {
		/* Ciphertext stealing: last full block plus the tail. */
		struct scatterlist sg_src[2], sg_dst[2];
		struct scatterlist *src, *dst;

		/* req == &subreq here, so req->cryptlen skips what's done. */
		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;

		kernel_fpu_begin();
		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}
999 
/* XTS encryption entry point; see xts_crypt() for the actual work. */
static int xts_encrypt(struct skcipher_request *req)
{
	return xts_crypt(req, true);
}
1004 
/* XTS decryption entry point; see xts_crypt() for the actual work. */
static int xts_decrypt(struct skcipher_request *req)
{
	return xts_crypt(req, false);
}
1009 
/* Bare single-block AES cipher backed by the AES-NI instructions. */
static struct crypto_alg aesni_cipher_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aesni_encrypt,
			.cia_decrypt		= aesni_decrypt
		}
	}
};
1028 
/*
 * Internal-only (CRYPTO_ALG_INTERNAL) skcipher implementations.  Users
 * reach them through the simd wrappers registered in aesni_init(), which
 * take care of kernel-mode FPU availability.
 */
static struct skcipher_alg aesni_skciphers[] = {
	{
		/* ECB */
		.base = {
			.cra_name		= "__ecb(aes)",
			.cra_driver_name	= "__ecb-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ecb_encrypt,
		.decrypt	= ecb_decrypt,
	}, {
		/* CBC */
		.base = {
			.cra_name		= "__cbc(aes)",
			.cra_driver_name	= "__cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cbc_encrypt,
		.decrypt	= cbc_decrypt,
	}, {
		/* CBC with ciphertext stealing */
		.base = {
			.cra_name		= "__cts(cbc(aes))",
			.cra_driver_name	= "__cts-cbc-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.walksize	= 2 * AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= cts_cbc_encrypt,
		.decrypt	= cts_cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		/* CTR (64-bit only); stream cipher, hence blocksize 1 */
		.base = {
			.cra_name		= "__ctr(aes)",
			.cra_driver_name	= "__ctr-aes-aesni",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= 1,
			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.chunksize	= AES_BLOCK_SIZE,
		.setkey		= aesni_skcipher_setkey,
		.encrypt	= ctr_crypt,
		.decrypt	= ctr_crypt,
#endif
	}, {
		/* XTS; priority 401 so it beats the 400 entries above */
		.base = {
			.cra_name		= "__xts(aes)",
			.cra_driver_name	= "__xts-aes-aesni",
			.cra_priority		= 401,
			.cra_flags		= CRYPTO_ALG_INTERNAL,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= XTS_AES_CTX_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.walksize	= 2 * AES_BLOCK_SIZE,
		.setkey		= xts_aesni_setkey,
		.encrypt	= xts_encrypt,
		.decrypt	= xts_decrypt,
	}
};
1116 
/* simd wrapper handles for aesni_skciphers[], filled in at registration. */
static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1119 
1120 #ifdef CONFIG_X86_64
1121 /*
1122  * XCTR does not have a non-AVX implementation, so it must be enabled
1123  * conditionally.
1124  */
/* Registered from aesni_init() only when the CPU has AVX. */
static struct skcipher_alg aesni_xctr = {
	.base = {
		.cra_name		= "__xctr(aes)",
		.cra_driver_name	= "__xctr-aes-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= aesni_skcipher_setkey,
	.encrypt	= xctr_crypt,
	.decrypt	= xctr_crypt,
};
1143 
1144 static struct simd_skcipher_alg *aesni_simd_xctr;
1145 #endif /* CONFIG_X86_64 */
1146 
1147 #ifdef CONFIG_X86_64
generic_gcmaes_set_key(struct crypto_aead * aead,const u8 * key,unsigned int key_len)1148 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1149 				  unsigned int key_len)
1150 {
1151 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1152 
1153 	return aes_set_key_common(crypto_aead_tfm(aead),
1154 				  &ctx->aes_key_expanded, key, key_len) ?:
1155 	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1156 }
1157 
generic_gcmaes_encrypt(struct aead_request * req)1158 static int generic_gcmaes_encrypt(struct aead_request *req)
1159 {
1160 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1161 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1162 	void *aes_ctx = &(ctx->aes_key_expanded);
1163 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1164 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1165 	__be32 counter = cpu_to_be32(1);
1166 
1167 	memcpy(iv, req->iv, 12);
1168 	*((__be32 *)(iv+12)) = counter;
1169 
1170 	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1171 			      aes_ctx);
1172 }
1173 
generic_gcmaes_decrypt(struct aead_request * req)1174 static int generic_gcmaes_decrypt(struct aead_request *req)
1175 {
1176 	__be32 counter = cpu_to_be32(1);
1177 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1178 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1179 	void *aes_ctx = &(ctx->aes_key_expanded);
1180 	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
1181 	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
1182 
1183 	memcpy(iv, req->iv, 12);
1184 	*((__be32 *)(iv+12)) = counter;
1185 
1186 	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1187 			      aes_ctx);
1188 }
1189 
/*
 * Internal AEAD implementations (rfc4106 GCM and plain GCM); exposed via
 * the simd wrappers registered in aesni_init().
 */
static struct aead_alg aesni_aeads[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= GCM_RFC4106_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__rfc4106(gcm(aes))",
		.cra_driver_name	= "__rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
	},
}, {
	.setkey			= generic_gcmaes_set_key,
	.setauthsize		= generic_gcmaes_set_authsize,
	.encrypt		= generic_gcmaes_encrypt,
	.decrypt		= generic_gcmaes_decrypt,
	.ivsize			= GCM_AES_IV_SIZE,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm(aes)",
		.cra_driver_name	= "__generic-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
	},
} };
#else
/* No AEADs on 32-bit; empty array keeps the registration calls valid. */
static struct aead_alg aesni_aeads[0];
#endif
1228 
/* simd wrapper handles for aesni_aeads[], filled in at registration. */
static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

/* Match any CPU advertising the AES-NI feature flag. */
static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1236 
/*
 * Module init: select the best GCM/CTR code paths for this CPU, then
 * register the bare cipher, the skciphers and the AEADs (each with their
 * simd wrappers), unwinding in reverse order on failure.
 */
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
	/* Flip static branches once at init; AVX2 implies AVX here. */
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		static_branch_enable(&gcm_use_avx);
		static_branch_enable(&gcm_use_avx2);
	} else
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		static_branch_enable(&gcm_use_avx);
	} else {
		pr_info("SSE version of gcm_enc/dec engaged.\n");
	}
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif /* CONFIG_X86_64 */

	err = crypto_register_alg(&aesni_cipher_alg);
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_cipher;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

#ifdef CONFIG_X86_64
	/* XCTR has no non-AVX implementation, so it is gated on AVX. */
	if (boot_cpu_has(X86_FEATURE_AVX))
		err = simd_register_skciphers_compat(&aesni_xctr, 1,
						     &aesni_simd_xctr);
	/* err is still 0 here when the AVX branch above was not taken. */
	if (err)
		goto unregister_aeads;
#endif /* CONFIG_X86_64 */

	return 0;

#ifdef CONFIG_X86_64
unregister_aeads:
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
				aesni_simd_aeads);
#endif /* CONFIG_X86_64 */

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_cipher:
	crypto_unregister_alg(&aesni_cipher_alg);
	return err;
}
1300 
/* Module exit: unregister everything that aesni_init() registered. */
static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_alg(&aesni_cipher_alg);
#ifdef CONFIG_X86_64
	/* XCTR was only registered when AVX is present; mirror that here. */
	if (boot_cpu_has(X86_FEATURE_AVX))
		simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
#endif /* CONFIG_X86_64 */
}
1313 
/*
 * NOTE(review): registered via late_initcall() rather than module_init() —
 * presumably so that, when built in, dependencies are available before
 * registration; confirm rationale against the commit history.
 */
late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");
1320