// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Crypto library utility functions
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/module.h>

/*
 * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
 * (which may alias one of the sources).  Don't call this directly; call
 * crypto_xor() or crypto_xor_cpy() instead.
 */
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
	int relalign = 0;

	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
		int size = sizeof(unsigned long);
		int d = (((unsigned long)dst ^ (unsigned long)src1) |
			 ((unsigned long)dst ^ (unsigned long)src2)) &
			(size - 1);

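		/*
		 * d's lowest set bit is the first bit position at which the
		 * three pointers disagree, so 1 << __ffs(d) is the largest
		 * power-of-two stride at which they stay mutually aligned.
		 * E.g. if dst % 8 == 0 and src1 % 8 == 2, the pointers first
		 * differ in bit 1 and relalign = 2: only 2-byte and 1-byte
		 * accesses keep all three naturally aligned.  If d == 0,
		 * they agree modulo the word size and full words are safe.
		 */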
		relalign = d ? 1 << __ffs(d) : size;

		/*
		 * If we care about alignment, process as many bytes as
		 * needed to advance dst and src to values whose alignments
		 * equal their relative alignment.  This will allow us to
		 * process the remainder of the input using optimal strides.
		 */
		while (((unsigned long)dst & (relalign - 1)) && len > 0) {
			*dst++ = *src1++ ^ *src2++;
			len--;
		}
	}

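	/*
	 * Main loops: XOR in the largest chunks the relative alignment
	 * allows.  When unaligned accesses are cheap, relalign stays 0 and
	 * every stride below is permitted; 64-bit kernels start with 8
	 * bytes per iteration.
	 */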
	while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
			u64 l = get_unaligned((u64 *)src1) ^
				get_unaligned((u64 *)src2);
			put_unaligned(l, (u64 *)dst);
		} else {
			*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
		}
		dst += 8;
		src1 += 8;
		src2 += 8;
		len -= 8;
	}

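	/* 4-byte strides, when the relative alignment (or cheap unaligned
	 * access) allows. */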
	while (len >= 4 && !(relalign & 3)) {
		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
			u32 l = get_unaligned((u32 *)src1) ^
				get_unaligned((u32 *)src2);
			put_unaligned(l, (u32 *)dst);
		} else {
			*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
		}
		dst += 4;
		src1 += 4;
		src2 += 4;
		len -= 4;
	}

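	/* Likewise for 2-byte strides. */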
	while (len >= 2 && !(relalign & 1)) {
		if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
			u16 l = get_unaligned((u16 *)src1) ^
				get_unaligned((u16 *)src2);
			put_unaligned(l, (u16 *)dst);
		} else {
			*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
		}
		dst += 2;
		src1 += 2;
		src2 += 2;
		len -= 2;
	}

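	/* Handle any remaining bytes one at a time. */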
	while (len--)
		*dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
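
/*
 * Usage sketch (illustrative, not part of this file): callers normally go
 * through the crypto_xor() wrapper from <crypto/algapi.h>, e.g. to fold a
 * keystream block into a buffer in place:
 *
 *	u8 buf[16], keystream[16];
 *
 *	crypto_xor(buf, keystream, sizeof(buf));  // buf[i] ^= keystream[i]
 */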

MODULE_LICENSE("GPL");