/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>

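/* Copy @len bytes from user space into @dst and checksum them.  Returns the
 * partial checksum of the copied data (seeded with ~0U so 0 can be reserved
 * as the error value), or 0 if the copy from user space faulted.
 */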
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static __always_inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (copy_from_user(dst, src, len))
		return 0;
	return csum_partial(dst, len, ~0U);
}
#endif

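/* Checksum @len bytes of kernel memory at @src and copy them to user space.
 * Returns the partial checksum, or 0 if the copy to user space faulted.
 */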
#ifndef HAVE_CSUM_COPY_USER
static __always_inline __wsum
csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
	__wsum sum = csum_partial(src, len, ~0U);

	if (copy_to_user(dst, src, len) == 0)
		return sum;
	return 0;
}
#endif

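/* Copy @len bytes between kernel buffers and return the partial checksum of
 * the data.  No user-access checking is involved, hence "nocheck".
 */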
#ifndef _HAVE_ARCH_CSUM_AND_COPY
static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, 0);
}
#endif

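/* Add two 32-bit partial checksums using ones' complement arithmetic:
 * any carry out of bit 31 is folded back into the result.
 */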
#ifndef HAVE_ARCH_CSUM_ADD
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;
	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif

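/* Subtract a partial checksum by adding its ones' complement. */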
static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}

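/* 16-bit (folded) counterparts of csum_add()/csum_sub(), for use directly
 * on __sum16 header checksum fields.
 */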
static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

#ifndef HAVE_ARCH_CSUM_SHIFT
static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		return (__force __wsum)ror32((__force u32)sum, 8);
	return sum;
}
#endif

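/* Fold @csum2, the checksum of a block starting @offset bytes into the data
 * covered by @csum, into @csum.  An odd offset requires rotating @csum2 so
 * that its bytes land in the correct 16-bit lanes.
 */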
static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	return csum_add(csum, csum_shift(csum2, offset));
}

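/* Same as csum_block_add(); the generic version ignores @len. */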
static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

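/* Remove @csum2, the checksum of a block at @offset, from @csum. */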
static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

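/* Widen a folded 16-bit checksum back into 32-bit __wsum form. */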
static __always_inline __wsum csum_unfold(__sum16 n)
{
	return (__force __wsum)n;
}

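/* Extended csum_partial() hook; the generic version simply forwards. */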
static __always_inline
__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}

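/* Transmitted in place of a checksum that folds to 0, since 0 means
 * "no checksum" in protocols such as UDP.
 */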
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

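/* Apply a precomputed 32-bit checksum difference @diff to the folded
 * checksum at @sum.
 */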
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}

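/* Update the folded checksum at @sum after a 32-bit field covered by it
 * changes from @from to @to.
 */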
static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}

/* Implements RFC 1624 (Incremental Internet Checksum)
 * 3. Discussion states :
 *     HC' = ~(~HC + ~m + m')
 *  HC  : old checksum in header
 *  HC' : new checksum in header
 *  m   : old value of a 16bit field
 *  m'  : new value of a 16bit field
 */
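/* Illustrative use (new_len stands in for whatever new __be16 value the
 * caller is writing), e.g. keeping an IPv4 header checksum valid while
 * rewriting its tot_len field:
 *
 *	csum_replace2(&iph->check, iph->tot_len, new_len);
 *	iph->tot_len = new_len;
 */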
static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}

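/* Same idea as csum_replace4(), but for an unfolded 32-bit checksum such as
 * skb->csum.
 */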
static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
{
	*csum = csum_add(csum_sub(*csum, old), new);
}

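/* Checksum-fixup helpers (see net/core/utils.c) used when packet contents
 * are rewritten: they update the protocol checksum field at @sum and, where
 * needed, skb->csum.  @pseudohdr says whether the field is also covered by
 * the pseudo-header checksum.
 */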
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);

static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
			      __be16 from, __be16 to, bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}

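/* Remote checksum offload: rewrite the checksum field at @ptr + @offset so
 * that it only covers the data from @start onwards, given @csum for the
 * whole buffer starting at @ptr.  Returns the delta applied, which the
 * caller can use to fix up skb->csum.
 */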
static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
					     int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}

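/* Undo a remcsum_adjust() on the checksum field at @psum, using the @delta
 * it returned.
 */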
static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

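/* Negate a partial checksum without tripping sparse's __wsum checks. */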
static __always_inline __wsum wsum_negate(__wsum val)
{
	return (__force __wsum)-((__force u32)val);
}

#endif /* _CHECKSUM_H */