#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
 */

#include <linux/config.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
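
/*
 * Example (an illustrative sketch, not part of the original header):
 * checksumming a buffer in two fragments, then folding the running
 * 32-bit sum down to 16 bits with csum_fold() (defined below).  Only
 * the last fragment may have an odd length:
 *
 *	unsigned int sum;
 *	unsigned short check;
 *
 *	sum = csum_partial(buf, len1, 0);		len1 must be even
 *	sum = csum_partial(buf + len1, len2, sum);	last fragment
 *	check = csum_fold(sum);
 */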

/*
 * the same as csum_partial, but copies from src while it checksums,
 * and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, 64-bit) boundary
 */

asmlinkage unsigned int csum_partial_copy_generic(const char *src, char *dst, int len, int sum,
                                                  int *src_err_ptr, int *dst_err_ptr);

/*
 * Note: when you get a NULL pointer exception here this means someone
 * passed in an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the
 * verify_area().
 */
static __inline__
unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
                                       int len, int sum)
{
        return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static __inline__
unsigned int csum_partial_copy_from_user(const char *src, char *dst,
                                         int len, int sum, int *err_ptr)
{
        return csum_partial_copy_generic(src, dst, len, sum, err_ptr, NULL);
}
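
/*
 * Example (an illustrative sketch): checksum user data while copying
 * it into a kernel buffer, with any fault reported through err.  ubuf,
 * kbuf and len are assumed to have been set up (and ubuf verified)
 * elsewhere:
 *
 *	int err = 0;
 *
 *	sum = csum_partial_copy_from_user(ubuf, kbuf, len, sum, &err);
 *	if (err)
 *		return err;
 */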

/*
 * These are the old (and unsafe) ways of doing checksums; a warning
 * message will be printed if they are used and an exception occurs.
 *
 * these functions should go away after some time.
 */

#define csum_partial_copy_fromuser csum_partial_copy
unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);

/*
 * Fold a partial checksum
 */

static __inline__ unsigned int csum_fold(unsigned int sum)
{
        unsigned int __dummy;
        /* Add the upper and lower 16-bit halves, fold the resulting
           carry back in, and return the one's complement. */
        __asm__("swap.w %0, %1\n\t"
                "extu.w %0, %0\n\t"
                "extu.w %1, %1\n\t"
                "add %1, %0\n\t"
                "swap.w %0, %1\n\t"
                "add %1, %0\n\t"
                "not %0, %0\n\t"
                : "=r" (sum), "=&r" (__dummy)
                : "0" (sum)
                : "t");
        return sum;
}
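
/*
 * A portable C sketch of the same fold, shown only to illustrate the
 * arithmetic (the asm above is what actually gets used):
 *
 *	sum = (sum & 0xffff) + (sum >> 16);	fold high half into low
 *	sum = (sum & 0xffff) + (sum >> 16);	fold in the new carry
 *	return ~sum;				one's complement
 */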

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 *
 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
 * for Linux by Arnt Gulbrandsen.
 */
static __inline__ unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
        unsigned int sum, __dummy0, __dummy1;

        __asm__ __volatile__(
                "mov.l @%1+, %0\n\t"
                "mov.l @%1+, %3\n\t"
                "add #-2, %2\n\t"
                "clrt\n\t"
                "1:\t"
                "addc %3, %0\n\t"
                "movt %4\n\t"
                "mov.l @%1+, %3\n\t"
                "dt %2\n\t"
                "bf/s 1b\n\t"
                " cmp/eq #1, %4\n\t"
                "addc %3, %0\n\t"
                "addc %2, %0"   /* Here %2 is 0, add carry-bit */
        /* Since the input registers which are loaded with iph and ihl
           are modified, we must also specify them as outputs, or gcc
           will assume they contain their original values. */
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
        : "1" (iph), "2" (ihl)
        : "t");

        return csum_fold(sum);
}
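
/*
 * Example (an illustrative sketch): verifying the checksum of a
 * received IPv4 header; a header with a correct checksum folds to
 * zero.  iph is assumed to point at a struct iphdr:
 *
 *	if (ip_fast_csum((unsigned char *)iph, iph->ihl) != 0)
 *		goto drop;
 */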

static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
                                                   unsigned long daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
                                                   unsigned int sum)
{
#ifdef __LITTLE_ENDIAN__
        unsigned long len_proto = (ntohs(len)<<16)+proto*256;
#else
        unsigned long len_proto = (proto<<16)+len;
#endif
        __asm__("clrt\n\t"
                "addc %0, %1\n\t"
                "addc %2, %1\n\t"
                "addc %3, %1\n\t"
                "movt %0\n\t"
                "add %1, %0"
                : "=r" (sum), "=r" (len_proto)
                : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
                : "t");
        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
                                                       unsigned long daddr,
                                                       unsigned short len,
                                                       unsigned short proto,
                                                       unsigned int sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
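
/*
 * Example (an illustrative sketch): filling in the checksum of an
 * outgoing UDP datagram.  udph, ulen, saddr and daddr are assumed to
 * have been set up elsewhere:
 *
 *	udph->check = 0;
 *	sum = csum_partial((unsigned char *)udph, ulen, 0);
 *	udph->check = csum_tcpudp_magic(saddr, daddr, ulen,
 *					IPPROTO_UDP, sum);
 */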

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static __inline__ unsigned short ip_compute_csum(unsigned char *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
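
/*
 * Example (an illustrative sketch): checksumming an ICMP message
 * before transmission, with icmph and len set up elsewhere:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum((unsigned char *)icmph, len);
 */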

#define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
                                                     struct in6_addr *daddr,
                                                     __u32 len,
                                                     unsigned short proto,
                                                     unsigned int sum)
{
        unsigned int __dummy;
        /* Accumulate the four 32-bit words of each address, then len
           and proto, carrying between the additions. */
        __asm__("clrt\n\t"
                "mov.l @(0,%2), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(4,%2), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(8,%2), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(12,%2), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(0,%3), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(4,%3), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(8,%3), %1\n\t"
                "addc %1, %0\n\t"
                "mov.l @(12,%3), %1\n\t"
                "addc %1, %0\n\t"
                "addc %4, %0\n\t"
                "addc %5, %0\n\t"
                "movt %1\n\t"
                "add %1, %0\n"
                : "=r" (sum), "=&r" (__dummy)
                : "r" (saddr), "r" (daddr),
                  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
                : "t");

        return csum_fold(sum);
}
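
/*
 * Example (an illustrative sketch): TCP-over-IPv6 checksum, with th,
 * len, saddr and daddr set up elsewhere:
 *
 *	th->check = 0;
 *	sum = csum_partial((unsigned char *)th, len, 0);
 *	th->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, sum);
 */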

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user(const char *src, char *dst,
                                                     int len, int sum, int *err_ptr)
{
        if (access_ok(VERIFY_WRITE, dst, len))
                return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);

        if (len)
                *err_ptr = -EFAULT;

        return -1; /* invalid checksum */
}
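
/*
 * Example (an illustrative sketch): checksumming a datagram while
 * copying it out to user space, with kbuf, ubuf and len set up
 * elsewhere:
 *
 *	int err = 0;
 *
 *	sum = csum_and_copy_to_user(kbuf, ubuf, len, sum, &err);
 *	if (err)
 *		return err;
 */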
#endif /* __ASM_SH_CHECKSUM_H */