#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

/*
 *  include/asm-s390/checksum.h
 *    S390 fast network checksum routines
 *    see also arch/s390/lib/checksum.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Ulrich Hild        (first version)
 *               Martin Schwidefsky (heavily optimized CKSM version)
 *               D.J. Barrow        (third attempt)
 */

#include <linux/errno.h>	/* -EFAULT */
#include <linux/string.h>	/* memcpy(), memset() */
#include <asm/uaccess.h>	/* copy_from_user() */

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int
csum_partial(const unsigned char *buff, int len, unsigned int sum);
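
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * because the return value is suitable for feeding back in, a packet
 * split across fragments frag1 and frag2 can be checksummed
 * incrementally; every fragment except the last must have even length.
 *
 *	unsigned int sum;
 *
 *	sum = csum_partial(frag1, len1, 0);
 *	sum = csum_partial(frag2, len2, sum);
 */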

/*
 * csum_partial as an inline function
 */
extern inline unsigned int
csum_partial_inline(const unsigned char *buff, int len, unsigned int sum)
{
        register_pair rp;

        rp.subreg.even = (unsigned long) buff;
        rp.subreg.odd = (unsigned long) len;
        __asm__ __volatile__ (
                "0: cksm %0,%1\n"	/* do checksum on longs */
                "   jo   0b\n"		/* CC 3: interrupted, resume */
                : "+&d" (sum), "+&a" (rp) : : "cc" );
        return sum;
}

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or better, a 64-bit) boundary
 */
extern inline unsigned int
csum_partial_copy(const char *src, char *dst, int len, unsigned int sum)
{
        memcpy(dst, src, len);
        return csum_partial_inline(dst, len, sum);
}

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or better, a 64-bit) boundary
 *
 * Copy from userspace and compute checksum. If we catch an exception
 * then zero the rest of the buffer.
 */
extern inline unsigned int
csum_partial_copy_from_user(const char *src, char *dst,
                            int len, unsigned int sum,
                            int *err_ptr)
{
        int missing;

        missing = copy_from_user(dst, src, len);
        if (missing) {
                /* zero the uncopied tail so the checksum stays defined */
                memset(dst + len - missing, 0, missing);
                *err_ptr = -EFAULT;
        }

        return csum_partial(dst, len, sum);
}
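
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * failure is reported through err_ptr, not the return value, so the
 * caller checks it separately after the call.
 *
 *	int err = 0;
 *	unsigned int sum;
 *
 *	sum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
 *	if (err)
 *		return err;
 */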

extern inline unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
{
        memcpy(dst, src, len);
        return csum_partial_inline(dst, len, sum);
}

/*
 * Fold a partial checksum without adding pseudo headers
 */
#if 1
unsigned short csum_fold(unsigned int sum);
#else
extern inline unsigned short
csum_fold(unsigned int sum)
{
        register_pair rp;

        __asm__ __volatile__ (
                "   slr  %N1,%N1\n"	/* %0 = H L */
                "   lr   %1,%0\n"	/* %0 = H L, %1 = H L 0 0 */
                "   srdl %1,16\n"	/* %0 = H L, %1 = 0 H L 0 */
                "   alr  %1,%N1\n"	/* %0 = H L, %1 = L H L 0 */
                "   alr  %0,%1\n"	/* %0 = H+L+C L+H */
                "   srl  %0,16\n"	/* %0 = H+L+C */
                : "+&d" (sum), "=d" (rp) : : "cc" );
        return ((unsigned short) ~sum);
}
#endif
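
/*
 * Worked example (an illustration, not part of the original header):
 * csum_fold() adds the two halfwords of the 32-bit sum with end-around
 * carry and complements the result, e.g. for sum = 0x1a2b3c4d:
 *
 *	0x1a2b + 0x3c4d = 0x5678   (no carry in this case)
 *	~0x5678         = 0xa987   (the final 16-bit checksum)
 */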

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
extern inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
        register_pair rp;
        unsigned long sum;

        rp.subreg.even = (unsigned long) iph;
        rp.subreg.odd = (unsigned long) ihl*4;	/* ihl counts 32-bit words */
        __asm__ __volatile__ (
                "   sr   %0,%0\n"	/* set sum to zero */
                "0: cksm %0,%1\n"	/* do checksum on longs */
                "   jo   0b\n"		/* CC 3: interrupted, resume */
                : "=&d" (sum), "+&a" (rp) : : "cc" );
        return csum_fold(sum);
}
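
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a received IPv4 header is valid exactly when it checksums to zero,
 * because the stored checksum field complements the rest of the header.
 *
 *	if (ip_fast_csum((unsigned char *) iph, iph->ihl) != 0)
 *		goto drop;
 */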

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 32-bit checksum
 */
extern inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
                   unsigned short len, unsigned short proto,
                   unsigned int sum)
{
        __asm__ __volatile__ (
                "   alr  %0,%1\n"	/* sum += saddr */
                "   brc  12,0f\n"	/* branch if no carry (CC 0 or 1) */
                "   ahi  %0,1\n"	/* add carry */
                "0:"
                : "+&d" (sum) : "d" (saddr) : "cc" );
        __asm__ __volatile__ (
                "   alr  %0,%1\n"	/* sum += daddr */
                "   brc  12,1f\n"	/* branch if no carry (CC 0 or 1) */
                "   ahi  %0,1\n"	/* add carry */
                "1:"
                : "+&d" (sum) : "d" (daddr) : "cc" );
        __asm__ __volatile__ (
                "   alr  %0,%1\n"	/* sum += (len<<16) + proto */
                "   brc  12,2f\n"	/* branch if no carry (CC 0 or 1) */
                "   ahi  %0,1\n"	/* add carry */
                "2:"
                : "+&d" (sum)
                : "d" (((unsigned int) len<<16) + (unsigned int) proto)
                : "cc" );
        return sum;
}
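
/*
 * Worked note (an illustration, not from the original header): the
 * third addition packs length and protocol into one 32-bit operand,
 * (len << 16) + proto, so the three alr/ahi pairs accumulate saddr,
 * daddr, length and protocol with end-around carry -- the RFC 793
 * pseudo-header sum.
 */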

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
extern inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
                  unsigned short len, unsigned short proto,
                  unsigned int sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
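
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a UDP checksum sums the datagram first and folds the pseudo-header
 * in last; uh points to a hypothetical UDP header followed by the
 * payload, ulen is the UDP length.
 *
 *	unsigned int sum;
 *
 *	uh->check = 0;
 *	sum = csum_partial((unsigned char *) uh, ulen, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, sum);
 */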

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
extern inline unsigned short
ip_compute_csum(unsigned char *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
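
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the usual ICMP pattern zeroes the checksum field before summing the
 * whole message.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum((unsigned char *) icmph, len);
 */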

#endif /* _S390_CHECKSUM_H */