1 #ifndef _S390_CHECKSUM_H
2 #define _S390_CHECKSUM_H
3
4 /*
5 * include/asm-s390/checksum.h
6 * S390 fast network checksum routines
 * see also arch/s390/lib/checksum.c
8 *
9 * S390 version
10 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
11 * Author(s): Ulrich Hild (first version)
12 * Martin Schwidefsky (heavily optimized CKSM version)
13 * D.J. Barrow (third attempt)
14 */
15
16 #include <asm/uaccess.h>
17
18 /*
19 * computes the checksum of a memory block at buff, length len,
20 * and adds in "sum" (32-bit)
21 *
22 * returns a 32-bit number suitable for feeding into itself
23 * or csum_tcpudp_magic
24 *
25 * this function must be called with even lengths, except
26 * for the last fragment, which may be odd
27 *
28 * it's best to have buff aligned on a 32-bit boundary
29 */
/*
 * Out-of-line version; per the file header the implementation lives
 * in arch/s390/lib/checksum.c.  Same contract as the inline variant
 * below.
 */
unsigned int
csum_partial(const unsigned char * buff, int len, unsigned int sum);
32
33 /*
34 * csum_partial as an inline function
35 */
extern inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
	/*
	 * CKSM operates on the even/odd register pair 2/3: gpr 2 holds
	 * the current address, gpr 3 the remaining length, and both are
	 * updated by the instruction (hence the "2"/"3" clobbers).  The
	 * instruction may end before all data is processed, so the
	 * jo (branch on cc 3) loops back until it completes.
	 */
	__asm__ __volatile__ (
		" lgr 2,%1\n" /* address in gpr 2 */
		" lgfr 3,%2\n" /* length in gpr 3 */
		"0: cksm %0,2\n" /* do checksum on longs */
		" jo 0b\n"
		: "+&d" (sum)
		: "d" (buff), "d" (len)
		: "cc", "2", "3" );
	return sum;
}
49
/*
 * Same result as csum_partial, but copies src to dst while
 * checksumming.
 *
 * Aligning src and dst on a 32-bit (better: 64-bit) boundary is even
 * more important here.
 */
extern inline unsigned int
csum_partial_copy(const char *src, char *dst, int len, unsigned int sum)
{
	unsigned int csum;

	/* copy first, then checksum the freshly written destination */
	memcpy(dst, src, len);
	csum = csum_partial_inline((const unsigned char *) dst, len, sum);
	return csum;
}
64
65 /*
66 * the same as csum_partial_copy, but copies from user space.
67 *
68 * here even more important to align src and dst on a 32-bit (or even
69 * better 64-bit) boundary
70 *
71 * Copy from userspace and compute checksum. If we catch an exception
72 * then zero the rest of the buffer.
73 */
74 extern inline unsigned int
csum_partial_copy_from_user(const char * src,char * dst,int len,unsigned int sum,int * err_ptr)75 csum_partial_copy_from_user (const char *src, char *dst,
76 int len, unsigned int sum,
77 int *err_ptr)
78 {
79 int missing;
80
81 missing = copy_from_user(dst, src, len);
82 if (missing) {
83 memset(dst + len - missing, 0, missing);
84 *err_ptr = -EFAULT;
85 }
86
87 return csum_partial(dst, len, sum);
88 }
89
/*
 * Copy-and-checksum without any user-space access checking: a plain
 * kernel-to-kernel copy followed by the inline checksum.
 */
extern inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{
	(void) memcpy(dst, src, len);

	return csum_partial_inline((const unsigned char *) dst, len, sum);
}
96
/*
 * Fold a partial checksum without adding pseudo headers.
 *
 * Adds the high and low 16-bit halves of the 32-bit running sum
 * (propagating the carry back in) and returns the ones' complement
 * of the result as the final 16-bit checksum.
 */
extern inline unsigned short
csum_fold(unsigned int sum)
{
	/*
	 * gprs 2/3 are scratch; the per-instruction comments track the
	 * register contents (H = high halfword, L = low halfword,
	 * C = carry).
	 */
	__asm__ __volatile__ (
		" sr 3,3\n" /* %0 = H*65536 + L */
		" lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */
		" srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */
		" alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */
		" alr %0,2\n" /* %0 = H+L+C L+H */
		" srl %0,16\n" /* %0 = H+L+C */
		: "+&d" (sum) : : "cc", "2", "3");
	return ((unsigned short) ~sum);
}
113
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * ihl is the header length in 32-bit words, so the byte count fed to
 * the checksum loop is ihl*4.
 */
extern inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
	unsigned long sum;

	/*
	 * Same CKSM loop as csum_partial_inline, but starting from a
	 * zeroed accumulator; gprs 2/3 carry address/length and are
	 * clobbered.
	 */
	__asm__ __volatile__ (
		" slgr %0,%0\n" /* set sum to zero */
		" lgr 2,%1\n" /* address in gpr 2 */
		" lgfr 3,%2\n" /* length in gpr 3 */
		"0: cksm %0,2\n" /* do checksum on ints */
		" jo 0b\n"
		: "=&d" (sum)
		: "d" (iph), "d" (ihl*4)
		: "cc", "2", "3" );
	return csum_fold(sum);
}
135
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 32-bit checksum
 */
extern inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
                   unsigned short len, unsigned short proto,
                   unsigned int sum)
{
	/*
	 * 64-bit adds of saddr, daddr and (len<<16)+proto with manual
	 * end-around carry: each "brc 12" skips the +1 when the
	 * preceding add produced no carry.  The final srlg/alr pair
	 * folds the upper 32 bits into the lower half, and llgfr
	 * zero-extends the 32-bit result.
	 */
	__asm__ __volatile__ (
		" lgfr %0,%0\n"
		" algr %0,%1\n" /* sum += saddr */
		" brc 12,0f\n"
		" aghi %0,1\n" /* add carry */
		"0: algr %0,%2\n" /* sum += daddr */
		" brc 12,1f\n"
		" aghi %0,1\n" /* add carry */
		"1: algfr %0,%3\n" /* sum += (len<<16) + proto */
		" brc 12,2f\n"
		" aghi %0,1\n" /* add carry */
		"2: srlg 0,%0,32\n"
		" alr %0,0\n" /* fold to 32 bits */
		" brc 12,3f\n"
		" ahi %0,1\n" /* add carry */
		"3: llgfr %0,%0"
		: "+&d" (sum)
		: "d" (saddr), "d" (daddr),
		  "d" (((unsigned int) len<<16) + (unsigned int) proto)
		: "cc", "0" );
	return sum;
}
167
/*
 * Computes the TCP/UDP pseudo-header checksum and folds it down to a
 * 16-bit value, already complemented and ready to go on the wire.
 */
extern inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr,
                  unsigned short len, unsigned short proto,
                  unsigned int sum)
{
	unsigned int pseudo_sum;

	pseudo_sum = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);
	return csum_fold(pseudo_sum);
}
180
/*
 * Used for miscellaneous IP-like checksums, mainly in icmp.c:
 * checksum the buffer starting from a zero sum, then fold to 16 bits.
 */
extern inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
	unsigned int partial = csum_partial_inline(buff, len, 0);

	return csum_fold(partial);
}
191
192 #endif /* _S390_CHECKSUM_H */
193
194
195