/* TODO: csum_tcpudp_magic could be sped up, and csum_fold as well */

#ifndef _CRIS_CHECKSUM_H
#define _CRIS_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum);
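
/*
 * A minimal usage sketch (not part of this interface): since the return
 * value is suitable for feeding back in as "sum", a buffer can be
 * checksummed in fragments.  The fragment names and lengths below are
 * hypothetical; every length except the last must be even.
 *
 *	unsigned int sum = 0;
 *	sum = csum_partial(frag1, frag1_len, sum);
 *	sum = csum_partial(frag2, frag2_len, sum);
 *
 * The running 32-bit sum is then folded to 16 bits with csum_fold() below.
 */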

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or even better, 64-bit) boundary
 */

unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
				       int len, unsigned int sum);
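
/*
 * Minimal sketch (illustration only; src, dst and len are hypothetical):
 * copy len bytes from src to dst and obtain the running checksum of the
 * copied data in a single pass.
 *
 *	unsigned int sum = csum_partial_copy_nocheck(src, dst, len, 0);
 */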

/*
 *	Fold a partial checksum into a word
 */

extern inline unsigned int csum_fold(unsigned int sum)
{
	/* the while loop is not strictly necessary; two iterations
	   are always enough */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */

	return ~sum;
}
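
/*
 * Worked example (added for illustration): folding sum = 0xffff0003 takes
 * the full two iterations:
 *
 *	0x0003 + 0xffff = 0x10002	(carry out of the low 16 bits)
 *	0x0002 + 0x0001 = 0x0003	(nothing left above bit 15)
 *
 * and ~sum is returned, whose low 16 bits (0xfffc) are the folded,
 * complemented checksum.
 */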

/* Checksum some values used in TCP/UDP headers.
 *
 * The gain from doing this in asm is that C would not generate carry
 * additions for the 32-bit components of the checksum; otherwise we would
 * have had to split all of those into 16-bit components and then add.
 */

extern inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
		   unsigned short proto, unsigned int sum)
{
	int res;
	__asm__ ("add.d %2, %0\n\t"
		 "ax\n\t"
		 "add.d %3, %0\n\t"
		 "ax\n\t"
		 "add.d %4, %0\n\t"
		 "ax\n\t"
		 "addq 0, %0\n"
	: "=r" (res)
	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));

	return res;
}

extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
						int len, unsigned int sum,
						int *errptr);

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksum on 4 octet boundaries.
 */

extern inline unsigned short ip_fast_csum(unsigned char *iph,
					  unsigned int ihl)
{
	return csum_fold(csum_partial(iph, ihl * 4, 0));
}
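
/*
 * Usage sketch (illustration only; iph and ihl are assumed to come from a
 * received IPv4 header): summing a header over all of its words, checksum
 * field included, folds to zero when the header is intact.
 *
 *	if (ip_fast_csum(iph, ihl) != 0)
 *		...	drop the corrupt packet
 */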

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */

extern inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
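
/*
 * Usage sketch (illustration only; the variable names are hypothetical):
 * a UDP checksum is the pseudo-header folded together with the sum of the
 * UDP header and payload.
 *
 *	unsigned int csum;
 *
 *	csum = csum_partial(udp_hdr_and_data, udp_len, 0);
 *	udp_check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *				      IPPROTO_UDP, csum);
 *
 * Real UDP code additionally maps a result of 0 to 0xffff, since an
 * all-zero checksum field means "no checksum" on the wire.
 */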

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

extern inline unsigned short ip_compute_csum(unsigned char *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#endif /* _CRIS_CHECKSUM_H */