/*
 *  linux/include/asm-arm/checksum.h
 *
 *  IP checksum routines
 *
 *  Copyright (C) Original authors of ../asm-i386/checksum.h
 *  Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);

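/*
 * Illustrative sketch (not part of this header): checksumming a payload
 * that arrives in two pieces by feeding the running sum back into
 * csum_partial().  The buffer and length names are made up for the
 * example; every piece except the last must have an even length.
 *
 *        unsigned int sum;
 *
 *        sum = csum_partial(frag1, frag1_len, 0);
 *        sum = csum_partial(frag2, frag2_len, sum);
 *        // sum may now be folded with csum_fold() or passed on to
 *        // csum_tcpudp_magic()
 */
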
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly,
 * when needed.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or, even better, a 64-bit) boundary
 */

unsigned int
csum_partial_copy_nocheck(const char *src, char *dst, int len, int sum);

unsigned int
csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr);

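/*
 * Illustrative sketch (not part of this header): copying data in from
 * user space while checksumming it.  "ubuf", "kbuf" and "len" are
 * made-up names; on a faulting user access the routine reports the
 * failure through err_ptr, which the caller must check.
 *
 *        int err = 0;
 *        unsigned int csum;
 *
 *        csum = csum_partial_copy_from_user(ubuf, kbuf, len, 0, &err);
 *        if (err)
 *                return -EFAULT;        // copy faulted; csum is not usable
 */
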
/*
 * This is the old (and unsafe) way of doing checksums; a warning
 * message will be printed if it is used and an exception occurs.
 *
 * It should go away after some time.
 */
#define csum_partial_copy(src,dst,len,sum)	csum_partial_copy_nocheck(src,dst,len,sum)

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline unsigned short
ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
        unsigned int sum, tmp1;

        __asm__ __volatile__(
        "ldr %0, [%1], #4       @ ip_fast_csum \n\
        ldr %3, [%1], #4 \n\
        sub %2, %2, #5 \n\
        adds %0, %0, %3 \n\
        ldr %3, [%1], #4 \n\
        adcs %0, %0, %3 \n\
        ldr %3, [%1], #4 \n\
1:      adcs %0, %0, %3 \n\
        ldr %3, [%1], #4 \n\
        tst %2, #15             @ do this carefully \n\
        subne %2, %2, #1        @ without destroying \n\
        bne 1b                  @ the carry flag \n\
        adcs %0, %0, %3 \n\
        adc %0, %0, #0 \n\
        adds %0, %0, %0, lsl #16 \n\
        addcs %0, %0, #0x10000 \n\
        mvn %0, %0 \n\
        mov %0, %0, lsr #16"
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
        : "1" (iph), "2" (ihl)
        : "cc");
        return sum;
}
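
/*
 * Illustrative sketch (not part of this header): validating a received
 * IPv4 header, assuming "iph" points to a struct iphdr (defined
 * elsewhere).  ihl is the header length in 32-bit words; a header with
 * a correct checksum field sums to zero.
 *
 *        if (ip_fast_csum((unsigned char *)iph, iph->ihl) != 0)
 *                goto drop;        // corrupt header, discard the packet
 */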

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline unsigned int
csum_fold(unsigned int sum)
{
        __asm__(
        "adds %0, %1, %1, lsl #16       @ csum_fold \n\
        addcs %0, %0, #0x10000"
        : "=r" (sum)
        : "r" (sum)
        : "cc");
        return (~sum) >> 16;
}
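
/*
 * Illustrative sketch (not part of this header): turning the running
 * 32-bit sum from csum_partial() into the final 16-bit one's-complement
 * checksum.  "buf" and "len" are made-up names.
 *
 *        unsigned int sum = csum_partial(buf, len, 0);
 *        unsigned short check = csum_fold(sum);  // ready to store in a header
 */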

static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
                   unsigned int proto, unsigned int sum)
{
        __asm__(
        "adds %0, %1, %2        @ csum_tcpudp_nofold \n\
        adcs %0, %0, %3 \n\
        adcs %0, %0, %4 \n\
        adcs %0, %0, %5 \n\
        adc %0, %0, #0"
        : "=&r"(sum)
        : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len) << 16), "Ir" (proto << 8)
        : "cc");
        return sum;
}
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int
csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
                  unsigned int proto, unsigned int sum)
{
        __asm__(
        "adds %0, %1, %2        @ csum_tcpudp_magic \n\
        adcs %0, %0, %3 \n\
        adcs %0, %0, %4 \n\
        adcs %0, %0, %5 \n\
        adc %0, %0, #0 \n\
        adds %0, %0, %0, lsl #16 \n\
        addcs %0, %0, #0x10000 \n\
        mvn %0, %0"
        : "=&r"(sum)
        : "r" (sum), "r" (daddr), "r" (saddr), "r" (ntohs(len)), "Ir" (proto << 8)
        : "cc");
        return sum >> 16;
}
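
/*
 * Illustrative sketch (not part of this header): finishing a UDP
 * checksum.  "uh", "udp_len", "saddr" and "daddr" are made-up names;
 * the UDP header and payload are summed first (with the check field
 * zeroed) and the pseudo-header is added last.  len and proto are
 * passed in host byte order.
 *
 *        unsigned int sum;
 *
 *        uh->check = 0;
 *        sum = csum_partial((unsigned char *)uh, udp_len, 0);
 *        uh->check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *                                      IPPROTO_UDP, sum);
 */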

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short
ip_compute_csum(unsigned char * buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
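
/*
 * Illustrative sketch (not part of this header): filling in an ICMP
 * checksum, the typical user of this routine.  "icmph" and "icmp_len"
 * are made-up names; the checksum field must be zero while the sum is
 * computed.
 *
 *        icmph->checksum = 0;
 *        icmph->checksum = ip_compute_csum((unsigned char *)icmph, icmp_len);
 */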

#define _HAVE_ARCH_IPV6_CSUM
extern unsigned long
__csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
                  __u32 proto, unsigned int sum);

static inline unsigned short int
csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u32 len,
                unsigned short proto, unsigned int sum)
{
        return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
                                           htonl(proto), sum));
}
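
/*
 * Illustrative sketch (not part of this header): finishing a TCP
 * checksum over IPv6.  "th", "tcp_len", "saddr6" and "daddr6" are
 * made-up names; len and proto are passed in host byte order.
 *
 *        unsigned int sum;
 *
 *        th->check = 0;
 *        sum = csum_partial((unsigned char *)th, tcp_len, 0);
 *        th->check = csum_ipv6_magic(saddr6, daddr6, tcp_len,
 *                                    IPPROTO_TCP, sum);
 */
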
#endif