#ifdef __KERNEL__
#ifndef _ASM_M32R_CHECKSUM_H
#define _ASM_M32R_CHECKSUM_H

/*
 * include/asm-m32r/checksum.h
 *
 * IP/TCP/UDP checksum routines
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Some code taken from the mips and parisc architectures.
 *
 * Copyright (C) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata
 * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/in6.h>
/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage unsigned int csum_partial(const unsigned char *buff,
                                     int len, unsigned int sum);

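/*
 * Usage sketch (illustrative only, not part of the original header):
 * checksumming a message split across two fragments by feeding the
 * running 32-bit sum back into csum_partial().  The fragment names,
 * lengths and the example function itself are hypothetical; only the
 * last fragment may have an odd length, as noted above.
 */
#if 0	/* documentation only */
static unsigned int example_two_fragment_sum(const unsigned char *frag1,
                                             int frag1_len,	/* must be even */
                                             const unsigned char *frag2,
                                             int frag2_len)	/* last, may be odd */
{
        unsigned int sum = csum_partial(frag1, frag1_len, 0);

        /* feed the running sum back in for the next fragment */
        sum = csum_partial(frag2, frag2_len, sum);
        return sum;	/* later folded by csum_fold() or csum_tcpudp_magic() */
}
#endif
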
/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary.
 */
extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
                                              unsigned char *dst,
                                              int len, unsigned int sum);

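/*
 * Illustrative sketch (not part of the original header): the combined
 * call replaces a separate memcpy() followed by csum_partial() with a
 * single pass over the data.  Names below are hypothetical.
 */
#if 0	/* documentation only */
static unsigned int example_copy_and_sum(const unsigned char *src_buf,
                                         unsigned char *dst_buf, int len)
{
        /* copy src_buf to dst_buf and checksum it in one pass */
        return csum_partial_copy_nocheck(src_buf, dst_buf, len, 0);
}
#endif
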
/*
 * This is a new version of the above that records errors it finds in
 * *err_ptr, but continues and zeros the rest of the buffer.
 */
extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
                                                unsigned char *dst,
                                                int len, unsigned int sum,
                                                int *err_ptr);

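/*
 * Usage sketch (illustrative only): copying and checksumming data from
 * user space, then checking the error flag.  The function, variable
 * names and the -EFAULT return (from <linux/errno.h>) are assumptions
 * made for illustration; on a fault the remainder of the destination
 * buffer has been zeroed, as described above.
 */
#if 0	/* documentation only */
static int example_copy_sum_from_user(const unsigned char __user *user_src,
                                      unsigned char *kernel_dst, int len,
                                      unsigned int *sump)
{
        int err = 0;

        *sump = csum_partial_copy_from_user(user_src, kernel_dst, len, 0, &err);
        if (err)
                return -EFAULT;	/* the copy faulted part-way through */
        return 0;
}
#endif
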
/*
 * Fold a partial checksum
 */

static inline unsigned int csum_fold(unsigned int sum)
{
        unsigned long tmpreg;
        __asm__(
                " sll3 %1, %0, #16 \n"
                " cmp %0, %0 \n"
                " addx %0, %1 \n"
                " ldi %1, #0 \n"
                " srli %0, #16 \n"
                " addx %0, %1 \n"
                " xor3 %0, %0, #0x0000ffff \n"
                : "=r" (sum), "=&r" (tmpreg)
                : "0" (sum)
                : "cbit"
        );
        return sum;
}

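/*
 * For reference, a portable C sketch of what the asm above is assumed
 * to compute: add the two 16-bit halves of the 32-bit sum with the
 * carry folded back in, then return the one's complement of the low
 * 16 bits.  The name is hypothetical; the block is documentation only.
 */
#if 0	/* documentation only */
static inline unsigned int csum_fold_generic(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
        sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry back in */
        return (~sum) & 0xffff;			/* complement low 16 bits */
}
#endif
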
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static inline unsigned short ip_fast_csum(unsigned char *iph,
                                          unsigned int ihl)
{
        unsigned long sum, tmpreg0, tmpreg1;

        __asm__ __volatile__(
                " ld %0, @%1+ \n"
                " addi %2, #-4 \n"
                "# bgez %2, 2f \n"
                " cmp %0, %0 \n"
                " ld %3, @%1+ \n"
                " ld %4, @%1+ \n"
                " addx %0, %3 \n"
                " ld %3, @%1+ \n"
                " addx %0, %4 \n"
                " addx %0, %3 \n"
                " .fillinsn\n"
                "1: \n"
                " ld %4, @%1+ \n"
                " addi %2, #-1 \n"
                " addx %0, %4 \n"
                " bgtz %2, 1b \n"
                "\n"
                " ldi %3, #0 \n"
                " addx %0, %3 \n"
                " .fillinsn\n"
                "2: \n"
        /* Since the input registers which are loaded with iph and ihl
           are modified, we must also specify them as outputs, or gcc
           will assume they contain their original values. */
                : "=&r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmpreg0), "=&r" (tmpreg1)
                : "1" (iph), "2" (ihl)
                : "cbit", "memory");

        return csum_fold(sum);
}

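/*
 * For reference, a portable C sketch of what the asm above is assumed
 * to do: sum the header as ihl 32-bit words (ihl >= 5 for a valid IPv4
 * header), fold the carries back in, then csum_fold() the result.  The
 * name and the 64-bit accumulator are assumptions made for clarity;
 * the block is documentation only.
 */
#if 0	/* documentation only */
static inline unsigned short ip_fast_csum_generic(const unsigned char *iph,
                                                  unsigned int ihl)
{
        const unsigned int *word = (const unsigned int *)iph;
        unsigned long long acc = 0;
        unsigned int i;

        for (i = 0; i < ihl; i++)
                acc += word[i];
        acc = (acc & 0xffffffffULL) + (acc >> 32);	/* fold carries */
        acc = (acc & 0xffffffffULL) + (acc >> 32);
        return csum_fold((unsigned int)acc);
}
#endif
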
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
                                               unsigned long daddr,
                                               unsigned short len,
                                               unsigned short proto,
                                               unsigned int sum)
{
#if defined(__LITTLE_ENDIAN)
        unsigned long len_proto = (ntohs(len)<<16)+proto*256;
#else
        unsigned long len_proto = (proto<<16)+len;
#endif
        unsigned long tmpreg;

        __asm__(
                " cmp %0, %0 \n"
                " addx %0, %2 \n"
                " addx %0, %3 \n"
                " addx %0, %4 \n"
                " ldi %1, #0 \n"
                " addx %0, %1 \n"
                : "=r" (sum), "=&r" (tmpreg)
                : "r" (daddr), "r" (saddr), "r" (len_proto), "0" (sum)
                : "cbit"
        );

        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
                                                   unsigned long daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
                                                   unsigned int sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

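/*
 * Usage sketch (illustrative only): finishing a UDP checksum.  The
 * payload is first summed with csum_partial(), then the pseudo-header
 * (addresses, length, protocol) is mixed in and the result complemented.
 * The function and variable names are hypothetical; IPPROTO_UDP is the
 * UDP protocol number from <linux/in.h>.
 */
#if 0	/* documentation only */
static unsigned short example_udp_check(unsigned long saddr, unsigned long daddr,
                                        const unsigned char *udp_packet,
                                        unsigned short udp_len)
{
        unsigned int partial = csum_partial(udp_packet, udp_len, 0);

        /* mix in the pseudo-header and complement the result */
        return csum_tcpudp_magic(saddr, daddr, udp_len, IPPROTO_UDP, partial);
}
#endif
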
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short ip_compute_csum(unsigned char *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

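/*
 * Usage sketch (illustrative only): the usual ICMP pattern is to clear
 * the checksum field and then compute the sum over the whole message.
 * Assumes struct icmphdr from <linux/icmp.h>; the function and length
 * names are hypothetical.
 */
#if 0	/* documentation only */
static void example_icmp_checksum(struct icmphdr *icmph, int icmp_len)
{
        icmph->checksum = 0;	/* field must be zero while summing */
        icmph->checksum = ip_compute_csum((unsigned char *)icmph, icmp_len);
}
#endif
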
#define _HAVE_ARCH_IPV6_CSUM
static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
                                                 struct in6_addr *daddr,
                                                 __u16 len,
                                                 unsigned short proto,
                                                 unsigned int sum)
{
        unsigned long tmpreg0, tmpreg1, tmpreg2, tmpreg3;
        __asm__(
                " ld %1, @(%5) \n"
                " ld %2, @(4,%5) \n"
                " ld %3, @(8,%5) \n"
                " ld %4, @(12,%5) \n"
                " add %0, %1 \n"
                " addx %0, %2 \n"
                " addx %0, %3 \n"
                " addx %0, %4 \n"
                " ld %1, @(%6) \n"
                " ld %2, @(4,%6) \n"
                " ld %3, @(8,%6) \n"
                " ld %4, @(12,%6) \n"
                " addx %0, %1 \n"
                " addx %0, %2 \n"
                " addx %0, %3 \n"
                " addx %0, %4 \n"
                " addx %0, %7 \n"
                " addx %0, %8 \n"
                " ldi %1, #0 \n"
                " addx %0, %1 \n"
                : "=&r" (sum), "=&r" (tmpreg0), "=&r" (tmpreg1),
                  "=&r" (tmpreg2), "=&r" (tmpreg3)
                : "r" (saddr), "r" (daddr),
                  "r" (htonl((__u32) (len))), "r" (htonl(proto)), "0" (sum)
                : "cbit"
        );

        return csum_fold(sum);
}

#endif /* _ASM_M32R_CHECKSUM_H */
#endif /* __KERNEL__ */