/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the BSD Socket
 *			interface as the means of communication with the user level.
 *
 *		Checksumming functions for IP, TCP, UDP and so on
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Borrows very liberally from tcp.c and ip.c, see those
 *		files for more names.
 */

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/checksum.h>
#if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
#include <linux/uaccess.h>
#endif

26#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
5486f5bf 27static __always_inline
56649d5d 28__wsum csum_and_copy_from_user (const void __user *src, void *dst,
c693cc46 29 int len)
1da177e4 30{
5904122c 31 if (copy_from_user(dst, src, len))
c693cc46
AV
32 return 0;
33 return csum_partial(dst, len, ~0U);
1da177e4
LT
34}
35#endif
36
37#ifndef HAVE_CSUM_COPY_USER
5486f5bf 38static __always_inline __wsum csum_and_copy_to_user
c693cc46 39(const void *src, void __user *dst, int len)
1da177e4 40{
c693cc46 41 __wsum sum = csum_partial(src, len, ~0U);
1da177e4 42
001c1a65
AV
43 if (copy_to_user(dst, src, len) == 0)
44 return sum;
c693cc46 45 return 0;
1da177e4
LT
46}
47#endif
48
6e41c585 49#ifndef _HAVE_ARCH_CSUM_AND_COPY
5486f5bf 50static __always_inline __wsum
cc44c17b 51csum_partial_copy_nocheck(const void *src, void *dst, int len)
6e41c585
AV
52{
53 memcpy(dst, src, len);
cc44c17b 54 return csum_partial(dst, len, 0);
6e41c585
AV
55}
56#endif
57
07064c6e 58#ifndef HAVE_ARCH_CSUM_ADD
5486f5bf 59static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
1da177e4 60{
56649d5d
AV
61 u32 res = (__force u32)csum;
62 res += (__force u32)addend;
63 return (__force __wsum)(res + (res < (__force u32)addend));
1da177e4 64}
07064c6e 65#endif
1da177e4 66
5486f5bf 67static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
1da177e4
LT
68{
69 return csum_add(csum, ~addend);
70}
71
5486f5bf 72static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
99f0b958
ED
73{
74 u16 res = (__force u16)csum;
75
76 res += (__force u16)addend;
77 return (__force __sum16)(res + (res < (__force u16)addend));
78}
79
5486f5bf 80static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
99f0b958
ED
81{
82 return csum16_add(csum, ~addend);
83}
84
3af722cb 85#ifndef HAVE_ARCH_CSUM_SHIFT
5486f5bf 86static __always_inline __wsum csum_shift(__wsum sum, int offset)
1da177e4 87{
33803963
AD
88 /* rotate sum to align it with a 16b boundary */
89 if (offset & 1)
594e450b
AV
90 return (__force __wsum)ror32((__force u32)sum, 8);
91 return sum;
92}
3af722cb 93#endif
33803963 94
5486f5bf 95static __always_inline __wsum
594e450b
AV
96csum_block_add(__wsum csum, __wsum csum2, int offset)
97{
98 return csum_add(csum, csum_shift(csum2, offset));
1da177e4
LT
99}
100
5486f5bf 101static __always_inline __wsum
2817a336
DB
102csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
103{
104 return csum_block_add(csum, csum2, offset);
105}
106
5486f5bf 107static __always_inline __wsum
56649d5d 108csum_block_sub(__wsum csum, __wsum csum2, int offset)
1da177e4 109{
33803963 110 return csum_block_add(csum, ~csum2, offset);
56649d5d
AV
111}
112
5486f5bf 113static __always_inline __wsum csum_unfold(__sum16 n)
56649d5d
AV
114{
115 return (__force __wsum)n;
1da177e4
LT
116}
117
5486f5bf
CL
118static __always_inline
119__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
cea80ea8
DB
120{
121 return csum_partial(buff, len, sum);
122}
123
f6ab0288 124#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
a99a00cf 125
5486f5bf 126static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
8050c0f0
DB
127{
128 *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
129}
130
5486f5bf 131static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
a99a00cf 132{
d53a2aa3
ED
133 __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
134
135 *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
a99a00cf
PM
136}
137
99f0b958
ED
138/* Implements RFC 1624 (Incremental Internet Checksum)
139 * 3. Discussion states :
140 * HC' = ~(~HC + ~m + m')
141 * m : old value of a 16bit field
142 * m' : new value of a 16bit field
143 */
5486f5bf 144static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
a99a00cf 145{
99f0b958 146 *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
a99a00cf
PM
147}
148
d9b5ae5c
PB
149static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
150{
151 *csum = csum_add(csum_sub(*csum, old), new);
152}
153
a99a00cf 154struct sk_buff;
4fc70747 155void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
4b048d6d 156 __be32 from, __be32 to, bool pseudohdr);
4fc70747
JP
157void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
158 const __be32 *from, const __be32 *to,
4b048d6d 159 bool pseudohdr);
abc5d1ff
TH
160void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
161 __wsum diff, bool pseudohdr);
a99a00cf 162
5486f5bf
CL
163static __always_inline
164void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
165 __be16 from, __be16 to, bool pseudohdr)
a99a00cf
PM
166{
167 inet_proto_csum_replace4(sum, skb, (__force __be32)from,
168 (__force __be32)to, pseudohdr);
169}
170
5486f5bf
CL
171static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
172 int start, int offset)
7c967b22
TH
173{
174 __sum16 *psum = (__sum16 *)(ptr + offset);
175 __wsum delta;
176
177 /* Subtract out checksum up to start */
178 csum = csum_sub(csum, csum_partial(ptr, start, 0));
179
180 /* Set derived checksum in packet */
369620a0
TH
181 delta = csum_sub((__force __wsum)csum_fold(csum),
182 (__force __wsum)*psum);
7c967b22
TH
183 *psum = csum_fold(csum);
184
185 return delta;
186}
187
5486f5bf 188static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
26c4f7da 189{
22fbece1 190 *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
26c4f7da
TH
191}
192
5486f5bf 193static __always_inline __wsum wsum_negate(__wsum val)
45cac675
ED
194{
195 return (__force __wsum)-((__force u32)val);
196}
1da177e4 197#endif