/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic TIME_WAIT sockets functions
 *
 *              From code originally in TCP
 */

#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

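/*
 * This file provides the protocol-independent TIME_WAIT machinery:
 * inet_twsk_alloc() builds a timewait bucket from a full socket,
 * __inet_twsk_hashdance() swaps that bucket into the bind and established
 * hashes in place of the socket, and inet_twsk_schedule() /
 * inet_twsk_deschedule() arm and cancel its expiry on the death row.
 * The inet_twdr_* handlers below are the death-row timer and workqueue
 * callbacks that eventually reap the buckets via __inet_twsk_kill().
 */
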
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                             struct inet_hashinfo *hashinfo)
{
        struct inet_bind_hashbucket *bhead;
        struct inet_bind_bucket *tb;
        /* Unlink from established hashes. */
        rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        write_lock(lock);
        if (hlist_unhashed(&tw->tw_node)) {
                write_unlock(lock);
                return;
        }
        __hlist_del(&tw->tw_node);
        sk_node_init(&tw->tw_node);
        write_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tb = tw->tw_tb;
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
                       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
        }
#endif
        inet_twsk_put(tw);
}

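/*
 * Reference counting, as used within this file: inet_twsk_alloc() returns a
 * bucket with tw_refcnt set to 1, __inet_twsk_hashdance() takes one extra
 * reference for the established-hash chain, and __inet_twsk_kill() above
 * drops that hash reference again.  The final inet_twsk_put() is the one
 * that destroys the bucket and releases the owning protocol module.
 */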
void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt)) {
                struct module *owner = tw->tw_prot->owner;
                twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
                printk(KERN_DEBUG "%s timewait_sock %p released\n",
                       tw->tw_prot->name, tw);
#endif
                release_net(twsk_net(tw));
                kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
                module_put(owner);
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state.  This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note that any socket with inet->num != 0 MUST be bound in the
           binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        write_lock(lock);

        /* Step 2: Remove SK from established hash. */
        if (__sk_del_node_init(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        /* Step 3: Hash TW into TIMEWAIT chain. */
        inet_twsk_add_node(tw, &ehead->twchain);
        atomic_inc(&tw->tw_refcnt);

        write_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

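/*
 * For context, a typical caller runs the alloc/hashdance/schedule sequence
 * roughly as follows (a sketch modelled on TCP's tcp_time_wait(); timeout
 * selection and error handling are omitted here):
 *
 *      struct inet_timewait_sock *tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *
 *      if (tw != NULL) {
 *              __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *              inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
 *              inet_twsk_put(tw);      (drops the creation reference)
 *      }
 */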
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);

                /* Give us an identity. */
                tw->tw_daddr        = inet->daddr;
                tw->tw_rcv_saddr    = inet->rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_num          = inet->num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->sport;
                tw->tw_dport        = inet->dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_prot         = sk->sk_prot_creator;
                twsk_net_set(tw, hold_net(sock_net(sk)));
                atomic_set(&tw->tw_refcnt, 1);
                inet_twsk_dead_node_init(tw);
                __module_get(tw->tw_prot->owner);
        }

        return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version, where the lock was
         * released after detaching the chain.  That was racy, because tw
         * buckets are scheduled in a non-serialized context in 2.3 (with
         * netfilter), and with softnet this is common, because soft irqs
         * are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
        return ret;
}

void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
                if (twdr->tw_count)
                        need_timer = 1;
        }
        twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

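/*
 * When inet_twdr_do_twkill_work() reports that the per-invocation quota
 * (INET_TWDR_TWKILL_QUOTA) was exceeded, inet_twdr_hangman() above marks the
 * slot in twdr->thread_slots and defers the remainder to this work item, so
 * the bulk of the reaping happens in process context where it can reschedule
 * instead of monopolizing the timer/BH path.
 */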
void inet_twdr_twkill_work(struct work_struct *work)
{
        struct inet_timewait_death_row *twdr =
                container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;

        BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
                        (sizeof(twdr->thread_slots) * 8));

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        struct inet_timewait_death_row *twdr,
                        const int timeo, const int timewait_len)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is the probability to lose a single packet and
         * time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). The normal timewait length is calculated so that we
         * wait at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement waiting
         * only for 60sec; we should wait at least for 240 secs.
         * Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if the peer understands PAWS, we
         * kill the tw bucket after 3.5*RTO (it is important that this number
         * is greater than the TS tick!) and detect old duplicates with help
         * of PAWS.
         */
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
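        /*
         * The statement above rounds timeo up to a whole number of recycle
         * ticks.  As a worked example (the concrete numbers are only
         * illustrative, since INET_TWDR_RECYCLE_TICK depends on HZ): if one
         * recycle tick were 128 jiffies and timeo were 700 jiffies, then
         * slot = (700 + 127) >> 7 = 6, i.e. the bucket is placed six ticks
         * ahead on the recycle wheel.
         */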

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= timewait_len) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = DIV_ROUND_UP(timeo, twdr->period);
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                        (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

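/*
 * inet_twsk_schedule() above uses two timer wheels: short timeouts (PAWS
 * recycling) go onto twdr->twcal_row[], which inet_twdr_twcal_tick() below
 * scans with a granularity of one recycle tick, while full-length timeouts
 * go onto twdr->cells[], which inet_twdr_hangman() sweeps once per
 * twdr->period.  Both wheels are protected by twdr->death_lock.
 */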
void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, node, safe,
                                        &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
        spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);