/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_unhash - unhash a timewait socket from established hash
 * @tw: timewait socket
 *
 * Unhash a timewait socket from the established hash, if hashed.
 * The ehash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * the caller must call it for us.
	 */
	return 1;
}

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * Unhash a timewait socket from the bind hash, if hashed.
 * The bind hash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * the caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
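	/* Drop the references that the ehash and bhash links were holding. */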
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into the TIMEWAIT chain.
	 * Should be done before removing sk from the established chain
	 * because readers are lockless and search the established chain first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from the established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes:
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in the ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr = inet->inet_daddr;
		tw->tw_rcv_saddr = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num = inet->inet_num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->inet_sport;
		tw->tw_dport = inet->inet_dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_transparent = inet->transparent;
		tw->tw_prot = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a nonzero value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec, we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
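	/*
	 * Round the timeout up to the granularity of the recycle wheel:
	 * slot = ceil(timeo / 2^INET_TWDR_RECYCLE_TICK), in jiffies.
	 * Small timeouts fit in the fine grained twcal wheel below;
	 * anything with slot >= INET_TWDR_RECYCLE_SLOTS falls back to
	 * the coarse twkill slots driven by twdr->period.
	 */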
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

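/*
 * Walk every established hash bucket under RCU and deschedule any
 * timewait socket that matches @family and whose network namespace
 * refcount has already dropped to zero, so that no timewait socket
 * outlives its netns.
 */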
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
			    atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);