/*
 *	net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list for long-lived entries, which are
 *    handled by a garbage collect task fired from a workqueue.
 * 3) This second list is guarded by a mutex,
 *    so that dst_gc_task() and dst_dev_event() can be synchronized.
 */
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long-lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;
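/*
 * Lifecycle sketch (informal summary of the above, added for clarity):
 *
 *	__dst_free(dst)            caller retires a no-longer-usable entry
 *	  -> dst_garbage.list      spinlock-protected staging list
 *	  -> dst_gc_task()         workqueue destroys unreferenced entries
 *	  -> dst_busy_list         mutex-guarded list of still-referenced
 *	                           entries, rescanned on every gc pass
 *	                           until their __refcnt drops to zero
 */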

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}
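	/*
	 * Worked example of the backoff above, assuming the usual
	 * DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2 and DST_GC_MAX = 120*HZ
	 * from <net/dst.h> (check your tree, the values may differ):
	 * while little is being freed, successive passes are scheduled
	 * roughly 0.6s, 1.6s, 3.1s, 5.1s, ... apart, each gap growing by
	 * one more DST_GC_INC, until saturating at two minutes; a single
	 * productive pass snaps the period back to DST_GC_MIN.
	 */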

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
		" expires: %lu elapsed: %lu us\n",
		atomic_read(&dst_total), delayed, work_performed,
		expires,
		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);
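/*
 * dst_discard() is the fallback handler for dead or not-yet-initialized
 * entries: dst_alloc() installs it as the default input/output pair, and
 * ___dst_free()/dst_ifdown() reinstall it when the underlying device is
 * gone or down, so packets that still reach such a route are dropped.
 */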

void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}
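/*
 * Usage sketch (hypothetical caller, illustrative names only): protocols
 * embed struct dst_entry at the start of their own route type, size
 * ops->kmem_cachep accordingly, and override the dst_discard() defaults:
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops);
 *	if (rt == NULL)
 *		return NULL;
 *	rt->dst.input  = my_input;
 *	rt->dst.output = my_output;
 *
 * my_route, my_dst_ops, my_input and my_output do not exist in the tree.
 */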

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst->output = dst_discard;
	}
	dst->obsolete = 2;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
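/*
 * Note on the reschedule above: if the gc timer has backed off towards
 * DST_GC_MAX, a freshly queued entry could otherwise sit unfreed for a
 * full back-off period; dropping timer_expires back to DST_GC_MIN bounds
 * the latency of the next collection pass after a burst of frees.
 */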

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
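/*
 * The "goto again" above makes the teardown iterative: when a NOHASH
 * child's only remaining reference was the one its parent held, the
 * child is destroyed in the same call, so an arbitrarily deep
 * parent->child chain is unwound without recursing on the kernel stack.
 */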

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		smp_mb__before_atomic_dec();
		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
	}
}
EXPORT_SYMBOL(dst_release);
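/*
 * Pairing sketch (illustrative only): a reference taken with dst_hold()
 * or dst_clone() must be balanced by exactly one dst_release(); the
 * WARN_ON() above catches the underflow caused by an unbalanced release.
 *
 *	dst_hold(dst);
 *	... use dst ...
 *	dst_release(dst);
 */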

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
	}
}
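/*
 * Two distinct cases above: on NETDEV_DOWN (!unregister) the device
 * still exists, so only the handlers are redirected to dst_discard();
 * on NETDEV_UNREGISTER the entry (and any neighbour bound to the dying
 * device) is retargeted at the per-namespace loopback device so the
 * reference on the vanishing device can be dropped.
 */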

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}
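/*
 * Note that the handler above also splices the pending dst_garbage.list
 * onto the tail of dst_busy_list, so every queued entry goes through
 * dst_ifdown() before the notifier returns and none can keep a stale
 * reference to the device being taken down.
 */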

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);