/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock hold time short,
 *    we use a second list where long-lived entries are stored;
 *    they are handled by the garbage collect task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */
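
/*
 * A sketch of the resulting lifecycle (an illustration drawn from the
 * code below, not an API): __dst_free() pushes an entry onto
 * dst_garbage.list under dst_garbage.lock; dst_gc_task() later splices
 * that list onto dst_busy_list (under dst_gc_mutex), destroys entries
 * whose __refcnt reached zero, and parks still-referenced ones for a
 * later pass.
 */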
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long-lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

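/*
 * dst_gc_task - delayed-work handler for the garbage lists. Under
 * dst_gc_mutex it walks dst_busy_list, destroying entries whose
 * refcount reached zero and keeping the rest, then splices in whatever
 * accumulated on dst_garbage.list and rescans. While entries remain,
 * it rearms itself with a delay that backs off when little was freed.
 */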
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the
			 * gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when their parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so this is correct for now.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * If we freed fewer than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * If the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds.
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
		" expires: %lu elapsed: %lu us\n",
		atomic_read(&dst_total), delayed, work_performed,
		expires,
		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
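
/*
 * dst_discard - input/output stub installed on dead entries; it
 * simply drops every packet.
 */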
int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

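/*
 * dst_alloc - allocate a zeroed dst_entry from the protocol's slab
 * cache. If the protocol supplies a gc() hook and its entry count
 * exceeds gc_thresh, gc() runs first; a nonzero return from gc()
 * aborts the allocation. New entries start with __refcnt == 0 and
 * input/output pointing at dst_discard.
 *
 * A minimal usage sketch, assuming an ipv4-style caller (ipv4_dst_ops
 * and the rtable layout belong to the caller, not to this file):
 *
 *	struct rtable *rt = dst_alloc(&ipv4_dst_ops);
 *	if (!rt)
 *		return NULL;
 *	atomic_set(&rt->u.dst.__refcnt, 1);	- caller takes the first ref
 */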
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

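/*
 * ___dst_free - mark an entry obsolete; if its device is gone or not
 * up, also point input/output at dst_discard so late users drop
 * packets instead of touching a dead device.
 */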
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = 2;
}

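/*
 * __dst_free - mark an entry dead and queue it on dst_garbage.list for
 * the gc task; if the gc timer had backed off, reset it to DST_GC_MIN
 * and reschedule so the new entry is reaped promptly.
 */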
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}

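/*
 * dst_destroy - tear down one entry: drop its hh cache and neighbour,
 * call the protocol's destroy() hook, release the device and free the
 * memory. A NOHASH child whose refcount also hits zero is destroyed in
 * the same pass (the "again" loop); a still-referenced NOHASH child is
 * returned to the caller for deferred freeing.
 */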
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table. */
		}
	}
	return NULL;
}

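/*
 * dst_release - drop one reference on an entry (NULL is allowed). The
 * barrier orders the caller's prior stores before the decrement; the
 * WARN_ON flags refcount underflow. Entries are not freed here;
 * reaping happens elsewhere once the count reaches zero.
 */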
void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		smp_mb__before_atomic_dec();
		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
	}
}
EXPORT_SYMBOL(dst_release);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = dst->dev;
			dev_hold(dst->dev);
			dev_put(dev);
		}
	}
}

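/*
 * dst_dev_event - netdevice notifier handler. On NETDEV_DOWN or
 * NETDEV_UNREGISTER, walk dst_busy_list, then splice any pending
 * dst_garbage.list entries onto it and walk those too, running
 * dst_ifdown() on each so no entry keeps using the dying device.
 */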
static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);