/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

#include <net/dst.h>

/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static void ___dst_free(struct dst_entry *dst);

static DEFINE_TIMER(dst_gc_timer, dst_run_gc, DST_GC_MIN, 0);

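/*
 * Walk the garbage list: destroy entries whose refcount has dropped to
 * zero, keep the still-referenced ones, and re-arm dst_gc_timer with an
 * interval that backs off from DST_GC_MIN towards DST_GC_MAX when a pass
 * performs no work.
 */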
static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	int work_performed;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	work_performed = 0;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->__refcnt)) {
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;
		work_performed = 1;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = *dstp;
			*dstp = dst;
			dstp = &dst->next;
		}
	}
	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}
	if (!work_performed) {
		if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
			dst_gc_timer_expires = DST_GC_MAX;
		dst_gc_timer_inc += DST_GC_INC;
	} else {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
	}
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	/* if the next desired timer is more than 4 seconds in the future
	 * then round the timer to whole seconds
	 */
	if (dst_gc_timer_expires > 4*HZ)
		mod_timer(&dst_gc_timer,
			  round_jiffies(jiffies + dst_gc_timer_expires));
	else
		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);

out:
	spin_unlock(&dst_lock);
}

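/* Default input/output handler for a dead entry: just drop the packet. */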
static int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

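/*
 * Allocate a zeroed dst_entry from the protocol's kmem cache.  If the
 * protocol is above its gc_thresh, its gc() callback is given a chance
 * to reclaim entries first; the allocation fails if gc() returns nonzero.
 */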
void *dst_alloc(struct dst_ops *ops)
{
	struct dst_entry *dst;

	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
		if (ops->gc())
			return NULL;
	}
	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	atomic_set(&dst->__refcnt, 0);
	dst->ops = ops;
	dst->lastuse = jiffies;
	dst->path = dst;
	dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
	atomic_inc(&dst_total);
#endif
	atomic_inc(&ops->entries);
	return dst;
}

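/*
 * Mark an entry obsolete and, if its device is gone or down, point its
 * handlers at dst_discard.  Callers hold dst_lock.
 */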
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev==NULL) is required, when
	   protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst->output = dst_discard;
	}
	dst->obsolete = 2;
}

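/*
 * Obsolete an entry and queue it on the garbage list.  If the GC timer
 * was in its backed-off state, it is rescheduled to fire after DST_GC_MIN
 * so the new entry is examined soon.
 */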
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_lock);
	___dst_free(dst);
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
	}
	spin_unlock_bh(&dst_lock);
}

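/*
 * Tear down an entry: drop its hh cache and neighbour references, call
 * the protocol's destroy hook, release the device and free the memory.
 * A NOHASH child that loses its last reference here is torn down as
 * well; a still-referenced NOHASH child is returned to the caller.
 */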
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;
	struct neighbour *neigh;
	struct hh_cache *hh;

	smp_rmb();

again:
	neigh = dst->neighbour;
	hh = dst->hh;
	child = dst->child;

	dst->hh = NULL;
	if (hh && atomic_dec_and_test(&hh->hh_refcnt))
		kfree(hh);

	if (neigh) {
		dst->neighbour = NULL;
		neigh_release(neigh);
	}

	atomic_dec(&dst->ops->entries);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
	atomic_dec(&dst_total);
#endif
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in his hash table */
		}
	}
	return NULL;
}

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			      int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
		if (dst->neighbour && dst->neighbour->dev == dev) {
			dst->neighbour->dev = &loopback_dev;
			dev_put(dev);
			dev_hold(&loopback_dev);
		}
	}
}

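/*
 * Netdevice notifier: on NETDEV_DOWN and NETDEV_UNREGISTER walk the
 * garbage list and detach every queued entry from the affected device.
 */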
static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst;

	switch (event) {
	case NETDEV_UNREGISTER:
	case NETDEV_DOWN:
		spin_lock_bh(&dst_lock);
		for (dst = dst_garbage_list; dst; dst = dst->next) {
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}
		spin_unlock_bh(&dst_lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
};

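/* Hook the destination cache into netdevice state changes at boot. */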
void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);