Commit | Line | Data |
---|---|---|
eb2b9311 | 1 | /* |
264d9b7d | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
eb2b9311 LCC |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | */ | |
9 | ||
10 | #include <linux/etherdevice.h> | |
11 | #include <linux/list.h> | |
eb2b9311 | 12 | #include <linux/random.h> |
5a0e3ad6 | 13 | #include <linux/slab.h> |
eb2b9311 LCC |
14 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | |
16 | #include <net/mac80211.h> | |
17 | #include "ieee80211_i.h" | |
18 | #include "mesh.h" | |
19 | ||
7646887a JC |
20 | #ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG |
21 | #define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) | |
22 | #else | |
23 | #define mpath_dbg(fmt, args...) do { (void)(0); } while (0) | |
24 | #endif | |
25 | ||
eb2b9311 LCC |
26 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ |
27 | #define INIT_PATHS_SIZE_ORDER 2 | |
28 | ||
29 | /* Keep the mean chain length below this constant */ | |
30 | #define MEAN_CHAIN_LEN 2 | |
31 | ||
32 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ | |
33 | time_after(jiffies, mpath->exp_time) && \ | |
34 | !(mpath->flags & MESH_PATH_FIXED)) | |
35 | ||
36 | struct mpath_node { | |
37 | struct hlist_node list; | |
38 | struct rcu_head rcu; | |
39 | /* This indirection allows two different tables to point to the same | |
40 | * mesh_path structure, useful when resizing | |
41 | */ | |
42 | struct mesh_path *mpath; | |
43 | }; | |
44 | ||
349eb8cf JB |
45 | static struct mesh_table __rcu *mesh_paths; |
46 | static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ | |
eb2b9311 | 47 | |
f5ea9120 | 48 | int mesh_paths_generation; |
6b86bd62 JB |
49 | |
50 | /* This lock will have the grow table function as writer and add / delete nodes | |
51 | * as readers. When reading the table (i.e. doing lookups) we are well protected | |
52 | * by RCU | |
53 | */ | |
54 | static DEFINE_RWLOCK(pathtbl_resize_lock); | |
55 | ||
56 | ||
349eb8cf JB |
57 | static inline struct mesh_table *resize_dereference_mesh_paths(void) |
58 | { | |
59 | return rcu_dereference_protected(mesh_paths, | |
60 | lockdep_is_held(&pathtbl_resize_lock)); | |
61 | } | |
62 | ||
63 | static inline struct mesh_table *resize_dereference_mpp_paths(void) | |
64 | { | |
65 | return rcu_dereference_protected(mpp_paths, | |
66 | lockdep_is_held(&pathtbl_resize_lock)); | |
67 | } | |
68 | ||
5ee68e5b JC |
69 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); |
70 | ||
349eb8cf JB |
71 | /* |
72 | * CAREFUL -- "tbl" must not be an expression, | |
73 | * in particular not an rcu_dereference(), since | |
74 | * it's used twice. So it is illegal to do | |
75 | * for_each_mesh_entry(rcu_dereference(...), ...) | |
76 | */ | |
77 | #define for_each_mesh_entry(tbl, p, node, i) \ | |
78 | for (i = 0; i <= tbl->hash_mask; i++) \ | |
79 | hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) | |
80 | ||
81 | ||
6b86bd62 JB |
82 | static struct mesh_table *mesh_table_alloc(int size_order) |
83 | { | |
84 | int i; | |
85 | struct mesh_table *newtbl; | |
86 | ||
d676ff49 | 87 | newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); |
6b86bd62 JB |
88 | if (!newtbl) |
89 | return NULL; | |
90 | ||
91 | newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * | |
d676ff49 | 92 | (1 << size_order), GFP_ATOMIC); |
6b86bd62 JB |
93 | |
94 | if (!newtbl->hash_buckets) { | |
95 | kfree(newtbl); | |
96 | return NULL; | |
97 | } | |
98 | ||
99 | newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * | |
d676ff49 | 100 | (1 << size_order), GFP_ATOMIC); |
6b86bd62 JB |
101 | if (!newtbl->hashwlock) { |
102 | kfree(newtbl->hash_buckets); | |
103 | kfree(newtbl); | |
104 | return NULL; | |
105 | } | |
106 | ||
107 | newtbl->size_order = size_order; | |
108 | newtbl->hash_mask = (1 << size_order) - 1; | |
109 | atomic_set(&newtbl->entries, 0); | |
110 | get_random_bytes(&newtbl->hash_rnd, | |
111 | sizeof(newtbl->hash_rnd)); | |
112 | for (i = 0; i <= newtbl->hash_mask; i++) | |
113 | spin_lock_init(&newtbl->hashwlock[i]); | |
5ee68e5b | 114 | spin_lock_init(&newtbl->gates_lock); |
6b86bd62 JB |
115 | |
116 | return newtbl; | |
117 | } | |
118 | ||
18889231 JC |
119 | static void __mesh_table_free(struct mesh_table *tbl) |
120 | { | |
121 | kfree(tbl->hash_buckets); | |
122 | kfree(tbl->hashwlock); | |
123 | kfree(tbl); | |
124 | } | |
125 | ||
6b86bd62 | 126 | static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) |
18889231 JC |
127 | { |
128 | struct hlist_head *mesh_hash; | |
129 | struct hlist_node *p, *q; | |
5ee68e5b | 130 | struct mpath_node *gate; |
18889231 JC |
131 | int i; |
132 | ||
133 | mesh_hash = tbl->hash_buckets; | |
134 | for (i = 0; i <= tbl->hash_mask; i++) { | |
9b84b808 | 135 | spin_lock_bh(&tbl->hashwlock[i]); |
18889231 JC |
136 | hlist_for_each_safe(p, q, &mesh_hash[i]) { |
137 | tbl->free_node(p, free_leafs); | |
138 | atomic_dec(&tbl->entries); | |
139 | } | |
9b84b808 | 140 | spin_unlock_bh(&tbl->hashwlock[i]); |
18889231 | 141 | } |
5ee68e5b JC |
142 | if (free_leafs) { |
143 | spin_lock_bh(&tbl->gates_lock); | |
144 | hlist_for_each_entry_safe(gate, p, q, | |
145 | tbl->known_gates, list) { | |
146 | hlist_del(&gate->list); | |
147 | kfree(gate); | |
148 | } | |
149 | kfree(tbl->known_gates); | |
150 | spin_unlock_bh(&tbl->gates_lock); | |
151 | } | |
152 | ||
18889231 JC |
153 | __mesh_table_free(tbl); |
154 | } | |
155 | ||
a3e6b12c | 156 | static int mesh_table_grow(struct mesh_table *oldtbl, |
6b86bd62 | 157 | struct mesh_table *newtbl) |
18889231 | 158 | { |
18889231 JC |
159 | struct hlist_head *oldhash; |
160 | struct hlist_node *p, *q; | |
161 | int i; | |
162 | ||
a3e6b12c I |
163 | if (atomic_read(&oldtbl->entries) |
164 | < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1)) | |
165 | return -EAGAIN; | |
18889231 | 166 | |
a3e6b12c I |
167 | newtbl->free_node = oldtbl->free_node; |
168 | newtbl->mean_chain_len = oldtbl->mean_chain_len; | |
169 | newtbl->copy_node = oldtbl->copy_node; | |
5ee68e5b | 170 | newtbl->known_gates = oldtbl->known_gates; |
a3e6b12c | 171 | atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); |
18889231 | 172 | |
a3e6b12c I |
173 | oldhash = oldtbl->hash_buckets; |
174 | for (i = 0; i <= oldtbl->hash_mask; i++) | |
18889231 | 175 | hlist_for_each(p, &oldhash[i]) |
a3e6b12c | 176 | if (oldtbl->copy_node(p, newtbl) < 0) |
18889231 JC |
177 | goto errcopy; |
178 | ||
a3e6b12c | 179 | return 0; |
18889231 JC |
180 | |
181 | errcopy: | |
182 | for (i = 0; i <= newtbl->hash_mask; i++) { | |
183 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) | |
a3e6b12c | 184 | oldtbl->free_node(p, 0); |
18889231 | 185 | } |
a3e6b12c | 186 | return -ENOMEM; |
18889231 JC |
187 | } |
188 | ||
6b86bd62 JB |
189 | static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
190 | struct mesh_table *tbl) | |
191 | { | |
192 | /* Use last four bytes of hw addr and interface index as hash index */ | |
193 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) | |
194 | & tbl->hash_mask; | |
195 | } | |
f5ea9120 | 196 | |
eb2b9311 LCC |
197 | |
198 | /** | |
199 | * | |
200 | * mesh_path_assign_nexthop - update mesh path next hop | |
201 | * | |
202 | * @mpath: mesh path to update | |
203 | * @sta: next hop to assign | |
204 | * | |
205 | * Locking: mpath->state_lock must be held when calling this function | |
206 | */ | |
207 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | |
208 | { | |
10c836d7 JC |
209 | struct sk_buff *skb; |
210 | struct ieee80211_hdr *hdr; | |
211 | struct sk_buff_head tmpq; | |
212 | unsigned long flags; | |
213 | ||
d0709a65 | 214 | rcu_assign_pointer(mpath->next_hop, sta); |
10c836d7 JC |
215 | |
216 | __skb_queue_head_init(&tmpq); | |
217 | ||
218 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); | |
219 | ||
220 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { | |
221 | hdr = (struct ieee80211_hdr *) skb->data; | |
222 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); | |
223 | __skb_queue_tail(&tmpq, skb); | |
224 | } | |
225 | ||
226 | skb_queue_splice(&tmpq, &mpath->frame_queue); | |
227 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); | |
eb2b9311 LCC |
228 | } |
229 | ||
5ee68e5b JC |
230 | static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, |
231 | struct mesh_path *gate_mpath) | |
232 | { | |
233 | struct ieee80211_hdr *hdr; | |
234 | struct ieee80211s_hdr *mshdr; | |
235 | int mesh_hdrlen, hdrlen; | |
236 | char *next_hop; | |
237 | ||
238 | hdr = (struct ieee80211_hdr *) skb->data; | |
239 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | |
240 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | |
241 | ||
242 | if (!(mshdr->flags & MESH_FLAGS_AE)) { | |
243 | /* size of the fixed part of the mesh header */ | |
244 | mesh_hdrlen = 6; | |
245 | ||
246 | /* make room for the two extended addresses */ | |
247 | skb_push(skb, 2 * ETH_ALEN); | |
248 | memmove(skb->data, hdr, hdrlen + mesh_hdrlen); | |
249 | ||
250 | hdr = (struct ieee80211_hdr *) skb->data; | |
251 | ||
252 | /* we preserve the previous mesh header and only add | |
253 | * the new addresses */ | |
254 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | |
255 | mshdr->flags = MESH_FLAGS_AE_A5_A6; | |
256 | memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); | |
257 | memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); | |
258 | } | |
259 | ||
260 | /* update next hop */ | |
261 | hdr = (struct ieee80211_hdr *) skb->data; | |
262 | rcu_read_lock(); | |
263 | next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; | |
264 | memcpy(hdr->addr1, next_hop, ETH_ALEN); | |
265 | rcu_read_unlock(); | |
266 | memcpy(hdr->addr3, dst_addr, ETH_ALEN); | |
267 | } | |
268 | ||
269 | /** | |
270 | * | |
271 | * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another | |
272 | * | |
273 | * This function is used to transfer or copy frames from an unresolved mpath to | |
274 | * a gate mpath. The function also adds the Address Extension field and | |
275 | * updates the next hop. | |
276 | * | |
277 | * If a frame already has an Address Extension field, only the next hop and | |
278 | * destination addresses are updated. | |
279 | * | |
280 | * The gate mpath must be an active mpath with a valid mpath->next_hop. | |
281 | * | |
282 | * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate) | |
283 | * @from_mpath: The failed mpath | |
284 | * @copy: When true, copy all the frames to the new mpath queue. When false, | |
285 | * move them. | |
286 | */ | |
287 | static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, | |
288 | struct mesh_path *from_mpath, | |
289 | bool copy) | |
290 | { | |
c6133661 | 291 | struct sk_buff *skb, *cp_skb = NULL; |
5ee68e5b JC |
292 | struct sk_buff_head gateq, failq; |
293 | unsigned long flags; | |
294 | int num_skbs; | |
295 | ||
296 | BUG_ON(gate_mpath == from_mpath); | |
297 | BUG_ON(!gate_mpath->next_hop); | |
298 | ||
299 | __skb_queue_head_init(&gateq); | |
300 | __skb_queue_head_init(&failq); | |
301 | ||
302 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); | |
303 | skb_queue_splice_init(&from_mpath->frame_queue, &failq); | |
304 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); | |
305 | ||
306 | num_skbs = skb_queue_len(&failq); | |
307 | ||
308 | while (num_skbs--) { | |
309 | skb = __skb_dequeue(&failq); | |
817a53d9 | 310 | if (copy) { |
5ee68e5b | 311 | cp_skb = skb_copy(skb, GFP_ATOMIC); |
817a53d9 JL |
312 | if (cp_skb) |
313 | __skb_queue_tail(&failq, cp_skb); | |
314 | } | |
5ee68e5b JC |
315 | |
316 | prepare_for_gate(skb, gate_mpath->dst, gate_mpath); | |
317 | __skb_queue_tail(&gateq, skb); | |
5ee68e5b JC |
318 | } |
319 | ||
320 | spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); | |
321 | skb_queue_splice(&gateq, &gate_mpath->frame_queue); | |
322 | mpath_dbg("Mpath queue for gate %pM has %d frames\n", | |
323 | gate_mpath->dst, | |
324 | skb_queue_len(&gate_mpath->frame_queue)); | |
325 | spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); | |
326 | ||
327 | if (!copy) | |
328 | return; | |
329 | ||
330 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); | |
331 | skb_queue_splice(&failq, &from_mpath->frame_queue); | |
332 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); | |
333 | } | |
334 | ||
eb2b9311 LCC |
335 | |
336 | /** | |
337 | * mesh_path_lookup - look up a path in the mesh path table | |
338 | * @dst: hardware address (ETH_ALEN length) of destination | |
f698d856 | 339 | * @sdata: local subif |
eb2b9311 LCC |
340 | * |
341 | * Returns: pointer to the mesh path structure, or NULL if not found | |
342 | * | |
343 | * Locking: must be called within a read rcu section. | |
344 | */ | |
f698d856 | 345 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
eb2b9311 LCC |
346 | { |
347 | struct mesh_path *mpath; | |
348 | struct hlist_node *n; | |
349 | struct hlist_head *bucket; | |
350 | struct mesh_table *tbl; | |
351 | struct mpath_node *node; | |
352 | ||
353 | tbl = rcu_dereference(mesh_paths); | |
354 | ||
f698d856 | 355 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
eb2b9311 LCC |
356 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
357 | mpath = node->mpath; | |
f698d856 | 358 | if (mpath->sdata == sdata && |
eb2b9311 LCC |
359 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
360 | if (MPATH_EXPIRED(mpath)) { | |
361 | spin_lock_bh(&mpath->state_lock); | |
362 | if (MPATH_EXPIRED(mpath)) | |
363 | mpath->flags &= ~MESH_PATH_ACTIVE; | |
364 | spin_unlock_bh(&mpath->state_lock); | |
365 | } | |
366 | return mpath; | |
367 | } | |
368 | } | |
369 | return NULL; | |
370 | } | |
371 | ||
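A hedged caller sketch (the helper name is hypothetical, not part of this file): since the kernel-doc above requires an RCU read section, the lookup and every access to the returned mesh_path must happen before rcu_read_unlock():

```c
/* Illustrative only: query whether a path to dst is currently active. */
static bool example_path_is_active(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	bool active = false;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst, sdata);
	if (mpath)
		active = !!(mpath->flags & MESH_PATH_ACTIVE);
	rcu_read_unlock();

	return active;
}
```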
79617dee Y |
372 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
373 | { | |
374 | struct mesh_path *mpath; | |
375 | struct hlist_node *n; | |
376 | struct hlist_head *bucket; | |
377 | struct mesh_table *tbl; | |
378 | struct mpath_node *node; | |
379 | ||
380 | tbl = rcu_dereference(mpp_paths); | |
381 | ||
382 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | |
383 | hlist_for_each_entry_rcu(node, n, bucket, list) { | |
384 | mpath = node->mpath; | |
385 | if (mpath->sdata == sdata && | |
386 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | |
387 | if (MPATH_EXPIRED(mpath)) { | |
388 | spin_lock_bh(&mpath->state_lock); | |
389 | if (MPATH_EXPIRED(mpath)) | |
390 | mpath->flags &= ~MESH_PATH_ACTIVE; | |
391 | spin_unlock_bh(&mpath->state_lock); | |
392 | } | |
393 | return mpath; | |
394 | } | |
395 | } | |
396 | return NULL; | |
397 | } | |
398 | ||
399 | ||
eb2b9311 LCC |
400 | /** |
401 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index | |
402 | * @idx: index | |
f698d856 | 403 | * @sdata: local subif, or NULL for all entries |
eb2b9311 LCC |
404 | * |
405 | * Returns: pointer to the mesh path structure, or NULL if not found. | |
406 | * | |
407 | * Locking: must be called within a read rcu section. | |
408 | */ | |
f698d856 | 409 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) |
eb2b9311 | 410 | { |
349eb8cf | 411 | struct mesh_table *tbl = rcu_dereference(mesh_paths); |
eb2b9311 LCC |
412 | struct mpath_node *node; |
413 | struct hlist_node *p; | |
414 | int i; | |
415 | int j = 0; | |
416 | ||
349eb8cf | 417 | for_each_mesh_entry(tbl, p, node, i) { |
f698d856 | 418 | if (sdata && node->mpath->sdata != sdata) |
2a8ca29a | 419 | continue; |
eb2b9311 LCC |
420 | if (j++ == idx) { |
421 | if (MPATH_EXPIRED(node->mpath)) { | |
422 | spin_lock_bh(&node->mpath->state_lock); | |
423 | if (MPATH_EXPIRED(node->mpath)) | |
424 | node->mpath->flags &= ~MESH_PATH_ACTIVE; | |
425 | spin_unlock_bh(&node->mpath->state_lock); | |
426 | } | |
427 | return node->mpath; | |
428 | } | |
2a8ca29a | 429 | } |
eb2b9311 LCC |
430 | |
431 | return NULL; | |
432 | } | |
433 | ||
5ee68e5b JC |
434 | static void mesh_gate_node_reclaim(struct rcu_head *rp) |
435 | { | |
436 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | |
437 | kfree(node); | |
438 | } | |
439 | ||
440 | /** | |
441 | * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates | |
442 | * @tbl: table which contains the known_gates list | |
443 | * @mpath: mpath to known mesh gate | |
444 | * | |
445 | * Returns: 0 on success | |
446 | * | |
447 | */ | |
448 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) | |
449 | { | |
450 | struct mpath_node *gate, *new_gate; | |
451 | struct hlist_node *n; | |
452 | int err; | |
453 | ||
454 | rcu_read_lock(); | |
455 | tbl = rcu_dereference(tbl); | |
456 | ||
457 | hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) | |
458 | if (gate->mpath == mpath) { | |
459 | err = -EEXIST; | |
460 | goto err_rcu; | |
461 | } | |
462 | ||
463 | new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC); | |
464 | if (!new_gate) { | |
465 | err = -ENOMEM; | |
466 | goto err_rcu; | |
467 | } | |
468 | ||
469 | mpath->is_gate = true; | |
470 | mpath->sdata->u.mesh.num_gates++; | |
471 | new_gate->mpath = mpath; | |
472 | spin_lock_bh(&tbl->gates_lock); | |
473 | hlist_add_head_rcu(&new_gate->list, tbl->known_gates); | |
474 | spin_unlock_bh(&tbl->gates_lock); | |
475 | rcu_read_unlock(); | |
476 | mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", | |
477 | mpath->sdata->name, mpath->dst, | |
478 | mpath->sdata->u.mesh.num_gates); | |
479 | return 0; | |
480 | err_rcu: | |
481 | rcu_read_unlock(); | |
482 | return err; | |
483 | } | |
484 | ||
485 | /** | |
486 | * mesh_gate_del - remove a mesh gate from the list of known gates | |
487 | * @tbl: table which holds our list of known gates | |
488 | * @mpath: gate mpath | |
489 | * | |
490 | * Returns: 0 on success | |
491 | * | |
492 | * Locking: must be called inside rcu_read_lock() section | |
493 | */ | |
494 | static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) | |
495 | { | |
496 | struct mpath_node *gate; | |
497 | struct hlist_node *p, *q; | |
498 | ||
499 | tbl = rcu_dereference(tbl); | |
500 | ||
501 | hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) | |
502 | if (gate->mpath == mpath) { | |
503 | spin_lock_bh(&tbl->gates_lock); | |
504 | hlist_del_rcu(&gate->list); | |
505 | call_rcu(&gate->rcu, mesh_gate_node_reclaim); | |
506 | spin_unlock_bh(&tbl->gates_lock); | |
507 | mpath->sdata->u.mesh.num_gates--; | |
508 | mpath->is_gate = false; | |
509 | mpath_dbg("Mesh path (%s): Deleted gate: %pM. " | |
510 | "%d known gates\n", mpath->sdata->name, | |
511 | mpath->dst, mpath->sdata->u.mesh.num_gates); | |
512 | break; | |
513 | } | |
514 | ||
515 | return 0; | |
516 | } | |
517 | ||
518 | /** | |
519 | * | |
520 | * mesh_path_add_gate - add the given mpath, a path to a mesh gate, to the gate list | |
521 | * @mpath: gate path to add to table | |
522 | */ | |
523 | int mesh_path_add_gate(struct mesh_path *mpath) | |
524 | { | |
525 | return mesh_gate_add(mesh_paths, mpath); | |
526 | } | |
527 | ||
528 | /** | |
529 | * mesh_gate_num - number of gates known to this interface | |
530 | * @sdata: subif data | |
531 | */ | |
532 | int mesh_gate_num(struct ieee80211_sub_if_data *sdata) | |
533 | { | |
534 | return sdata->u.mesh.num_gates; | |
535 | } | |
536 | ||
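A hedged sketch of how a caller records a gate, loosely modeled on the way the HWMP root-announcement handling uses these calls; the helper name is hypothetical and the prototypes for mesh_path_lookup(), mesh_path_add() and mesh_path_add_gate() are assumed to come from mesh.h:

```c
/* Illustrative only: record that the mesh STA at root_addr is a gate. */
static void example_record_gate(u8 *root_addr,
				struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = mesh_path_lookup(root_addr, sdata);
	if (!mpath) {
		/* mesh_path_add() is atomic-safe, so it may run under RCU */
		mesh_path_add(root_addr, sdata);
		mpath = mesh_path_lookup(root_addr, sdata);
	}
	if (mpath && !mpath->is_gate)
		mesh_path_add_gate(mpath);
	rcu_read_unlock();
}
```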
eb2b9311 LCC |
537 | /** |
538 | * mesh_path_add - allocate and add a new path to the mesh path table | |
539 | * @dst: destination address of the path (ETH_ALEN length) | |
f698d856 | 540 | * @sdata: local subif |
eb2b9311 | 541 | * |
af901ca1 | 542 | * Returns: 0 on success |
eb2b9311 LCC |
543 | * |
544 | * State: the initial state of the new path is set to 0 | |
545 | */ | |
f698d856 | 546 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) |
eb2b9311 | 547 | { |
18889231 JC |
548 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
549 | struct ieee80211_local *local = sdata->local; | |
349eb8cf | 550 | struct mesh_table *tbl; |
eb2b9311 LCC |
551 | struct mesh_path *mpath, *new_mpath; |
552 | struct mpath_node *node, *new_node; | |
553 | struct hlist_head *bucket; | |
554 | struct hlist_node *n; | |
555 | int grow = 0; | |
556 | int err = 0; | |
557 | u32 hash_idx; | |
558 | ||
47846c9b | 559 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) |
eb2b9311 LCC |
560 | /* never add ourselves as neighbours */ |
561 | return -ENOTSUPP; | |
562 | ||
563 | if (is_multicast_ether_addr(dst)) | |
564 | return -ENOTSUPP; | |
565 | ||
472dbc45 | 566 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) |
eb2b9311 LCC |
567 | return -ENOSPC; |
568 | ||
402d7752 | 569 | err = -ENOMEM; |
18889231 | 570 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
402d7752 PE |
571 | if (!new_mpath) |
572 | goto err_path_alloc; | |
573 | ||
18889231 | 574 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
402d7752 PE |
575 | if (!new_node) |
576 | goto err_node_alloc; | |
f84e71a9 | 577 | |
9b84b808 | 578 | read_lock_bh(&pathtbl_resize_lock); |
eb2b9311 | 579 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
f698d856 | 580 | new_mpath->sdata = sdata; |
eb2b9311 LCC |
581 | new_mpath->flags = 0; |
582 | skb_queue_head_init(&new_mpath->frame_queue); | |
eb2b9311 LCC |
583 | new_node->mpath = new_mpath; |
584 | new_mpath->timer.data = (unsigned long) new_mpath; | |
585 | new_mpath->timer.function = mesh_path_timer; | |
586 | new_mpath->exp_time = jiffies; | |
587 | spin_lock_init(&new_mpath->state_lock); | |
588 | init_timer(&new_mpath->timer); | |
589 | ||
349eb8cf | 590 | tbl = resize_dereference_mesh_paths(); |
eb2b9311 | 591 | |
349eb8cf JB |
592 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
593 | bucket = &tbl->hash_buckets[hash_idx]; | |
eb2b9311 | 594 | |
349eb8cf | 595 | spin_lock_bh(&tbl->hashwlock[hash_idx]); |
eb2b9311 | 596 | |
402d7752 | 597 | err = -EEXIST; |
eb2b9311 LCC |
598 | hlist_for_each_entry(node, n, bucket, list) { |
599 | mpath = node->mpath; | |
f698d856 | 600 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
402d7752 | 601 | goto err_exists; |
eb2b9311 LCC |
602 | } |
603 | ||
604 | hlist_add_head_rcu(&new_node->list, bucket); | |
349eb8cf JB |
605 | if (atomic_inc_return(&tbl->entries) >= |
606 | tbl->mean_chain_len * (tbl->hash_mask + 1)) | |
eb2b9311 LCC |
607 | grow = 1; |
608 | ||
f5ea9120 JB |
609 | mesh_paths_generation++; |
610 | ||
349eb8cf | 611 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
9b84b808 | 612 | read_unlock_bh(&pathtbl_resize_lock); |
402d7752 | 613 | if (grow) { |
18889231 | 614 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
64592c8f | 615 | ieee80211_queue_work(&local->hw, &sdata->work); |
eb2b9311 | 616 | } |
402d7752 PE |
617 | return 0; |
618 | ||
619 | err_exists: | |
349eb8cf | 620 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
9b84b808 | 621 | read_unlock_bh(&pathtbl_resize_lock); |
402d7752 PE |
622 | kfree(new_node); |
623 | err_node_alloc: | |
624 | kfree(new_mpath); | |
625 | err_path_alloc: | |
472dbc45 | 626 | atomic_dec(&sdata->u.mesh.mpaths); |
eb2b9311 LCC |
627 | return err; |
628 | } | |
629 | ||
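A hedged caller sketch showing on-demand path creation in the TX path, modeled loosely on how mesh_nexthop_lookup() in mesh_hwmp.c uses this API; the helper name is hypothetical and error handling is abbreviated:

```c
/* Illustrative only: look up a path to dst, creating it if necessary.
 * The caller is assumed to hold rcu_read_lock().
 */
static struct mesh_path *example_get_or_create_path(u8 *dst,
				struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = mesh_path_lookup(dst, sdata);
	if (!mpath) {
		if (mesh_path_add(dst, sdata))	/* -ENOSPC, -ENOMEM, ... */
			return NULL;
		mpath = mesh_path_lookup(dst, sdata);
	}
	return mpath;
}
```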
1928ecab JB |
630 | static void mesh_table_free_rcu(struct rcu_head *rcu) |
631 | { | |
632 | struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); | |
633 | ||
634 | mesh_table_free(tbl, false); | |
635 | } | |
636 | ||
18889231 JC |
637 | void mesh_mpath_table_grow(void) |
638 | { | |
639 | struct mesh_table *oldtbl, *newtbl; | |
640 | ||
9b84b808 | 641 | write_lock_bh(&pathtbl_resize_lock); |
349eb8cf JB |
642 | oldtbl = resize_dereference_mesh_paths(); |
643 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | |
1928ecab JB |
644 | if (!newtbl) |
645 | goto out; | |
349eb8cf | 646 | if (mesh_table_grow(oldtbl, newtbl) < 0) { |
a3e6b12c | 647 | __mesh_table_free(newtbl); |
1928ecab | 648 | goto out; |
18889231 JC |
649 | } |
650 | rcu_assign_pointer(mesh_paths, newtbl); | |
18889231 | 651 | |
1928ecab JB |
652 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
653 | ||
654 | out: | |
655 | write_unlock_bh(&pathtbl_resize_lock); | |
18889231 JC |
656 | } |
657 | ||
658 | void mesh_mpp_table_grow(void) | |
659 | { | |
660 | struct mesh_table *oldtbl, *newtbl; | |
661 | ||
9b84b808 | 662 | write_lock_bh(&pathtbl_resize_lock); |
349eb8cf JB |
663 | oldtbl = resize_dereference_mpp_paths(); |
664 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | |
1928ecab JB |
665 | if (!newtbl) |
666 | goto out; | |
349eb8cf | 667 | if (mesh_table_grow(oldtbl, newtbl) < 0) { |
a3e6b12c | 668 | __mesh_table_free(newtbl); |
1928ecab | 669 | goto out; |
18889231 JC |
670 | } |
671 | rcu_assign_pointer(mpp_paths, newtbl); | |
1928ecab | 672 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
18889231 | 673 | |
1928ecab JB |
674 | out: |
675 | write_unlock_bh(&pathtbl_resize_lock); | |
18889231 | 676 | } |
eb2b9311 | 677 | |
79617dee Y |
678 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) |
679 | { | |
18889231 JC |
680 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
681 | struct ieee80211_local *local = sdata->local; | |
349eb8cf | 682 | struct mesh_table *tbl; |
79617dee Y |
683 | struct mesh_path *mpath, *new_mpath; |
684 | struct mpath_node *node, *new_node; | |
685 | struct hlist_head *bucket; | |
686 | struct hlist_node *n; | |
687 | int grow = 0; | |
688 | int err = 0; | |
689 | u32 hash_idx; | |
690 | ||
47846c9b | 691 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) |
79617dee Y |
692 | /* never add ourselves as neighbours */ |
693 | return -ENOTSUPP; | |
694 | ||
695 | if (is_multicast_ether_addr(dst)) | |
696 | return -ENOTSUPP; | |
697 | ||
698 | err = -ENOMEM; | |
18889231 | 699 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
79617dee Y |
700 | if (!new_mpath) |
701 | goto err_path_alloc; | |
702 | ||
18889231 | 703 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
79617dee Y |
704 | if (!new_node) |
705 | goto err_node_alloc; | |
706 | ||
9b84b808 | 707 | read_lock_bh(&pathtbl_resize_lock); |
79617dee Y |
708 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
709 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | |
710 | new_mpath->sdata = sdata; | |
711 | new_mpath->flags = 0; | |
712 | skb_queue_head_init(&new_mpath->frame_queue); | |
713 | new_node->mpath = new_mpath; | |
c6133661 | 714 | init_timer(&new_mpath->timer); |
79617dee Y |
715 | new_mpath->exp_time = jiffies; |
716 | spin_lock_init(&new_mpath->state_lock); | |
717 | ||
349eb8cf | 718 | tbl = resize_dereference_mpp_paths(); |
79617dee | 719 | |
349eb8cf JB |
720 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
721 | bucket = &tbl->hash_buckets[hash_idx]; | |
722 | ||
723 | spin_lock_bh(&tbl->hashwlock[hash_idx]); | |
79617dee Y |
724 | |
725 | err = -EEXIST; | |
726 | hlist_for_each_entry(node, n, bucket, list) { | |
727 | mpath = node->mpath; | |
728 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | |
729 | goto err_exists; | |
730 | } | |
731 | ||
732 | hlist_add_head_rcu(&new_node->list, bucket); | |
349eb8cf JB |
733 | if (atomic_inc_return(&tbl->entries) >= |
734 | tbl->mean_chain_len * (tbl->hash_mask + 1)) | |
79617dee Y |
735 | grow = 1; |
736 | ||
349eb8cf | 737 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
9b84b808 | 738 | read_unlock_bh(&pathtbl_resize_lock); |
79617dee | 739 | if (grow) { |
18889231 | 740 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
64592c8f | 741 | ieee80211_queue_work(&local->hw, &sdata->work); |
79617dee Y |
742 | } |
743 | return 0; | |
744 | ||
745 | err_exists: | |
349eb8cf | 746 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
9b84b808 | 747 | read_unlock_bh(&pathtbl_resize_lock); |
79617dee Y |
748 | kfree(new_node); |
749 | err_node_alloc: | |
750 | kfree(new_mpath); | |
751 | err_path_alloc: | |
752 | return err; | |
753 | } | |
754 | ||
755 | ||
eb2b9311 LCC |
756 | /** |
757 | * mesh_plink_broken - deactivates paths and sends perr when a link breaks | |
758 | * | |
759 | * @sta: broken peer link | |
760 | * | |
761 | * This function must be called from the rate control algorithm if enough | |
762 | * delivery errors suggest that a peer link is no longer usable. | |
763 | */ | |
764 | void mesh_plink_broken(struct sta_info *sta) | |
765 | { | |
349eb8cf | 766 | struct mesh_table *tbl; |
15ff6365 | 767 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
eb2b9311 LCC |
768 | struct mesh_path *mpath; |
769 | struct mpath_node *node; | |
770 | struct hlist_node *p; | |
f698d856 | 771 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
eb2b9311 | 772 | int i; |
25d49e4d | 773 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); |
eb2b9311 LCC |
774 | |
775 | rcu_read_lock(); | |
349eb8cf JB |
776 | tbl = rcu_dereference(mesh_paths); |
777 | for_each_mesh_entry(tbl, p, node, i) { | |
eb2b9311 LCC |
778 | mpath = node->mpath; |
779 | spin_lock_bh(&mpath->state_lock); | |
349eb8cf | 780 | if (rcu_dereference(mpath->next_hop) == sta && |
eb2b9311 LCC |
781 | mpath->flags & MESH_PATH_ACTIVE && |
782 | !(mpath->flags & MESH_PATH_FIXED)) { | |
783 | mpath->flags &= ~MESH_PATH_ACTIVE; | |
d19b3bf6 | 784 | ++mpath->sn; |
eb2b9311 | 785 | spin_unlock_bh(&mpath->state_lock); |
45904f21 JC |
786 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, |
787 | mpath->dst, cpu_to_le32(mpath->sn), | |
25d49e4d | 788 | reason, bcast, sdata); |
eb2b9311 LCC |
789 | } else |
790 | spin_unlock_bh(&mpath->state_lock); | |
791 | } | |
792 | rcu_read_unlock(); | |
793 | } | |
794 | ||
795 | /** | |
796 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches | |
797 | * | |
798 | * @sta: mesh peer to match | |
799 | * | |
b4e08ea1 LCC |
800 | * RCU notes: this function is called when a mesh plink transitions from |
801 | * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that | |
802 | * allows path creation. This will happen before the sta can be freed (because | |
d0709a65 JB |
803 | * sta_info_destroy() calls this) so any reader in a rcu read block will be |
804 | * protected against the plink disappearing. | |
eb2b9311 LCC |
805 | */ |
806 | void mesh_path_flush_by_nexthop(struct sta_info *sta) | |
807 | { | |
349eb8cf | 808 | struct mesh_table *tbl; |
eb2b9311 LCC |
809 | struct mesh_path *mpath; |
810 | struct mpath_node *node; | |
811 | struct hlist_node *p; | |
812 | int i; | |
813 | ||
349eb8cf JB |
814 | rcu_read_lock(); |
815 | tbl = rcu_dereference(mesh_paths); | |
816 | for_each_mesh_entry(tbl, p, node, i) { | |
eb2b9311 | 817 | mpath = node->mpath; |
349eb8cf | 818 | if (rcu_dereference(mpath->next_hop) == sta) |
f698d856 | 819 | mesh_path_del(mpath->dst, mpath->sdata); |
eb2b9311 | 820 | } |
349eb8cf | 821 | rcu_read_unlock(); |
eb2b9311 LCC |
822 | } |
823 | ||
f698d856 | 824 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) |
eb2b9311 | 825 | { |
349eb8cf | 826 | struct mesh_table *tbl; |
eb2b9311 LCC |
827 | struct mesh_path *mpath; |
828 | struct mpath_node *node; | |
829 | struct hlist_node *p; | |
830 | int i; | |
831 | ||
349eb8cf JB |
832 | rcu_read_lock(); |
833 | tbl = rcu_dereference(mesh_paths); | |
834 | for_each_mesh_entry(tbl, p, node, i) { | |
eb2b9311 | 835 | mpath = node->mpath; |
f698d856 JBG |
836 | if (mpath->sdata == sdata) |
837 | mesh_path_del(mpath->dst, mpath->sdata); | |
eb2b9311 | 838 | } |
349eb8cf | 839 | rcu_read_unlock(); |
eb2b9311 LCC |
840 | } |
841 | ||
842 | static void mesh_path_node_reclaim(struct rcu_head *rp) | |
843 | { | |
844 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | |
f698d856 | 845 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
d0709a65 | 846 | |
c6133661 | 847 | del_timer_sync(&node->mpath->timer); |
472dbc45 | 848 | atomic_dec(&sdata->u.mesh.mpaths); |
eb2b9311 LCC |
849 | kfree(node->mpath); |
850 | kfree(node); | |
851 | } | |
852 | ||
853 | /** | |
854 | * mesh_path_del - delete a mesh path from the table | |
855 | * | |
856 | * @addr: dst address (ETH_ALEN length) | |
f698d856 | 857 | * @sdata: local subif |
eb2b9311 | 858 | * |
af901ca1 | 859 | * Returns: 0 if successful |
eb2b9311 | 860 | */ |
f698d856 | 861 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) |
eb2b9311 | 862 | { |
349eb8cf | 863 | struct mesh_table *tbl; |
eb2b9311 LCC |
864 | struct mesh_path *mpath; |
865 | struct mpath_node *node; | |
866 | struct hlist_head *bucket; | |
867 | struct hlist_node *n; | |
868 | int hash_idx; | |
869 | int err = 0; | |
870 | ||
9b84b808 | 871 | read_lock_bh(&pathtbl_resize_lock); |
349eb8cf JB |
872 | tbl = resize_dereference_mesh_paths(); |
873 | hash_idx = mesh_table_hash(addr, sdata, tbl); | |
874 | bucket = &tbl->hash_buckets[hash_idx]; | |
eb2b9311 | 875 | |
349eb8cf | 876 | spin_lock_bh(&tbl->hashwlock[hash_idx]); |
eb2b9311 LCC |
877 | hlist_for_each_entry(node, n, bucket, list) { |
878 | mpath = node->mpath; | |
f698d856 | 879 | if (mpath->sdata == sdata && |
349eb8cf | 880 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { |
a6965c44 | 881 | spin_lock_bh(&mpath->state_lock); |
5ee68e5b JC |
882 | if (mpath->is_gate) |
883 | mesh_gate_del(tbl, mpath); | |
cfa22c71 LCC |
884 | mpath->flags |= MESH_PATH_RESOLVING; |
885 | hlist_del_rcu(&node->list); | |
886 | call_rcu(&node->rcu, mesh_path_node_reclaim); | |
349eb8cf | 887 | atomic_dec(&tbl->entries); |
a6965c44 | 888 | spin_unlock_bh(&mpath->state_lock); |
eb2b9311 LCC |
889 | goto enddel; |
890 | } | |
891 | } | |
892 | ||
893 | err = -ENXIO; | |
894 | enddel: | |
f5ea9120 | 895 | mesh_paths_generation++; |
349eb8cf | 896 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
9b84b808 | 897 | read_unlock_bh(&pathtbl_resize_lock); |
eb2b9311 LCC |
898 | return err; |
899 | } | |
900 | ||
901 | /** | |
902 | * mesh_path_tx_pending - sends pending frames in a mesh path queue | |
903 | * | |
904 | * @mpath: mesh path to activate | |
905 | * | |
906 | * Locking: the state_lock of the mpath structure must NOT be held when calling | |
907 | * this function. | |
908 | */ | |
909 | void mesh_path_tx_pending(struct mesh_path *mpath) | |
910 | { | |
249b405c JC |
911 | if (mpath->flags & MESH_PATH_ACTIVE) |
912 | ieee80211_add_pending_skbs(mpath->sdata->local, | |
913 | &mpath->frame_queue); | |
eb2b9311 LCC |
914 | } |
915 | ||
5ee68e5b JC |
916 | /** |
917 | * mesh_path_send_to_gates - sends pending frames to all known mesh gates | |
918 | * | |
919 | * @mpath: mesh path whose queue will be emptied | |
920 | * | |
921 | * If there is only one gate, the frames are transferred from the failed mpath | |
922 | * queue to that gate's queue. If there is more than one gate, the frames | |
923 | * are copied from each gate to the next. After frames are copied, the | |
924 | * mpath queues are emptied onto the transmission queue. | |
925 | */ | |
926 | int mesh_path_send_to_gates(struct mesh_path *mpath) | |
927 | { | |
928 | struct ieee80211_sub_if_data *sdata = mpath->sdata; | |
929 | struct hlist_node *n; | |
930 | struct mesh_table *tbl; | |
931 | struct mesh_path *from_mpath = mpath; | |
932 | struct mpath_node *gate = NULL; | |
933 | bool copy = false; | |
934 | struct hlist_head *known_gates; | |
935 | ||
936 | rcu_read_lock(); | |
937 | tbl = rcu_dereference(mesh_paths); | |
938 | known_gates = tbl->known_gates; | |
939 | rcu_read_unlock(); | |
940 | ||
941 | if (!known_gates) | |
942 | return -EHOSTUNREACH; | |
943 | ||
944 | hlist_for_each_entry_rcu(gate, n, known_gates, list) { | |
945 | if (gate->mpath->sdata != sdata) | |
946 | continue; | |
947 | ||
948 | if (gate->mpath->flags & MESH_PATH_ACTIVE) { | |
949 | mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); | |
950 | mesh_path_move_to_queue(gate->mpath, from_mpath, copy); | |
951 | from_mpath = gate->mpath; | |
952 | copy = true; | |
953 | } else { | |
954 | mpath_dbg("Not forwarding %p\n", gate->mpath); | |
955 | mpath_dbg("flags %x\n", gate->mpath->flags); | |
956 | } | |
957 | } | |
958 | ||
959 | hlist_for_each_entry_rcu(gate, n, known_gates, list) | |
960 | if (gate->mpath->sdata == sdata) { | |
961 | mpath_dbg("Sending to %pM\n", gate->mpath->dst); | |
962 | mesh_path_tx_pending(gate->mpath); | |
963 | } | |
964 | ||
965 | return (from_mpath == mpath) ? -EHOSTUNREACH : 0; | |
966 | } | |
967 | ||
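A hedged sketch of the decision a caller can make when path resolution fails: hand the queued frames to the known gates if any are usable, otherwise drop them. The helper name is hypothetical; mesh_path_flush_pending() is assumed declared in mesh.h (it is defined further down in this file) and must run inside an RCU read section per its kernel-doc:

```c
/* Illustrative only: on resolution failure, try the gates, else drop. */
static void example_resolution_failed(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	if (mesh_gate_num(sdata) > 0 &&
	    mesh_path_send_to_gates(mpath) == 0)
		return;		/* frames handed off to at least one gate */

	/* no usable gate: discard the queued frames */
	rcu_read_lock();
	mesh_path_flush_pending(mpath);
	rcu_read_unlock();
}
```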
eb2b9311 LCC |
968 | /** |
969 | * mesh_path_discard_frame - discard a frame whose path could not be resolved | |
970 | * | |
971 | * @skb: frame to discard | |
f698d856 | 972 | * @sdata: network subif the frame was to be sent through |
eb2b9311 | 973 | * |
35946a57 JC |
974 | * If the frame was being forwarded from another MP, a PERR frame will be sent |
975 | * to the precursor. The precursor's address (i.e. the previous hop) was saved | |
976 | * in addr1 of the frame-to-be-forwarded, and would only be overwritten once | |
977 | * the destination is successfully resolved. | |
eb2b9311 LCC |
978 | * |
979 | * Locking: the function must be called within an rcu_read_lock region | |
980 | */ | |
f698d856 JBG |
981 | void mesh_path_discard_frame(struct sk_buff *skb, |
982 | struct ieee80211_sub_if_data *sdata) | |
eb2b9311 | 983 | { |
e32f85f7 | 984 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
eb2b9311 | 985 | struct mesh_path *mpath; |
d19b3bf6 | 986 | u32 sn = 0; |
25d49e4d | 987 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); |
eb2b9311 | 988 | |
47846c9b | 989 | if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { |
eb2b9311 LCC |
990 | u8 *ra, *da; |
991 | ||
e32f85f7 | 992 | da = hdr->addr3; |
35946a57 | 993 | ra = hdr->addr1; |
af089c15 | 994 | rcu_read_lock(); |
f698d856 | 995 | mpath = mesh_path_lookup(da, sdata); |
af089c15 JC |
996 | if (mpath) { |
997 | spin_lock_bh(&mpath->state_lock); | |
d19b3bf6 | 998 | sn = ++mpath->sn; |
af089c15 JC |
999 | spin_unlock_bh(&mpath->state_lock); |
1000 | } | |
1001 | rcu_read_unlock(); | |
45904f21 | 1002 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, |
25d49e4d | 1003 | cpu_to_le32(sn), reason, ra, sdata); |
eb2b9311 LCC |
1004 | } |
1005 | ||
1006 | kfree_skb(skb); | |
472dbc45 | 1007 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
eb2b9311 LCC |
1008 | } |
1009 | ||
1010 | /** | |
1011 | * mesh_path_flush_pending - free the pending queue of a mesh path | |
1012 | * | |
1013 | * @mpath: mesh path whose queue has to be freed | |
1014 | * | |
25985edc | 1015 | * Locking: the function must be called within an rcu_read_lock region | |
eb2b9311 LCC |
1016 | */ |
1017 | void mesh_path_flush_pending(struct mesh_path *mpath) | |
1018 | { | |
eb2b9311 LCC |
1019 | struct sk_buff *skb; |
1020 | ||
00e3f25c | 1021 | while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) |
f698d856 | 1022 | mesh_path_discard_frame(skb, mpath->sdata); |
eb2b9311 LCC |
1023 | } |
1024 | ||
1025 | /** | |
1026 | * mesh_path_fix_nexthop - force a specific next hop for a mesh path | |
1027 | * | |
1028 | * @mpath: the mesh path to modify | |
1029 | * @next_hop: the next hop to force | |
1030 | * | |
1031 | * Locking: this function must be called holding mpath->state_lock | |
1032 | */ | |
1033 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) | |
1034 | { | |
1035 | spin_lock_bh(&mpath->state_lock); | |
1036 | mesh_path_assign_nexthop(mpath, next_hop); | |
d19b3bf6 | 1037 | mpath->sn = 0xffff; |
eb2b9311 LCC |
1038 | mpath->metric = 0; |
1039 | mpath->hop_count = 0; | |
1040 | mpath->exp_time = 0; | |
1041 | mpath->flags |= MESH_PATH_FIXED; | |
1042 | mesh_path_activate(mpath); | |
1043 | spin_unlock_bh(&mpath->state_lock); | |
1044 | mesh_path_tx_pending(mpath); | |
1045 | } | |
1046 | ||
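A hedged sketch of a typical user of this function, loosely modeled on the cfg80211 mpath handling in cfg.c: resolve the next-hop station, resolve the destination path, then pin the next hop. The helper name is hypothetical and sta_info_get() is assumed to have the per-sdata lookup signature from sta_info.h:

```c
/* Illustrative only: pin a static next hop for dst. */
static int example_set_static_path(struct ieee80211_sub_if_data *sdata,
				   u8 *dst, u8 *next_hop)
{
	struct sta_info *sta;
	struct mesh_path *mpath;
	int err = 0;

	rcu_read_lock();
	sta = sta_info_get(sdata, next_hop);
	if (!sta) {
		err = -ENOENT;
		goto out;
	}
	mpath = mesh_path_lookup(dst, sdata);
	if (!mpath) {
		err = -ENOENT;
		goto out;
	}
	mesh_path_fix_nexthop(mpath, sta);
out:
	rcu_read_unlock();
	return err;
}
```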
1047 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) | |
1048 | { | |
1049 | struct mesh_path *mpath; | |
1050 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); | |
1051 | mpath = node->mpath; | |
1052 | hlist_del_rcu(p); | |
d0df9eec | 1053 | if (free_leafs) { |
c6133661 | 1054 | del_timer_sync(&mpath->timer); |
eb2b9311 | 1055 | kfree(mpath); |
d0df9eec | 1056 | } |
eb2b9311 LCC |
1057 | kfree(node); |
1058 | } | |
1059 | ||
4caf86c6 | 1060 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) |
eb2b9311 LCC |
1061 | { |
1062 | struct mesh_path *mpath; | |
1063 | struct mpath_node *node, *new_node; | |
1064 | u32 hash_idx; | |
1065 | ||
8566dc3f | 1066 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
00242c40 PE |
1067 | if (new_node == NULL) |
1068 | return -ENOMEM; | |
1069 | ||
eb2b9311 LCC |
1070 | node = hlist_entry(p, struct mpath_node, list); |
1071 | mpath = node->mpath; | |
eb2b9311 | 1072 | new_node->mpath = mpath; |
f698d856 | 1073 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); |
eb2b9311 LCC |
1074 | hlist_add_head(&new_node->list, |
1075 | &newtbl->hash_buckets[hash_idx]); | |
4caf86c6 | 1076 | return 0; |
eb2b9311 LCC |
1077 | } |
1078 | ||
1079 | int mesh_pathtbl_init(void) | |
1080 | { | |
349eb8cf JB |
1081 | struct mesh_table *tbl_path, *tbl_mpp; |
1082 | ||
1083 | tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | |
1084 | if (!tbl_path) | |
79617dee | 1085 | return -ENOMEM; |
349eb8cf JB |
1086 | tbl_path->free_node = &mesh_path_node_free; |
1087 | tbl_path->copy_node = &mesh_path_node_copy; | |
1088 | tbl_path->mean_chain_len = MEAN_CHAIN_LEN; | |
5ee68e5b JC |
1089 | tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1090 | INIT_HLIST_HEAD(tbl_path->known_gates); | |
1091 | ||
79617dee | 1092 | |
349eb8cf JB |
1093 | tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
1094 | if (!tbl_mpp) { | |
1095 | mesh_table_free(tbl_path, true); | |
eb2b9311 | 1096 | return -ENOMEM; |
79617dee | 1097 | } |
349eb8cf JB |
1098 | tbl_mpp->free_node = &mesh_path_node_free; |
1099 | tbl_mpp->copy_node = &mesh_path_node_copy; | |
1100 | tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; | |
5ee68e5b JC |
1101 | tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1102 | INIT_HLIST_HEAD(tbl_mpp->known_gates); | |
349eb8cf JB |
1103 | |
1104 | /* Need no locking since this is during init */ | |
1105 | RCU_INIT_POINTER(mesh_paths, tbl_path); | |
1106 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); | |
79617dee | 1107 | |
eb2b9311 LCC |
1108 | return 0; |
1109 | } | |
1110 | ||
f698d856 | 1111 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
eb2b9311 | 1112 | { |
349eb8cf | 1113 | struct mesh_table *tbl; |
eb2b9311 LCC |
1114 | struct mesh_path *mpath; |
1115 | struct mpath_node *node; | |
1116 | struct hlist_node *p; | |
1117 | int i; | |
1118 | ||
349eb8cf JB |
1119 | rcu_read_lock(); |
1120 | tbl = rcu_dereference(mesh_paths); | |
1121 | for_each_mesh_entry(tbl, p, node, i) { | |
f698d856 | 1122 | if (node->mpath->sdata != sdata) |
eb2b9311 LCC |
1123 | continue; |
1124 | mpath = node->mpath; | |
1125 | spin_lock_bh(&mpath->state_lock); | |
1126 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | |
1127 | (!(mpath->flags & MESH_PATH_FIXED)) && | |
349eb8cf | 1128 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { |
eb2b9311 | 1129 | spin_unlock_bh(&mpath->state_lock); |
f698d856 | 1130 | mesh_path_del(mpath->dst, mpath->sdata); |
eb2b9311 LCC |
1131 | } else |
1132 | spin_unlock_bh(&mpath->state_lock); | |
1133 | } | |
349eb8cf | 1134 | rcu_read_unlock(); |
eb2b9311 LCC |
1135 | } |
1136 | ||
1137 | void mesh_pathtbl_unregister(void) | |
1138 | { | |
349eb8cf JB |
1139 | /* no need for locking during exit path */ |
1140 | mesh_table_free(rcu_dereference_raw(mesh_paths), true); | |
1141 | mesh_table_free(rcu_dereference_raw(mpp_paths), true); | |
eb2b9311 | 1142 | } |