Commit | Line | Data |
---|---|---|
23721387 SW |
1 | /* |
2 | * Copyright (C) 2011 B.A.T.M.A.N. contributors: | |
3 | * | |
4 | * Simon Wunderlich | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of version 2 of the GNU General Public | |
8 | * License as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | |
18 | * 02110-1301, USA | |
19 | * | |
20 | */ | |
21 | ||
22 | #include "main.h" | |
23 | #include "hash.h" | |
24 | #include "hard-interface.h" | |
25 | #include "originator.h" | |
26 | #include "bridge_loop_avoidance.h" | |
20ff9d59 | 27 | #include "translation-table.h" |
23721387 SW |
28 | #include "send.h" |
29 | ||
30 | #include <linux/etherdevice.h> | |
31 | #include <linux/crc16.h> | |
32 | #include <linux/if_arp.h> | |
33 | #include <net/arp.h> | |
34 | #include <linux/if_vlan.h> | |
35 | ||
23721387 SW |
36 | static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; |
37 | ||
38 | static void bla_periodic_work(struct work_struct *work); | |
39 | static void bla_send_announce(struct bat_priv *bat_priv, | |
40 | struct backbone_gw *backbone_gw); | |
41 | ||
/* return the index of the claim */
static inline uint32_t choose_claim(const void *data, uint32_t size)
{
	const unsigned char *octet = data;
	uint32_t state = 0;
	size_t pos;

	/* Jenkins one-at-a-time hash over the claim's mac + vid bytes */
	for (pos = 0; pos < ETH_ALEN + sizeof(short); pos++) {
		state += octet[pos];
		state += state << 10;
		state ^= state >> 6;
	}

	/* final avalanche */
	state += state << 3;
	state ^= state >> 11;
	state += state << 15;

	return state % size;
}
61 | ||
/* return the index of the backbone gateway */
static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
{
	const unsigned char *octet = data;
	uint32_t state = 0;
	size_t pos;

	/* Jenkins one-at-a-time hash over the gateway's mac + vid bytes */
	for (pos = 0; pos < ETH_ALEN + sizeof(short); pos++) {
		state += octet[pos];
		state += state << 10;
		state ^= state >> 6;
	}

	/* final avalanche */
	state += state << 3;
	state ^= state >> 11;
	state += state << 15;

	return state % size;
}
81 | ||
82 | ||
83 | /* compares address and vid of two backbone gws */ | |
84 | static int compare_backbone_gw(const struct hlist_node *node, const void *data2) | |
85 | { | |
86 | const void *data1 = container_of(node, struct backbone_gw, | |
87 | hash_entry); | |
88 | ||
89 | return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); | |
90 | } | |
91 | ||
92 | /* compares address and vid of two claims */ | |
93 | static int compare_claim(const struct hlist_node *node, const void *data2) | |
94 | { | |
95 | const void *data1 = container_of(node, struct claim, | |
96 | hash_entry); | |
97 | ||
98 | return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); | |
99 | } | |
100 | ||
/* free a backbone gw */
static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
{
	/* drop one reference; the last holder frees the structure after
	 * an RCU grace period so concurrent readers stay safe
	 */
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}
107 | ||
/* finally deinitialize the claim */
static void claim_free_rcu(struct rcu_head *rcu)
{
	struct claim *claim;

	claim = container_of(rcu, struct claim, rcu);

	/* a claim holds a reference on its backbone gw (taken in
	 * bla_add_claim()); release it before freeing the claim itself
	 */
	backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}
118 | ||
/* free a claim, call claim_free_rcu if its the last reference */
static void claim_free_ref(struct claim *claim)
{
	/* actual freeing is deferred past the RCU grace period so that
	 * lockless hash readers never see freed memory
	 */
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, claim_free_rcu);
}
125 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise. The returned claim carries a reference which
 * the caller must release with claim_free_ref().
 */
static struct claim *claim_hash_find(struct bat_priv *bat_priv,
				     struct claim *data)
{
	struct hashtable_t *hash = bat_priv->claim_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct claim *claim;
	struct claim *claim_tmp = NULL;
	int index;

	/* the hash may not have been initialized yet */
	if (!hash)
		return NULL;

	index = choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
		if (!compare_claim(&claim->hash_entry, data))
			continue;

		/* skip entries that are already on their way to being
		 * freed - only a successful increment makes it ours
		 */
		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
164 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * looks for a backbone gateway in the hash, and returns it if found
 * or NULL otherwise. The returned entry carries a reference which
 * the caller must release with backbone_gw_free_ref().
 */
static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
					      uint8_t *addr, short vid)
{
	struct hashtable_t *hash = bat_priv->backbone_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct backbone_gw search_entry, *backbone_gw;
	struct backbone_gw *backbone_gw_tmp = NULL;
	int index;

	/* the hash may not have been initialized yet */
	if (!hash)
		return NULL;

	/* build a stack key; only orig + vid are looked at by
	 * compare_backbone_gw()/choose_backbone_gw()
	 */
	memcpy(search_entry.orig, addr, ETH_ALEN);
	search_entry.vid = vid;

	index = choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
		if (!compare_backbone_gw(&backbone_gw->hash_entry,
					 &search_entry))
			continue;

		/* skip entries that are already being freed */
		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
208 | ||
/* delete all claims for a backbone */
static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
{
	struct hashtable_t *hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->claim_hash;
	if (!hash)
		return;

	/* walk every bucket and unlink the claims owned by this backbone */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node, node_tmp,
					  head, hash_entry) {

			if (claim->backbone_gw != backbone_gw)
				continue;

			/* drop the hash's reference; the actual free is
			 * RCU-deferred, so touching node afterwards is safe
			 */
			claim_free_ref(claim);
			hlist_del_rcu(node);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BLA_CRC_INIT;
}
243 | ||
244 | /** | |
245 | * @bat_priv: the bat priv with all the soft interface information | |
246 | * @orig: the mac address to be announced within the claim | |
247 | * @vid: the VLAN ID | |
248 | * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) | |
249 | * | |
250 | * sends a claim frame according to the provided info. | |
251 | */ | |
252 | static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, | |
253 | short vid, int claimtype) | |
254 | { | |
255 | struct sk_buff *skb; | |
256 | struct ethhdr *ethhdr; | |
257 | struct hard_iface *primary_if; | |
258 | struct net_device *soft_iface; | |
259 | uint8_t *hw_src; | |
260 | struct bla_claim_dst local_claim_dest; | |
261 | uint32_t zeroip = 0; | |
262 | ||
263 | primary_if = primary_if_get_selected(bat_priv); | |
264 | if (!primary_if) | |
265 | return; | |
266 | ||
38ef3d1d SW |
267 | memcpy(&local_claim_dest, &bat_priv->claim_dest, |
268 | sizeof(local_claim_dest)); | |
23721387 SW |
269 | local_claim_dest.type = claimtype; |
270 | ||
271 | soft_iface = primary_if->soft_iface; | |
272 | ||
273 | skb = arp_create(ARPOP_REPLY, ETH_P_ARP, | |
274 | /* IP DST: 0.0.0.0 */ | |
275 | zeroip, | |
276 | primary_if->soft_iface, | |
277 | /* IP SRC: 0.0.0.0 */ | |
278 | zeroip, | |
279 | /* Ethernet DST: Broadcast */ | |
280 | NULL, | |
281 | /* Ethernet SRC/HW SRC: originator mac */ | |
282 | primary_if->net_dev->dev_addr, | |
283 | /* HW DST: FF:43:05:XX:00:00 | |
284 | * with XX = claim type | |
38ef3d1d | 285 | * and YY:YY = group id |
23721387 SW |
286 | */ |
287 | (uint8_t *)&local_claim_dest); | |
288 | ||
289 | if (!skb) | |
290 | goto out; | |
291 | ||
292 | ethhdr = (struct ethhdr *)skb->data; | |
293 | hw_src = (uint8_t *)ethhdr + | |
294 | sizeof(struct ethhdr) + | |
295 | sizeof(struct arphdr); | |
296 | ||
297 | /* now we pretend that the client would have sent this ... */ | |
298 | switch (claimtype) { | |
299 | case CLAIM_TYPE_ADD: | |
300 | /* normal claim frame | |
301 | * set Ethernet SRC to the clients mac | |
302 | */ | |
303 | memcpy(ethhdr->h_source, mac, ETH_ALEN); | |
304 | bat_dbg(DBG_BLA, bat_priv, | |
305 | "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); | |
306 | break; | |
307 | case CLAIM_TYPE_DEL: | |
308 | /* unclaim frame | |
309 | * set HW SRC to the clients mac | |
310 | */ | |
311 | memcpy(hw_src, mac, ETH_ALEN); | |
312 | bat_dbg(DBG_BLA, bat_priv, | |
313 | "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid); | |
314 | break; | |
315 | case CLAIM_TYPE_ANNOUNCE: | |
316 | /* announcement frame | |
317 | * set HW SRC to the special mac containg the crc | |
318 | */ | |
319 | memcpy(hw_src, mac, ETH_ALEN); | |
320 | bat_dbg(DBG_BLA, bat_priv, | |
321 | "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", | |
322 | ethhdr->h_source, vid); | |
323 | break; | |
324 | case CLAIM_TYPE_REQUEST: | |
325 | /* request frame | |
326 | * set HW SRC to the special mac containg the crc | |
327 | */ | |
328 | memcpy(hw_src, mac, ETH_ALEN); | |
329 | memcpy(ethhdr->h_dest, mac, ETH_ALEN); | |
330 | bat_dbg(DBG_BLA, bat_priv, | |
331 | "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", | |
332 | ethhdr->h_source, ethhdr->h_dest, vid); | |
333 | break; | |
334 | ||
335 | } | |
336 | ||
337 | if (vid != -1) | |
338 | skb = vlan_insert_tag(skb, vid); | |
339 | ||
340 | skb_reset_mac_header(skb); | |
341 | skb->protocol = eth_type_trans(skb, soft_iface); | |
342 | bat_priv->stats.rx_packets++; | |
343 | bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); | |
344 | soft_iface->last_rx = jiffies; | |
345 | ||
346 | netif_rx(skb); | |
347 | out: | |
348 | if (primary_if) | |
349 | hardif_free_ref(primary_if); | |
350 | } | |
351 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found. Returns NULL on allocation/insertion failure; otherwise
 * the caller owns one reference on the returned entry.
 */
static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
					       uint8_t *orig, short vid)
{
	struct backbone_gw *entry;
	struct orig_node *orig_node;
	int hash_added;

	entry = backbone_hash_find(bat_priv, orig, vid);

	/* already known: return with the reference taken by the lookup */
	if (entry)
		return entry;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		orig, vid);

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	memcpy(entry->orig, orig, ETH_ALEN);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
			      choose_backbone_gw, entry, &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any tt entries */
	orig_node = orig_hash_find(bat_priv, orig);
	if (orig_node) {
		tt_global_del_orig(bat_priv, orig_node,
				   "became a backbone gateway");
		orig_node_free_ref(orig_node);
	}
	return entry;
}
408 | ||
409 | /* update or add the own backbone gw to make sure we announce | |
410 | * where we receive other backbone gws | |
411 | */ | |
412 | static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, | |
413 | struct hard_iface *primary_if, | |
414 | short vid) | |
415 | { | |
416 | struct backbone_gw *backbone_gw; | |
417 | ||
418 | backbone_gw = bla_get_backbone_gw(bat_priv, | |
419 | primary_if->net_dev->dev_addr, vid); | |
420 | if (unlikely(!backbone_gw)) | |
421 | return; | |
422 | ||
423 | backbone_gw->lasttime = jiffies; | |
424 | backbone_gw_free_ref(backbone_gw); | |
425 | } | |
426 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void bla_answer_request(struct bat_priv *bat_priv,
			       struct hard_iface *primary_if, short vid)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	struct claim *claim;
	struct backbone_gw *backbone_gw;
	int i;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_answer_request(): received a claim request, send all of our own claims again\n");

	/* only claims belonging to our own backbone entry are repeated */
	backbone_gw = backbone_hash_find(bat_priv,
					 primary_if->net_dev->dev_addr, vid);
	if (!backbone_gw)
		return;

	/* walk the whole claim hash under RCU; bla_send_claim() does not
	 * modify the hash, so read-side protection is sufficient
	 */
	hash = bat_priv->claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			bla_send_claim(bat_priv, claim->addr, claim->vid,
				       CLAIM_TYPE_ADD);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	bla_send_announce(bat_priv, backbone_gw);
	backbone_gw_free_ref(backbone_gw);
}
472 | ||
/**
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */
static void bla_send_request(struct backbone_gw *backbone_gw)
{
	/* first, remove all old entries - they will be re-learned from
	 * the repeated claims of the answering gateway
	 */
	bla_del_backbone_claims(backbone_gw);

	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
		"Sending REQUEST to %pM\n",
		backbone_gw->orig);

	/* send request */
	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
		       backbone_gw->vid, CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now.
	 * bla_num_requests gates that behaviour elsewhere; only count
	 * this gateway once until the request is answered
	 */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
499 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void bla_send_announce(struct bat_priv *bat_priv,
			      struct backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	uint16_t crc;

	/* build the special announce mac: 4 magic bytes followed by the
	 * claim checksum in network byte order (see handle_announce())
	 */
	memcpy(mac, announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], (uint8_t *)&crc, 2);

	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);

}
520 | ||
521 | /** | |
522 | * @bat_priv: the bat priv with all the soft interface information | |
523 | * @mac: the mac address of the claim | |
524 | * @vid: the VLAN ID of the frame | |
525 | * @backbone_gw: the backbone gateway which claims it | |
526 | * | |
527 | * Adds a claim in the claim hash. | |
528 | */ | |
529 | static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac, | |
530 | const short vid, struct backbone_gw *backbone_gw) | |
531 | { | |
532 | struct claim *claim; | |
533 | struct claim search_claim; | |
534 | int hash_added; | |
535 | ||
536 | memcpy(search_claim.addr, mac, ETH_ALEN); | |
537 | search_claim.vid = vid; | |
538 | claim = claim_hash_find(bat_priv, &search_claim); | |
539 | ||
540 | /* create a new claim entry if it does not exist yet. */ | |
541 | if (!claim) { | |
542 | claim = kzalloc(sizeof(*claim), GFP_ATOMIC); | |
543 | if (!claim) | |
544 | return; | |
545 | ||
546 | memcpy(claim->addr, mac, ETH_ALEN); | |
547 | claim->vid = vid; | |
548 | claim->lasttime = jiffies; | |
549 | claim->backbone_gw = backbone_gw; | |
550 | ||
551 | atomic_set(&claim->refcount, 2); | |
552 | bat_dbg(DBG_BLA, bat_priv, | |
553 | "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", | |
554 | mac, vid); | |
555 | hash_added = hash_add(bat_priv->claim_hash, compare_claim, | |
556 | choose_claim, claim, &claim->hash_entry); | |
557 | ||
558 | if (unlikely(hash_added != 0)) { | |
559 | /* only local changes happened. */ | |
560 | kfree(claim); | |
561 | return; | |
562 | } | |
563 | } else { | |
564 | claim->lasttime = jiffies; | |
565 | if (claim->backbone_gw == backbone_gw) | |
566 | /* no need to register a new backbone */ | |
567 | goto claim_free_ref; | |
568 | ||
569 | bat_dbg(DBG_BLA, bat_priv, | |
570 | "bla_add_claim(): changing ownership for %pM, vid %d\n", | |
571 | mac, vid); | |
572 | ||
573 | claim->backbone_gw->crc ^= | |
574 | crc16(0, claim->addr, ETH_ALEN); | |
575 | backbone_gw_free_ref(claim->backbone_gw); | |
576 | ||
577 | } | |
578 | /* set (new) backbone gw */ | |
579 | atomic_inc(&backbone_gw->refcount); | |
580 | claim->backbone_gw = backbone_gw; | |
581 | ||
582 | backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); | |
583 | backbone_gw->lasttime = jiffies; | |
584 | ||
585 | claim_free_ref: | |
586 | claim_free_ref(claim); | |
587 | } | |
588 | ||
/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
			  const short vid)
{
	struct claim search_claim, *claim;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);

	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
	claim_free_ref(claim); /* reference from the hash is gone */

	/* still safe to dereference: we hold the claim_hash_find()
	 * reference until the final claim_free_ref() below
	 */
	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	claim_free_ref(claim);
}
613 | ||
/* check for ANNOUNCE frame, return 1 if handled */
static int handle_announce(struct bat_priv *bat_priv,
			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
{
	struct backbone_gw *backbone_gw;
	uint16_t crc;

	/* an announce mac starts with the 4 magic bytes */
	if (memcmp(an_addr, announce_mac, 4) != 0)
		return 0;

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	/* report as handled even if the gateway entry could not be
	 * created - the frame was still an announcement
	 */
	if (unlikely(!backbone_gw))
		return 1;


	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	/* bytes 4-5 of the announce mac carry the CRC in network order,
	 * matching how bla_send_announce() built it
	 */
	crc = ntohs(*((uint16_t *)(&an_addr[4])));

	bat_dbg(DBG_BLA, bat_priv,
		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
		vid, backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
			crc);

		/* our table is out of sync - ask for a full resync */
		bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	backbone_gw_free_ref(backbone_gw);
	return 1;
}
658 | ||
/* check for REQUEST frame, return 1 if handled */
static int handle_request(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  struct ethhdr *ethhdr, short vid)
{
	/* check for REQUEST frame: the hw src must match the frame's
	 * ethernet destination (see bla_send_claim() REQUEST case)
	 */
	if (!compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	bat_dbg(DBG_BLA, bat_priv,
		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
		vid, ethhdr->h_source);

	/* re-announce all our own claims so the requester can resync */
	bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}
682 | ||
/* check for UNCLAIM frame, return 1 if handled */
static int handle_unclaim(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && compare_eth(backbone_addr,
				      primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);

	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);

	/* unknown gateway: nothing to delete, but frame was handled */
	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	bat_dbg(DBG_BLA, bat_priv,
		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		claim_addr, vid, backbone_gw->orig);

	bla_del_claim(bat_priv, claim_addr, vid);
	backbone_gw_free_ref(backbone_gw);
	return 1;
}
710 | ||
/* check for CLAIM frame, return 1 if handled */
static int handle_claim(struct bat_priv *bat_priv,
			struct hard_iface *primary_if, uint8_t *backbone_addr,
			uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	/* NOTE(review): unlike handle_unclaim(), primary_if is used here
	 * without a NULL check - presumably the caller guarantees it;
	 * verify against bla_process_claim()'s callers
	 */
	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);

	/* TODO: we could call something like tt_local_del() here. */

	backbone_gw_free_ref(backbone_gw);
	return 1;
}
735 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface of this mesh
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * returns:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int check_claim_group(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     uint8_t *hw_src, uint8_t *hw_dst,
			     struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct orig_node *orig_node;
	struct bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->claim_dest;

	/* check if it is a claim packet in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case CLAIM_TYPE_ADD:
		backbone_addr = hw_src;
		break;
	case CLAIM_TYPE_REQUEST:
	case CLAIM_TYPE_ANNOUNCE:
	case CLAIM_TYPE_DEL:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		/* unknown claim type - treat as not a claim packet */
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = orig_hash_find(bat_priv, backbone_addr);

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves.
	 * group ids are stored in network order, hence the ntohs()
	 */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		bat_dbg(DBG_BLA, bat_priv,
			"taking other backbones claim group: %04x\n",
			ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	orig_node_free_ref(orig_node);

	return 2;
}
813 | ||
814 | ||
23721387 SW |
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface of this mesh
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it can use the frame on its own.
 */
static int bla_process_claim(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct arphdr *arphdr;
	uint8_t *hw_src, *hw_dst;
	struct bla_claim_dst *bla_dst;
	uint16_t proto;
	int headlen;
	short vid = -1;	/* -1 = untagged frame */
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* unwrap an optional 802.1Q header to get vid and real protocol */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		vhdr = (struct vlan_ethhdr *)ethhdr;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
		headlen = sizeof(*vhdr);
	} else {
		proto = ntohs(ethhdr->h_proto);
		headlen = sizeof(*ethhdr);
	}

	if (proto != ETH_P_ARP)
		return 0; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */

	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	/* ARP payload layout: sender hw (6), sender ip (4), target hw */
	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct bla_claim_dst *)hw_dst;

	/* check if it is a claim frame. */
	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
	if (ret == 1)
		bat_dbg(DBG_BLA, bat_priv,
			"bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			ethhdr->h_source, vid, hw_src, hw_dst);

	/* 0 = no claim, 1 = foreign-group claim: pass back unprocessed */
	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case CLAIM_TYPE_ADD:
		if (handle_claim(bat_priv, primary_if, hw_src,
				 ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_DEL:
		if (handle_unclaim(bat_priv, primary_if,
				   ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case CLAIM_TYPE_ANNOUNCE:
		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_REQUEST:
		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
			return 1;
		break;
	}

	bat_dbg(DBG_BLA, bat_priv,
		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		ethhdr->h_source, vid, hw_src, hw_dst);
	return 1;
}
920 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @now: if nonzero, wipe all backbone gws regardless of their age
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hashtable_t *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* removal needs the bucket write lock; the _safe iterator
		 * allows deleting the current entry while walking
		 */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!has_timed_out(backbone_gw->lasttime,
					   BLA_BACKBONE_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla_num_requests);

			/* drop all claims owned by this gateway first */
			bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(node);
			backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
967 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
static void bla_purge_claims(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if, int now)
{
	struct claim *claim;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	hash = bat_priv->claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* with now set, primary_if may be NULL; the jump
			 * below skips the primary_if dereferences
			 */
			if (now)
				goto purge_now;
			/* only claims held by ourselves are aged out here */
			if (!compare_eth(claim->backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;
			if (!has_timed_out(claim->lasttime,
					   BLA_CLAIM_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, bat_priv,
				"bla_purge_claims(): %pM, vid %d, time out\n",
				claim->addr, claim->vid);

purge_now:
			/* NOTE(review): handle_unclaim() is invoked under
			 * rcu_read_lock() — confirm it never sleeps.
			 */
			handle_unclaim(bat_priv, primary_if,
				       claim->backbone_gw->orig,
				       claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}
1015 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void bla_update_orig_address(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct hard_iface *oldif)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	/* reset bridge loop avoidance group id: it is derived from the
	 * primary interface's MAC address via crc16
	 */
	bat_priv->claim_dest.group =
		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));

	if (!oldif) {
		/* no previous interface to migrate from: drop all state */
		bla_purge_claims(bat_priv, NULL, 1);
		bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!compare_eth(backbone_gw->orig,
					 oldif->net_dev->dev_addr))
				continue;

			memcpy(backbone_gw->orig,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
1068 | ||
1069 | ||
1070 | ||
/* (re)start the timer for the periodic bla work */
static void bla_start_timer(struct bat_priv *bat_priv)
{
	/* NOTE(review): the delayed work is re-initialized on every restart;
	 * a single INIT_DELAYED_WORK at setup time would normally suffice —
	 * confirm nothing relies on the re-init.
	 */
	INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
	queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
			   msecs_to_jiffies(BLA_PERIOD_LENGTH));
}
1078 | ||
/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, bla_work);
	struct hlist_node *node;
	struct hlist_head *head;
	struct backbone_gw *backbone_gw;
	struct hashtable_t *hash;
	struct hard_iface *primary_if;
	int i;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* age out stale claims and backbone gws every period */
	bla_purge_claims(bat_priv, primary_if, 0);
	bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* only refresh and announce entries we own */
			if (!compare_eth(backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);

	/* re-arm: the work is one-shot and must be queued again */
	bla_start_timer(bat_priv);
}
1131 | ||
/* initialize all bla structures
 *
 * returns 1 on success (also when the hashes already exist),
 * -1 if a hash allocation failed.
 */
int bla_init(struct bat_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct hard_iface *primary_if;

	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address (only the 3-byte magic prefix) */
	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
	bat_priv->claim_dest.type = 0;
	primary_if = primary_if_get_selected(bat_priv);
	if (primary_if) {
		/* group id derived from the primary MAC via crc16 */
		bat_priv->claim_dest.group =
			htons(crc16(0, primary_if->net_dev->dev_addr,
				    ETH_ALEN));
		hardif_free_ref(primary_if);
	} else {
		bat_priv->claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list: pre-age all entries so they are
	 * considered expired from the start
	 */
	for (i = 0; i < DUPLIST_SIZE; i++)
		bat_priv->bcast_duplist[i].entrytime =
			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
	bat_priv->bcast_duplist_curr = 0;

	/* already initialized - nothing more to do */
	if (bat_priv->claim_hash)
		return 1;

	bat_priv->claim_hash = hash_new(128);
	bat_priv->backbone_hash = hash_new(32);

	/* NOTE(review): if only one of the two allocations fails, the other
	 * hash is left allocated here — presumably bla_free() cleans it up
	 * on the error path; verify against the caller.
	 */
	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
		return -1;

	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");

	bla_start_timer(bat_priv);
	return 1;
}
1174 | ||
fe2da6ff SW |
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @bcast_packet: the broadcast packet to check for duplicates
 * @hdr_size: maximum length of the frame
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * returns 1 if the packet is a duplicate and shall be dropped, 0 otherwise.
 **/

int bla_check_bcast_duplist(struct bat_priv *bat_priv,
			    struct bcast_packet *bcast_packet,
			    int hdr_size)
{
	int i, length, curr;
	uint8_t *content;
	uint16_t crc;
	struct bcast_duplist_entry *entry;

	/* checksum only the payload behind the bcast header */
	length = hdr_size - sizeof(*bcast_packet);
	content = (uint8_t *)bcast_packet;
	content += sizeof(*bcast_packet);

	/* calculate the crc ... */
	crc = crc16(0, content, length);

	/* scan the ring buffer from newest to oldest entry */
	for (i = 0 ; i < DUPLIST_SIZE; i++) {
		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
		entry = &bat_priv->bcast_duplist[curr];

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* same crc but same originator: not a foreign duplicate */
		if (compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		return 1;
	}
	/* not found, add a new entry (overwrite the oldest entry) */
	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
	entry = &bat_priv->bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
	bat_priv->bcast_duplist_curr = curr;

	/* allow it, its the first occurrence. */
	return 0;
}
1239 | ||
1240 | ||
1241 | ||
20ff9d59 SW |
1242 | /** |
1243 | * @bat_priv: the bat priv with all the soft interface information | |
1244 | * @orig: originator mac address | |
1245 | * | |
1246 | * check if the originator is a gateway for any VLAN ID. | |
1247 | * | |
1248 | * returns 1 if it is found, 0 otherwise | |
1249 | * | |
1250 | */ | |
1251 | ||
1252 | int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) | |
1253 | { | |
1254 | struct hashtable_t *hash = bat_priv->backbone_hash; | |
1255 | struct hlist_head *head; | |
1256 | struct hlist_node *node; | |
1257 | struct backbone_gw *backbone_gw; | |
1258 | int i; | |
1259 | ||
1260 | if (!atomic_read(&bat_priv->bridge_loop_avoidance)) | |
1261 | return 0; | |
1262 | ||
1263 | if (!hash) | |
1264 | return 0; | |
1265 | ||
1266 | for (i = 0; i < hash->size; i++) { | |
1267 | head = &hash->table[i]; | |
1268 | ||
1269 | rcu_read_lock(); | |
1270 | hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { | |
1271 | if (compare_eth(backbone_gw->orig, orig)) { | |
1272 | rcu_read_unlock(); | |
1273 | return 1; | |
1274 | } | |
1275 | } | |
1276 | rcu_read_unlock(); | |
1277 | } | |
1278 | ||
1279 | return 0; | |
1280 | } | |
1281 | ||
1282 | ||
23721387 SW |
/**
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 * if the orig_node is also a gateway on the soft interface, otherwise it
 * returns 0.
 */
int bla_is_backbone_gw(struct sk_buff *skb,
		       struct orig_node *orig_node, int hdr_size)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct backbone_gw *backbone_gw;
	short vid = -1;	/* -1 means "no VLAN tag present" */

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
		return 0;

	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);

	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		/* re-check the length before reading the larger vlan header */
		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
			return 0;

		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
					      hdr_size);
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	}

	/* see if this originator is a backbone gw for this VLAN */

	backbone_gw = backbone_hash_find(orig_node->bat_priv,
					 orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	/* only the existence check matters; drop the reference again */
	backbone_gw_free_ref(backbone_gw);
	return 1;
}
1329 | ||
/* free all bla structures (for softinterface free or module unload) */
void bla_free(struct bat_priv *bat_priv)
{
	struct hard_iface *primary_if;

	/* stop the periodic work first so it cannot race with the teardown */
	cancel_delayed_work_sync(&bat_priv->bla_work);
	primary_if = primary_if_get_selected(bat_priv);

	if (bat_priv->claim_hash) {
		bla_purge_claims(bat_priv, primary_if, 1);
		hash_destroy(bat_priv->claim_hash);
		bat_priv->claim_hash = NULL;
	}
	if (bat_priv->backbone_hash) {
		bla_purge_backbone_gw(bat_priv, 1);
		hash_destroy(bat_priv->backbone_hash);
		bat_priv->backbone_hash = NULL;
	}
	if (primary_if)
		hardif_free_ref(primary_if);
}
1351 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 *
 * on the "handled" path the skb is consumed (freed) here.
 */
int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	/* look up a claim for the source client on this vlan */
	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* ... drop it. the responsible gateway is in charge. */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		handle_claim(bat_priv, primary_if,
			     primary_if->net_dev->dev_addr,
			     ethhdr->h_source, vid);
		goto allow;
	}
allow:
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	/* release the references taken above in every exit path */
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}
1440 | ||
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 *
 * note: unlike bla_rx(), the skb is NOT freed on the "handled" path here.
 */
int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct claim search_claim, *claim = NULL;
	struct hard_iface *primary_if;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* in VLAN case, the mac header might not be set. */
	skb_reset_mac_header(skb);

	/* claim frames are consumed by the claim machinery, not forwarded */
	if (bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;

	claim = claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (compare_eth(claim->backbone_gw->orig,
			primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		handle_unclaim(bat_priv, primary_if,
			       primary_if->net_dev->dev_addr,
			       ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	/* release the references taken above in every exit path */
	if (primary_if)
		hardif_free_ref(primary_if);
	if (claim)
		claim_free_ref(claim);
	return ret;
}
9bf8e4d4 SW |
1528 | |
1529 | int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) | |
1530 | { | |
1531 | struct net_device *net_dev = (struct net_device *)seq->private; | |
1532 | struct bat_priv *bat_priv = netdev_priv(net_dev); | |
1533 | struct hashtable_t *hash = bat_priv->claim_hash; | |
1534 | struct claim *claim; | |
1535 | struct hard_iface *primary_if; | |
1536 | struct hlist_node *node; | |
1537 | struct hlist_head *head; | |
1538 | uint32_t i; | |
1539 | bool is_own; | |
1540 | int ret = 0; | |
1541 | ||
1542 | primary_if = primary_if_get_selected(bat_priv); | |
1543 | if (!primary_if) { | |
1544 | ret = seq_printf(seq, | |
1545 | "BATMAN mesh %s disabled - please specify interfaces to enable it\n", | |
1546 | net_dev->name); | |
1547 | goto out; | |
1548 | } | |
1549 | ||
1550 | if (primary_if->if_status != IF_ACTIVE) { | |
1551 | ret = seq_printf(seq, | |
1552 | "BATMAN mesh %s disabled - primary interface not active\n", | |
1553 | net_dev->name); | |
1554 | goto out; | |
1555 | } | |
1556 | ||
38ef3d1d SW |
1557 | seq_printf(seq, |
1558 | "Claims announced for the mesh %s (orig %pM, group id %04x)\n", | |
1559 | net_dev->name, primary_if->net_dev->dev_addr, | |
1560 | ntohs(bat_priv->claim_dest.group)); | |
9bf8e4d4 SW |
1561 | seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", |
1562 | "Client", "VID", "Originator", "CRC"); | |
1563 | for (i = 0; i < hash->size; i++) { | |
1564 | head = &hash->table[i]; | |
1565 | ||
1566 | rcu_read_lock(); | |
1567 | hlist_for_each_entry_rcu(claim, node, head, hash_entry) { | |
1568 | is_own = compare_eth(claim->backbone_gw->orig, | |
1569 | primary_if->net_dev->dev_addr); | |
1570 | seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", | |
1571 | claim->addr, claim->vid, | |
1572 | claim->backbone_gw->orig, | |
1573 | (is_own ? 'x' : ' '), | |
1574 | claim->backbone_gw->crc); | |
1575 | } | |
1576 | rcu_read_unlock(); | |
1577 | } | |
1578 | out: | |
1579 | if (primary_if) | |
1580 | hardif_free_ref(primary_if); | |
1581 | return ret; | |
1582 | } |