batman-adv: separate ethernet comparing calls from hash functions
linux-2.6-block.git: net/batman-adv/originator.c
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

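/* (re)arm the delayed work that periodically purges stale originators;
 * it fires one second after being queued */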
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

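/* allocate the originator hash (1024 buckets) and start the purge timer;
 * returns 1 on success (or if already initialized), 0 on allocation failure */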
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	start_purge_timer(bat_priv);
	return 1;

err:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;
}

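/* RCU callback: free a neigh_node once all RCU readers are done with it */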
static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

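/* drop a neigh_node reference and free it via RCU when the count hits zero */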
void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

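/* allocate a new last-hop neighbor entry and add it to orig_node's
 * RCU-protected neighbor list */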
struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;
	atomic_set(&neigh_node->refcount, 1);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}

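/* kref release callback: tear down an originator once the last reference is
 * dropped - bonding members, neighbors, fragment list, global HNA entries and
 * the per-interface broadcast counters */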
void orig_node_free_ref(struct kref *refcount)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(refcount, struct orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

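/* stop the purge worker and release the complete originator hash together
 * with all originators it still contains */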
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			hlist_del_rcu(walk);
			call_rcu(&bucket->rcu, bucket_free_rcu);
			kref_put(&orig_node->refcount, orig_node_free_ref);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

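/* RCU callback: free the hash bucket and drop its originator reference */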
static void bucket_free_orig_rcu(struct rcu_head *rcu)
{
	struct element_t *bucket;
	struct orig_node *orig_node;

	bucket = container_of(rcu, struct element_t, rcu);
	orig_node = bucket->data;

	kref_put(&orig_node->refcount, orig_node_free_ref);
	kfree(bucket);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   addr));
	rcu_read_unlock();

	if (orig_node) {
		kref_get(&orig_node->refcount);
		return orig_node;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	kref_init(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
			      orig_node);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	/* extra reference for return */
	kref_get(&orig_node->refcount);
	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

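/* remove neighbors that timed out or whose incoming interface went away;
 * returns true if anything was purged and reports the best remaining
 * neighbor via best_neigh_node */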
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

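/* returns true if the originator itself timed out and should be removed;
 * otherwise purges its stale neighbors and updates the route if needed */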
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
		}
	}

	return false;
}

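/* walk the originator hash: drop timed-out originators and expired fragment
 * lists, then run the gateway purge/election and soft-interface neighbor
 * purge */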
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(walk);
				call_rcu(&bucket->rcu, bucket_free_orig_rcu);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

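/* delayed work handler: purge once, then re-arm the timer */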
static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

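/* allow other code paths to trigger a purge run outside the periodic worker */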
void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

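/* seq_file handler: dump the originator table (last-seen time, TQ, next hop
 * and potential next hops) */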
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *node;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
					  "please specify interfaces to enable it\n",
					  net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

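/* grow one originator's bcast_own(_sum) arrays to hold max_if_num interfaces;
 * called with ogm_cnt_lock held */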
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

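/* a hard interface was added: resize the per-interface broadcast counters of
 * every originator in the hash */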
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}

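/* shrink one originator's bcast_own(_sum) arrays, dropping the slot of the
 * removed interface; called with ogm_cnt_lock held */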
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

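/* a hard interface went away: shrink every originator's counters and renumber
 * the remaining interfaces attached to the same soft interface */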
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       batman_if->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (batman_if == batman_if_tmp)
			continue;

		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}