batman-adv: remove extra layer between hash and hash element - hash bucket
[linux-block.git] / net / batman-adv / originator.c
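The change named in the commit title embeds the hlist_node (the hash_entry member used by hash_add() and the hlist_for_each_entry_*() walks below) directly in struct orig_node, so each hash bucket is a plain hlist_head plus its own write-side lock rather than a separately allocated bucket element. A minimal sketch of the layout this file relies on — only hash_entry, orig, table, list_locks and size actually appear in the code below; everything else is elided or illustrative:

struct hashtable_t {
	struct hlist_head *table;	/* one list head per bucket */
	spinlock_t *list_locks;		/* one write-side lock per bucket */
	int size;			/* number of buckets */
};

struct orig_node {
	uint8_t orig[ETH_ALEN];		/* originator MAC, used as hash key */
	/* ... routing and protocol state ... */
	struct hlist_node hash_entry;	/* links the node into its bucket */
};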
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

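/* (re)arms the periodic purge: purge_orig() runs after roughly one second
 * (1 * HZ) on the batman event workqueue and re-schedules itself */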
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

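/* allocates the originator hash (1024 buckets) and starts the purge timer;
 * returns 1 on success (or if already initialized), 0 on failure */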
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	start_purge_timer(bat_priv);
	return 1;

err:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;
}

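/* neigh_node objects are reference counted; the final neigh_node_free_ref()
 * defers the kfree() via call_rcu() so that lockless readers walking the
 * RCU-protected neighbor list can still dereference the entry */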
static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

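/* allocates a neigh_node for the given last-hop neighbor, takes the initial
 * reference and adds it to orig_node's RCU-protected neighbor list */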
struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;
	atomic_set(&neigh_node->refcount, 1);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}

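/* kref release callback, invoked when the last reference to an orig_node is
 * dropped; releases all bonding and neighbor references, pending fragments
 * and global HNA entries before freeing the per-interface counters and the
 * node itself */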
void orig_node_free_ref(struct kref *refcount)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(refcount, struct orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

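/* tears down the originator hash on shutdown: cancels the purge worker and
 * drops the hash's reference to every orig_node under the per-bucket lock */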
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			kref_put(&orig_node->refcount, orig_node_free_ref);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	orig_node = orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	kref_init(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig,
			      choose_orig, orig_node, &orig_node->hash_entry);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	/* extra reference for return */
	kref_get(&orig_node->refcount);
	return orig_node;

free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

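/* removes neighbors whose incoming interface went away or whose last_valid
 * timestamp is older than PURGE_TIMEOUT and reports the best remaining
 * neighbor (by tq_avg); returns true if at least one neighbor was purged */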
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

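/* returns true if the originator itself timed out (2 * PURGE_TIMEOUT) and
 * should be removed; otherwise purges stale neighbors and re-selects the
 * route if the neighbor set changed */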
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
		}
	}

	return false;
}

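/* walks every hash bucket under its list lock, removes timed-out originators
 * and expired fragment chains, then runs gateway election and soft-interface
 * neighbor purging */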
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(node);
				kref_put(&orig_node->refcount,
					 orig_node_free_ref);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

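/* seq_file handler that prints the originator table: last-seen time, TQ of
 * the currently selected router and all potential next hops per originator */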
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
				     "please specify interfaces to enable it\n",
				     net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

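/* grows orig_node->bcast_own and bcast_own_sum to max_if_num entries after a
 * new hard interface was added; called with orig_node->ogm_cnt_lock held */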
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}

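/* shrinks the per-interface arrays after an interface was removed: the chunks
 * before and after del_if_num are copied into a smaller allocation, or the
 * arrays are freed entirely when the last interface goes away */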
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       batman_if->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (batman_if == batman_if_tmp)
			continue;

		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}