batman-adv: don't switch byte order too often if not needed
[linux-2.6-block.git] / net/batman-adv/send.c
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet and
	 * will try to reroute it because the ttvn contained in the header is
	 * less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;

	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest, vid);
	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

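/* queue the next OGM transmission for the given interface by handing it
 * over to the routing algorithm's bat_ogm_schedule() callback
 */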
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

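/* free a forw_packet: drop its skb (if any), release the reference held on
 * the incoming hard interface and free the structure itself
 */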
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

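/* enqueue a prepared broadcast forw_packet on the bcast list and arm its
 * delayed work so that it is (re)transmitted at send_time
 */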
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

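/* delayed work callback: rebroadcast the queued packet on every hard
 * interface attached to the soft interface and requeue it until it has been
 * sent BATADV_NUM_BCASTS_MAX times (respecting the per-interface limit)
 */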
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

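/* delayed work callback: emit a queued OGM via the routing algorithm and,
 * for our own OGMs, schedule the next transmission right away
 */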
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

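/* cancel and free all forw_packets still queued on the broadcast and OGM
 * lists; if hard_iface is given, only packets belonging to that interface
 * are purged
 */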
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}