// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"

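/* Hand the frame queued in local->tx_skb to the driver's synchronous
 * xmit callback from workqueue context. On failure the netif queues are
 * released and the frame is dropped.
 */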
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
        struct ieee802154_local *local =
                container_of(work, struct ieee802154_local, sync_tx_work);
        struct sk_buff *skb = local->tx_skb;
        struct net_device *dev = skb->dev;
        int res;

        res = drv_xmit_sync(local, skb);
        if (res)
                goto err_tx;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        ieee802154_xmit_complete(&local->hw, skb, false);

        return;

err_tx:
        /* Restart the netif queue on each sub_if_data object. */
        ieee802154_release_queue(local);
        if (atomic_dec_and_test(&local->phy->ongoing_txs))
                wake_up(&local->phy->sync_txq);
        kfree_skb(skb);
        netdev_dbg(dev, "transmission failed\n");
}

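/* Common transmit path: append the FCS when the hardware does not
 * compute the checksum itself, stop the netif queues and account the
 * transmission, then hand the frame to the driver, preferring its
 * asynchronous xmit callback over the synchronous fallback.
 */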
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret;

        if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
                struct sk_buff *nskb;
                u16 crc;

                if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
                        nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
                                               GFP_ATOMIC);
                        if (likely(nskb)) {
                                consume_skb(skb);
                                skb = nskb;
                        } else {
                                goto err_free_skb;
                        }
                }

                crc = crc_ccitt(0, skb->data, skb->len);
                put_unaligned_le16(crc, skb_put(skb, 2));
        }

        /* Stop the netif queue on each sub_if_data object. */
        ieee802154_hold_queue(local);
        atomic_inc(&local->phy->ongoing_txs);

        /* Drivers should preferably implement the async callback. In some rare
         * cases they only provide a sync callback which we will use as a
         * fallback.
         */
        if (local->ops->xmit_async) {
                unsigned int len = skb->len;

                ret = drv_xmit_async(local, skb);
                if (ret)
                        goto err_wake_netif_queue;

                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        } else {
                local->tx_skb = skb;
                queue_work(local->workqueue, &local->sync_tx_work);
        }

        return NETDEV_TX_OK;

err_wake_netif_queue:
        ieee802154_release_queue(local);
        if (atomic_dec_and_test(&local->phy->ongoing_txs))
                wake_up(&local->phy->sync_txq);
err_free_skb:
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

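/* Hold and disable the netif queues, wait until no transmission is in
 * flight anymore, then release the queues and return the result of the
 * last transmission.
 */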
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
        int ret;

        ieee802154_hold_queue(local);
        ieee802154_disable_queue(local);
        wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
        ret = local->tx_result;
        ieee802154_release_queue(local);

        return ret;
}

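/* Wait for all ongoing transmissions to complete and keep the netif
 * queues held, flagging the PHY so that attempts to transmit while the
 * queue is stopped can be detected.
 */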
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
        int ret;

        ieee802154_hold_queue(local);
        ret = ieee802154_sync_queue(local);
        set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

        return ret;
}

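/* Quiesce the data path before performing an MLME operation. */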
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
        return ieee802154_sync_and_hold_queue(local);
}

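/* Transmit an MLME frame and wait for its completion. The RTNL lock is
 * held so the interface cannot be stopped while the transmission is
 * pending.
 */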
int ieee802154_mlme_tx(struct ieee802154_local *local,
                       struct ieee802154_sub_if_data *sdata,
                       struct sk_buff *skb)
{
        int ret;

        /* Avoid possible calls to ->ndo_stop() when we asynchronously perform
         * MLME transmissions.
         */
        rtnl_lock();

        /* Ensure the device was not stopped, otherwise error out */
        if (!local->open_count) {
                rtnl_unlock();
                return -ENETDOWN;
        }

        /* Warn if the ieee802154 core thinks MLME frames can be sent while the
         * net interface expects this cannot happen.
         */
        if (WARN_ON_ONCE(!netif_running(sdata->dev))) {
                rtnl_unlock();
                return -ENETDOWN;
        }

        ieee802154_tx(local, skb);
        ret = ieee802154_sync_queue(local);

        rtnl_unlock();

        return ret;
}

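/* Counterpart of ieee802154_mlme_op_pre(): release the netif queues once
 * the MLME operation is done.
 */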
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
        ieee802154_release_queue(local);
}

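/* Convenience helper wrapping a single MLME transmission between the
 * pre/post operation handlers.
 */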
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
                           struct ieee802154_sub_if_data *sdata,
                           struct sk_buff *skb)
{
        int ret;

        ieee802154_mlme_op_pre(local);
        ret = ieee802154_mlme_tx(local, sdata, skb);
        ieee802154_mlme_op_post(local);

        return ret;
}

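/* Tell whether the ieee802154 core has intentionally stopped the queue. */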
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
        return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}

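/* Hot path entry point for frames coming from the net interfaces. */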
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
        /* Warn if the net interface tries to transmit frames while the
         * ieee802154 core assumes the queue is stopped.
         */
        WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

        return ieee802154_tx(local, skb);
}

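/* ndo_start_xmit handler for monitor interfaces: frames are transmitted
 * as-is, without link-layer security processing.
 */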
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

        skb->skb_iif = dev->ifindex;

        return ieee802154_hot_tx(sdata->local, skb);
}

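/* ndo_start_xmit handler for WPAN interfaces: apply link-layer security
 * before handing the frame to the common transmit path.
 */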
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        int rc;

        /* TODO: this should move into wpan_dev_hard_header() and
         * dev_hard_header(). Otherwise wireshark shows a MAC header carrying
         * security fields while the payload is not yet encrypted.
         */
        rc = mac802154_llsec_encrypt(&sdata->sec, skb);
        if (rc) {
                netdev_warn(dev, "encryption failed: %i\n", rc);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        skb->skb_iif = dev->ifindex;

        return ieee802154_hot_tx(sdata->local, skb);
}