// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007-2012 Siemens AG
 *
 * Written by:
 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 * Sergey Lapin <slapin@ossfans.org>
 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/crc-ccitt.h>
#include <linux/unaligned.h>

#include <net/rtnetlink.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"

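/* Worker for the synchronous transmit path: sends local->tx_skb through the
 * driver's sync xmit op, bumps the tx stats and completes the transfer on
 * success, or releases the queue and frees the skb on failure.
 */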
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	DEV_STATS_INC(dev, tx_packets);
	DEV_STATS_ADD(dev, tx_bytes, skb->len);

	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}

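/* Common transmit path: append the FCS (CRC-CCITT) when the hardware does
 * not compute the checksum itself, hold the netif queue, then hand the skb
 * to the driver's async xmit op, or fall back to the sync worker.
 */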
static netdev_tx_t
ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
		struct sk_buff *nskb;
		u16 crc;

		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
					       GFP_ATOMIC);
			if (likely(nskb)) {
				consume_skb(skb);
				skb = nskb;
			} else {
				goto err_free_skb;
			}
		}

		crc = crc_ccitt(0, skb->data, skb->len);
		put_unaligned_le16(crc, skb_put(skb, 2));
	}

	/* Stop the netif queue on each sub_if_data object. */
	ieee802154_hold_queue(local);
	atomic_inc(&local->phy->ongoing_txs);

	/* Drivers should preferably implement the async callback. In some rare
	 * cases they only provide a sync callback which we will use as a
	 * fallback.
	 */
	if (local->ops->xmit_async) {
		unsigned int len = skb->len;

		ret = drv_xmit_async(local, skb);
		if (ret)
			goto err_wake_netif_queue;

		DEV_STATS_INC(dev, tx_packets);
		DEV_STATS_ADD(dev, tx_bytes, len);
	} else {
		local->tx_skb = skb;
		queue_work(local->workqueue, &local->sync_tx_work);
	}

	return NETDEV_TX_OK;

err_wake_netif_queue:
	ieee802154_release_queue(local);
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
err_free_skb:
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

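/* Hold and disable the netif queue, then wait until all ongoing
 * transmissions have completed. Returns the last recorded tx result.
 */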
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	ieee802154_release_queue(local);

	return ret;
}

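/* Like ieee802154_sync_queue(), but also mark the queue as stopped so it
 * stays held once the synchronization is done.
 */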
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}

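/* Called before an MLME operation: sync all pending transmissions and keep
 * the queue held until ieee802154_mlme_op_post().
 */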
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}

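/* Transmit a single MLME frame and wait for its completion. Must be called
 * with the rtnl lock held.
 */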
int ieee802154_mlme_tx_locked(struct ieee802154_local *local,
			      struct ieee802154_sub_if_data *sdata,
			      struct sk_buff *skb)
{
	/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
	 * MLME transmissions.
	 */
	ASSERT_RTNL();

	/* Ensure the device was not stopped, otherwise error out */
	if (!local->open_count)
		return -ENETDOWN;

	/* Warn if the ieee802154 core thinks MLME frames can be sent while the
	 * net interface expects this cannot happen.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev)))
		return -ENETDOWN;

	ieee802154_tx(local, skb);
	return ieee802154_sync_queue(local);
}

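/* rtnl-locking wrapper around ieee802154_mlme_tx_locked(). */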
int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret;

	rtnl_lock();
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	rtnl_unlock();

	return ret;
}

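/* Called after an MLME operation: release the queue held by
 * ieee802154_mlme_op_pre().
 */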
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}

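/* Perform a full pre/tx/post MLME cycle for a single frame, with the rtnl
 * lock already held.
 */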
int ieee802154_mlme_tx_one_locked(struct ieee802154_local *local,
				  struct ieee802154_sub_if_data *sdata,
				  struct sk_buff *skb)
{
	int ret;

	ieee802154_mlme_op_pre(local);
	ret = ieee802154_mlme_tx_locked(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return ret;
}

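/* Helper telling whether the ieee802154 core currently considers the
 * transmit queue stopped.
 */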
static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
{
	return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
}

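/* Hot transmit path entry used by the netdev start_xmit handlers below. */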
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}

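/* ndo_start_xmit handler for monitor interfaces: frames are sent as-is,
 * without link-layer security processing.
 */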
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}

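/* ndo_start_xmit handler for WPAN data interfaces: apply link-layer security
 * (llsec) encryption before handing the frame to the transmit path.
 */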
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO: this should move into wpan_dev_hard_header() and
	 * dev_hard_header(). As it stands, wireshark shows a MAC header
	 * carrying security fields while the payload is not yet encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}