/* net/mac802154/tx.c — IEEE 802.15.4 MAC transmit path (Linux kernel, ~v6.2) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2007-2012 Siemens AG
4  *
5  * Written by:
6  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
7  * Sergey Lapin <slapin@ossfans.org>
8  * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
9  * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
10  */
11
12 #include <linux/netdevice.h>
13 #include <linux/if_arp.h>
14 #include <linux/crc-ccitt.h>
15 #include <asm/unaligned.h>
16
17 #include <net/rtnetlink.h>
18 #include <net/ieee802154_netdev.h>
19 #include <net/mac802154.h>
20 #include <net/cfg802154.h>
21
22 #include "ieee802154_i.h"
23 #include "driver-ops.h"
24
/* Worker for the synchronous-transmit fallback: hands the skb stashed in
 * local->tx_skb by ieee802154_tx() to the driver's sync xmit op and
 * updates the netdev stats on success.
 */
void ieee802154_xmit_sync_worker(struct work_struct *work)
{
	struct ieee802154_local *local =
		container_of(work, struct ieee802154_local, sync_tx_work);
	struct sk_buff *skb = local->tx_skb;
	struct net_device *dev = skb->dev;
	int res;

	res = drv_xmit_sync(local, skb);
	if (res)
		goto err_tx;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Success path: the completion handler owns the skb from here on
	 * (presumably it also rebalances ongoing_txs/queues — the release
	 * and wake_up below are only done on the error path).
	 */
	ieee802154_xmit_complete(&local->hw, skb, false);

	return;

err_tx:
	/* Restart the netif queue on each sub_if_data object. */
	ieee802154_release_queue(local);
	/* Drop this transmission's reference and wake anyone sleeping in
	 * ieee802154_sync_queue() once no transmissions remain in flight.
	 */
	if (atomic_dec_and_test(&local->phy->ongoing_txs))
		wake_up(&local->phy->sync_txq);
	kfree_skb(skb);
	netdev_dbg(dev, "transmission failed\n");
}
52
53 static netdev_tx_t
54 ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
55 {
56         struct net_device *dev = skb->dev;
57         int ret;
58
59         if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
60                 struct sk_buff *nskb;
61                 u16 crc;
62
63                 if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
64                         nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
65                                                GFP_ATOMIC);
66                         if (likely(nskb)) {
67                                 consume_skb(skb);
68                                 skb = nskb;
69                         } else {
70                                 goto err_free_skb;
71                         }
72                 }
73
74                 crc = crc_ccitt(0, skb->data, skb->len);
75                 put_unaligned_le16(crc, skb_put(skb, 2));
76         }
77
78         /* Stop the netif queue on each sub_if_data object. */
79         ieee802154_hold_queue(local);
80         atomic_inc(&local->phy->ongoing_txs);
81
82         /* Drivers should preferably implement the async callback. In some rare
83          * cases they only provide a sync callback which we will use as a
84          * fallback.
85          */
86         if (local->ops->xmit_async) {
87                 unsigned int len = skb->len;
88
89                 ret = drv_xmit_async(local, skb);
90                 if (ret)
91                         goto err_wake_netif_queue;
92
93                 dev->stats.tx_packets++;
94                 dev->stats.tx_bytes += len;
95         } else {
96                 local->tx_skb = skb;
97                 queue_work(local->workqueue, &local->sync_tx_work);
98         }
99
100         return NETDEV_TX_OK;
101
102 err_wake_netif_queue:
103         ieee802154_release_queue(local);
104         if (atomic_dec_and_test(&local->phy->ongoing_txs))
105                 wake_up(&local->phy->sync_txq);
106 err_free_skb:
107         kfree_skb(skb);
108         return NETDEV_TX_OK;
109 }
110
/* Drain the transmit path: stop the queues, sleep until no transmission
 * is in flight, then report the result of the last transmission.
 *
 * Return: local->tx_result — set by the completion/error paths (not
 * visible in this file), sampled after the wait completes.
 */
static int ieee802154_sync_queue(struct ieee802154_local *local)
{
	int ret;

	/* Hold before disabling so nothing new can slip in while we wait. */
	ieee802154_hold_queue(local);
	ieee802154_disable_queue(local);
	wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
	ret = local->tx_result;
	/* Drop only the hold taken above; an outer hold (see
	 * ieee802154_sync_and_hold_queue()) remains in effect.
	 */
	ieee802154_release_queue(local);

	return ret;
}
123
/* Flush all ongoing transmissions and leave the queue held.
 *
 * The hold taken here is NOT released before returning — the inner
 * hold/release pair in ieee802154_sync_queue() is balanced, so the queue
 * stays stopped until the caller runs ieee802154_release_queue() (e.g.
 * via ieee802154_mlme_op_post()). The QUEUE_STOPPED flag records that
 * state for ieee802154_queue_is_stopped().
 *
 * Return: result of the drain, see ieee802154_sync_queue().
 */
int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
{
	int ret;

	ieee802154_hold_queue(local);
	ret = ieee802154_sync_queue(local);
	set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);

	return ret;
}
134
/* Prepare for an MLME operation: quiesce data traffic and keep the queue
 * held until ieee802154_mlme_op_post() is called.
 */
int ieee802154_mlme_op_pre(struct ieee802154_local *local)
{
	return ieee802154_sync_and_hold_queue(local);
}
139
/* Transmit one MLME frame and wait for its completion.
 *
 * Must run between ieee802154_mlme_op_pre() and ieee802154_mlme_op_post()
 * (the queue is expected to be held). Returns -ENETDOWN when the device
 * or interface is not up, otherwise the transmission result.
 */
int ieee802154_mlme_tx(struct ieee802154_local *local,
		       struct ieee802154_sub_if_data *sdata,
		       struct sk_buff *skb)
{
	int ret;

	/* Avoid possible calls to ->ndo_stop() when we asynchronously perform
	 * MLME transmissions.
	 */
	rtnl_lock();

	/* Ensure the device was not stopped, otherwise error out */
	if (!local->open_count) {
		rtnl_unlock();
		return -ENETDOWN;
	}

	/* Warn if the ieee802154 core thinks MLME frames can be sent while the
	 * net interface expects this cannot happen.
	 */
	if (WARN_ON_ONCE(!netif_running(sdata->dev))) {
		rtnl_unlock();
		return -ENETDOWN;
	}

	/* ieee802154_tx() never fails from the caller's point of view (it
	 * always returns NETDEV_TX_OK); the real outcome is picked up by
	 * the sync below via local->tx_result.
	 */
	ieee802154_tx(local, skb);
	ret = ieee802154_sync_queue(local);

	rtnl_unlock();

	return ret;
}
172
/* Finish an MLME operation: drop the queue hold taken by
 * ieee802154_mlme_op_pre() so data traffic can flow again.
 */
void ieee802154_mlme_op_post(struct ieee802154_local *local)
{
	ieee802154_release_queue(local);
}
177
/* Convenience wrapper: run a single MLME transmission with the full
 * pre/tx/post bracket around it.
 *
 * Return: the transmission result from ieee802154_mlme_tx().
 */
int ieee802154_mlme_tx_one(struct ieee802154_local *local,
			   struct ieee802154_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	int rc;

	ieee802154_mlme_op_pre(local);
	rc = ieee802154_mlme_tx(local, sdata, skb);
	ieee802154_mlme_op_post(local);

	return rc;
}
190
191 static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
192 {
193         return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
194 }
195
/* Hot-path transmit used by the ndo_start_xmit handlers below; sanity
 * checks that the core did not expect the queue to be stopped.
 */
static netdev_tx_t
ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
{
	/* Warn if the net interface tries to transmit frames while the
	 * ieee802154 core assumes the queue is stopped.
	 */
	WARN_ON_ONCE(ieee802154_queue_is_stopped(local));

	return ieee802154_tx(local, skb);
}
206
207 netdev_tx_t
208 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
209 {
210         struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
211
212         skb->skb_iif = dev->ifindex;
213
214         return ieee802154_hot_tx(sdata->local, skb);
215 }
216
/* ndo_start_xmit for data (WPAN) interfaces: apply link-layer security,
 * then hand the frame to the common transmit path. On encryption failure
 * the frame is dropped and NETDEV_TX_OK is returned so the stack does
 * not retry.
 */
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int rc;

	/* TODO we should move it to wpan_dev_hard_header and dev_hard_header
	 * functions. The reason is wireshark will show a mac header which is
	 * with security fields but the payload is not encrypted.
	 */
	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
	if (rc) {
		netdev_warn(dev, "encryption failed: %i\n", rc);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb->skb_iif = dev->ifindex;

	return ieee802154_hot_tx(sdata->local, skb);
}