// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/wwan.h>

#include "mhi.h"

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

/* When set to false, the default netdev (link 0) is not created, and it's up
 * to the user to create the link (via wwan rtnetlink).
 */
static bool create_default_iface = true;
module_param(create_default_iface, bool, 0);

struct mhi_device_info {
	const char *netname;
	const struct mhi_net_proto *proto;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

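/* Transmit path: apply the protocol tx_fixup (if any), hand the skb to the
 * MHI uplink channel, and pause the queue when the channel ring is full.
 */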
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	if (proto && proto->tx_fixup) {
		skb = proto->tx_fixup(mhi_netdev, skb);
		if (unlikely(!skb))
			goto exit_drop;
	}

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

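/* Snapshot the per-direction counters under their u64_stats seqcount,
 * retrying the read if a writer updated the counters concurrently.
 */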
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
		stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};

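/* Raw IP link: no L2 header, no hardware address, point-to-point, no ARP. */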
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

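/* Chain a downlink fragment onto the packet being reassembled and return the
 * head skb of the aggregate.
 */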
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

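/* Downlink (device-to-host) transfer completion: re-aggregate packets split
 * across MHI transfers, derive the protocol from the IP version nibble, and
 * hand the packet to the protocol handler or straight up the stack.
 */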
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	const struct mhi_net_proto *proto = mhi_netdev->proto;
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet cannot fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		if (proto && proto->rx)
			proto->rx(mhi_netdev, skb);
		else
			netif_rx(skb);
	}

	/* Refill if the RX buffer queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

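/* Uplink (host-to-device) transfer completion: release the skb, update TX
 * accounting and wake the queue once there is room again in the channel.
 */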
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

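/* Refill work: keep the downlink channel stocked with receive buffers sized
 * to the MRU (or, if unset, the current MTU).
 */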
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

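/* WWAN link creation: bind the netdev to the MHI device, start the MHI
 * channels and register the interface. Only link id 0 is supported for now.
 */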
static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
			   struct netlink_ext_ack *extack)
{
	const struct mhi_device_info *info;
	struct mhi_device *mhi_dev = ctxt;
	struct mhi_net_dev *mhi_netdev;
	int err;

	info = (struct mhi_device_info *)mhi_dev->id->driver_data;

	/* For now we only support one link (link context 0), driver must be
	 * reworked to break 1:1 relationship for net MBIM and to forward setup
	 * call to rmnet(QMAP) otherwise.
	 */
	if (if_id != 0)
		return -EINVAL;

	if (dev_get_drvdata(&mhi_dev->dev))
		return -EBUSY;

	mhi_netdev = wwan_netdev_drvpriv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->proto = info->proto;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (extack)
		err = register_netdevice(ndev);
	else
		err = register_netdev(ndev);
	if (err)
		goto out_err;

	if (mhi_netdev->proto) {
		err = mhi_netdev->proto->init(mhi_netdev);
		if (err)
			goto out_err_proto;
	}

	return 0;

out_err_proto:
	unregister_netdevice(ndev);
out_err:
	free_netdev(ndev);
	return err;
}

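/* WWAN link removal: unregister the netdev, stop the MHI channels and drop
 * any partially re-aggregated packet.
 */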
static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
			    struct list_head *head)
{
	struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev);
	struct mhi_device *mhi_dev = ctxt;

	if (head)
		unregister_netdevice_queue(ndev, head);
	else
		unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

static const struct wwan_ops mhi_wwan_ops = {
	.priv_size = sizeof(struct mhi_net_dev),
	.setup = mhi_net_setup,
	.newlink = mhi_net_newlink,
	.dellink = mhi_net_dellink,
};

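/* Register the WWAN link ops against the MHI controller and, unless disabled
 * by the create_default_iface module parameter, create the default link 0.
 */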
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct net_device *ndev;
	int err;

	err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev,
				WWAN_NO_DEFAULT_LINK);
	if (err)
		return err;

	if (!create_default_iface)
		return 0;

	/* Create a default interface which is used as either RMNET real-dev,
	 * MBIM link 0 or ip link 0
	 */
	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev) {
		err = -ENOMEM;
		goto err_unregister;
	}

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
	if (err)
		goto err_release;

	return 0;

err_release:
	free_netdev(ndev);
err_unregister:
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	return err;
}

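/* Unregister the WWAN link ops and remove the default link if it was created
 * at probe time.
 */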
static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

	/* WWAN core takes care of removing remaining links */
	wwan_unregister_ops(&cntrl->mhi_dev->dev);

	if (create_default_iface)
		mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_info mhi_hwip0_mbim = {
	.netname = "mhi_mbim%d",
	.proto = &proto_mbim,
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");