// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

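/* Per-interface driver state, kept in the netdev private area and as
 * the MHI device's drvdata. skbagg_head/skbagg_tail track a partially
 * re-assembled packet that arrived split across several MHI transfers.
 */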
struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
	int msg_enable;
	unsigned int mru;
};

struct mhi_device_info {
	const char *netname;
};

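/* Opening the interface primes the RX buffer pool and starts the TX
 * queue; carrier is reported up right away because link state is
 * managed out-of-band (e.g. via QMI).
 */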
static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

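/* Queue the skb on the UL (host-to-device) channel. On success the
 * buffer is freed later in mhi_net_ul_callback(); on failure it is
 * dropped here. The netdev queue is stopped once the channel ring is
 * full, providing backpressure to the stack.
 */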
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

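/* Stats are written from the transfer callbacks; snapshot them under
 * the u64_stats seqcount, retrying if a writer raced with the read.
 */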
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};

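/* The link carries raw IP (or QMAP) with no link-layer header:
 * point-to-point, no ARP, no hardware address.
 */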
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL; /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

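/* Chain a fragment of a packet that was split across several MHI
 * transfers (the -EOVERFLOW case below) onto the current aggregation
 * head, using the head skb's frag_list. Returns the aggregation head.
 */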
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

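/* Completion callback for the DL (device-to-host) channel, invoked by
 * the MHI core for each finished RX transfer. Sets the skb protocol
 * from the IP version nibble and hands the packet to the stack.
 */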
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		__netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

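/* Completion callback for the UL (host-to-device) channel: account the
 * transfer and wake the TX queue if flow control had stopped it.
 */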
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

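/* Pre-queue RX buffers on the DL channel until its descriptor ring is
 * full. Buffers are sized to the controller MRU when one is set,
 * otherwise to the current MTU.
 */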
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

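/* Bind an allocated net_device to the MHI device: start the MHI
 * channels, derive the RX queue size from the number of free DL
 * descriptors, then register the interface.
 */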
static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev;
	int err;

	mhi_netdev = netdev_priv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	err = register_netdev(ndev);
	if (err)
		return err;

	return 0;
}

static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(ndev);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

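/* Allocate the net_device (with struct mhi_net_dev as private area),
 * name it after the matched channel's mhi_device_info, and link it to
 * the MHI device via mhi_net_newlink().
 */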
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev);
	if (err) {
		free_netdev(ndev);
		return err;
	}

	return 0;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");