Commit | Line | Data |
---|---|---|
3ffec6a1 LP |
1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* MHI Network driver - Network over MHI bus | |
3 | * | |
4 | * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org> | |
5 | */ | |
6 | ||
7 | #include <linux/if_arp.h> | |
8 | #include <linux/mhi.h> | |
9 | #include <linux/mod_devicetable.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/netdevice.h> | |
12 | #include <linux/skbuff.h> | |
13 | #include <linux/u64_stats_sync.h> | |
14 | ||
/* MTU bounds exposed via min_mtu/max_mtu in mhi_net_setup(); the default
 * (0x4000) also sizes the RX buffers allocated in mhi_net_rx_refill_work().
 */
#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000
18 | ||
/* Per-device RX/TX counters. Readers (mhi_ndo_get_stats64) and writers
 * (the DL/UL transfer callbacks and the xmit path) coordinate through the
 * two u64_stats_sync sequence counters below.
 */
struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;	/* guards the tx_* counters */
	struct u64_stats_sync rx_syncp;	/* guards the rx_* counters */
};
31 | ||
/* Driver private state, stored in the net_device private area
 * (retrieved with netdev_priv() / dev_get_drvdata()).
 */
struct mhi_net_dev {
	struct mhi_device *mdev;	/* underlying MHI transport device */
	struct net_device *ndev;	/* back-pointer to our netdev */
	struct delayed_work rx_refill;	/* RX buffer pool refill worker */
	struct mhi_net_stats stats;
	u32 rx_queue_sz;	/* total RX descriptor count, set once in probe */
};
39 | ||
40 | static int mhi_ndo_open(struct net_device *ndev) | |
41 | { | |
42 | struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); | |
43 | ||
44 | /* Feed the rx buffer pool */ | |
45 | schedule_delayed_work(&mhi_netdev->rx_refill, 0); | |
46 | ||
47 | /* Carrier is established via out-of-band channel (e.g. qmi) */ | |
48 | netif_carrier_on(ndev); | |
49 | ||
50 | netif_start_queue(ndev); | |
51 | ||
52 | return 0; | |
53 | } | |
54 | ||
55 | static int mhi_ndo_stop(struct net_device *ndev) | |
56 | { | |
57 | struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); | |
58 | ||
59 | netif_stop_queue(ndev); | |
60 | netif_carrier_off(ndev); | |
61 | cancel_delayed_work_sync(&mhi_netdev->rx_refill); | |
62 | ||
63 | return 0; | |
64 | } | |
65 | ||
66 | static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) | |
67 | { | |
68 | struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); | |
69 | struct mhi_device *mdev = mhi_netdev->mdev; | |
70 | int err; | |
71 | ||
72 | err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); | |
73 | if (unlikely(err)) { | |
74 | net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", | |
75 | ndev->name, err); | |
76 | ||
77 | u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); | |
78 | u64_stats_inc(&mhi_netdev->stats.tx_dropped); | |
79 | u64_stats_update_end(&mhi_netdev->stats.tx_syncp); | |
80 | ||
81 | /* drop the packet */ | |
82 | dev_kfree_skb_any(skb); | |
83 | } | |
84 | ||
85 | if (mhi_queue_is_full(mdev, DMA_TO_DEVICE)) | |
86 | netif_stop_queue(ndev); | |
87 | ||
88 | return NETDEV_TX_OK; | |
89 | } | |
90 | ||
91 | static void mhi_ndo_get_stats64(struct net_device *ndev, | |
92 | struct rtnl_link_stats64 *stats) | |
93 | { | |
94 | struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); | |
95 | unsigned int start; | |
96 | ||
97 | do { | |
98 | start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp); | |
99 | stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); | |
100 | stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); | |
101 | stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); | |
102 | stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped); | |
103 | } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start)); | |
104 | ||
105 | do { | |
106 | start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp); | |
107 | stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); | |
108 | stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); | |
109 | stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); | |
110 | stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); | |
111 | } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start)); | |
112 | } | |
113 | ||
/* net_device callbacks for the MHI network interface */
static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open               = mhi_ndo_open,
	.ndo_stop               = mhi_ndo_stop,
	.ndo_start_xmit         = mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};
120 | ||
121 | static void mhi_net_setup(struct net_device *ndev) | |
122 | { | |
123 | ndev->header_ops = NULL; /* No header */ | |
c134db89 | 124 | ndev->type = ARPHRD_RAWIP; |
3ffec6a1 LP |
125 | ndev->hard_header_len = 0; |
126 | ndev->addr_len = 0; | |
127 | ndev->flags = IFF_POINTOPOINT | IFF_NOARP; | |
128 | ndev->netdev_ops = &mhi_netdev_ops; | |
129 | ndev->mtu = MHI_NET_DEFAULT_MTU; | |
130 | ndev->min_mtu = MHI_NET_MIN_MTU; | |
131 | ndev->max_mtu = MHI_NET_MAX_MTU; | |
132 | ndev->tx_queue_len = 1000; | |
133 | } | |
134 | ||
/* MHI downlink (device -> host) transfer completion callback.
 *
 * @mhi_res->buf_addr is an skb previously queued by
 * mhi_net_rx_refill_work(). On success the skb is tagged with an L3
 * protocol, sized with skb_put() and handed to the stack; on error it is
 * freed and rx_errors incremented (except for -ENOTCONN channel teardown,
 * which is not an error).
 */
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	/* Sample the free RX descriptor count up front for the refill
	 * decision at the end of this function.
	 */
	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		/* No L2 header on this link: classify from the IP version
		 * nibble of the first payload byte; anything else is passed
		 * up as MAP (QMAP-multiplexed) traffic.
		 */
		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}
180 | ||
/* MHI uplink (host -> device) transfer completion callback.
 *
 * Frees the skb queued by mhi_ndo_xmit(), accounts the result, and wakes
 * the TX queue if it was stopped and the UL ring has room again. Note the
 * tx_syncp critical section deliberately spans the whole status check,
 * including the early -ENOTCONN return, which must still call
 * u64_stats_update_end() before leaving.
 */
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {

		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	/* A descriptor was released: restart a queue stopped by the xmit path */
	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
213 | ||
214 | static void mhi_net_rx_refill_work(struct work_struct *work) | |
215 | { | |
216 | struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev, | |
217 | rx_refill.work); | |
218 | struct net_device *ndev = mhi_netdev->ndev; | |
219 | struct mhi_device *mdev = mhi_netdev->mdev; | |
220 | int size = READ_ONCE(ndev->mtu); | |
221 | struct sk_buff *skb; | |
222 | int err; | |
223 | ||
6e10785e | 224 | while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { |
3ffec6a1 LP |
225 | skb = netdev_alloc_skb(ndev, size); |
226 | if (unlikely(!skb)) | |
227 | break; | |
228 | ||
229 | err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT); | |
230 | if (unlikely(err)) { | |
231 | net_err_ratelimited("%s: Failed to queue RX buf (%d)\n", | |
232 | ndev->name, err); | |
233 | kfree_skb(skb); | |
234 | break; | |
235 | } | |
236 | ||
3ffec6a1 LP |
237 | /* Do not hog the CPU if rx buffers are consumed faster than |
238 | * queued (unlikely). | |
239 | */ | |
240 | cond_resched(); | |
241 | } | |
242 | ||
243 | /* If we're still starved of rx buffers, reschedule later */ | |
6e10785e | 244 | if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz) |
3ffec6a1 LP |
245 | schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); |
246 | } | |
247 | ||
b80b5dbf LP |
248 | static struct device_type wwan_type = { |
249 | .name = "wwan", | |
250 | }; | |
251 | ||
3ffec6a1 LP |
252 | static int mhi_net_probe(struct mhi_device *mhi_dev, |
253 | const struct mhi_device_id *id) | |
254 | { | |
255 | const char *netname = (char *)id->driver_data; | |
256 | struct device *dev = &mhi_dev->dev; | |
257 | struct mhi_net_dev *mhi_netdev; | |
258 | struct net_device *ndev; | |
259 | int err; | |
260 | ||
261 | ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE, | |
262 | mhi_net_setup); | |
263 | if (!ndev) | |
264 | return -ENOMEM; | |
265 | ||
266 | mhi_netdev = netdev_priv(ndev); | |
267 | dev_set_drvdata(dev, mhi_netdev); | |
268 | mhi_netdev->ndev = ndev; | |
269 | mhi_netdev->mdev = mhi_dev; | |
270 | SET_NETDEV_DEV(ndev, &mhi_dev->dev); | |
b80b5dbf | 271 | SET_NETDEV_DEVTYPE(ndev, &wwan_type); |
3ffec6a1 | 272 | |
3ffec6a1 LP |
273 | INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); |
274 | u64_stats_init(&mhi_netdev->stats.rx_syncp); | |
275 | u64_stats_init(&mhi_netdev->stats.tx_syncp); | |
276 | ||
277 | /* Start MHI channels */ | |
278 | err = mhi_prepare_for_transfer(mhi_dev); | |
279 | if (err) | |
280 | goto out_err; | |
281 | ||
e6ec3ccd LP |
282 | /* Number of transfer descriptors determines size of the queue */ |
283 | mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); | |
284 | ||
3ffec6a1 LP |
285 | err = register_netdev(ndev); |
286 | if (err) | |
287 | goto out_err; | |
288 | ||
289 | return 0; | |
290 | ||
291 | out_err: | |
292 | free_netdev(ndev); | |
293 | return err; | |
294 | } | |
295 | ||
296 | static void mhi_net_remove(struct mhi_device *mhi_dev) | |
297 | { | |
298 | struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); | |
299 | ||
300 | unregister_netdev(mhi_netdev->ndev); | |
301 | ||
302 | mhi_unprepare_from_transfer(mhi_netdev->mdev); | |
303 | ||
304 | free_netdev(mhi_netdev->ndev); | |
305 | } | |
306 | ||
/* Matched MHI channels; driver_data carries the netdev name template
 * consumed by mhi_net_probe().
 */
static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
313 | ||
/* MHI bus glue: probe/remove plus downlink/uplink transfer callbacks */
static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};
325 | ||
/* Register the driver with the MHI bus at module init */
module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");