// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

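/* Per-device statistics. TX and RX counters sit behind separate
 * u64_stats_sync instances so each direction can be updated from its own
 * completion context; rx_queued counts the receive buffers currently
 * posted to the MHI download (DL) ring.
 */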
struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	atomic_t rx_queued;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

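/* Driver context, allocated as the net_device private area. rx_refill is
 * a delayed work that keeps the download ring supplied with receive skbs.
 */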
struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		/* drop the packet */
		dev_kfree_skb_any(skb);
	}

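	/* Stop the queue while the MHI TX ring is full; it is woken again
	 * from the UL completion callback once ring space frees up.
	 */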
	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_NONE; /* QMAP... */
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

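/* Completion callback for each download (device-to-host) transfer;
 * buf_addr is the skb that was queued by the RX refill work.
 */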
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int remaining;

	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		skb->protocol = htons(ETH_P_MAP);
		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (remaining <= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

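/* Completion callback for each upload (host-to-device) transfer, i.e.
 * once the hardware has consumed a TX buffer.
 */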
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {

		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int size = READ_ONCE(ndev->mtu);
	struct sk_buff *skb;
	int err;

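	/* Post receive buffers until the DL ring holds rx_queue_sz skbs;
	 * each buffer is sized to the current MTU.
	 */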
	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const char *netname = (char *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
			    mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	/* All MHI net channels have 128 ring elements (at least for now) */
	mhi_netdev->rx_queue_sz = 128;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	err = register_netdev(ndev);
	if (err)
		goto out_err;

	return 0;

out_err:
	free_netdev(ndev);
	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}

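/* driver_data carries the netdev name template used for each channel */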
static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");