// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt/USB4 cables using USB4NET protocol
 * (formerly Apple ThunderboltIP).
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	1000

#define TBNET_RING_SIZE		256
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	10
#define TBNET_E2E		BIT(0)
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_64K_FRAMES	BIT(2)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)
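/*
 * Worked example (added for illustration), assuming 4 KiB pages: 4096
 * bytes of frame data plus the aligned skb_shared_info does not fit in
 * a single page, so TBNET_RX_PAGE_ORDER evaluates to 1 and each Rx
 * buffer is an 8 KiB order-1 page.
 */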
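/*
 * As the macro name suggests, the lowest 6 bits of the XDomain route
 * string hold the number of the L0 (host router) port the cable is
 * connected to.
 */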
#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index of the frames within the packet
 * @frame_id: ID used to match frames to a specific packet
 * @frame_count: how many frames assemble a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	__le32 frame_size;
	__le16 frame_index;
	__le16 frame_id;
	__le32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

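/*
 * A short illustration (not part of the original source): length_sn
 * packs the payload length in 32-bit words into bits 5:0 and a 2-bit
 * sequence number into bits 28:27. For example, tbnet_fill_header()
 * below encodes the 92-byte login packet with sequence number 2 as
 * ((92 - 3 * 4) / 4) | (2 << TBIP_HDR_SN_SHIFT) = 20 | 0x10000000.
 */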
struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @local_transmit_path: HopID we are using to send out packets
 * @remote_transmit_path: HopID the other end is using to send packets to us
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @remote_transmit_path
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
 *	    network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	int local_transmit_path;
	int remote_transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static bool tbnet_e2e = true;
module_param_named(e2e, tbnet_e2e, bool, 0444);
MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");

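/*
 * A usage sketch (not from the original source): the parameter is
 * read-only (0444), so E2E flow control can only be toggled at load
 * time, e.g. "modprobe thunderbolt-net e2e=0", assuming the driver is
 * built as the thunderbolt-net module.
 */
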
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

	/* Length is in dwords and does not include the route_hi/lo and
	 * length_sn fields themselves.
	 */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = net->local_transmit_path;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int ret, retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
		net->remote_transmit_path = 0;
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

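/*
 * Control-path callback registered through net->handler: it runs for
 * every incoming XDomain control packet, so it first verifies that the
 * packet is a ThunderboltIP message addressed to this host before
 * acting on it. Returns 1 when the packet was consumed, 0 otherwise.
 */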
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->remote_transmit_path = pkg->transmit_path;

			/* If we reached the maximum number of retries or
			 * the remote host logged out previously, schedule
			 * another round of login retries.
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

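/*
 * Producer/consumer accounting for the software rings: both indices are
 * free-running unsigned counters, so prod - cons gives the number of
 * available buffers even across wraparound, and slot lookups mask the
 * index with TBNET_RING_SIZE - 1 (the ring size is a power of two).
 */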
static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate a page (order > 0) so that it can hold the
		 * maximum ThunderboltIP frame (4 kB) plus the additional
		 * room for the skb_shared_info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
		return;
	}

	/* Both logins successful so enable the rings, high-speed DMA
	 * paths and start the network device queue.
	 *
	 * Note we enable the DMA paths last to make sure we have primed
	 * the Rx ring before any incoming packets are allowed to
	 * arrive.
	 */
	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		goto err_free_tx_buffers;
	}

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_tx_buffers:
	tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}

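/*
 * Login retry policy, as implemented below: a new attempt is made every
 * TBNET_LOGIN_DELAY (4.5 s) for up to TBNET_LOGIN_RETRIES (60) attempts,
 * with the 2-bit sequence number cycling through 0-3.
 */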
static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

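/*
 * Validate a received frame: check the ring descriptor flags, the frame
 * size, and, when in the middle of reassembling a multi-frame packet,
 * that the frame's count, index and ID are consistent with the first
 * fragment stored in net->rx_hdr.
 */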
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Should be greater than just the header, i.e. it contains data */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check the frame count matches the first fragment */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check the frame index increments correctly and the
		 * frame ID matches.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware; one at a time is too
		 * slow, so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

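		/* Either start a new skb, using this page as its linear
		 * data area (build_skb() takes ownership of the page), or
		 * attach the page as a fragment of the packet currently
		 * being reassembled.
		 */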
		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;
	unsigned int flags;
	int hopid;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return hopid;
	}
	net->local_transmit_path = hopid;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	flags = RING_FLAG_FRAME;
	/* Only enable full E2E if the other end supports it too */
	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
		flags |= RING_FLAG_E2E;

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
				net->tx_ring.ring->hop, sof_mask,
				eof_mask, tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_xdomain_release_out_hopid(xd, hopid);
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

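/*
 * Handle checksum offload for an outgoing packet. For CHECKSUM_PARTIAL
 * skbs the checksum is computed in software here, over the copies
 * already made into the Tx frames, and written directly into the
 * checksum field inside the frame data; all frames are then synced for
 * DMA. Returns false when the checksum cannot be finished here, in
 * which case the caller drops the packet.
 */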
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* Data points to the beginning of the packet. Find the absolute
	 * offset of the checksum field within the packet: ipcso updates
	 * the IP checksum and tucso updates the TCP/UDP checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* The first frame contains the packet headers; the rest of the
	 * frames contain data. Calculate the checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
					   tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}

static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If the overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Copy data onto the Tx buffer up to the
				 * full frame size, then break and go to
				 * the next frame.
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_local(src);
				unmap = false;
			}

			/* Ensure all fragments have been processed */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_local(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_local(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

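/*
 * Generate a stable, locally administered unicast MAC address for the
 * interface, derived from the physical port number and a Jenkins hash
 * of the local XDomain UUID, so the address should be reproducible
 * across reboots on the same hardware.
 */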
static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 addr[ETH_ALEN];
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	addr[5] = hash & 0xff;
	eth_hw_addr_set(dev, addr);
}

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP takes advantage of TSO packets but instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate the checksum over the whole packet here.
	 *
	 * The receiving side does the opposite if the host OS supports
	 * LRO, otherwise it needs to split the large packet into MTU
	 * sized smaller packets.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features here.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
	return 0;
}

static int tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(tbnet_pm_ops, tbnet_suspend, tbnet_resume);

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = pm_sleep_ptr(&tbnet_pm_ops),
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	unsigned int flags;
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);

	flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
	if (tbnet_e2e)
		flags |= TBNET_E2E;
	tb_property_add_immediate(tbnet_dir, "prtcstns", flags);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&tbnet_driver);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	tb_unregister_property_dir("network", tbnet_dir);
err_free_dir:
	tb_property_free_dir(tbnet_dir);

	return ret;
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");