Commit | Line | Data |
---|---|---|
0e3d6777 | 1 | // SPDX-License-Identifier: ISC |
b40b15e1 LB |
2 | /* |
3 | * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> | |
b40b15e1 LB |
4 | */ |
5 | ||
db6bb5c6 | 6 | #include <linux/module.h> |
b40b15e1 LB |
7 | #include "mt76.h" |
8 | #include "usb_trace.h" | |
9 | #include "dma.h" | |
10 | ||
11 | #define MT_VEND_REQ_MAX_RETRY 10 | |
12 | #define MT_VEND_REQ_TOUT_MS 300 | |
13 | ||
c2908a0d LB |
14 | static bool disable_usb_sg; |
15 | module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644); | |
16 | MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support"); | |
17 | ||
b40b15e1 LB |
18 | static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, |
19 | u8 req_type, u16 val, u16 offset, | |
20 | void *buf, size_t len) | |
21 | { | |
80df01f4 LB |
22 | struct usb_interface *uintf = to_usb_interface(dev->dev); |
23 | struct usb_device *udev = interface_to_usbdev(uintf); | |
b40b15e1 LB |
24 | unsigned int pipe; |
25 | int i, ret; | |
26 | ||
af3076db LB |
27 | lockdep_assert_held(&dev->usb.usb_ctrl_mtx); |
28 | ||
b40b15e1 LB |
29 | pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0) |
30 | : usb_sndctrlpipe(udev, 0); | |
31 | for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { | |
32 | if (test_bit(MT76_REMOVED, &dev->state)) | |
33 | return -EIO; | |
34 | ||
35 | ret = usb_control_msg(udev, pipe, req, req_type, val, | |
36 | offset, buf, len, MT_VEND_REQ_TOUT_MS); | |
37 | if (ret == -ENODEV) | |
38 | set_bit(MT76_REMOVED, &dev->state); | |
39 | if (ret >= 0 || ret == -ENODEV) | |
40 | return ret; | |
41 | usleep_range(5000, 10000); | |
42 | } | |
43 | ||
44 | dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n", | |
45 | req, offset, ret); | |
46 | return ret; | |
47 | } | |
48 | ||
49 | int mt76u_vendor_request(struct mt76_dev *dev, u8 req, | |
50 | u8 req_type, u16 val, u16 offset, | |
51 | void *buf, size_t len) | |
52 | { | |
53 | int ret; | |
54 | ||
55 | mutex_lock(&dev->usb.usb_ctrl_mtx); | |
56 | ret = __mt76u_vendor_request(dev, req, req_type, | |
57 | val, offset, buf, len); | |
58 | trace_usb_reg_wr(dev, offset, val); | |
59 | mutex_unlock(&dev->usb.usb_ctrl_mtx); | |
60 | ||
61 | return ret; | |
62 | } | |
63 | EXPORT_SYMBOL_GPL(mt76u_vendor_request); | |
64 | ||
b40b15e1 LB |
65 | static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) |
66 | { | |
67 | struct mt76_usb *usb = &dev->usb; | |
68 | u32 data = ~0; | |
69 | u16 offset; | |
70 | int ret; | |
71 | u8 req; | |
72 | ||
73 | switch (addr & MT_VEND_TYPE_MASK) { | |
74 | case MT_VEND_TYPE_EEPROM: | |
75 | req = MT_VEND_READ_EEPROM; | |
76 | break; | |
77 | case MT_VEND_TYPE_CFG: | |
78 | req = MT_VEND_READ_CFG; | |
79 | break; | |
80 | default: | |
81 | req = MT_VEND_MULTI_READ; | |
82 | break; | |
83 | } | |
84 | offset = addr & ~MT_VEND_TYPE_MASK; | |
85 | ||
86 | ret = __mt76u_vendor_request(dev, req, | |
87 | USB_DIR_IN | USB_TYPE_VENDOR, | |
8f72e98e | 88 | 0, offset, &usb->reg_val, sizeof(__le32)); |
b40b15e1 | 89 | if (ret == sizeof(__le32)) |
8f72e98e | 90 | data = le32_to_cpu(usb->reg_val); |
b40b15e1 LB |
91 | trace_usb_reg_rr(dev, addr, data); |
92 | ||
93 | return data; | |
94 | } | |
95 | ||
5567b373 | 96 | static u32 mt76u_rr(struct mt76_dev *dev, u32 addr) |
b40b15e1 LB |
97 | { |
98 | u32 ret; | |
99 | ||
100 | mutex_lock(&dev->usb.usb_ctrl_mtx); | |
101 | ret = __mt76u_rr(dev, addr); | |
102 | mutex_unlock(&dev->usb.usb_ctrl_mtx); | |
103 | ||
104 | return ret; | |
105 | } | |
106 | ||
b40b15e1 LB |
107 | static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) |
108 | { | |
109 | struct mt76_usb *usb = &dev->usb; | |
110 | u16 offset; | |
111 | u8 req; | |
112 | ||
113 | switch (addr & MT_VEND_TYPE_MASK) { | |
114 | case MT_VEND_TYPE_CFG: | |
115 | req = MT_VEND_WRITE_CFG; | |
116 | break; | |
117 | default: | |
118 | req = MT_VEND_MULTI_WRITE; | |
119 | break; | |
120 | } | |
121 | offset = addr & ~MT_VEND_TYPE_MASK; | |
122 | ||
8f72e98e | 123 | usb->reg_val = cpu_to_le32(val); |
b40b15e1 LB |
124 | __mt76u_vendor_request(dev, req, |
125 | USB_DIR_OUT | USB_TYPE_VENDOR, 0, | |
8f72e98e | 126 | offset, &usb->reg_val, sizeof(__le32)); |
b40b15e1 LB |
127 | trace_usb_reg_wr(dev, addr, val); |
128 | } | |
129 | ||
5567b373 | 130 | static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) |
b40b15e1 LB |
131 | { |
132 | mutex_lock(&dev->usb.usb_ctrl_mtx); | |
133 | __mt76u_wr(dev, addr, val); | |
134 | mutex_unlock(&dev->usb.usb_ctrl_mtx); | |
135 | } | |
136 | ||
137 | static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, | |
138 | u32 mask, u32 val) | |
139 | { | |
140 | mutex_lock(&dev->usb.usb_ctrl_mtx); | |
141 | val |= __mt76u_rr(dev, addr) & ~mask; | |
142 | __mt76u_wr(dev, addr, val); | |
143 | mutex_unlock(&dev->usb.usb_ctrl_mtx); | |
144 | ||
145 | return val; | |
146 | } | |
147 | ||
148 | static void mt76u_copy(struct mt76_dev *dev, u32 offset, | |
149 | const void *data, int len) | |
150 | { | |
151 | struct mt76_usb *usb = &dev->usb; | |
152 | const u32 *val = data; | |
153 | int i, ret; | |
154 | ||
155 | mutex_lock(&usb->usb_ctrl_mtx); | |
850e8f6f | 156 | for (i = 0; i < DIV_ROUND_UP(len, 4); i++) { |
13381dcd | 157 | put_unaligned(val[i], (u32 *)usb->data); |
b40b15e1 LB |
158 | ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE, |
159 | USB_DIR_OUT | USB_TYPE_VENDOR, | |
160 | 0, offset + i * 4, usb->data, | |
b229bf7d | 161 | sizeof(u32)); |
b40b15e1 LB |
162 | if (ret < 0) |
163 | break; | |
164 | } | |
165 | mutex_unlock(&usb->usb_ctrl_mtx); | |
166 | } | |
167 | ||
168 | void mt76u_single_wr(struct mt76_dev *dev, const u8 req, | |
169 | const u16 offset, const u32 val) | |
170 | { | |
171 | mutex_lock(&dev->usb.usb_ctrl_mtx); | |
172 | __mt76u_vendor_request(dev, req, | |
173 | USB_DIR_OUT | USB_TYPE_VENDOR, | |
174 | val & 0xffff, offset, NULL, 0); | |
175 | __mt76u_vendor_request(dev, req, | |
176 | USB_DIR_OUT | USB_TYPE_VENDOR, | |
177 | val >> 16, offset + 2, NULL, 0); | |
178 | mutex_unlock(&dev->usb.usb_ctrl_mtx); | |
179 | } | |
180 | EXPORT_SYMBOL_GPL(mt76u_single_wr); | |
181 | ||
f1638c7c SG |
182 | static int |
183 | mt76u_req_wr_rp(struct mt76_dev *dev, u32 base, | |
184 | const struct mt76_reg_pair *data, int len) | |
185 | { | |
186 | struct mt76_usb *usb = &dev->usb; | |
187 | ||
188 | mutex_lock(&usb->usb_ctrl_mtx); | |
189 | while (len > 0) { | |
190 | __mt76u_wr(dev, base + data->reg, data->value); | |
191 | len--; | |
192 | data++; | |
193 | } | |
194 | mutex_unlock(&usb->usb_ctrl_mtx); | |
195 | ||
196 | return 0; | |
197 | } | |
198 | ||
17507157 LB |
199 | static int |
200 | mt76u_wr_rp(struct mt76_dev *dev, u32 base, | |
201 | const struct mt76_reg_pair *data, int n) | |
f1638c7c SG |
202 | { |
203 | if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state)) | |
17507157 | 204 | return dev->mcu_ops->mcu_wr_rp(dev, base, data, n); |
f1638c7c SG |
205 | else |
206 | return mt76u_req_wr_rp(dev, base, data, n); | |
207 | } | |
f1638c7c SG |
208 | |
209 | static int | |
210 | mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data, | |
211 | int len) | |
212 | { | |
213 | struct mt76_usb *usb = &dev->usb; | |
214 | ||
215 | mutex_lock(&usb->usb_ctrl_mtx); | |
216 | while (len > 0) { | |
217 | data->value = __mt76u_rr(dev, base + data->reg); | |
218 | len--; | |
219 | data++; | |
220 | } | |
221 | mutex_unlock(&usb->usb_ctrl_mtx); | |
222 | ||
223 | return 0; | |
224 | } | |
225 | ||
17507157 LB |
226 | static int |
227 | mt76u_rd_rp(struct mt76_dev *dev, u32 base, | |
228 | struct mt76_reg_pair *data, int n) | |
f1638c7c SG |
229 | { |
230 | if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state)) | |
17507157 | 231 | return dev->mcu_ops->mcu_rd_rp(dev, base, data, n); |
f1638c7c SG |
232 | else |
233 | return mt76u_req_rd_rp(dev, base, data, n); | |
234 | } | |
f1638c7c | 235 | |
63a7de5d LB |
236 | static bool mt76u_check_sg(struct mt76_dev *dev) |
237 | { | |
80df01f4 LB |
238 | struct usb_interface *uintf = to_usb_interface(dev->dev); |
239 | struct usb_device *udev = interface_to_usbdev(uintf); | |
63a7de5d | 240 | |
c2908a0d | 241 | return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && |
63a7de5d LB |
242 | (udev->bus->no_sg_constraint || |
243 | udev->speed == USB_SPEED_WIRELESS)); | |
244 | } | |
245 | ||
b40b15e1 LB |
246 | static int |
247 | mt76u_set_endpoints(struct usb_interface *intf, | |
248 | struct mt76_usb *usb) | |
249 | { | |
250 | struct usb_host_interface *intf_desc = intf->cur_altsetting; | |
251 | struct usb_endpoint_descriptor *ep_desc; | |
252 | int i, in_ep = 0, out_ep = 0; | |
253 | ||
254 | for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { | |
255 | ep_desc = &intf_desc->endpoint[i].desc; | |
256 | ||
257 | if (usb_endpoint_is_bulk_in(ep_desc) && | |
258 | in_ep < __MT_EP_IN_MAX) { | |
259 | usb->in_ep[in_ep] = usb_endpoint_num(ep_desc); | |
b40b15e1 LB |
260 | in_ep++; |
261 | } else if (usb_endpoint_is_bulk_out(ep_desc) && | |
262 | out_ep < __MT_EP_OUT_MAX) { | |
263 | usb->out_ep[out_ep] = usb_endpoint_num(ep_desc); | |
b40b15e1 LB |
264 | out_ep++; |
265 | } | |
266 | } | |
267 | ||
268 | if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX) | |
269 | return -EINVAL; | |
270 | return 0; | |
271 | } | |
272 | ||
273 | static int | |
1bb78d38 SG |
274 | mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, |
275 | int nsgs, gfp_t gfp) | |
b40b15e1 | 276 | { |
b40b15e1 LB |
277 | int i; |
278 | ||
279 | for (i = 0; i < nsgs; i++) { | |
280 | struct page *page; | |
281 | void *data; | |
282 | int offset; | |
283 | ||
92724071 | 284 | data = page_frag_alloc(&q->rx_page, q->buf_size, gfp); |
b40b15e1 LB |
285 | if (!data) |
286 | break; | |
287 | ||
288 | page = virt_to_head_page(data); | |
289 | offset = data - page_address(page); | |
f8f527b1 | 290 | sg_set_page(&urb->sg[i], page, q->buf_size, offset); |
b40b15e1 LB |
291 | } |
292 | ||
293 | if (i < nsgs) { | |
294 | int j; | |
295 | ||
296 | for (j = nsgs; j < urb->num_sgs; j++) | |
297 | skb_free_frag(sg_virt(&urb->sg[j])); | |
298 | urb->num_sgs = i; | |
299 | } | |
300 | ||
301 | urb->num_sgs = max_t(int, i, urb->num_sgs); | |
cf211051 | 302 | urb->transfer_buffer_length = urb->num_sgs * q->buf_size; |
b40b15e1 LB |
303 | sg_init_marker(urb->sg, urb->num_sgs); |
304 | ||
305 | return i ? : -ENOMEM; | |
306 | } | |
307 | ||
d704d16f | 308 | static int |
1bb78d38 | 309 | mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp) |
b40b15e1 | 310 | { |
1bb78d38 SG |
311 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; |
312 | ||
13381dcd | 313 | if (dev->usb.sg_en) |
1bb78d38 | 314 | return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp); |
13381dcd RL |
315 | |
316 | urb->transfer_buffer_length = q->buf_size; | |
317 | urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp); | |
318 | ||
319 | return urb->transfer_buffer ? 0 : -ENOMEM; | |
b40b15e1 | 320 | } |
b40b15e1 | 321 | |
888199b8 | 322 | static int |
14663f0c LB |
323 | mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e, |
324 | int sg_max_size) | |
d704d16f | 325 | { |
85d2955e | 326 | unsigned int size = sizeof(struct urb); |
d704d16f | 327 | |
85d2955e | 328 | if (dev->usb.sg_en) |
14663f0c | 329 | size += sg_max_size * sizeof(struct scatterlist); |
85d2955e SG |
330 | |
331 | e->urb = kzalloc(size, GFP_KERNEL); | |
332 | if (!e->urb) | |
d704d16f LB |
333 | return -ENOMEM; |
334 | ||
85d2955e SG |
335 | usb_init_urb(e->urb); |
336 | ||
337 | if (dev->usb.sg_en) | |
338 | e->urb->sg = (struct scatterlist *)(e->urb + 1); | |
d704d16f | 339 | |
48f5a90c SG |
340 | return 0; |
341 | } | |
342 | ||
343 | static int | |
344 | mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e) | |
345 | { | |
346 | int err; | |
347 | ||
14663f0c | 348 | err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE); |
48f5a90c SG |
349 | if (err) |
350 | return err; | |
351 | ||
14663f0c LB |
352 | return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE, |
353 | GFP_KERNEL); | |
d704d16f LB |
354 | } |
355 | ||
d7d4ea9a | 356 | static void mt76u_urb_free(struct urb *urb) |
b40b15e1 | 357 | { |
b40b15e1 LB |
358 | int i; |
359 | ||
cb1847cc LB |
360 | for (i = 0; i < urb->num_sgs; i++) |
361 | skb_free_frag(sg_virt(&urb->sg[i])); | |
cb83585e | 362 | |
26031b39 SG |
363 | if (urb->transfer_buffer) |
364 | skb_free_frag(urb->transfer_buffer); | |
d704d16f | 365 | |
d7d4ea9a | 366 | usb_free_urb(urb); |
b40b15e1 | 367 | } |
b40b15e1 | 368 | |
4de92bf1 LB |
369 | static void |
370 | mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, | |
d7d4ea9a | 371 | struct urb *urb, usb_complete_t complete_fn, |
4de92bf1 | 372 | void *context) |
b40b15e1 | 373 | { |
80df01f4 LB |
374 | struct usb_interface *uintf = to_usb_interface(dev->dev); |
375 | struct usb_device *udev = interface_to_usbdev(uintf); | |
b40b15e1 LB |
376 | unsigned int pipe; |
377 | ||
378 | if (dir == USB_DIR_IN) | |
379 | pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]); | |
380 | else | |
381 | pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); | |
382 | ||
d7d4ea9a SG |
383 | urb->dev = udev; |
384 | urb->pipe = pipe; | |
385 | urb->complete = complete_fn; | |
386 | urb->context = context; | |
4de92bf1 LB |
387 | } |
388 | ||
d7d4ea9a | 389 | static inline struct urb * |
e5fc742f | 390 | mt76u_get_next_rx_entry(struct mt76_dev *dev) |
b40b15e1 | 391 | { |
e5fc742f | 392 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; |
d7d4ea9a | 393 | struct urb *urb = NULL; |
b40b15e1 LB |
394 | unsigned long flags; |
395 | ||
396 | spin_lock_irqsave(&q->lock, flags); | |
397 | if (q->queued > 0) { | |
d7d4ea9a | 398 | urb = q->entry[q->head].urb; |
b40b15e1 LB |
399 | q->head = (q->head + 1) % q->ndesc; |
400 | q->queued--; | |
401 | } | |
402 | spin_unlock_irqrestore(&q->lock, flags); | |
403 | ||
d7d4ea9a | 404 | return urb; |
b40b15e1 LB |
405 | } |
406 | ||
407 | static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) | |
408 | { | |
409 | u16 dma_len, min_len; | |
410 | ||
411 | dma_len = get_unaligned_le16(data); | |
412 | min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + | |
413 | MT_FCE_INFO_LEN; | |
414 | ||
09dbcd8b LB |
415 | if (data_len < min_len || !dma_len || |
416 | dma_len + MT_DMA_HDR_LEN > data_len || | |
417 | (dma_len & 0x3)) | |
b40b15e1 LB |
418 | return -EINVAL; |
419 | return dma_len; | |
420 | } | |
421 | ||
2a92b08b LB |
422 | static struct sk_buff * |
423 | mt76u_build_rx_skb(void *data, int len, int buf_size) | |
424 | { | |
425 | struct sk_buff *skb; | |
426 | ||
427 | if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) { | |
428 | struct page *page; | |
429 | ||
430 | /* slow path, not enough space for data and | |
431 | * skb_shared_info | |
432 | */ | |
433 | skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC); | |
434 | if (!skb) | |
435 | return NULL; | |
436 | ||
437 | skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN); | |
438 | data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN); | |
439 | page = virt_to_head_page(data); | |
440 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | |
441 | page, data - page_address(page), | |
442 | len - MT_SKB_HEAD_LEN, buf_size); | |
443 | ||
444 | return skb; | |
445 | } | |
446 | ||
447 | /* fast path */ | |
448 | skb = build_skb(data, buf_size); | |
449 | if (!skb) | |
450 | return NULL; | |
451 | ||
452 | skb_reserve(skb, MT_DMA_HDR_LEN); | |
453 | __skb_put(skb, len); | |
454 | ||
455 | return skb; | |
456 | } | |
457 | ||
b40b15e1 | 458 | static int |
d7d4ea9a | 459 | mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) |
b40b15e1 LB |
460 | { |
461 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
26031b39 SG |
462 | u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer; |
463 | int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length; | |
464 | int len, nsgs = 1; | |
b40b15e1 LB |
465 | struct sk_buff *skb; |
466 | ||
467 | if (!test_bit(MT76_STATE_INITIALIZED, &dev->state)) | |
468 | return 0; | |
469 | ||
470 | len = mt76u_get_rx_entry_len(data, urb->actual_length); | |
471 | if (len < 0) | |
472 | return 0; | |
473 | ||
f7522949 | 474 | data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN); |
2a92b08b | 475 | skb = mt76u_build_rx_skb(data, data_len, q->buf_size); |
b40b15e1 LB |
476 | if (!skb) |
477 | return 0; | |
478 | ||
b40b15e1 | 479 | len -= data_len; |
200abe6a | 480 | while (len > 0 && nsgs < urb->num_sgs) { |
b40b15e1 LB |
481 | data_len = min_t(int, len, urb->sg[nsgs].length); |
482 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | |
483 | sg_page(&urb->sg[nsgs]), | |
484 | urb->sg[nsgs].offset, | |
485 | data_len, q->buf_size); | |
486 | len -= data_len; | |
487 | nsgs++; | |
488 | } | |
489 | dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb); | |
490 | ||
491 | return nsgs; | |
492 | } | |
493 | ||
494 | static void mt76u_complete_rx(struct urb *urb) | |
495 | { | |
496 | struct mt76_dev *dev = urb->context; | |
497 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
498 | unsigned long flags; | |
499 | ||
e0168dc6 LB |
500 | trace_rx_urb(dev, urb); |
501 | ||
b40b15e1 LB |
502 | switch (urb->status) { |
503 | case -ECONNRESET: | |
504 | case -ESHUTDOWN: | |
505 | case -ENOENT: | |
506 | return; | |
507 | default: | |
4bfff1ec LB |
508 | dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", |
509 | urb->status); | |
b40b15e1 LB |
510 | /* fall through */ |
511 | case 0: | |
512 | break; | |
513 | } | |
514 | ||
515 | spin_lock_irqsave(&q->lock, flags); | |
d7d4ea9a | 516 | if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch")) |
b40b15e1 LB |
517 | goto out; |
518 | ||
519 | q->tail = (q->tail + 1) % q->ndesc; | |
520 | q->queued++; | |
521 | tasklet_schedule(&dev->usb.rx_tasklet); | |
522 | out: | |
523 | spin_unlock_irqrestore(&q->lock, flags); | |
524 | } | |
525 | ||
a5ba16eb | 526 | static int |
d7d4ea9a | 527 | mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb) |
a5ba16eb | 528 | { |
d7d4ea9a | 529 | mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb, |
a5ba16eb | 530 | mt76u_complete_rx, dev); |
d7d4ea9a | 531 | trace_submit_urb(dev, urb); |
a5ba16eb | 532 | |
d7d4ea9a | 533 | return usb_submit_urb(urb, GFP_ATOMIC); |
a5ba16eb SG |
534 | } |
535 | ||
b40b15e1 LB |
536 | static void mt76u_rx_tasklet(unsigned long data) |
537 | { | |
538 | struct mt76_dev *dev = (struct mt76_dev *)data; | |
d7d4ea9a | 539 | struct urb *urb; |
f7522949 | 540 | int err, count; |
b40b15e1 LB |
541 | |
542 | rcu_read_lock(); | |
543 | ||
544 | while (true) { | |
e5fc742f | 545 | urb = mt76u_get_next_rx_entry(dev); |
d7d4ea9a | 546 | if (!urb) |
b40b15e1 LB |
547 | break; |
548 | ||
d7d4ea9a | 549 | count = mt76u_process_rx_entry(dev, urb); |
f7522949 | 550 | if (count > 0) { |
1bb78d38 | 551 | err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC); |
b40b15e1 LB |
552 | if (err < 0) |
553 | break; | |
554 | } | |
d7d4ea9a | 555 | mt76u_submit_rx_buf(dev, urb); |
b40b15e1 LB |
556 | } |
557 | mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); | |
558 | ||
559 | rcu_read_unlock(); | |
560 | } | |
561 | ||
39d501d9 | 562 | static int mt76u_submit_rx_buffers(struct mt76_dev *dev) |
b40b15e1 LB |
563 | { |
564 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
565 | unsigned long flags; | |
566 | int i, err = 0; | |
567 | ||
568 | spin_lock_irqsave(&q->lock, flags); | |
569 | for (i = 0; i < q->ndesc; i++) { | |
d7d4ea9a | 570 | err = mt76u_submit_rx_buf(dev, q->entry[i].urb); |
b40b15e1 LB |
571 | if (err < 0) |
572 | break; | |
573 | } | |
574 | q->head = q->tail = 0; | |
575 | q->queued = 0; | |
576 | spin_unlock_irqrestore(&q->lock, flags); | |
577 | ||
578 | return err; | |
579 | } | |
b40b15e1 LB |
580 | |
581 | static int mt76u_alloc_rx(struct mt76_dev *dev) | |
582 | { | |
fc994dbb | 583 | struct mt76_usb *usb = &dev->usb; |
b40b15e1 | 584 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; |
f7522949 | 585 | int i, err; |
b40b15e1 | 586 | |
fc994dbb SG |
587 | usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL); |
588 | if (!usb->mcu.data) | |
589 | return -ENOMEM; | |
590 | ||
b40b15e1 | 591 | spin_lock_init(&q->lock); |
329e0989 KC |
592 | q->entry = devm_kcalloc(dev->dev, |
593 | MT_NUM_RX_ENTRIES, sizeof(*q->entry), | |
b40b15e1 LB |
594 | GFP_KERNEL); |
595 | if (!q->entry) | |
596 | return -ENOMEM; | |
597 | ||
cb83585e | 598 | q->ndesc = MT_NUM_RX_ENTRIES; |
14663f0c LB |
599 | q->buf_size = PAGE_SIZE; |
600 | ||
cb83585e | 601 | for (i = 0; i < q->ndesc; i++) { |
48f5a90c | 602 | err = mt76u_rx_urb_alloc(dev, &q->entry[i]); |
b40b15e1 LB |
603 | if (err < 0) |
604 | return err; | |
605 | } | |
b40b15e1 LB |
606 | |
607 | return mt76u_submit_rx_buffers(dev); | |
608 | } | |
609 | ||
610 | static void mt76u_free_rx(struct mt76_dev *dev) | |
611 | { | |
612 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
c12128ce | 613 | struct page *page; |
b40b15e1 LB |
614 | int i; |
615 | ||
616 | for (i = 0; i < q->ndesc; i++) | |
d7d4ea9a | 617 | mt76u_urb_free(q->entry[i].urb); |
c12128ce FF |
618 | |
619 | if (!q->rx_page.va) | |
069e2d34 | 620 | return; |
c12128ce FF |
621 | |
622 | page = virt_to_page(q->rx_page.va); | |
623 | __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); | |
624 | memset(&q->rx_page, 0, sizeof(q->rx_page)); | |
b40b15e1 LB |
625 | } |
626 | ||
39d501d9 | 627 | void mt76u_stop_rx(struct mt76_dev *dev) |
b40b15e1 LB |
628 | { |
629 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
630 | int i; | |
631 | ||
632 | for (i = 0; i < q->ndesc; i++) | |
39d501d9 SG |
633 | usb_poison_urb(q->entry[i].urb); |
634 | ||
635 | tasklet_kill(&dev->usb.rx_tasklet); | |
636 | } | |
637 | EXPORT_SYMBOL_GPL(mt76u_stop_rx); | |
638 | ||
639 | int mt76u_resume_rx(struct mt76_dev *dev) | |
640 | { | |
641 | struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; | |
642 | int i; | |
643 | ||
644 | for (i = 0; i < q->ndesc; i++) | |
645 | usb_unpoison_urb(q->entry[i].urb); | |
646 | ||
647 | return mt76u_submit_rx_buffers(dev); | |
b40b15e1 | 648 | } |
39d501d9 | 649 | EXPORT_SYMBOL_GPL(mt76u_resume_rx); |
b40b15e1 | 650 | |
b40b15e1 LB |
651 | static void mt76u_tx_tasklet(unsigned long data) |
652 | { | |
653 | struct mt76_dev *dev = (struct mt76_dev *)data; | |
e207afa0 | 654 | struct mt76_queue_entry entry; |
af005f26 | 655 | struct mt76_sw_queue *sq; |
b40b15e1 LB |
656 | struct mt76_queue *q; |
657 | bool wake; | |
658 | int i; | |
659 | ||
660 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | |
ce0fd825 LB |
661 | u32 n_dequeued = 0, n_sw_dequeued = 0; |
662 | ||
af005f26 LB |
663 | sq = &dev->q_tx[i]; |
664 | q = sq->q; | |
b40b15e1 | 665 | |
ce0fd825 LB |
666 | while (q->queued > n_dequeued) { |
667 | if (!q->entry[q->head].done) | |
b40b15e1 LB |
668 | break; |
669 | ||
b40b15e1 LB |
670 | if (q->entry[q->head].schedule) { |
671 | q->entry[q->head].schedule = false; | |
ce0fd825 | 672 | n_sw_dequeued++; |
b40b15e1 LB |
673 | } |
674 | ||
e207afa0 | 675 | entry = q->entry[q->head]; |
ce0fd825 | 676 | q->entry[q->head].done = false; |
b40b15e1 | 677 | q->head = (q->head + 1) % q->ndesc; |
ce0fd825 | 678 | n_dequeued++; |
e207afa0 | 679 | |
e226ba2e | 680 | dev->drv->tx_complete_skb(dev, i, &entry); |
b40b15e1 | 681 | } |
cd44bc40 | 682 | |
ce0fd825 LB |
683 | spin_lock_bh(&q->lock); |
684 | ||
685 | sq->swq_queued -= n_sw_dequeued; | |
686 | q->queued -= n_dequeued; | |
687 | ||
cd44bc40 LB |
688 | wake = q->stopped && q->queued < q->ndesc - 8; |
689 | if (wake) | |
690 | q->stopped = false; | |
691 | ||
b40b15e1 LB |
692 | if (!q->queued) |
693 | wake_up(&dev->tx_wait); | |
694 | ||
695 | spin_unlock_bh(&q->lock); | |
696 | ||
90fdc171 FF |
697 | mt76_txq_schedule(dev, i); |
698 | ||
b40b15e1 LB |
699 | if (!test_and_set_bit(MT76_READING_STATS, &dev->state)) |
700 | ieee80211_queue_delayed_work(dev->hw, | |
701 | &dev->usb.stat_work, | |
702 | msecs_to_jiffies(10)); | |
703 | ||
704 | if (wake) | |
705 | ieee80211_wake_queue(dev->hw, i); | |
706 | } | |
707 | } | |
708 | ||
709 | static void mt76u_tx_status_data(struct work_struct *work) | |
710 | { | |
711 | struct mt76_usb *usb; | |
712 | struct mt76_dev *dev; | |
713 | u8 update = 1; | |
714 | u16 count = 0; | |
715 | ||
716 | usb = container_of(work, struct mt76_usb, stat_work.work); | |
717 | dev = container_of(usb, struct mt76_dev, usb); | |
718 | ||
719 | while (true) { | |
720 | if (test_bit(MT76_REMOVED, &dev->state)) | |
721 | break; | |
722 | ||
723 | if (!dev->drv->tx_status_data(dev, &update)) | |
724 | break; | |
725 | count++; | |
726 | } | |
727 | ||
728 | if (count && test_bit(MT76_STATE_RUNNING, &dev->state)) | |
729 | ieee80211_queue_delayed_work(dev->hw, &usb->stat_work, | |
730 | msecs_to_jiffies(10)); | |
731 | else | |
732 | clear_bit(MT76_READING_STATS, &dev->state); | |
733 | } | |
734 | ||
735 | static void mt76u_complete_tx(struct urb *urb) | |
736 | { | |
112f980a | 737 | struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev); |
279ade99 | 738 | struct mt76_queue_entry *e = urb->context; |
b40b15e1 LB |
739 | |
740 | if (mt76u_urb_error(urb)) | |
741 | dev_err(dev->dev, "tx urb failed: %d\n", urb->status); | |
279ade99 | 742 | e->done = true; |
b40b15e1 | 743 | |
a33b8ab8 | 744 | tasklet_schedule(&dev->tx_tasklet); |
b40b15e1 LB |
745 | } |
746 | ||
747 | static int | |
26031b39 SG |
748 | mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, |
749 | struct urb *urb) | |
b40b15e1 | 750 | { |
26031b39 | 751 | urb->transfer_buffer_length = skb->len; |
b40b15e1 | 752 | |
26031b39 SG |
753 | if (!dev->usb.sg_en) { |
754 | urb->transfer_buffer = skb->data; | |
755 | return 0; | |
26031b39 | 756 | } |
13381dcd RL |
757 | |
758 | sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE); | |
759 | urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); | |
760 | if (!urb->num_sgs) | |
761 | return -ENOMEM; | |
762 | ||
763 | return urb->num_sgs; | |
b40b15e1 LB |
764 | } |
765 | ||
766 | static int | |
89a37842 | 767 | mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, |
b40b15e1 LB |
768 | struct sk_buff *skb, struct mt76_wcid *wcid, |
769 | struct ieee80211_sta *sta) | |
770 | { | |
af005f26 | 771 | struct mt76_queue *q = dev->q_tx[qid].q; |
cfaae9e6 LB |
772 | struct mt76_tx_info tx_info = { |
773 | .skb = skb, | |
774 | }; | |
b40b15e1 | 775 | u16 idx = q->tail; |
b40b15e1 LB |
776 | int err; |
777 | ||
778 | if (q->queued == q->ndesc) | |
779 | return -ENOSPC; | |
780 | ||
88046b2c | 781 | skb->prev = skb->next = NULL; |
cfaae9e6 | 782 | err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); |
b40b15e1 LB |
783 | if (err < 0) |
784 | return err; | |
785 | ||
cfaae9e6 | 786 | err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb); |
4de92bf1 LB |
787 | if (err < 0) |
788 | return err; | |
b40b15e1 | 789 | |
4de92bf1 | 790 | mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx), |
cfaae9e6 LB |
791 | q->entry[idx].urb, mt76u_complete_tx, |
792 | &q->entry[idx]); | |
b40b15e1 LB |
793 | |
794 | q->tail = (q->tail + 1) % q->ndesc; | |
cfaae9e6 | 795 | q->entry[idx].skb = tx_info.skb; |
b40b15e1 LB |
796 | q->queued++; |
797 | ||
798 | return idx; | |
799 | } | |
800 | ||
801 | static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) | |
802 | { | |
d7d4ea9a | 803 | struct urb *urb; |
b40b15e1 LB |
804 | int err; |
805 | ||
806 | while (q->first != q->tail) { | |
d7d4ea9a | 807 | urb = q->entry[q->first].urb; |
e0168dc6 | 808 | |
d7d4ea9a SG |
809 | trace_submit_urb(dev, urb); |
810 | err = usb_submit_urb(urb, GFP_ATOMIC); | |
b40b15e1 LB |
811 | if (err < 0) { |
812 | if (err == -ENODEV) | |
813 | set_bit(MT76_REMOVED, &dev->state); | |
814 | else | |
815 | dev_err(dev->dev, "tx urb submit failed:%d\n", | |
816 | err); | |
817 | break; | |
818 | } | |
819 | q->first = (q->first + 1) % q->ndesc; | |
820 | } | |
821 | } | |
822 | ||
823 | static int mt76u_alloc_tx(struct mt76_dev *dev) | |
824 | { | |
b40b15e1 | 825 | struct mt76_queue *q; |
48f5a90c | 826 | int i, j, err; |
b40b15e1 | 827 | |
8300ee7c | 828 | for (i = 0; i <= MT_TXQ_PSD; i++) { |
af005f26 LB |
829 | INIT_LIST_HEAD(&dev->q_tx[i].swq); |
830 | ||
8300ee7c SG |
831 | if (i >= IEEE80211_NUM_ACS) { |
832 | dev->q_tx[i].q = dev->q_tx[0].q; | |
833 | continue; | |
834 | } | |
835 | ||
af005f26 LB |
836 | q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); |
837 | if (!q) | |
838 | return -ENOMEM; | |
839 | ||
b40b15e1 | 840 | spin_lock_init(&q->lock); |
1d0496c6 | 841 | q->hw_idx = mt76_ac_to_hwq(i); |
af005f26 | 842 | dev->q_tx[i].q = q; |
b40b15e1 | 843 | |
329e0989 KC |
844 | q->entry = devm_kcalloc(dev->dev, |
845 | MT_NUM_TX_ENTRIES, sizeof(*q->entry), | |
b40b15e1 LB |
846 | GFP_KERNEL); |
847 | if (!q->entry) | |
848 | return -ENOMEM; | |
849 | ||
850 | q->ndesc = MT_NUM_TX_ENTRIES; | |
851 | for (j = 0; j < q->ndesc; j++) { | |
14663f0c LB |
852 | err = mt76u_urb_alloc(dev, &q->entry[j], |
853 | MT_TX_SG_MAX_SIZE); | |
48f5a90c SG |
854 | if (err < 0) |
855 | return err; | |
b40b15e1 LB |
856 | } |
857 | } | |
858 | return 0; | |
859 | } | |
860 | ||
861 | static void mt76u_free_tx(struct mt76_dev *dev) | |
862 | { | |
863 | struct mt76_queue *q; | |
864 | int i, j; | |
865 | ||
866 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | |
af005f26 | 867 | q = dev->q_tx[i].q; |
b40b15e1 | 868 | for (j = 0; j < q->ndesc; j++) |
d7d4ea9a | 869 | usb_free_urb(q->entry[j].urb); |
b40b15e1 LB |
870 | } |
871 | } | |
872 | ||
39d501d9 | 873 | void mt76u_stop_tx(struct mt76_dev *dev) |
b40b15e1 | 874 | { |
39d501d9 | 875 | struct mt76_queue_entry entry; |
b40b15e1 | 876 | struct mt76_queue *q; |
39d501d9 | 877 | int i, j, ret; |
b40b15e1 | 878 | |
13381dcd RL |
879 | ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), |
880 | HZ / 5); | |
39d501d9 SG |
881 | if (!ret) { |
882 | dev_err(dev->dev, "timed out waiting for pending tx\n"); | |
b40b15e1 | 883 | |
39d501d9 SG |
884 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
885 | q = dev->q_tx[i].q; | |
886 | for (j = 0; j < q->ndesc; j++) | |
887 | usb_kill_urb(q->entry[j].urb); | |
888 | } | |
b40b15e1 | 889 | |
39d501d9 SG |
890 | tasklet_kill(&dev->tx_tasklet); |
891 | ||
892 | /* On device removal we maight queue skb's, but mt76u_tx_kick() | |
893 | * will fail to submit urb, cleanup those skb's manually. | |
894 | */ | |
895 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { | |
896 | q = dev->q_tx[i].q; | |
897 | ||
898 | /* Assure we are in sync with killed tasklet. */ | |
899 | spin_lock_bh(&q->lock); | |
900 | while (q->queued) { | |
901 | entry = q->entry[q->head]; | |
902 | q->head = (q->head + 1) % q->ndesc; | |
903 | q->queued--; | |
904 | ||
905 | dev->drv->tx_complete_skb(dev, i, &entry); | |
906 | } | |
907 | spin_unlock_bh(&q->lock); | |
908 | } | |
909 | } | |
b40b15e1 | 910 | |
b40b15e1 LB |
911 | cancel_delayed_work_sync(&dev->usb.stat_work); |
912 | clear_bit(MT76_READING_STATS, &dev->state); | |
39d501d9 SG |
913 | |
914 | mt76_tx_status_check(dev, NULL, true); | |
b40b15e1 | 915 | } |
39d501d9 | 916 | EXPORT_SYMBOL_GPL(mt76u_stop_tx); |
b40b15e1 LB |
917 | |
/* Tear down both data paths: stop rx and tx, then release their
 * buffers and URBs.
 */
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
927 | ||
/* Allocate the rx queue first, then the tx queues. */
int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err = mt76u_alloc_rx(dev);

	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
939 | ||
940 | static const struct mt76_queue_ops usb_queue_ops = { | |
941 | .tx_queue_skb = mt76u_tx_queue_skb, | |
942 | .kick = mt76u_tx_kick, | |
943 | }; | |
944 | ||
945 | int mt76u_init(struct mt76_dev *dev, | |
946 | struct usb_interface *intf) | |
947 | { | |
948 | static const struct mt76_bus_ops mt76u_ops = { | |
949 | .rr = mt76u_rr, | |
950 | .wr = mt76u_wr, | |
951 | .rmw = mt76u_rmw, | |
35e4ebea | 952 | .write_copy = mt76u_copy, |
6da5a291 SG |
953 | .wr_rp = mt76u_wr_rp, |
954 | .rd_rp = mt76u_rd_rp, | |
c50479fa | 955 | .type = MT76_BUS_USB, |
b40b15e1 | 956 | }; |
80df01f4 | 957 | struct usb_device *udev = interface_to_usbdev(intf); |
b40b15e1 LB |
958 | struct mt76_usb *usb = &dev->usb; |
959 | ||
960 | tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); | |
a33b8ab8 | 961 | tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev); |
b40b15e1 LB |
962 | INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data); |
963 | skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]); | |
964 | ||
b40b15e1 LB |
965 | mutex_init(&usb->mcu.mutex); |
966 | ||
967 | mutex_init(&usb->usb_ctrl_mtx); | |
968 | dev->bus = &mt76u_ops; | |
969 | dev->queue_ops = &usb_queue_ops; | |
970 | ||
80df01f4 LB |
971 | dev_set_drvdata(&udev->dev, dev); |
972 | ||
63a7de5d LB |
973 | usb->sg_en = mt76u_check_sg(dev); |
974 | ||
b40b15e1 LB |
975 | return mt76u_set_endpoints(intf, usb); |
976 | } | |
977 | EXPORT_SYMBOL_GPL(mt76u_init); | |
978 | ||
979 | MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>"); | |
980 | MODULE_LICENSE("Dual BSD/GPL"); |