mt76: usb: introduce disable_usb_sg parameter
drivers/net/wireless/mediatek/mt76/usb.c
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS     300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable USB scatter-gather support");

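/*
 * Usage sketch (module name assumed to be mt76-usb, i.e. mt76_usb in
 * sysfs; adjust to the actual build):
 *
 *      modprobe mt76-usb disable_usb_sg=1
 *      echo 1 > /sys/module/mt76_usb/parameters/disable_usb_sg
 *
 * The 0644 permissions make the knob writable at runtime, but it is only
 * evaluated when a device is probed (see mt76u_check_sg() below).
 */
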
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                                  u8 req_type, u16 val, u16 offset,
                                  void *buf, size_t len)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);
        unsigned int pipe;
        int i, ret;

        pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                                       : usb_sndctrlpipe(udev, 0);
        for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        return -EIO;

                ret = usb_control_msg(udev, pipe, req, req_type, val,
                                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
                if (ret == -ENODEV)
                        set_bit(MT76_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
                usleep_range(5000, 10000);
        }

        dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
                req, offset, ret);
        return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len)
{
        int ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_vendor_request(dev, req, req_type,
                                     val, offset, buf, len);
        trace_usb_reg_wr(dev, offset, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

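/*
 * Register accessors: the high bits of a register address select the
 * vendor request used on the wire (EEPROM/CFG/multi read-write), while
 * the bits below MT_VEND_TYPE_MASK carry the actual offset.
 */
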
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        u16 offset;
        int ret;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_EEPROM:
                req = MT_VEND_READ_EEPROM;
                break;
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_READ_CFG;
                break;
        default:
                req = MT_VEND_MULTI_READ;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        ret = __mt76u_vendor_request(dev, req,
                                     USB_DIR_IN | USB_TYPE_VENDOR,
                                     0, offset, usb->data, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = get_unaligned_le32(usb->data);
        trace_usb_reg_rr(dev, addr, data);

        return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        u32 ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_rr(dev, addr);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        struct mt76_usb *usb = &dev->usb;
        u16 offset;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_WRITE_CFG;
                break;
        default:
                req = MT_VEND_MULTI_WRITE;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        put_unaligned_le32(val, usb->data);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0,
                               offset, usb->data, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        val |= __mt76u_rr(dev, addr) & ~mask;
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return val;
}

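/*
 * Write a buffer to consecutive registers as a series of 32-bit vendor
 * requests; len is in bytes, any trailing remainder smaller than a word
 * is not transferred.
 */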
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
{
        struct mt76_usb *usb = &dev->usb;
        const u32 *val = data;
        int i, ret;

        mutex_lock(&usb->usb_ctrl_mtx);
        for (i = 0; i < (len / 4); i++) {
                put_unaligned_le32(val[i], usb->data);
                ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                                             USB_DIR_OUT | USB_TYPE_VENDOR,
                                             0, offset + i * 4, usb->data,
                                             sizeof(__le32));
                if (ret < 0)
                        break;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val & 0xffff, offset, NULL, 0);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val >> 16, offset + 2, NULL, 0);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

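/*
 * Register-pair helpers: once the MCU firmware is up
 * (MT76_STATE_MCU_RUNNING), reads and writes are relayed through the
 * per-driver MCU ops; before that they fall back to the plain vendor
 * request variants below.
 */
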
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
                const struct mt76_reg_pair *data, int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                __mt76u_wr(dev, base + data->reg, data->value);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
            const struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
        else
                return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
                int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                data->value = __mt76u_rr(dev, base + data->reg);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
            struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
        else
                return mt76u_req_rd_rp(dev, base, data, n);
}

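/*
 * Scatter-gather is usable only when the host controller supports sg
 * lists (sg_tablesize > 0) and either has no size constraint on them or
 * drives a USB wireless-speed device; the new disable_usb_sg module
 * parameter overrides the autodetection.
 */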
static bool mt76u_check_sg(struct mt76_dev *dev)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);

        return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
                (udev->bus->no_sg_constraint ||
                 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
{
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        int i, in_ep = 0, out_ep = 0;

        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc) &&
                    in_ep < __MT_EP_IN_MAX) {
                        usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
                        usb->in_max_packet = usb_endpoint_maxp(ep_desc);
                        in_ep++;
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           out_ep < __MT_EP_OUT_MAX) {
                        usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
                        usb->out_max_packet = usb_endpoint_maxp(ep_desc);
                        out_ep++;
                }
        }

        if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
                return -EINVAL;
        return 0;
}

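/*
 * Populate the rx urb sg list with fragments taken from the queue page
 * pool; returns the number of entries filled, or -ENOMEM if no fragment
 * could be allocated at all.
 */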
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
                 int nsgs, int len, int sglen)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = buf->urb;
        int i;

        spin_lock_bh(&q->rx_page_lock);
        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;

                data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
                if (!data)
                        break;

                page = virt_to_head_page(data);
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, sglen, offset);
        }
        spin_unlock_bh(&q->rx_page_lock);

        if (i < nsgs) {
                int j;

                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        buf->len = urb->num_sgs * sglen;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? : -ENOMEM;
}

static int
mt76u_buf_alloc_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
                   int nsgs, int len, int sglen, gfp_t gfp)
{
        buf->urb = usb_alloc_urb(0, gfp);
        if (!buf->urb)
                return -ENOMEM;

        buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
                                    gfp);
        if (!buf->urb->sg)
                return -ENOMEM;

        sg_init_table(buf->urb->sg, nsgs);
        buf->dev = dev;

        return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}

int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
                    int len, int data_len, gfp_t gfp)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

        buf->urb = usb_alloc_urb(0, gfp);
        if (!buf->urb)
                return -ENOMEM;

        buf->buf = page_frag_alloc(&q->rx_page, len, gfp);
        if (!buf->buf)
                return -ENOMEM;

        buf->len = data_len;
        buf->dev = dev;

        return 0;
}

void mt76u_buf_free(struct mt76u_buf *buf)
{
        struct urb *urb = buf->urb;
        struct scatterlist *sg;
        int i;

        for (i = 0; i < urb->num_sgs; i++) {
                sg = &urb->sg[i];
                if (!sg)
                        continue;

                skb_free_frag(sg_virt(sg));
        }
        if (buf->buf)
                skb_free_frag(buf->buf);

        usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

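/*
 * Fill and submit a bulk urb on the in/out endpoint selected by
 * dir/index; when scatter-gather is in use the transfer buffer is NULL
 * and the urb->sg list set up beforehand is used instead.
 */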
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
                     struct mt76u_buf *buf, gfp_t gfp,
                     usb_complete_t complete_fn, void *context)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);
        u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

        usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
                          complete_fn, context);
        trace_submit_urb(dev, buf->urb);

        return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
        struct mt76u_buf *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                buf = &q->entry[q->head].ubuf;
                q->head = (q->head + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return buf;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
        u16 dma_len, min_len;

        dma_len = get_unaligned_le16(data);
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;

        if (data_len < min_len || !dma_len ||
            dma_len + MT_DMA_HDR_LEN > data_len ||
            (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
}

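/*
 * An rx transfer starts with a DMA header whose first 16 bits hold the
 * little-endian DMA length. The first fragment (minus the header)
 * becomes the skb head; any remaining data in the following sg entries
 * is attached as page fragments.
 */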
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = buf->urb;
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
        int data_len, len, nsgs = 1;
        struct sk_buff *skb;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
                return 0;

        len = mt76u_get_rx_entry_len(data, urb->actual_length);
        if (len < 0)
                return 0;

        data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
        data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
        if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
                return 0;

        skb = build_skb(data, q->buf_size);
        if (!skb)
                return 0;

        skb_reserve(skb, MT_DMA_HDR_LEN);
        __skb_put(skb, data_len);
        len -= data_len;

        while (len > 0 && urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset,
                                data_len, q->buf_size);
                len -= data_len;
                nsgs++;
        }
        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

        return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = urb->context;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
                goto out;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet);
out:
        spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
                struct mt76u_buf *buf, int nsgs)
{
        if (dev->usb.sg_en) {
                return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
                                        SKB_WITH_OVERHEAD(q->buf_size));
        } else {
                buf->buf = page_frag_alloc(&q->rx_page, q->buf_size,
                                           GFP_ATOMIC);
                return buf->buf ? 0 : -ENOMEM;
        }
}

static void mt76u_rx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct mt76u_buf *buf;
        int err, count;

        rcu_read_lock();

        while (true) {
                buf = mt76u_get_next_rx_entry(q);
                if (!buf)
                        break;

                count = mt76u_process_rx_entry(dev, buf);
                if (count > 0) {
                        err = mt76u_refill_rx(dev, q, buf, count);
                        if (err < 0)
                                break;
                }
                mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
                                 buf, GFP_ATOMIC,
                                 mt76u_complete_rx, dev);
        }
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;
        int i, err = 0;

        spin_lock_irqsave(&q->lock, flags);
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
                                       &q->entry[i].ubuf, GFP_ATOMIC,
                                       mt76u_complete_rx, dev);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        spin_unlock_irqrestore(&q->lock, flags);

        return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err;

        spin_lock_init(&q->rx_page_lock);
        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
        q->ndesc = MT_NUM_RX_ENTRIES;
        for (i = 0; i < q->ndesc; i++) {
                if (dev->usb.sg_en)
                        err = mt76u_buf_alloc_sg(dev, &q->entry[i].ubuf,
                                                 MT_SG_MAX_SIZE, q->buf_size,
                                                 SKB_WITH_OVERHEAD(q->buf_size),
                                                 GFP_KERNEL);
                else
                        err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
                                              q->buf_size,
                                              SKB_WITH_OVERHEAD(q->buf_size),
                                              GFP_KERNEL);
                if (err < 0)
                        return err;
        }

        return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct page *page;
        int i;

        for (i = 0; i < q->ndesc; i++)
                mt76u_buf_free(&q->entry[i].ubuf);

        spin_lock_bh(&q->rx_page_lock);
        if (!q->rx_page.va)
                goto out;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
        spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_kill_urb(q->entry[i].ubuf.urb);
}

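/*
 * Reap completed tx urbs on every AC queue, report them through
 * tx_complete_skb and wake the corresponding mac80211 queue again once
 * fewer than ndesc - 8 entries are left pending.
 */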
static void mt76u_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue_entry entry;
        struct mt76u_buf *buf;
        struct mt76_queue *q;
        bool wake;
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];

                spin_lock_bh(&q->lock);
                while (true) {
                        buf = &q->entry[q->head].ubuf;
                        if (!buf->done || !q->queued)
                                break;

                        if (q->entry[q->head].schedule) {
                                q->entry[q->head].schedule = false;
                                q->swq_queued--;
                        }

                        entry = q->entry[q->head];
                        q->head = (q->head + 1) % q->ndesc;
                        q->queued--;

                        spin_unlock_bh(&q->lock);
                        dev->drv->tx_complete_skb(dev, q, &entry, false);
                        spin_lock_bh(&q->lock);
                }
                mt76_txq_schedule(dev, q);
                wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
                if (!q->queued)
                        wake_up(&dev->tx_wait);

                spin_unlock_bh(&q->lock);

                if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
                        ieee80211_queue_delayed_work(dev->hw,
                                                     &dev->usb.stat_work,
                                                     msecs_to_jiffies(10));

                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
}

static void mt76u_tx_status_data(struct work_struct *work)
{
        struct mt76_usb *usb;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        usb = container_of(work, struct mt76_usb, stat_work.work);
        dev = container_of(usb, struct mt76_dev, usb);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
                ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
                                             msecs_to_jiffies(10));
        else
                clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76u_buf *buf = urb->context;
        struct mt76_dev *dev = buf->dev;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        buf->done = true;

        tasklet_schedule(&dev->usb.tx_tasklet);
}

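/*
 * Map an skb (linear head plus all fragments) onto the tx urb sg list,
 * capped at MT_SG_MAX_SIZE entries.
 */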
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
        int nsgs = 1 + skb_shinfo(skb)->nr_frags;
        struct sk_buff *iter;

        skb_walk_frags(skb, iter)
                nsgs += 1 + skb_shinfo(iter)->nr_frags;

        memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

        nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
        sg_init_marker(urb->sg, nsgs);
        urb->num_sgs = nsgs;

        return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct usb_interface *intf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(intf);
        u8 *data = NULL, ep = q2ep(q->hw_idx);
        struct mt76u_buf *buf;
        u16 idx = q->tail;
        unsigned int pipe;
        int err;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
        if (err < 0)
                return err;

        buf = &q->entry[idx].ubuf;
        buf->done = false;

        if (dev->usb.sg_en) {
                err = mt76u_tx_build_sg(skb, buf->urb);
                if (err < 0)
                        return err;
        } else {
                data = skb->data;
        }

        pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
        usb_fill_bulk_urb(buf->urb, udev, pipe, data, skb->len,
                          mt76u_complete_tx, buf);

        q->tail = (q->tail + 1) % q->ndesc;
        q->entry[idx].skb = skb;
        q->queued++;

        return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct mt76u_buf *buf;
        int err;

        while (q->first != q->tail) {
                buf = &q->entry[q->first].ubuf;

                trace_submit_urb(dev, buf->urb);
                err = usb_submit_urb(buf->urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
        struct mt76u_buf *buf;
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->swq);
                q->hw_idx = mt76_ac_to_hwq(i);

                q->entry = devm_kcalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;

                q->ndesc = MT_NUM_TX_ENTRIES;
                for (j = 0; j < q->ndesc; j++) {
                        buf = &q->entry[j].ubuf;
                        buf->dev = dev;

                        buf->urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (!buf->urb)
                                return -ENOMEM;

                        if (dev->usb.sg_en) {
                                size_t size = MT_SG_MAX_SIZE *
                                              sizeof(struct scatterlist);

                                buf->urb->sg = devm_kzalloc(dev->dev, size,
                                                            GFP_KERNEL);
                                if (!buf->urb->sg)
                                        return -ENOMEM;
                        }
                }
        }
        return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                for (j = 0; j < q->ndesc; j++)
                        usb_free_urb(q->entry[j].ubuf.urb);
        }
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = &dev->q_tx[i];
                for (j = 0; j < q->ndesc; j++)
                        usb_kill_urb(q->entry[j].ubuf.urb);
        }
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
        tasklet_disable(&dev->usb.rx_tasklet);
        tasklet_disable(&dev->usb.tx_tasklet);

        mt76u_stop_rx(dev);
        mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
        cancel_delayed_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
        mt76u_stop_queues(dev);

        mt76u_free_rx(dev);
        mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76u_alloc_rx(dev);
        if (err < 0)
                return err;

        return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
        .tx_queue_skb = mt76u_tx_queue_skb,
        .kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf)
{
        static const struct mt76_bus_ops mt76u_ops = {
                .rr = mt76u_rr,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .copy = mt76u_copy,
                .wr_rp = mt76u_wr_rp,
                .rd_rp = mt76u_rd_rp,
                .type = MT76_BUS_USB,
        };
        struct mt76_usb *usb = &dev->usb;

        tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
        tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
        skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

        init_completion(&usb->mcu.cmpl);
        mutex_init(&usb->mcu.mutex);

        mutex_init(&usb->usb_ctrl_mtx);
        dev->bus = &mt76u_ops;
        dev->queue_ops = &usb_queue_ops;

        usb->sg_en = mt76u_check_sg(dev);

        return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

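/*
 * Usage sketch for chip drivers (names other than the mt76u_* exports
 * are hypothetical): mt76u_init() is meant to be called from the USB
 * probe routine, and mt76u_alloc_queues() once the device is ready to
 * pass traffic, e.g.:
 *
 *      static int foo_probe(struct usb_interface *intf,
 *                           const struct usb_device_id *id)
 *      {
 *              struct mt76_dev *mdev = ...;
 *              int err;
 *
 *              err = mt76u_init(mdev, intf);
 *              if (err < 0)
 *                      return err;
 *
 *              ...
 *              return mt76u_alloc_queues(mdev);
 *      }
 */
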
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");