mt76: usb: use dev_err_ratelimited instead of dev_err in mt76u_complete_rx
drivers/net/wireless/mediatek/mt76/usb.c
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

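/*
 * Read-modify-write helper: the register read and the write back are both
 * performed while holding usb_ctrl_mtx, so the update is atomic with
 * respect to other register accesses on this device.
 */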
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

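/*
 * Write a 32-bit value as two 16-bit vendor requests: the low half goes to
 * the target offset and the high half to offset + 2, both under usb_ctrl_mtx.
 */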
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

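/*
 * Scatter-gather is used only when it is not disabled via the module
 * parameter, the host controller supports it (sg_tablesize > 0) and either
 * there is no SG constraint or the device runs at wireless-USB speed.
 */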
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

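/*
 * Walk the current altsetting and record the bulk-in/bulk-out endpoint
 * numbers and max packet sizes; fail unless the expected number of bulk
 * endpoints is found.
 */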
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

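/*
 * Fill the RX URB scatterlist with page fragments taken from the queue's
 * page_frag cache. Returns the number of entries that were (re)filled, or
 * -ENOMEM if no fragment could be allocated.
 */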
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_buf_alloc_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		   int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}

int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int len, int data_len, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->buf = page_frag_alloc(&q->rx_page, len, gfp);
	if (!buf->buf)
		return -ENOMEM;

	buf->len = data_len;
	buf->dev = dev;

	return 0;
}

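/*
 * Release every page fragment attached to the URB scatterlist, the linear
 * buffer used in non-SG mode, and finally the URB itself.
 */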
void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	struct scatterlist *sg;
	int i;

	for (i = 0; i < urb->num_sgs; i++) {
		sg = &urb->sg[i];
		if (!sg)
			continue;

		skb_free_frag(sg_virt(sg));
	}
	if (buf->buf)
		skb_free_frag(buf->buf);

	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

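/*
 * Fill a bulk URB for the requested direction/endpoint index and submit it.
 * In scatter-gather mode the transfer buffer is left NULL and the USB core
 * uses urb->sg instead.
 */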
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
			  complete_fn, context);
	trace_submit_urb(dev, buf->urb);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

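/*
 * The first 16 bits of an RX buffer carry the DMA length reported by the
 * hardware. Reject buffers that are shorter than the minimal DMA/RXWI/FCE
 * header, report a zero length, exceed the received USB payload or are not
 * dword aligned.
 */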
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

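/*
 * Build an skb around the first (linear) fragment and attach any remaining
 * scatterlist pages as paged frags, then hand the frame to the driver rx
 * handler. Returns the number of fragments consumed, or 0 if the buffer can
 * be resubmitted as is.
 */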
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

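/*
 * URB completion handler, called in interrupt context. Failures are logged
 * with dev_err_ratelimited() so a misbehaving device cannot flood the log,
 * and the completed entry is queued for the rx tasklet.
 */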
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct mt76u_buf *buf, int nsgs)
{
	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
					SKB_WITH_OVERHEAD(q->buf_size));
	} else {
		buf->buf = page_frag_alloc(&q->rx_page, q->buf_size,
					   GFP_ATOMIC);
		return buf->buf ? 0 : -ENOMEM;
	}
}

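/*
 * RX bottom half: drain every completed buffer from the queue, pass it up
 * the stack, refill it with fresh fragments and resubmit the URB.
 */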
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct mt76u_buf *buf;
	int err, count;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		count = mt76u_process_rx_entry(dev, buf);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, buf, count);
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		if (dev->usb.sg_en)
			err = mt76u_buf_alloc_sg(dev, &q->entry[i].ubuf,
						 MT_SG_MAX_SIZE, q->buf_size,
						 SKB_WITH_OVERHEAD(q->buf_size),
						 GFP_KERNEL);
		else
			err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
					      q->buf_size,
					      SKB_WITH_OVERHEAD(q->buf_size),
					      GFP_KERNEL);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

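/*
 * TX bottom half: for every AC queue, reap completed entries, report them
 * to the driver, schedule more frames from mac80211 and wake the queue when
 * enough room is available again.
 */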
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			entry = q->entry[q->head];
			q->head = (q->head + 1) % q->ndesc;
			q->queued--;

			spin_unlock_bh(&q->lock);
			dev->drv->tx_complete_skb(dev, q, &entry, false);
			spin_lock_bh(&q->lock);
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

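/*
 * Map the skb head and all of its fragments into the TX URB scatterlist,
 * capping the number of entries at MT_SG_MAX_SIZE.
 */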
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

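/*
 * Queue one frame on a TX hardware queue: let the driver build the TX
 * descriptor, attach the payload to the URB (scatter-gather or linear) and
 * record the entry so the completion tasklet can reap it later.
 */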
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 *data = NULL, ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	if (dev->usb.sg_en) {
		err = mt76u_tx_build_sg(skb, buf->urb);
		if (err < 0)
			return err;
	} else {
		data = skb->data;
	}

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, data, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;

		trace_submit_urb(dev, buf->urb);
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = mt76_ac_to_hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			if (dev->usb.sg_en) {
				size_t size = MT_SG_MAX_SIZE *
					      sizeof(struct scatterlist);

				buf->urb->sg = devm_kzalloc(dev->dev, size,
							    GFP_KERNEL);
				if (!buf->urb->sg)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

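/*
 * Common USB setup: install the register/queue ops, initialize the rx/tx
 * tasklets, the status work and the control mutexes, detect scatter-gather
 * support and discover the bulk endpoints.
 */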
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");