/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

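/*
 * Register-pair accessors: before the MCU firmware is up, pairs are
 * written/read one at a time through plain vendor requests; once
 * MT76_STATE_MCU_RUNNING is set, the bulk mcu_wr_rp/mcu_rd_rp MCU ops
 * take over.
 */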
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

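/*
 * Scatter-gather is only usable when the host controller has no SG
 * constraint (e.g. xHCI) or the device runs at wireless-USB speed, and
 * it can be force-disabled with the disable_usb_sg module parameter.
 */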
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

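/*
 * Populate urb->sg with page fragments from the queue's page_frag cache.
 * On a partial allocation, fragments left over from a previous fill are
 * released and num_sgs is trimmed to what was actually allocated.
 * Returns the number of entries filled, or -ENOMEM if none could be.
 */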
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int sglen = SKB_WITH_OVERHEAD(q->buf_size);
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

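/* Attach fresh rx buffers to @urb: an SG list when SG support is
 * enabled, otherwise a single linear page fragment.
 */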
static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = SKB_WITH_OVERHEAD(q->buf_size);
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

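/*
 * Turn one completed rx urb into an skb: the DMA header is stripped from
 * the first buffer, any remaining SG entries are attached as paged
 * frags, and the skb is handed to the driver's rx_skb hook. Returns the
 * number of fragments consumed (i.e. how many buffers need refilling),
 * or 0 if the urb content was dropped.
 */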
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

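/*
 * URB completion handler, runs in interrupt context: on success the
 * entry is pushed on the rx queue tail and the rx tasklet is scheduled;
 * unlink/shutdown errors terminate the urb without resubmission.
 */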
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

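/*
 * Bottom half of the rx path: drain all completed urbs from the queue,
 * process each into the stack, refill the buffers that were consumed
 * and resubmit the urb to the bulk-in endpoint.
 */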
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].urb);
}

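/*
 * Bottom half of the tx path: for every AC, reap entries whose urbs
 * have completed, report them through tx_complete_skb, wake the
 * mac80211 queue once enough room is available and kick off the
 * tx-status work.
 */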
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

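/*
 * Delayed work polling hardware tx status: keep asking the driver for
 * status data until it runs dry or the device is removed, then either
 * reschedule (device still running) or clear MT76_READING_STATS.
 */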
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}

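/*
 * Map an skb onto the preallocated urb at the queue tail. The urb is
 * only prepared here; submission to the bulk-out endpoint happens later
 * in mt76u_tx_kick().
 */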
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j]);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].urb);
	}
}

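/*
 * Disable both tasklets and kill all in-flight rx/tx urbs; drivers call
 * this on suspend and teardown paths.
 */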
void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

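/*
 * Entry point for USB based mt76 drivers: hook up the USB bus/queue
 * ops, initialize the tasklets, status work and control-message
 * mutexes, probe scatter-gather support and discover the bulk
 * endpoints.
 */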
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");