/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
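
/* Register and memory access model: every access goes through a vendor
 * control transfer on endpoint 0, serialized by usb_ctrl_mtx and bounced
 * through the usb->data scratch buffer. The __ prefixed helpers expect
 * the caller to already hold the mutex.
 */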

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
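
/* Register addresses encode the vendor command in their high bits
 * (MT_VEND_TYPE_MASK); the helpers below select the matching request
 * (EEPROM, config or multi read/write) and mask the type bits off to
 * obtain the wire offset.
 */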

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
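
/* Register-pair batches: while the MCU firmware is not running yet,
 * each pair is written/read individually over the control endpoint;
 * once MT76_STATE_MCU_RUNNING is set, the batch is delegated to the
 * MCU ops, which can transfer it in a single command.
 */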

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}
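
/* Scatter-gather is used only if it is not disabled via the module
 * parameter and the host controller can actually take SG bulk
 * transfers: it advertises a non-zero sg_tablesize and either has no
 * SG constraint or drives a USB wireless-speed device.
 */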

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}
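
/* Rx buffers are carved out of a per-queue page_frag cache; in SG mode
 * each fragment backs one scatterlist entry of the rx urb, so a single
 * urb can receive frames larger than one fragment.
 */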

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = q->buf_size;
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
			       GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
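
/* Build an skb around an rx fragment: if frame data plus
 * skb_shared_info fits in the fragment, build_skb() reuses the buffer
 * in place (fast path); otherwise a small linear head is copied out
 * and the remainder is attached as a paged fragment (slow path).
 */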

static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
	struct sk_buff *skb;

	if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
		data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);
		return skb;
	}

	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}
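
/* Rx bottom half: pop completed urbs off the queue, build and deliver
 * the frames, refill the consumed fragments and resubmit each urb.
 */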

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_poison_urb(q->entry[i].urb);

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_unpoison_urb(q->entry[i].urb);

	return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
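
/* Tx bottom half: reap urbs marked done by mt76u_complete_tx() in ring
 * order, report the skbs to the driver, update the queue counters and
 * wake stopped queues, then schedule more traffic and kick off status
 * polling via stat_work.
 */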

static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}
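
/* Note: tx entries live in a software ring of pre-allocated urbs;
 * mt76u_tx_queue_skb() fills the entry at q->tail, while q->first
 * tracks the next urb to hand to the USB stack in mt76u_tx_kick()
 * and q->head the next completion to reap in the tx tasklet.
 */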

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}
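
/* Stop tx: wait briefly for pending frames to drain; on timeout, kill
 * the urbs and complete any leftover skbs manually, since the killed
 * tasklet can no longer reap them.
 */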

void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);
	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");