/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

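/* read-modify-write: update only the bits covered by @mask, keeping the
 * read and write atomic with respect to other register accesses
 */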
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

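/* copy a buffer to consecutive registers, one 32-bit word per request */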
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

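/* write a 32-bit value as two 16-bit halves carried in the wValue field
 * of back-to-back vendor requests
 */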
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

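/* route register-pair writes through the MCU once it is running,
 * otherwise fall back to direct vendor requests
 */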
static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

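/* route register-pair reads through the MCU once it is running,
 * otherwise fall back to direct vendor requests
 */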
static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

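/* map the interface bulk IN/OUT endpoints onto the driver endpoint tables;
 * fail if the descriptor does not provide the expected number of each
 */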
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

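/* validate the rx DMA header and return the payload length, or -EINVAL
 * if the length field is zero, unaligned or exceeds the received data
 */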
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

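/* build an skb around the first scatterlist fragment and append the
 * remaining fragments as paged rx data; returns the number of sg
 * entries consumed
 */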
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

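/* drain completed rx urbs, refill their scatterlists and resubmit them */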
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

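/* reap completed tx urbs in submission order, kick the txq scheduler
 * and wake mac80211 queues that have drained below the threshold
 */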
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

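/* map the skb, including its fragment list, onto the urb scatterlist */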
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

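/* submit every queued but not yet submitted tx urb */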
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = mt76_ac_to_hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		goto err;

	err = mt76u_alloc_tx(dev);
	if (err < 0)
		goto err;

	return 0;
err:
	mt76u_queues_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");