mt76x02: enable AP mode for USB
drivers/net/wireless/mediatek/mt76/usb.c
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

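/*
 * Core control-endpoint accessor: issues a vendor request on endpoint 0
 * and retries up to MT_VEND_REQ_MAX_RETRY times with a 5-10ms back-off.
 * A -ENODEV completion marks the device as removed so subsequent calls
 * fail fast with -EIO.
 */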
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

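/*
 * Register reads are tunneled through vendor requests: the high bits of
 * the address (MT_VEND_TYPE_MASK) select the request code (EEPROM, CFG
 * or plain multi-read) and the low bits become the request offset.  On
 * error the all-ones value is returned, much like a read from a dead
 * MMIO bus.
 */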
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

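/*
 * Read-modify-write helper: both the read and the write run under a
 * single usb_ctrl_mtx critical section, so the update is atomic with
 * respect to the other register accessors on this bus.
 */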
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

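/*
 * Bulk register copy: the source buffer is pushed 32 bits at a time
 * through the shared usb->data bounce buffer, one vendor request per
 * word; len is expected to be a multiple of four.
 */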
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

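/*
 * Register-pair helpers: before the MCU firmware is up the pairs are
 * written/read one at a time via vendor requests; once it is running
 * the same ops are routed through dev->mcu_ops, which can handle them
 * more efficiently in firmware.
 */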
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

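/*
 * Scatter-gather is used only when the module parameter allows it, the
 * host controller advertises a non-zero sg_tablesize, and either no SG
 * constraint exists or the device runs at USB_SPEED_WIRELESS.
 */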
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

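/*
 * Walk the current altsetting and record every bulk endpoint, IN ones
 * for rx and OUT ones for tx.  The hardware is expected to expose
 * exactly __MT_EP_IN_MAX bulk-in and __MT_EP_OUT_MAX bulk-out
 * endpoints; anything else is rejected with -EINVAL.
 */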
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

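/*
 * Populate up to nsgs scatterlist entries from the per-queue page_frag
 * cache.  On partial allocation failure the previously populated
 * entries beyond the refilled ones are released and urb->num_sgs is
 * trimmed; returns the number of populated entries, or -ENOMEM if none
 * could be allocated.
 */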
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct mt76u_buf *buf, int nsgs, gfp_t gfp)
{
	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
					SKB_WITH_OVERHEAD(q->buf_size));
	} else {
		buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		return buf->buf ? 0 : -ENOMEM;
	}
}

static int
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	buf->len = SKB_WITH_OVERHEAD(q->buf_size);
	buf->dev = dev;

	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!buf->urb)
		return -ENOMEM;

	if (dev->usb.sg_en) {
		buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
					    sizeof(*buf->urb->sg),
					    GFP_KERNEL);
		if (!buf->urb->sg)
			return -ENOMEM;

		sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
	}

	return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
}

static void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (buf->buf)
		skb_free_frag(buf->buf);

	usb_free_urb(buf->urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct mt76u_buf *buf, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
			  complete_fn, context);
}

static int
mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		 struct mt76u_buf *buf, gfp_t gfp,
		 usb_complete_t complete_fn, void *context)
{
	mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
			    context);
	trace_submit_urb(dev, buf->urb);

	return usb_submit_urb(buf->urb, gfp);
}

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

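/*
 * The first 16 bits of an rx buffer hold the DMA length reported by
 * the hardware.  Sanity-check it against the actual URB length: it
 * must be non-zero, dword-aligned and fit in the received data
 * together with the DMA header.
 */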
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

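/*
 * Turn a completed rx URB into an sk_buff: the first segment becomes
 * the skb head (build_skb() avoids a copy) and any remaining SG
 * segments are attached as page fragments.  Returns the number of
 * consumed segments, which the caller uses as the refill count.
 */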
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

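/*
 * Bottom half for rx: drain the completed entries, hand the frames to
 * the driver and immediately refill + resubmit each URB.  Runs under
 * rcu_read_lock() since rx_skb() may dereference RCU-protected station
 * state.
 */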
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct mt76u_buf *buf;
	int err, count;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		count = mt76u_process_rx_entry(dev, buf);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, buf, count,
					      GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->rx_page_lock);
	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);

	spin_lock_bh(&q->rx_page_lock);
	if (!q->rx_page.va)
		goto out;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
out:
	spin_unlock_bh(&q->rx_page_lock);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

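/*
 * Bottom half for tx: for each AC queue, reap entries whose URBs have
 * completed (buf->done), report them via tx_complete_skb() with the
 * queue lock dropped, then kick the txq scheduler.  Queues stopped for
 * back-pressure are woken once they drain below ndesc - 8, and the
 * delayed stats work is scheduled to fetch tx status from the device.
 */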
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		sq = &dev->q_tx[i];
		q = sq->q;

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				sq->swq_queued--;
			}

			entry = q->entry[q->head];
			q->head = (q->head + 1) % q->ndesc;
			q->queued--;

			spin_unlock_bh(&q->lock);
			dev->drv->tx_complete_skb(dev, i, &entry);
			spin_lock_bh(&q->lock);
		}
		mt76_txq_schedule(dev, sq);

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

static int
mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
		  struct urb *urb)
{
	if (!dev->usb.sg_en)
		return 0;

	sg_init_table(urb->sg, MT_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	return urb->num_sgs;
}

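/*
 * Enqueue one frame: the driver hook prepares the skb (prepending its
 * hardware tx descriptor), the matching URB is built (linear or
 * scatter-gather) and the ring slot is filled.  The URB is not
 * submitted here; that happens in mt76u_tx_kick() once the caller has
 * queued everything it has.
 */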
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, skb, qid, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->buf = skb->data;
	buf->len = skb->len;
	buf->done = false;

	err = mt76u_tx_build_sg(dev, skb, buf->urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    buf, mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;

		trace_submit_urb(dev, buf->urb);
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			if (!dev->usb.sg_en)
				continue;

			buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
						    sizeof(struct scatterlist),
						    GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

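/*
 * Common USB setup: install the vendor-request based bus ops and the
 * USB queue ops, initialize the rx/tx tasklets and the tx status work,
 * detect scatter-gather support and map the bulk endpoints.
 */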
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");