/* drivers/net/wireless/mediatek/mt76/usb.c */
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

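/*
 * Synchronous vendor control transfer on endpoint 0, retried up to
 * MT_VEND_REQ_MAX_RETRY times on transient errors. -ENODEV marks the
 * device as removed, which makes subsequent requests fail fast.
 */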
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

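/*
 * Register accessors: the high bits of the address select the target
 * (EEPROM, CFG or plain register space) and the matching vendor request,
 * while the remaining bits form the wire offset.
 */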
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

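/*
 * Read-modify-write helper; usb_ctrl_mtx is held across both accesses
 * so the update is atomic with respect to other register operations.
 */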
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

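/*
 * Write a 32-bit value as two 16-bit vendor requests carried entirely
 * in the setup packet: the low half goes to @offset, the high half to
 * @offset + 2.
 */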
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

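/*
 * Register-pair accessors: once the MCU firmware is running they are
 * delegated to the MCU ops, otherwise each pair is accessed directly
 * over the control endpoint.
 */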
static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

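/*
 * Scatter-gather is used only when the host controller supports it
 * (sg_tablesize > 0 and either no SG constraint or a USB_SPEED_WIRELESS
 * link) and it has not been disabled via disable_usb_sg.
 */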
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

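/*
 * Fill an rx urb's scatterlist with buffers from the queue's page_frag
 * cache. Returns the number of segments that could be refilled, or
 * -ENOMEM if no buffer could be allocated at all.
 */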
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int sglen = SKB_WITH_OVERHEAD(q->buf_size);
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = SKB_WITH_OVERHEAD(q->buf_size);
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}

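/*
 * The urb and, when SG is enabled, its scatterlist are carved from a
 * single allocation: the list lives right behind the urb (e->urb + 1),
 * so the final usb_free_urb() releases both at once.
 */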
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

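/*
 * The first 16 bits of an rx buffer carry the DMA length reported by
 * the hardware; it must be non-zero, 4-byte aligned and fit into the
 * received data together with the DMA header.
 */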
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

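/*
 * Convert a completed rx urb into an sk_buff: the first segment becomes
 * the skb head via build_skb(), remaining scatterlist entries are
 * attached as paged fragments. Returns the number of consumed segments,
 * i.e. how many buffers the caller has to refill.
 */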
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

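/*
 * Rx bottom half: dequeue completed urbs, pass their payload up to the
 * driver, refill exactly the buffers that were consumed and resubmit.
 * A failed refill terminates the loop so no empty urb is resubmitted.
 */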
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].urb);
}

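/*
 * Tx bottom half: for each AC, report completed entries to the driver,
 * then update the queue counters under the lock, kick the tx scheduler,
 * schedule the status work and restart queues stopped on ring-full.
 */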
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

674
675static void mt76u_tx_status_data(struct work_struct *work)
676{
677 struct mt76_usb *usb;
678 struct mt76_dev *dev;
679 u8 update = 1;
680 u16 count = 0;
681
682 usb = container_of(work, struct mt76_usb, stat_work.work);
683 dev = container_of(usb, struct mt76_dev, usb);
684
685 while (true) {
686 if (test_bit(MT76_REMOVED, &dev->state))
687 break;
688
689 if (!dev->drv->tx_status_data(dev, &update))
690 break;
691 count++;
692 }
693
694 if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
695 ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
696 msecs_to_jiffies(10));
697 else
698 clear_bit(MT76_READING_STATS, &dev->state);
699}
700
static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}

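/*
 * Attach an skb to a tx urb: without SG the linear skb data is used as
 * the transfer buffer directly, with SG the whole skb (including paged
 * fragments) is mapped into the urb's scatterlist via skb_to_sgvec().
 */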
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

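/*
 * Submit every queued but not yet submitted urb between q->first and
 * q->tail. -ENODEV marks the device removed; any other error stops the
 * kick and leaves the remaining urbs for a later attempt.
 */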
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

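/*
 * Allocate one tx ring per AC; the queue slots between IEEE80211_NUM_ACS
 * and MT_TXQ_PSD do not get rings of their own but alias the first AC
 * queue.
 */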
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j]);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

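/*
 * Wire the USB bus ops and queue ops into the mt76 core, initialize the
 * rx/tx tasklets and stats work, probe scatter-gather support and map
 * the interface's bulk endpoints.
 */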
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");