// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
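/*
 * Allocate one hardware DMA ring: map the per-ring register block, allocate
 * the descriptor array from coherent DMA memory along with the matching
 * software entry array, mark every descriptor as done so the hardware will
 * not touch it yet, then program the base address and ring size and reset
 * both the CPU and DMA indices.
 */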
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}
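/*
 * Write a list of buffers into consecutive ring descriptors. Each
 * descriptor carries two buffer slots (buf0/buf1), so buffers are consumed
 * in pairs; MT_DMA_CTL_LAST_SEC0/LAST_SEC1 flag whichever slot holds the
 * final fragment. The skb and txwi pointers are recorded in the software
 * entry of the last descriptor used, whose index is returned.
 */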
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_DATA;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}
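/*
 * Release one completed TX descriptor: unmap the buffers it references
 * (buf0 unless the entry owns both a real txwi and an skb, buf1 unless the
 * descriptor was the last section), turn DMA_DUMMY_DATA placeholders back
 * into NULL, then copy the entry out for the caller and clear the slot.
 */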
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}
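/*
 * Re-synchronize software state with the hardware ring after a flush or
 * reset: reprogram base address and ring size, then adopt the hardware DMA
 * index as both head and tail so the ring starts out empty.
 */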
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
	writel(q->head, &q->regs->cpu_idx);
}
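/*
 * Reap completed TX descriptors up to the hardware DMA index (or all of
 * them when flushing). The cleanup loop itself runs without the queue
 * lock; the accumulated queued/swq_queued adjustments are applied in one
 * locked section afterwards, and the corresponding ieee80211 queue is
 * woken once enough descriptors are free again.
 */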
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *q = sq->q;
	struct mt76_queue_entry entry;
	unsigned int n_swq_queued[4] = {};
	unsigned int n_queued = 0;
	bool wake = false;
	int i, last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while ((q->queued > n_queued) && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			n_swq_queued[entry.qid]++;

		q->tail = (q->tail + 1) % q->ndesc;
		n_queued++;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, qid, &entry);

		if (entry.txwi) {
			if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	spin_lock_bh(&q->lock);

	q->queued -= n_queued;
	for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
		if (!n_swq_queued[i])
			continue;

		dev->q_tx[i].swq_queued -= n_swq_queued[i];
	}

	if (flush)
		mt76_dma_sync_idx(dev, q);

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}
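/*
 * Detach the RX buffer from a descriptor: report the length, completion
 * info and "more fragments" state from the descriptor words, unmap the
 * buffer and return it, clearing the software entry.
 */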
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}
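/*
 * Pop the next RX buffer from the tail of the ring. Unless flushing, stop
 * at the first descriptor the hardware has not yet marked DMA_DONE.
 */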
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
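/* Publish the software head index to the hardware to start DMA. */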
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->head, &q->regs->cpu_idx);
}
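/*
 * Queue a frame that carries no txwi (e.g. MCU commands): map the linear
 * skb data, add it as a single buffer and kick the queue.
 */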
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}
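/*
 * Queue a data frame: reserve a txwi from the cache, map the skb head and
 * every fragment, let the driver fill the txwi via tx_prepare_skb(), then
 * write txwi + data buffers into the ring. On failure all data mappings
 * are rolled back (buf[0], the txwi, stays mapped with its cache entry)
 * and the driver's completion hook is invoked.
 */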
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int len, n = 0, ret = -ENOMEM;
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->tx_aligned4_skbs)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
	e.skb = tx_info.skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, qid, &e);
	mt76_put_txwi(dev, t);
	return ret;
}
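/*
 * Refill the RX ring with page-fragment buffers until only one descriptor
 * remains free. Buffers are mapped DMA_FROM_DEVICE, queued with q->buf_offset
 * reserved at the front, and the queue is kicked once if anything was added.
 */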
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
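/*
 * Drain and free every buffer still sitting in the RX ring, then release
 * the page-fragment cache that backed the allocations.
 */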
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}
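/*
 * Reset an RX ring: clear the DMA_DONE bit in every descriptor so stale
 * completions are not processed, drop all queued buffers, re-sync the
 * indices with the hardware and refill the ring.
 */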
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);
}
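/*
 * Append one RX buffer to the skb currently being reassembled as a page
 * fragment; once the final fragment arrives (more == false), hand the
 * completed skb to the driver.
 */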
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}
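/*
 * Main RX loop: pull completed buffers off the ring up to the NAPI budget,
 * drop frames whose reported length cannot fit in the buffer, reassemble
 * multi-buffer frames via q->rx_head, build an skb around each head buffer
 * and pass it up. The ring is refilled before returning.
 */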
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}
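/*
 * NAPI poll handler. The queue index is recovered from the position of the
 * napi struct within dev->napi[]. Processing repeats until the budget is
 * exhausted or a pass makes no progress; only when the budget is not fully
 * used does NAPI complete and the driver's rx_poll_complete() hook run
 * (which typically re-enables the RX interrupt).
 */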
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}
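/*
 * Register one NAPI context per RX queue on a dummy netdev, prefill the
 * rings and enable polling.
 */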
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};
void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);