// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

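/*
 * Allocate one DMA descriptor ring: a coherent array of hardware
 * descriptors (q->desc) shared with the device, plus a parallel
 * q->entry array holding per-slot CPU state (skb, txwi, unmap flags).
 * Every descriptor starts out with MT_DMA_CTL_DMA_DONE set, i.e. not
 * owned by the hardware, before the ring base/size registers are
 * pointed at it.
 */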
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}

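/*
 * Each hardware descriptor carries up to two buffers, so nbufs buffers
 * consume (nbufs + 1) / 2 ring slots. For frames with a txwi, the
 * first buffer is the txwi itself: a DMA_DUMMY_DATA placeholder is
 * stored and skip_buf0 is set so cleanup does not unmap a buffer the
 * txwi cache owns. The MT_DMA_CTL_LAST_SEC0/1 flags mark the final
 * segment. Returns the index of the last descriptor written.
 */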
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		if (buf[0].skip_unmap)
			q->entry[q->head].skip_buf0 = true;
		q->entry[q->head].skip_buf1 = i == nbufs - 1;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				q->entry[q->head].skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

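/*
 * Release one TX slot: unmap buf0/buf1 unless the matching skip flag
 * says the buffer is owned elsewhere, turn DMA_DUMMY_DATA placeholders
 * back into NULL, and copy the entry out to the caller before wiping
 * the slot for reuse.
 */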
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->skip_buf0) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!e->skip_buf1) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
}

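/*
 * Ring the doorbell. The wmb() orders the WRITE_ONCE() descriptor
 * updates in mt76_dma_add_buf() ahead of the cpu_idx write, so the
 * hardware never sees the new index before the descriptors it covers
 * are in memory.
 */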
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	writel(q->head, &q->regs->cpu_idx);
}

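/*
 * Reap completed TX descriptors up to the hardware dma_idx, or all
 * outstanding ones when flushing (last = -1). Frees txwis unless the
 * driver keeps them (MT_DRV_TXWI_NO_FREE), and restarts a stopped
 * mac80211 queue once fewer than q->ndesc - 8 slots are in use,
 * leaving a little headroom before wake-up.
 */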
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

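/*
 * Pop the buffer at q->tail if the hardware has released it. A flush
 * forces the DMA_DONE bit so every remaining buffer can be reclaimed;
 * otherwise a descriptor still owned by the hardware ends the walk.
 */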
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

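/*
 * Minimal TX path for frames that carry no txwi (in mt76 this is
 * typically used for MCU command messages): map the linear skb data,
 * enqueue it as a single buffer and kick the queue right away.
 */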
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

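/*
 * Regular TX path: take a txwi from the cache, map the skb head and
 * every fragment, let the driver fill the txwi via ->tx_prepare_skb(),
 * then hand the whole buffer list to mt76_dma_add_buf(). The ring
 * space check uses (nbuf + 1) / 2 since descriptors hold two buffers
 * each. On error the unmap loop stops before index 0 on purpose:
 * buf[0] is the txwi, whose mapping belongs to the txwi cache.
 */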
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (tx_info.skb == dev->test.tx_skb)
		dev->test.tx_done--;
#endif

	e.skb = tx_info.skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, &e);
	mt76_put_txwi(dev, t);
	return ret;
}

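/*
 * Refill the RX ring from the per-queue page fragment cache. The ring
 * is filled only up to q->ndesc - 1 entries, presumably so a full
 * ring stays distinguishable from an empty one; q->buf_offset reserves
 * headroom in front of each buffer for the driver.
 */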
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

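/*
 * Append one RX scatter buffer to the pending frame in q->rx_head.
 * Fragments beyond the skb's frag capacity are not appended; when the
 * last segment arrives (!more), the assembled skb is handed to the
 * driver's rx_skb() callback.
 */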
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
		offset += q->buf_offset;
		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
				q->buf_size);
	}

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

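/*
 * NAPI poll handler: process RX batches until the budget is spent or
 * the queue runs dry. If the budget was not exhausted, napi_complete()
 * lets ->rx_poll_complete() re-arm the device's RX interrupt.
 */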
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	local_bh_disable();
	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();
	local_bh_enable();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}

static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

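/*
 * mt76_dma_attach() plugs this DMA implementation into the generic
 * queue_ops interface. A hedged usage sketch (wrapper macro names per
 * mt76.h, shown for illustration):
 *
 *	mt76_dma_attach(&dev->mt76);
 *	// dev->queue_ops->alloc() now resolves to
 *	// mt76_dma_alloc_queue(), normally reached through the
 *	// mt76_queue_alloc() wrapper macros.
 */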
void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);