Commit | Line | Data |
---|---|---|
0e3d6777 | 1 | // SPDX-License-Identifier: ISC |
17f1de56 FF | 2 | /* |
3 | * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> | |
17f1de56 FF | 4 | */ |
5 | ||
6 | #include <linux/dma-mapping.h> | |
7 | #include "mt76.h" | |
8 | #include "dma.h" | |
9 | ||
f68d6762 FF | 10 | #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) |
11 | ||
2e420b88 | 12 | #define Q_READ(_q, _field) ({ \ |
f68d6762 FF | 13 | u32 _offset = offsetof(struct mt76_queue_regs, _field); \ |
14 | u32 _val; \ | |
15 | if ((_q)->flags & MT_QFLAG_WED) \ | |
2e420b88 | 16 | _val = mtk_wed_device_reg_read((_q)->wed, \ |
f68d6762 FF |
17 | ((_q)->wed_regs + \ |
18 | _offset)); \ | |
19 | else \ | |
20 | _val = readl(&(_q)->regs->_field); \ | |
21 | _val; \ | |
22 | }) | |
23 | ||
2e420b88 | 24 | #define Q_WRITE(_q, _field, _val) do { \ |
f68d6762 FF |
25 | u32 _offset = offsetof(struct mt76_queue_regs, _field); \ |
26 | if ((_q)->flags & MT_QFLAG_WED) \ | |
2e420b88 | 27 | mtk_wed_device_reg_write((_q)->wed, \ |
f68d6762 FF |
28 | ((_q)->wed_regs + _offset), \ |
29 | _val); \ | |
30 | else \ | |
31 | writel(_val, &(_q)->regs->_field); \ | |
32 | } while (0) | |
33 | ||
34 | #else | |
35 | ||
2e420b88 LB | 36 | #define Q_READ(_q, _field) readl(&(_q)->regs->_field) |
37 | #define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field) | |
cc9fd945 | 38 | |
f68d6762 | 39 | #endif |
cc9fd945 | 40 | |
dd57a95c FF | 41 | static struct mt76_txwi_cache * |
42 | mt76_alloc_txwi(struct mt76_dev *dev) | |
43 | { | |
44 | struct mt76_txwi_cache *t; | |
45 | dma_addr_t addr; | |
46 | u8 *txwi; | |
47 | int size; | |
48 | ||
49 | size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); | |
402e0109 | 50 | txwi = kzalloc(size, GFP_ATOMIC); |
dd57a95c FF |
51 | if (!txwi) |
52 | return NULL; | |
53 | ||
d1ddc536 | 54 | addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, |
dd57a95c | 55 | DMA_TO_DEVICE); |
5d0e7dde DA | 56 | if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { |
57 | kfree(txwi); | |
58 | return NULL; | |
59 | } | |
60 | ||
dd57a95c FF |
61 | t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); |
62 | t->dma_addr = addr; | |
63 | ||
64 | return t; | |
65 | } | |
66 | ||
2666bece SC | 67 | static struct mt76_txwi_cache * |
68 | mt76_alloc_rxwi(struct mt76_dev *dev) | |
69 | { | |
70 | struct mt76_txwi_cache *t; | |
71 | ||
72 | t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC); | |
73 | if (!t) | |
74 | return NULL; | |
75 | ||
76 | t->ptr = NULL; | |
77 | return t; | |
78 | } | |
79 | ||
dd57a95c FF |
80 | static struct mt76_txwi_cache * |
81 | __mt76_get_txwi(struct mt76_dev *dev) | |
82 | { | |
83 | struct mt76_txwi_cache *t = NULL; | |
84 | ||
85 | spin_lock(&dev->lock); | |
86 | if (!list_empty(&dev->txwi_cache)) { | |
87 | t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, | |
88 | list); | |
89 | list_del(&t->list); | |
90 | } | |
91 | spin_unlock(&dev->lock); | |
92 | ||
93 | return t; | |
94 | } | |
95 | ||
2666bece SC |
96 | static struct mt76_txwi_cache * |
97 | __mt76_get_rxwi(struct mt76_dev *dev) | |
98 | { | |
99 | struct mt76_txwi_cache *t = NULL; | |
100 | ||
19527314 | 101 | spin_lock_bh(&dev->wed_lock); |
2666bece SC |
102 | if (!list_empty(&dev->rxwi_cache)) { |
103 | t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, | |
104 | list); | |
105 | list_del(&t->list); | |
106 | } | |
19527314 | 107 | spin_unlock_bh(&dev->wed_lock); |
2666bece SC |
108 | |
109 | return t; | |
110 | } | |
111 | ||
dd57a95c FF |
112 | static struct mt76_txwi_cache * |
113 | mt76_get_txwi(struct mt76_dev *dev) | |
114 | { | |
115 | struct mt76_txwi_cache *t = __mt76_get_txwi(dev); | |
116 | ||
117 | if (t) | |
118 | return t; | |
119 | ||
120 | return mt76_alloc_txwi(dev); | |
121 | } | |
122 | ||
2666bece SC |
123 | struct mt76_txwi_cache * |
124 | mt76_get_rxwi(struct mt76_dev *dev) | |
125 | { | |
126 | struct mt76_txwi_cache *t = __mt76_get_rxwi(dev); | |
127 | ||
128 | if (t) | |
129 | return t; | |
130 | ||
131 | return mt76_alloc_rxwi(dev); | |
132 | } | |
133 | EXPORT_SYMBOL_GPL(mt76_get_rxwi); | |
134 | ||
dd57a95c FF |
135 | void |
136 | mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) | |
137 | { | |
138 | if (!t) | |
139 | return; | |
140 | ||
141 | spin_lock(&dev->lock); | |
142 | list_add(&t->list, &dev->txwi_cache); | |
143 | spin_unlock(&dev->lock); | |
144 | } | |
145 | EXPORT_SYMBOL_GPL(mt76_put_txwi); | |
146 | ||
2666bece SC |
147 | void |
148 | mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) | |
149 | { | |
150 | if (!t) | |
151 | return; | |
152 | ||
19527314 | 153 | spin_lock_bh(&dev->wed_lock); |
2666bece | 154 | list_add(&t->list, &dev->rxwi_cache); |
19527314 | 155 | spin_unlock_bh(&dev->wed_lock); |
2666bece SC |
156 | } |
157 | EXPORT_SYMBOL_GPL(mt76_put_rxwi); | |
158 | ||
dd57a95c FF |
159 | static void |
160 | mt76_free_pending_txwi(struct mt76_dev *dev) | |
161 | { | |
162 | struct mt76_txwi_cache *t; | |
163 | ||
5f0ce584 | 164 | local_bh_disable(); |
402e0109 | 165 | while ((t = __mt76_get_txwi(dev)) != NULL) { |
d1ddc536 | 166 | dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, |
dd57a95c | 167 | DMA_TO_DEVICE); |
402e0109 FF |
168 | kfree(mt76_get_txwi_ptr(dev, t)); |
169 | } | |
5f0ce584 | 170 | local_bh_enable(); |
dd57a95c FF |
171 | } |
172 | ||
a97a467a | 173 | void |
2666bece SC |
174 | mt76_free_pending_rxwi(struct mt76_dev *dev) |
175 | { | |
176 | struct mt76_txwi_cache *t; | |
177 | ||
178 | local_bh_disable(); | |
179 | while ((t = __mt76_get_rxwi(dev)) != NULL) { | |
180 | if (t->ptr) | |
2f5c3c77 | 181 | mt76_put_page_pool_buf(t->ptr, false); |
2666bece SC |
182 | kfree(t); |
183 | } | |
184 | local_bh_enable(); | |
185 | } | |
a97a467a | 186 | EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi); |
2666bece | 187 | |
3990465d LB |
188 | static void |
189 | mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) | |
190 | { | |
2e420b88 | 191 | Q_WRITE(q, desc_base, q->desc_dma); |
950d0abb BJ |
192 | if (q->flags & MT_QFLAG_WED_RRO_EN) |
193 | Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); | |
194 | else | |
195 | Q_WRITE(q, ring_size, q->ndesc); | |
2e420b88 | 196 | q->head = Q_READ(q, dma_idx); |
3990465d LB |
197 | q->tail = q->head; |
198 | } | |
199 | ||
8a7386e7 LB |
200 | void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, |
201 | bool reset_idx) | |
3990465d | 202 | { |
f9b627f1 | 203 | if (!q || !q->ndesc) |
3990465d LB |
204 | return; |
205 | ||
950d0abb BJ |
206 | if (!mt76_queue_is_wed_rro_ind(q)) { |
207 | int i; | |
208 | ||
209 | /* clear descriptors */ | |
210 | for (i = 0; i < q->ndesc; i++) | |
211 | q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); | |
212 | } | |
3990465d | 213 | |
5bb7a655 LB |
214 | if (reset_idx) { |
215 | Q_WRITE(q, cpu_idx, 0); | |
216 | Q_WRITE(q, dma_idx, 0); | |
217 | } | |
3990465d LB |
218 | mt76_dma_sync_idx(dev, q); |
219 | } | |
220 | ||
8a7386e7 | 221 | void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) |
5bb7a655 LB |
222 | { |
223 | __mt76_dma_queue_reset(dev, q, true); | |
224 | } | |
225 | ||
953519b3 FF |
226 | static int |
227 | mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, | |
228 | struct mt76_queue_buf *buf, void *data) | |
229 | { | |
953519b3 FF |
230 | struct mt76_queue_entry *entry = &q->entry[q->head]; |
231 | struct mt76_txwi_cache *txwi = NULL; | |
950d0abb | 232 | struct mt76_desc *desc; |
953519b3 | 233 | int idx = q->head; |
4920a3a1 | 234 | u32 buf1 = 0, ctrl; |
953519b3 FF |
235 | int rx_token; |
236 | ||
950d0abb BJ |
237 | if (mt76_queue_is_wed_rro_ind(q)) { |
238 | struct mt76_wed_rro_desc *rro_desc; | |
239 | ||
240 | rro_desc = (struct mt76_wed_rro_desc *)q->desc; | |
241 | data = &rro_desc[q->head]; | |
242 | goto done; | |
243 | } | |
244 | ||
245 | desc = &q->desc[q->head]; | |
953519b3 | 246 | ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); |
4920a3a1 SC |
247 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
248 | buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32); | |
249 | #endif | |
953519b3 | 250 | |
58bcd4ed | 251 | if (mt76_queue_is_wed_rx(q)) { |
953519b3 FF |
252 | txwi = mt76_get_rxwi(dev); |
253 | if (!txwi) | |
254 | return -ENOMEM; | |
255 | ||
256 | rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); | |
257 | if (rx_token < 0) { | |
258 | mt76_put_rxwi(dev, txwi); | |
259 | return -ENOMEM; | |
260 | } | |
261 | ||
262 | buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); | |
263 | ctrl |= MT_DMA_CTL_TO_HOST; | |
264 | } | |
265 | ||
266 | WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr)); | |
267 | WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); | |
268 | WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); | |
269 | WRITE_ONCE(desc->info, 0); | |
270 | ||
950d0abb | 271 | done: |
953519b3 FF |
272 | entry->dma_addr[0] = buf->addr; |
273 | entry->dma_len[0] = buf->len; | |
274 | entry->txwi = txwi; | |
275 | entry->buf = data; | |
276 | entry->wcid = 0xffff; | |
277 | entry->skip_buf1 = true; | |
278 | q->head = (q->head + 1) % q->ndesc; | |
279 | q->queued++; | |
280 | ||
281 | return idx; | |
282 | } | |
283 | ||
17f1de56 FF |
284 | static int |
285 | mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, | |
286 | struct mt76_queue_buf *buf, int nbufs, u32 info, | |
287 | struct sk_buff *skb, void *txwi) | |
288 | { | |
75d4bf1f | 289 | struct mt76_queue_entry *entry; |
17f1de56 | 290 | struct mt76_desc *desc; |
17f1de56 | 291 | int i, idx = -1; |
fe13dad8 | 292 | u32 ctrl, next; |
17f1de56 | 293 | |
953519b3 FF |
294 | if (txwi) { |
295 | q->entry[q->head].txwi = DMA_DUMMY_DATA; | |
296 | q->entry[q->head].skip_buf0 = true; | |
297 | } | |
298 | ||
17f1de56 FF |
299 | for (i = 0; i < nbufs; i += 2, buf += 2) { |
300 | u32 buf0 = buf[0].addr, buf1 = 0; | |
301 | ||
75d4bf1f | 302 | idx = q->head; |
fe13dad8 | 303 | next = (q->head + 1) % q->ndesc; |
75d4bf1f FF |
304 | |
305 | desc = &q->desc[idx]; | |
306 | entry = &q->entry[idx]; | |
307 | ||
953519b3 FF |
308 | if (buf[0].skip_unmap) |
309 | entry->skip_buf0 = true; | |
310 | entry->skip_buf1 = i == nbufs - 1; | |
311 | ||
312 | entry->dma_addr[0] = buf[0].addr; | |
313 | entry->dma_len[0] = buf[0].len; | |
314 | ||
315 | ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); | |
4920a3a1 SC |
316 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
317 | info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32); | |
318 | #endif | |
953519b3 FF |
319 | if (i < nbufs - 1) { |
320 | entry->dma_addr[1] = buf[1].addr; | |
321 | entry->dma_len[1] = buf[1].len; | |
322 | buf1 = buf[1].addr; | |
323 | ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); | |
4920a3a1 SC |
324 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
325 | info |= FIELD_PREP(MT_DMA_CTL_SDP1_H, | |
326 | buf[1].addr >> 32); | |
327 | #endif | |
953519b3 FF |
328 | if (buf[1].skip_unmap) |
329 | entry->skip_buf1 = true; | |
17f1de56 FF |
330 | } |
331 | ||
953519b3 FF |
332 | if (i == nbufs - 1) |
333 | ctrl |= MT_DMA_CTL_LAST_SEC0; | |
334 | else if (i == nbufs - 2) | |
335 | ctrl |= MT_DMA_CTL_LAST_SEC1; | |
336 | ||
17f1de56 FF |
337 | WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); |
338 | WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); | |
339 | WRITE_ONCE(desc->info, cpu_to_le32(info)); | |
340 | WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); | |
341 | ||
fe13dad8 | 342 | q->head = next; |
17f1de56 FF |
343 | q->queued++; |
344 | } | |
345 | ||
346 | q->entry[idx].txwi = txwi; | |
347 | q->entry[idx].skb = skb; | |
6d51cae2 | 348 | q->entry[idx].wcid = 0xffff; |
17f1de56 FF |
349 | |
350 | return idx; | |
351 | } | |
352 | ||
353 | static void | |
354 | mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, | |
355 | struct mt76_queue_entry *prev_e) | |
356 | { | |
357 | struct mt76_queue_entry *e = &q->entry[idx]; | |
17f1de56 | 358 | |
75d4bf1f | 359 | if (!e->skip_buf0) |
d1ddc536 | 360 | dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0], |
17f1de56 | 361 | DMA_TO_DEVICE); |
17f1de56 | 362 | |
75d4bf1f | 363 | if (!e->skip_buf1) |
d1ddc536 | 364 | dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1], |
17f1de56 | 365 | DMA_TO_DEVICE); |
17f1de56 | 366 | |
598da386 | 367 | if (e->txwi == DMA_DUMMY_DATA) |
17f1de56 FF |
368 | e->txwi = NULL; |
369 | ||
370 | *prev_e = *e; | |
371 | memset(e, 0, sizeof(*e)); | |
372 | } | |
373 | ||
8f6c4f7b FF |
374 | static void |
375 | mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) | |
376 | { | |
2d681047 | 377 | wmb(); |
2e420b88 | 378 | Q_WRITE(q, cpu_idx, q->head); |
17f1de56 FF |
379 | } |
380 | ||
381 | static void | |
e5655492 | 382 | mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) |
17f1de56 | 383 | { |
17f1de56 | 384 | struct mt76_queue_entry entry; |
0b51f186 | 385 | int last; |
17f1de56 | 386 | |
f9b627f1 | 387 | if (!q || !q->ndesc) |
17f1de56 FF |
388 | return; |
389 | ||
9716ef04 | 390 | spin_lock_bh(&q->cleanup_lock); |
17f1de56 FF |
391 | if (flush) |
392 | last = -1; | |
393 | else | |
2e420b88 | 394 | last = Q_READ(q, dma_idx); |
17f1de56 | 395 | |
0b51f186 | 396 | while (q->queued > 0 && q->tail != last) { |
17f1de56 | 397 | mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); |
fe5b5ab5 | 398 | mt76_queue_tx_complete(dev, q, &entry); |
17f1de56 FF |
399 | |
400 | if (entry.txwi) { | |
9ec0b821 | 401 | if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) |
6ca66722 | 402 | mt76_put_txwi(dev, entry.txwi); |
17f1de56 FF |
403 | } |
404 | ||
17f1de56 | 405 | if (!flush && q->tail == last) |
2e420b88 | 406 | last = Q_READ(q, dma_idx); |
5a95ca41 | 407 | } |
9716ef04 | 408 | spin_unlock_bh(&q->cleanup_lock); |
5a95ca41 | 409 | |
8f6c4f7b | 410 | if (flush) { |
0b51f186 | 411 | spin_lock_bh(&q->lock); |
17f1de56 | 412 | mt76_dma_sync_idx(dev, q); |
8f6c4f7b | 413 | mt76_dma_kick_queue(dev, q); |
0b51f186 | 414 | spin_unlock_bh(&q->lock); |
8f6c4f7b | 415 | } |
17f1de56 | 416 | |
26e40d4c FF |
417 | if (!q->queued) |
418 | wake_up(&dev->tx_wait); | |
17f1de56 FF |
419 | } |
420 | ||
421 | static void * | |
422 | mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, | |
cd372b8c | 423 | int *len, u32 *info, bool *more, bool *drop) |
17f1de56 FF |
424 | { |
425 | struct mt76_queue_entry *e = &q->entry[idx]; | |
426 | struct mt76_desc *desc = &q->desc[idx]; | |
950d0abb BJ |
427 | u32 ctrl, desc_info, buf1; |
428 | void *buf = e->buf; | |
429 | ||
430 | if (mt76_queue_is_wed_rro_ind(q)) | |
431 | goto done; | |
17f1de56 | 432 | |
950d0abb | 433 | ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); |
17f1de56 | 434 | if (len) { |
cd372b8c LB |
435 | *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); |
436 | *more = !(ctrl & MT_DMA_CTL_LAST_SEC0); | |
17f1de56 FF |
437 | } |
438 | ||
950d0abb | 439 | desc_info = le32_to_cpu(desc->info); |
17f1de56 | 440 | if (info) |
950d0abb BJ |
441 | *info = desc_info; |
442 | ||
443 | buf1 = le32_to_cpu(desc->buf1); | |
444 | mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info); | |
17f1de56 | 445 | |
58bcd4ed | 446 | if (mt76_queue_is_wed_rx(q)) { |
e4d2b8bc | 447 | u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1); |
cd372b8c LB |
448 | struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token); |
449 | ||
450 | if (!t) | |
451 | return NULL; | |
452 | ||
2f5c3c77 LB |
453 | dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, |
454 | SKB_WITH_OVERHEAD(q->buf_size), | |
455 | page_pool_get_dma_dir(q->page_pool)); | |
cd372b8c LB |
456 | |
457 | buf = t->ptr; | |
458 | t->dma_addr = 0; | |
459 | t->ptr = NULL; | |
460 | ||
461 | mt76_put_rxwi(dev, t); | |
950d0abb | 462 | if (drop) |
e4d2b8bc | 463 | *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP); |
cd372b8c | 464 | } else { |
2f5c3c77 LB |
465 | dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0], |
466 | SKB_WITH_OVERHEAD(q->buf_size), | |
467 | page_pool_get_dma_dir(q->page_pool)); | |
cd372b8c | 468 | } |
17f1de56 | 469 | |
950d0abb BJ |
470 | done: |
471 | e->buf = NULL; | |
17f1de56 FF |
472 | return buf; |
473 | } | |
474 | ||
475 | static void * | |
476 | mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, | |
cd372b8c | 477 | int *len, u32 *info, bool *more, bool *drop) |
17f1de56 FF |
478 | { |
479 | int idx = q->tail; | |
480 | ||
481 | *more = false; | |
482 | if (!q->queued) | |
483 | return NULL; | |
484 | ||
950d0abb | 485 | if (mt76_queue_is_wed_rro_data(q)) |
17f1de56 FF |
486 | return NULL; |
487 | ||
950d0abb BJ |
488 | if (!mt76_queue_is_wed_rro_ind(q)) { |
489 | if (flush) | |
490 | q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); | |
491 | else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) | |
492 | return NULL; | |
493 | } | |
494 | ||
17f1de56 FF |
495 | q->tail = (q->tail + 1) % q->ndesc; |
496 | q->queued--; | |
497 | ||
cd372b8c | 498 | return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); |
17f1de56 FF |
499 | } |
500 | ||
5ed31128 | 501 | static int |
d95093a1 | 502 | mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, |
5ed31128 LB |
503 | struct sk_buff *skb, u32 tx_info) |
504 | { | |
b4403cee | 505 | struct mt76_queue_buf buf = {}; |
5ed31128 LB |
506 | dma_addr_t addr; |
507 | ||
1e64fdd4 BJ |
508 | if (test_bit(MT76_MCU_RESET, &dev->phy.state)) |
509 | goto error; | |
510 | ||
93eaec76 FF |
511 | if (q->queued + 1 >= q->ndesc - 1) |
512 | goto error; | |
513 | ||
d1ddc536 | 514 | addr = dma_map_single(dev->dma_dev, skb->data, skb->len, |
5ed31128 | 515 | DMA_TO_DEVICE); |
d1ddc536 | 516 | if (unlikely(dma_mapping_error(dev->dma_dev, addr))) |
93eaec76 | 517 | goto error; |
5ed31128 LB |
518 | |
519 | buf.addr = addr; | |
520 | buf.len = skb->len; | |
521 | ||
522 | spin_lock_bh(&q->lock); | |
523 | mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); | |
524 | mt76_dma_kick_queue(dev, q); | |
525 | spin_unlock_bh(&q->lock); | |
526 | ||
527 | return 0; | |
93eaec76 FF |
528 | |
529 | error: | |
530 | dev_kfree_skb(skb); | |
531 | return -ENOMEM; | |
5ed31128 LB |
532 | } |
533 | ||
eb9ca7ec | 534 | static int |
5d581c33 | 535 | mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, |
d08295f5 FF |
536 | enum mt76_txq_id qid, struct sk_buff *skb, |
537 | struct mt76_wcid *wcid, struct ieee80211_sta *sta) | |
fcdd99ce | 538 | { |
94e4f579 FF |
539 | struct ieee80211_tx_status status = { |
540 | .sta = sta, | |
541 | }; | |
cfaae9e6 LB |
542 | struct mt76_tx_info tx_info = { |
543 | .skb = skb, | |
544 | }; | |
5d581c33 | 545 | struct mt76_dev *dev = phy->dev; |
e394b575 | 546 | struct ieee80211_hw *hw; |
b5903c47 | 547 | int len, n = 0, ret = -ENOMEM; |
fcdd99ce | 548 | struct mt76_txwi_cache *t; |
fcdd99ce LB |
549 | struct sk_buff *iter; |
550 | dma_addr_t addr; | |
f3950a41 | 551 | u8 *txwi; |
fcdd99ce | 552 | |
5d581c33 | 553 | if (test_bit(MT76_RESET, &phy->state)) |
1e64fdd4 BJ |
554 | goto free_skb; |
555 | ||
fcdd99ce | 556 | t = mt76_get_txwi(dev); |
94e4f579 FF |
557 | if (!t) |
558 | goto free_skb; | |
559 | ||
f3950a41 | 560 | txwi = mt76_get_txwi_ptr(dev, t); |
fcdd99ce | 561 | |
88046b2c | 562 | skb->prev = skb->next = NULL; |
9ec0b821 | 563 | if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS) |
66105538 LB |
564 | mt76_insert_hdr_pad(skb); |
565 | ||
eb071ba7 | 566 | len = skb_headlen(skb); |
d1ddc536 FF |
567 | addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE); |
568 | if (unlikely(dma_mapping_error(dev->dma_dev, addr))) | |
fcdd99ce | 569 | goto free; |
fcdd99ce | 570 | |
b5903c47 LB |
571 | tx_info.buf[n].addr = t->dma_addr; |
572 | tx_info.buf[n++].len = dev->drv->txwi_size; | |
573 | tx_info.buf[n].addr = addr; | |
574 | tx_info.buf[n++].len = len; | |
fcdd99ce LB |
575 | |
576 | skb_walk_frags(skb, iter) { | |
b5903c47 | 577 | if (n == ARRAY_SIZE(tx_info.buf)) |
fcdd99ce LB |
578 | goto unmap; |
579 | ||
d1ddc536 | 580 | addr = dma_map_single(dev->dma_dev, iter->data, iter->len, |
fcdd99ce | 581 | DMA_TO_DEVICE); |
d1ddc536 | 582 | if (unlikely(dma_mapping_error(dev->dma_dev, addr))) |
fcdd99ce LB |
583 | goto unmap; |
584 | ||
b5903c47 LB |
585 | tx_info.buf[n].addr = addr; |
586 | tx_info.buf[n++].len = iter->len; | |
fcdd99ce | 587 | } |
b5903c47 | 588 | tx_info.nbuf = n; |
fcdd99ce | 589 | |
ae064fc0 FF |
590 | if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { |
591 | ret = -ENOMEM; | |
592 | goto unmap; | |
593 | } | |
594 | ||
d1ddc536 | 595 | dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, |
eb071ba7 | 596 | DMA_TO_DEVICE); |
d08295f5 | 597 | ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info); |
d1ddc536 | 598 | dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, |
eb071ba7 LB |
599 | DMA_TO_DEVICE); |
600 | if (ret < 0) | |
fcdd99ce LB |
601 | goto unmap; |
602 | ||
b5903c47 | 603 | return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, |
cfaae9e6 | 604 | tx_info.info, tx_info.skb, t); |
fcdd99ce LB |
605 | |
606 | unmap: | |
fcdd99ce | 607 | for (n--; n > 0; n--) |
d1ddc536 | 608 | dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr, |
b5903c47 | 609 | tx_info.buf[n].len, DMA_TO_DEVICE); |
fcdd99ce LB |
610 | |
611 | free: | |
f0efa862 FF |
612 | #ifdef CONFIG_NL80211_TESTMODE |
613 | /* fix tx_done accounting on queue overflow */ | |
c918c74d SC |
614 | if (mt76_is_testmode_skb(dev, skb, &hw)) { |
615 | struct mt76_phy *phy = hw->priv; | |
616 | ||
617 | if (tx_info.skb == phy->test.tx_skb) | |
618 | phy->test.tx_done--; | |
619 | } | |
f0efa862 FF |
620 | #endif |
621 | ||
fcdd99ce | 622 | mt76_put_txwi(dev, t); |
94e4f579 FF |
623 | |
624 | free_skb: | |
625 | status.skb = tx_info.skb; | |
626 | hw = mt76_tx_status_get_hw(dev, tx_info.skb); | |
5b8ccdfb | 627 | spin_lock_bh(&dev->rx_lock); |
94e4f579 | 628 | ieee80211_tx_status_ext(hw, &status); |
5b8ccdfb | 629 | spin_unlock_bh(&dev->rx_lock); |
94e4f579 | 630 | |
fcdd99ce LB |
631 | return ret; |
632 | } | |
fcdd99ce | 633 | |
8a7386e7 LB |
634 | int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, |
635 | bool allow_direct) | |
17f1de56 | 636 | { |
17f1de56 | 637 | int len = SKB_WITH_OVERHEAD(q->buf_size); |
2f5c3c77 | 638 | int frames = 0; |
17f1de56 | 639 | |
f9b627f1 BJ |
640 | if (!q->ndesc) |
641 | return 0; | |
642 | ||
17f1de56 FF |
643 | spin_lock_bh(&q->lock); |
644 | ||
645 | while (q->queued < q->ndesc - 1) { | |
950d0abb | 646 | struct mt76_queue_buf qbuf = {}; |
2f5c3c77 | 647 | enum dma_data_direction dir; |
2f5c3c77 LB |
648 | dma_addr_t addr; |
649 | int offset; | |
950d0abb BJ |
650 | void *buf = NULL; |
651 | ||
652 | if (mt76_queue_is_wed_rro_ind(q)) | |
653 | goto done; | |
17f1de56 | 654 | |
2f5c3c77 | 655 | buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); |
17f1de56 FF |
656 | if (!buf) |
657 | break; | |
658 | ||
2f5c3c77 LB |
659 | addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset; |
660 | dir = page_pool_get_dma_dir(q->page_pool); | |
661 | dma_sync_single_for_device(dev->dma_dev, addr, len, dir); | |
17f1de56 | 662 | |
2f5c3c77 | 663 | qbuf.addr = addr + q->buf_offset; |
950d0abb | 664 | done: |
2f5c3c77 | 665 | qbuf.len = len - q->buf_offset; |
577298ec | 666 | qbuf.skip_unmap = false; |
953519b3 | 667 | if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { |
2f5c3c77 | 668 | mt76_put_page_pool_buf(buf, allow_direct); |
96f134dc LB |
669 | break; |
670 | } | |
17f1de56 FF |
671 | frames++; |
672 | } | |
673 | ||
950d0abb | 674 | if (frames || mt76_queue_is_wed_rx(q)) |
17f1de56 FF |
675 | mt76_dma_kick_queue(dev, q); |
676 | ||
677 | spin_unlock_bh(&q->lock); | |
678 | ||
679 | return frames; | |
680 | } | |
681 | ||
f68d6762 FF |
682 | static int |
683 | mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, | |
684 | int idx, int n_desc, int bufsize, | |
685 | u32 ring_base) | |
686 | { | |
687 | int ret, size; | |
688 | ||
689 | spin_lock_init(&q->lock); | |
690 | spin_lock_init(&q->cleanup_lock); | |
691 | ||
692 | q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; | |
693 | q->ndesc = n_desc; | |
694 | q->buf_size = bufsize; | |
695 | q->hw_idx = idx; | |
696 | ||
950d0abb BJ |
697 | size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc) |
698 | : sizeof(struct mt76_desc); | |
699 | q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, | |
700 | &q->desc_dma, GFP_KERNEL); | |
f68d6762 FF |
701 | if (!q->desc) |
702 | return -ENOMEM; | |
703 | ||
950d0abb BJ |
704 | if (mt76_queue_is_wed_rro_ind(q)) { |
705 | struct mt76_wed_rro_desc *rro_desc; | |
706 | int i; | |
707 | ||
708 | rro_desc = (struct mt76_wed_rro_desc *)q->desc; | |
709 | for (i = 0; i < q->ndesc; i++) { | |
710 | struct mt76_wed_rro_ind *cmd; | |
711 | ||
712 | cmd = (struct mt76_wed_rro_ind *)&rro_desc[i]; | |
713 | cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1; | |
714 | } | |
715 | } | |
716 | ||
f68d6762 FF |
717 | size = q->ndesc * sizeof(*q->entry); |
718 | q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); | |
719 | if (!q->entry) | |
720 | return -ENOMEM; | |
721 | ||
2f5c3c77 LB |
722 | ret = mt76_create_page_pool(dev, q); |
723 | if (ret) | |
724 | return ret; | |
725 | ||
8a7386e7 | 726 | ret = mt76_wed_dma_setup(dev, q, false); |
f68d6762 FF |
727 | if (ret) |
728 | return ret; | |
729 | ||
950d0abb BJ |
730 | if (mtk_wed_device_active(&dev->mmio.wed)) { |
731 | if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || | |
732 | mt76_queue_is_wed_tx_free(q)) | |
733 | return 0; | |
734 | } | |
735 | ||
736 | mt76_dma_queue_reset(dev, q); | |
f68d6762 FF |
737 | |
738 | return 0; | |
739 | } | |
740 | ||
17f1de56 FF |
741 | static void |
742 | mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) | |
743 | { | |
744 | void *buf; | |
745 | bool more; | |
746 | ||
f9b627f1 BJ |
747 | if (!q->ndesc) |
748 | return; | |
749 | ||
17f1de56 | 750 | do { |
ef444ad0 | 751 | spin_lock_bh(&q->lock); |
cd372b8c | 752 | buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); |
ef444ad0 SW |
753 | spin_unlock_bh(&q->lock); |
754 | ||
17f1de56 FF |
755 | if (!buf) |
756 | break; | |
757 | ||
950d0abb BJ |
758 | if (!mt76_queue_is_wed_rro(q)) |
759 | mt76_put_page_pool_buf(buf, false); | |
17f1de56 | 760 | } while (1); |
1b88b47e | 761 | |
ef444ad0 | 762 | spin_lock_bh(&q->lock); |
1b88b47e LB |
763 | if (q->rx_head) { |
764 | dev_kfree_skb(q->rx_head); | |
765 | q->rx_head = NULL; | |
766 | } | |
767 | ||
17f1de56 FF |
768 | spin_unlock_bh(&q->lock); |
769 | } | |
770 | ||
771 | static void | |
772 | mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) | |
773 | { | |
774 | struct mt76_queue *q = &dev->q_rx[qid]; | |
17f1de56 | 775 | |
f9b627f1 BJ |
776 | if (!q->ndesc) |
777 | return; | |
778 | ||
950d0abb BJ |
779 | if (!mt76_queue_is_wed_rro_ind(q)) { |
780 | int i; | |
781 | ||
782 | for (i = 0; i < q->ndesc; i++) | |
783 | q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); | |
784 | } | |
17f1de56 FF |
785 | |
786 | mt76_dma_rx_cleanup(dev, q); | |
3bc4b811 SC |
787 | |
788 | /* reset WED rx queues */ | |
8a7386e7 | 789 | mt76_wed_dma_setup(dev, q, true); |
00d2ced0 LB |
790 | |
791 | if (mt76_queue_is_wed_tx_free(q)) | |
792 | return; | |
793 | ||
794 | if (mtk_wed_device_active(&dev->mmio.wed) && | |
795 | mt76_queue_is_wed_rro(q)) | |
796 | return; | |
797 | ||
798 | mt76_dma_sync_idx(dev, q); | |
799 | mt76_dma_rx_fill(dev, q, false); | |
17f1de56 FF |
800 | } |
801 | ||
802 | static void | |
803 | mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, | |
cd607f2c | 804 | int len, bool more, u32 info, bool allow_direct) |
17f1de56 | 805 | { |
17f1de56 | 806 | struct sk_buff *skb = q->rx_head; |
b102f0c5 | 807 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
d0bd52c5 | 808 | int nr_frags = shinfo->nr_frags; |
17f1de56 | 809 | |
d0bd52c5 | 810 | if (nr_frags < ARRAY_SIZE(shinfo->frags)) { |
93a1d479 LB |
811 | struct page *page = virt_to_head_page(data); |
812 | int offset = data - page_address(page) + q->buf_offset; | |
813 | ||
d0bd52c5 | 814 | skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); |
93a1d479 | 815 | } else { |
cd607f2c | 816 | mt76_put_page_pool_buf(data, allow_direct); |
b102f0c5 | 817 | } |
17f1de56 FF |
818 | |
819 | if (more) | |
820 | return; | |
821 | ||
822 | q->rx_head = NULL; | |
d0bd52c5 | 823 | if (nr_frags < ARRAY_SIZE(shinfo->frags)) |
c3137942 | 824 | dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); |
d0bd52c5 LB |
825 | else |
826 | dev_kfree_skb(skb); | |
17f1de56 FF |
827 | } |
828 | ||
829 | static int | |
830 | mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) | |
831 | { | |
f68d6762 | 832 | int len, data_len, done = 0, dma_idx; |
17f1de56 FF |
833 | struct sk_buff *skb; |
834 | unsigned char *data; | |
f68d6762 | 835 | bool check_ddone = false; |
cd607f2c | 836 | bool allow_direct = !mt76_queue_is_wed_rx(q); |
17f1de56 FF |
837 | bool more; |
838 | ||
f68d6762 | 839 | if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) && |
132d74d3 | 840 | mt76_queue_is_wed_tx_free(q)) { |
2e420b88 | 841 | dma_idx = Q_READ(q, dma_idx); |
f68d6762 FF |
842 | check_ddone = true; |
843 | } | |
844 | ||
17f1de56 | 845 | while (done < budget) { |
cd372b8c | 846 | bool drop = false; |
17f1de56 FF |
847 | u32 info; |
848 | ||
f68d6762 FF |
849 | if (check_ddone) { |
850 | if (q->tail == dma_idx) | |
2e420b88 | 851 | dma_idx = Q_READ(q, dma_idx); |
f68d6762 FF |
852 | |
853 | if (q->tail == dma_idx) | |
854 | break; | |
855 | } | |
856 | ||
cd372b8c LB |
857 | data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, |
858 | &drop); | |
17f1de56 FF |
859 | if (!data) |
860 | break; | |
861 | ||
cd372b8c LB |
862 | if (drop) |
863 | goto free_frag; | |
864 | ||
87e86f90 LB |
865 | if (q->rx_head) |
866 | data_len = q->buf_size; | |
867 | else | |
868 | data_len = SKB_WITH_OVERHEAD(q->buf_size); | |
869 | ||
870 | if (data_len < len + q->buf_offset) { | |
9fe31054 FF |
871 | dev_kfree_skb(q->rx_head); |
872 | q->rx_head = NULL; | |
fbe50d9a | 873 | goto free_frag; |
9fe31054 FF |
874 | } |
875 | ||
17f1de56 | 876 | if (q->rx_head) { |
cd607f2c FF |
877 | mt76_add_fragment(dev, q, data, len, more, info, |
878 | allow_direct); | |
17f1de56 FF |
879 | continue; |
880 | } | |
881 | ||
fbe50d9a FF |
882 | if (!more && dev->drv->rx_check && |
883 | !(dev->drv->rx_check(dev, data, len))) | |
884 | goto free_frag; | |
885 | ||
f4d63a87 | 886 | skb = napi_build_skb(data, q->buf_size); |
fbe50d9a FF |
887 | if (!skb) |
888 | goto free_frag; | |
889 | ||
17f1de56 | 890 | skb_reserve(skb, q->buf_offset); |
2f5c3c77 | 891 | skb_mark_for_recycle(skb); |
17f1de56 | 892 | |
443dc85a | 893 | *(u32 *)skb->cb = info; |
17f1de56 FF |
894 | |
895 | __skb_put(skb, len); | |
896 | done++; | |
897 | ||
898 | if (more) { | |
899 | q->rx_head = skb; | |
900 | continue; | |
901 | } | |
902 | ||
c3137942 | 903 | dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); |
fbe50d9a FF |
904 | continue; |
905 | ||
906 | free_frag: | |
cd607f2c | 907 | mt76_put_page_pool_buf(data, allow_direct); |
17f1de56 FF |
908 | } |
909 | ||
2f5c3c77 | 910 | mt76_dma_rx_fill(dev, q, true); |
17f1de56 FF |
911 | return done; |
912 | } | |
913 | ||
cb8ed33d | 914 | int mt76_dma_rx_poll(struct napi_struct *napi, int budget) |
17f1de56 FF |
915 | { |
916 | struct mt76_dev *dev; | |
2b4307f5 | 917 | int qid, done = 0, cur; |
17f1de56 | 918 | |
08f116c9 | 919 | dev = mt76_priv(napi->dev); |
17f1de56 FF |
920 | qid = napi - dev->napi; |
921 | ||
9c68a57b FF |
922 | rcu_read_lock(); |
923 | ||
2b4307f5 FF |
924 | do { |
925 | cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); | |
81e850ef | 926 | mt76_rx_poll_complete(dev, qid, napi); |
2b4307f5 FF |
927 | done += cur; |
928 | } while (cur && done < budget); | |
929 | ||
9c68a57b FF |
930 | rcu_read_unlock(); |
931 | ||
3e0705ac | 932 | if (done < budget && napi_complete(napi)) |
17f1de56 | 933 | dev->drv->rx_poll_complete(dev, qid); |
17f1de56 FF |
934 | |
935 | return done; | |
936 | } | |
cb8ed33d | 937 | EXPORT_SYMBOL_GPL(mt76_dma_rx_poll); |
17f1de56 FF |
938 | |
939 | static int | |
cb8ed33d LB |
940 | mt76_dma_init(struct mt76_dev *dev, |
941 | int (*poll)(struct napi_struct *napi, int budget)) | |
17f1de56 | 942 | { |
08f116c9 | 943 | struct mt76_dev **priv; |
17f1de56 FF |
944 | int i; |
945 | ||
08f116c9 BL |
946 | dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); |
947 | if (!dev->napi_dev) | |
948 | return -ENOMEM; | |
949 | ||
950 | /* napi_dev private data points to mt76_dev parent, so, mt76_dev | |
951 | * can be retrieved given napi_dev | |
952 | */ | |
953 | priv = netdev_priv(dev->napi_dev); | |
954 | *priv = dev; | |
955 | ||
956 | dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); | |
957 | if (!dev->tx_napi_dev) { | |
958 | free_netdev(dev->napi_dev); | |
959 | return -ENOMEM; | |
960 | } | |
961 | priv = netdev_priv(dev->tx_napi_dev); | |
962 | *priv = dev; | |
963 | ||
964 | snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s", | |
aa40528a | 965 | wiphy_name(dev->hw->wiphy)); |
08f116c9 | 966 | dev->napi_dev->threaded = 1; |
36b7fce1 LB |
967 | init_completion(&dev->mmio.wed_reset); |
968 | init_completion(&dev->mmio.wed_reset_complete); | |
17f1de56 | 969 | |
f473b42a | 970 | mt76_for_each_q_rx(dev, i) { |
08f116c9 | 971 | netif_napi_add(dev->napi_dev, &dev->napi[i], poll); |
2f5c3c77 | 972 | mt76_dma_rx_fill(dev, &dev->q_rx[i], false); |
17f1de56 FF |
973 | napi_enable(&dev->napi[i]); |
974 | } | |
975 | ||
976 | return 0; | |
977 | } | |
978 | ||
979 | static const struct mt76_queue_ops mt76_dma_ops = { | |
980 | .init = mt76_dma_init, | |
981 | .alloc = mt76_dma_alloc_queue, | |
3990465d | 982 | .reset_q = mt76_dma_queue_reset, |
5ed31128 | 983 | .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw, |
469d4818 | 984 | .tx_queue_skb = mt76_dma_tx_queue_skb, |
17f1de56 | 985 | .tx_cleanup = mt76_dma_tx_cleanup, |
c001df97 | 986 | .rx_cleanup = mt76_dma_rx_cleanup, |
17f1de56 FF |
987 | .rx_reset = mt76_dma_rx_reset, |
988 | .kick = mt76_dma_kick_queue, | |
989 | }; | |
990 | ||
bceac167 | 991 | void mt76_dma_attach(struct mt76_dev *dev) |
17f1de56 FF |
992 | { |
993 | dev->queue_ops = &mt76_dma_ops; | |
17f1de56 FF |
994 | } |
995 | EXPORT_SYMBOL_GPL(mt76_dma_attach); | |
996 | ||
997 | void mt76_dma_cleanup(struct mt76_dev *dev) | |
998 | { | |
999 | int i; | |
1000 | ||
781eef5b | 1001 | mt76_worker_disable(&dev->tx_worker); |
4875e346 | 1002 | netif_napi_del(&dev->tx_napi); |
e637763b | 1003 | |
dc44c45c LB |
1004 | for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { |
1005 | struct mt76_phy *phy = dev->phys[i]; | |
1006 | int j; | |
1007 | ||
1008 | if (!phy) | |
1009 | continue; | |
1010 | ||
1011 | for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++) | |
1012 | mt76_dma_tx_cleanup(dev, phy->q_tx[j], true); | |
91990519 | 1013 | } |
17f1de56 | 1014 | |
e637763b LB |
1015 | for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++) |
1016 | mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true); | |
1017 | ||
f473b42a | 1018 | mt76_for_each_q_rx(dev, i) { |
52546e27 LB |
1019 | struct mt76_queue *q = &dev->q_rx[i]; |
1020 | ||
950d0abb BJ |
1021 | if (mtk_wed_device_active(&dev->mmio.wed) && |
1022 | mt76_queue_is_wed_rro(q)) | |
1023 | continue; | |
1024 | ||
17f1de56 | 1025 | netif_napi_del(&dev->napi[i]); |
3f7dda36 | 1026 | mt76_dma_rx_cleanup(dev, q); |
2f5c3c77 LB |
1027 | |
1028 | page_pool_destroy(q->page_pool); | |
17f1de56 | 1029 | } |
dd57a95c | 1030 | |
f68d6762 FF |
1031 | if (mtk_wed_device_active(&dev->mmio.wed)) |
1032 | mtk_wed_device_detach(&dev->mmio.wed); | |
83eafc92 SC |
1033 | |
1034 | if (mtk_wed_device_active(&dev->mmio.wed_hif2)) | |
1035 | mtk_wed_device_detach(&dev->mmio.wed_hif2); | |
1036 | ||
1037 | mt76_free_pending_txwi(dev); | |
1038 | mt76_free_pending_rxwi(dev); | |
08f116c9 BL |
1039 | free_netdev(dev->napi_dev); |
1040 | free_netdev(dev->tx_napi_dev); | |
17f1de56 FF |
1041 | } |
1042 | EXPORT_SYMBOL_GPL(mt76_dma_cleanup); |