// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({ \
        u32 _offset = offsetof(struct mt76_queue_regs, _field); \
        u32 _val; \
        if ((_q)->flags & MT_QFLAG_WED) \
                _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
                                               ((_q)->wed_regs + \
                                                _offset)); \
        else \
                _val = readl(&(_q)->regs->_field); \
        _val; \
})

#define Q_WRITE(_dev, _q, _field, _val) do { \
        u32 _offset = offsetof(struct mt76_queue_regs, _field); \
        if ((_q)->flags & MT_QFLAG_WED) \
                mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
                                         ((_q)->wed_regs + _offset), \
                                         _val); \
        else \
                writel(_val, &(_q)->regs->_field); \
} while (0)

#else

#define Q_READ(_dev, _q, _field) readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val) writel(_val, &(_q)->regs->_field)

#endif

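/*
 * A txwi buffer and its tracking structure are allocated together: the
 * driver-specific txwi area comes first, followed by the struct
 * mt76_txwi_cache, and the txwi area is DMA-mapped once for the
 * lifetime of the entry.
 */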
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = kzalloc(size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
        if (!t)
                return NULL;

        t->ptr = NULL;
        return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock(&dev->lock);

        return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->wed_lock);
        if (!list_empty(&dev->rxwi_cache)) {
                t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->wed_lock);

        return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

        if (t)
                return t;

        return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->wed_lock);
        list_add(&t->list, &dev->rxwi_cache);
        spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        local_bh_disable();
        while ((t = __mt76_get_txwi(dev)) != NULL) {
                dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
                kfree(mt76_get_txwi_ptr(dev, t));
        }
        local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        local_bh_disable();
        while ((t = __mt76_get_rxwi(dev)) != NULL) {
                if (t->ptr)
                        mt76_put_page_pool_buf(t->ptr, false);
                kfree(t);
        }
        local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

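/*
 * Program the descriptor ring base and size into the hardware and
 * resynchronize the software head/tail with the hardware DMA index.
 */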
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        Q_WRITE(dev, q, desc_base, q->desc_dma);
        Q_WRITE(dev, q, ring_size, q->ndesc);
        q->head = Q_READ(dev, q, dma_idx);
        q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
        int i;

        if (!q || !q->ndesc)
                return;

        /* clear descriptors */
        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        Q_WRITE(dev, q, cpu_idx, 0);
        Q_WRITE(dev, q, dma_idx, 0);
        mt76_dma_sync_idx(dev, q);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
                    struct mt76_queue_buf *buf, void *data)
{
        struct mt76_desc *desc = &q->desc[q->head];
        struct mt76_queue_entry *entry = &q->entry[q->head];
        struct mt76_txwi_cache *txwi = NULL;
        u32 buf1 = 0, ctrl;
        int idx = q->head;
        int rx_token;

        ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

        if (mt76_queue_is_wed_rx(q)) {
                txwi = mt76_get_rxwi(dev);
                if (!txwi)
                        return -ENOMEM;

                rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
                if (rx_token < 0) {
                        mt76_put_rxwi(dev, txwi);
                        return -ENOMEM;
                }

                buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
                ctrl |= MT_DMA_CTL_TO_HOST;
        }

        WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
        WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
        WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
        WRITE_ONCE(desc->info, 0);

        entry->dma_addr[0] = buf->addr;
        entry->dma_len[0] = buf->len;
        entry->txwi = txwi;
        entry->buf = data;
        entry->wcid = 0xffff;
        entry->skip_buf1 = true;
        q->head = (q->head + 1) % q->ndesc;
        q->queued++;

        return idx;
}

static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
{
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;
        int i, idx = -1;
        u32 ctrl, next;

        if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
                q->entry[q->head].skip_buf0 = true;
        }

        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;

                idx = q->head;
                next = (q->head + 1) % q->ndesc;

                desc = &q->desc[idx];
                entry = &q->entry[idx];

                if (buf[0].skip_unmap)
                        entry->skip_buf0 = true;
                entry->skip_buf1 = i == nbufs - 1;

                entry->dma_addr[0] = buf[0].addr;
                entry->dma_len[0] = buf[0].len;

                ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
                if (i < nbufs - 1) {
                        entry->dma_addr[1] = buf[1].addr;
                        entry->dma_len[1] = buf[1].len;
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
                        if (buf[1].skip_unmap)
                                entry->skip_buf1 = true;
                }

                if (i == nbufs - 1)
                        ctrl |= MT_DMA_CTL_LAST_SEC0;
                else if (i == nbufs - 2)
                        ctrl |= MT_DMA_CTL_LAST_SEC1;

                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

                q->head = next;
                q->queued++;
        }

        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
        q->entry[idx].wcid = 0xffff;

        return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];

        if (!e->skip_buf0)
                dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
                                 DMA_TO_DEVICE);

        if (!e->skip_buf1)
                dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
                                 DMA_TO_DEVICE);

        if (e->txwi == DMA_DUMMY_DATA)
                e->txwi = NULL;

        if (e->skb == DMA_DUMMY_DATA)
                e->skb = NULL;

        *prev_e = *e;
        memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        wmb();
        Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
        struct mt76_queue_entry entry;
        int last;

        if (!q || !q->ndesc)
                return;

        spin_lock_bh(&q->cleanup_lock);
        if (flush)
                last = -1;
        else
                last = Q_READ(dev, q, dma_idx);

        while (q->queued > 0 && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                mt76_queue_tx_complete(dev, q, &entry);

                if (entry.txwi) {
                        if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
                                mt76_put_txwi(dev, entry.txwi);
                }

                if (!flush && q->tail == last)
                        last = Q_READ(dev, q, dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);

        if (flush) {
                spin_lock_bh(&q->lock);
                mt76_dma_sync_idx(dev, q);
                mt76_dma_kick_queue(dev, q);
                spin_unlock_bh(&q->lock);
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more, bool *drop)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        void *buf;

        if (len) {
                u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
                *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
                *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
        }

        if (info)
                *info = le32_to_cpu(desc->info);

        if (mt76_queue_is_wed_rx(q)) {
                u32 buf1 = le32_to_cpu(desc->buf1);
                u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
                struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

                if (!t)
                        return NULL;

                dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
                                        SKB_WITH_OVERHEAD(q->buf_size),
                                        page_pool_get_dma_dir(q->page_pool));

                buf = t->ptr;
                t->dma_addr = 0;
                t->ptr = NULL;

                mt76_put_rxwi(dev, t);

                if (drop) {
                        u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

                        *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
                                           MT_DMA_CTL_DROP));

                        *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
                }
        } else {
                buf = e->buf;
                e->buf = NULL;
                dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
                                        SKB_WITH_OVERHEAD(q->buf_size),
                                        page_pool_get_dma_dir(q->page_pool));
        }

        return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                 int *len, u32 *info, bool *more, bool *drop)
{
        int idx = q->tail;

        *more = false;
        if (!q->queued)
                return NULL;

        if (flush)
                q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
                return NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                          struct sk_buff *skb, u32 tx_info)
{
        struct mt76_queue_buf buf = {};
        dma_addr_t addr;

        if (test_bit(MT76_MCU_RESET, &dev->phy.state))
                goto error;

        if (q->queued + 1 >= q->ndesc - 1)
                goto error;

        addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
                goto error;

        buf.addr = addr;
        buf.len = skb->len;

        spin_lock_bh(&q->lock);
        mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
        mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);

        return 0;

error:
        dev_kfree_skb(skb);
        return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      enum mt76_txq_id qid, struct sk_buff *skb,
                      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
        struct ieee80211_tx_status status = {
                .sta = sta,
        };
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        struct ieee80211_hw *hw;
        int len, n = 0, ret = -ENOMEM;
        struct mt76_txwi_cache *t;
        struct sk_buff *iter;
        dma_addr_t addr;
        u8 *txwi;

        if (test_bit(MT76_RESET, &dev->phy.state))
                goto free_skb;

        t = mt76_get_txwi(dev);
        if (!t)
                goto free_skb;

        txwi = mt76_get_txwi_ptr(dev, t);

        skb->prev = skb->next = NULL;
        if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
                mt76_insert_hdr_pad(skb);

        len = skb_headlen(skb);
        addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
                goto free;

        tx_info.buf[n].addr = t->dma_addr;
        tx_info.buf[n++].len = dev->drv->txwi_size;
        tx_info.buf[n].addr = addr;
        tx_info.buf[n++].len = len;

        skb_walk_frags(skb, iter) {
                if (n == ARRAY_SIZE(tx_info.buf))
                        goto unmap;

                addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
                        goto unmap;

                tx_info.buf[n].addr = addr;
                tx_info.buf[n++].len = iter->len;
        }
        tx_info.nbuf = n;

        if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
                ret = -ENOMEM;
                goto unmap;
        }

        dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                DMA_TO_DEVICE);
        ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
        dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
                                   DMA_TO_DEVICE);
        if (ret < 0)
                goto unmap;

        return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
                                tx_info.info, tx_info.skb, t);

unmap:
        for (n--; n > 0; n--)
                dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
                                 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
        /* fix tx_done accounting on queue overflow */
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
                struct mt76_phy *phy = hw->priv;

                if (tx_info.skb == phy->test.tx_skb)
                        phy->test.tx_done--;
        }
#endif

        mt76_put_txwi(dev, t);

free_skb:
        status.skb = tx_info.skb;
        hw = mt76_tx_status_get_hw(dev, tx_info.skb);
        spin_lock_bh(&dev->rx_lock);
        ieee80211_tx_status_ext(hw, &status);
        spin_unlock_bh(&dev->rx_lock);

        return ret;
}

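/*
 * Refill the rx ring with page-pool backed buffers. allow_direct is
 * forwarded to the page pool when a buffer has to be released and
 * should only be true in NAPI/softirq context, where direct recycling
 * is safe.
 */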
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
                 bool allow_direct)
{
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int frames = 0;

        if (!q->ndesc)
                return 0;

        spin_lock_bh(&q->lock);

        while (q->queued < q->ndesc - 1) {
                enum dma_data_direction dir;
                struct mt76_queue_buf qbuf;
                dma_addr_t addr;
                int offset;
                void *buf;

                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
                if (!buf)
                        break;

                addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
                dir = page_pool_get_dma_dir(q->page_pool);
                dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

                qbuf.addr = addr + q->buf_offset;
                qbuf.len = len - q->buf_offset;
                qbuf.skip_unmap = false;
                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
                        mt76_put_page_pool_buf(buf, allow_direct);
                        break;
                }
                frames++;
        }

        if (frames)
                mt76_dma_kick_queue(dev, q);

        spin_unlock_bh(&q->lock);

        return frames;
}

int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
        struct mtk_wed_device *wed = &dev->mmio.wed;
        int ret, type, ring;
        u8 flags;

        if (!q || !q->ndesc)
                return -EINVAL;

        flags = q->flags;
        if (!mtk_wed_device_active(wed))
                q->flags &= ~MT_QFLAG_WED;

        if (!(q->flags & MT_QFLAG_WED))
                return 0;

        type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
        ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

        switch (type) {
        case MT76_WED_Q_TX:
                ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
                if (!ret)
                        q->wed_regs = wed->tx_ring[ring].reg_base;
                break;
        case MT76_WED_Q_TXFREE:
                /* WED txfree queue needs ring to be initialized before setup */
                q->flags = 0;
                mt76_dma_queue_reset(dev, q);
                mt76_dma_rx_fill(dev, q, false);
                q->flags = flags;

                ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
                if (!ret)
                        q->wed_regs = wed->txfree_ring.reg_base;
                break;
        case MT76_WED_Q_RX:
                ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
                if (!ret)
                        q->wed_regs = wed->rx_ring[ring].reg_base;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
#else
        return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base)
{
        int ret, size;

        spin_lock_init(&q->lock);
        spin_lock_init(&q->cleanup_lock);

        q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        ret = mt76_create_page_pool(dev, q);
        if (ret)
                return ret;

        ret = mt76_dma_wed_setup(dev, q, false);
        if (ret)
                return ret;

        if (q->flags != MT_WED_Q_TXFREE)
                mt76_dma_queue_reset(dev, q);

        return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
        void *buf;
        bool more;

        if (!q->ndesc)
                return;

        spin_lock_bh(&q->lock);

        do {
                buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
                if (!buf)
                        break;

                mt76_put_page_pool_buf(buf, false);
        } while (1);

        if (q->rx_head) {
                dev_kfree_skb(q->rx_head);
                q->rx_head = NULL;
        }

        spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        int i;

        if (!q->ndesc)
                return;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        mt76_dma_rx_cleanup(dev, q);

        /* reset WED rx queues */
        mt76_dma_wed_setup(dev, q, true);
        if (q->flags != MT_WED_Q_TXFREE) {
                mt76_dma_sync_idx(dev, q);
                mt76_dma_rx_fill(dev, q, false);
        }
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more, u32 info)
{
        struct sk_buff *skb = q->rx_head;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;

        if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
                struct page *page = virt_to_head_page(data);
                int offset = data - page_address(page) + q->buf_offset;

                skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
        } else {
                mt76_put_page_pool_buf(data, true);
        }

        if (more)
                return;

        q->rx_head = NULL;
        if (nr_frags < ARRAY_SIZE(shinfo->frags))
                dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
        else
                dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
        int len, data_len, done = 0, dma_idx;
        struct sk_buff *skb;
        unsigned char *data;
        bool check_ddone = false;
        bool more;

        if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
            q->flags == MT_WED_Q_TXFREE) {
                dma_idx = Q_READ(dev, q, dma_idx);
                check_ddone = true;
        }

        while (done < budget) {
                bool drop = false;
                u32 info;

                if (check_ddone) {
                        if (q->tail == dma_idx)
                                dma_idx = Q_READ(dev, q, dma_idx);

                        if (q->tail == dma_idx)
                                break;
                }

                data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
                                        &drop);
                if (!data)
                        break;

                if (drop)
                        goto free_frag;

                if (q->rx_head)
                        data_len = q->buf_size;
                else
                        data_len = SKB_WITH_OVERHEAD(q->buf_size);

                if (data_len < len + q->buf_offset) {
                        dev_kfree_skb(q->rx_head);
                        q->rx_head = NULL;
                        goto free_frag;
                }

                if (q->rx_head) {
                        mt76_add_fragment(dev, q, data, len, more, info);
                        continue;
                }

                if (!more && dev->drv->rx_check &&
                    !(dev->drv->rx_check(dev, data, len)))
                        goto free_frag;

                skb = napi_build_skb(data, q->buf_size);
                if (!skb)
                        goto free_frag;

                skb_reserve(skb, q->buf_offset);
                skb_mark_for_recycle(skb);

                *(u32 *)skb->cb = info;

                __skb_put(skb, len);
                done++;

                if (more) {
                        q->rx_head = skb;
                        continue;
                }

                dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
                continue;

free_frag:
                mt76_put_page_pool_buf(data, true);
        }

        mt76_dma_rx_fill(dev, q, true);
        return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
        struct mt76_dev *dev;
        int qid, done = 0, cur;

        dev = container_of(napi->dev, struct mt76_dev, napi_dev);
        qid = napi - dev->napi;

        rcu_read_lock();

        do {
                cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
                mt76_rx_poll_complete(dev, qid, napi);
                done += cur;
        } while (cur && done < budget);

        rcu_read_unlock();

        if (done < budget && napi_complete(napi))
                dev->drv->rx_poll_complete(dev, qid);

        return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
              int (*poll)(struct napi_struct *napi, int budget))
{
        int i;

        init_dummy_netdev(&dev->napi_dev);
        init_dummy_netdev(&dev->tx_napi_dev);
        snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
                 wiphy_name(dev->hw->wiphy));
        dev->napi_dev.threaded = 1;
        init_completion(&dev->mmio.wed_reset);
        init_completion(&dev->mmio.wed_reset_complete);

        mt76_for_each_q_rx(dev, i) {
                netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
                mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
                napi_enable(&dev->napi[i]);
        }

        return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
        .init = mt76_dma_init,
        .alloc = mt76_dma_alloc_queue,
        .reset_q = mt76_dma_queue_reset,
        .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
        .tx_queue_skb = mt76_dma_tx_queue_skb,
        .tx_cleanup = mt76_dma_tx_cleanup,
        .rx_cleanup = mt76_dma_rx_cleanup,
        .rx_reset = mt76_dma_rx_reset,
        .kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
        dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
        int i;

        mt76_worker_disable(&dev->tx_worker);
        netif_napi_del(&dev->tx_napi);

        for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
                struct mt76_phy *phy = dev->phys[i];
                int j;

                if (!phy)
                        continue;

                for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
                        mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
        }

        for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
                mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

        mt76_for_each_q_rx(dev, i) {
                struct mt76_queue *q = &dev->q_rx[i];

                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, q);

                page_pool_destroy(q->page_pool);
        }

        mt76_free_pending_txwi(dev);
        mt76_free_pending_rxwi(dev);

        if (mtk_wed_device_active(&dev->mmio.wed))
                mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);