// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

8 | static struct mt76_txwi_cache * | |
9 | mt76_alloc_txwi(struct mt76_dev *dev) | |
10 | { | |
11 | struct mt76_txwi_cache *t; | |
12 | dma_addr_t addr; | |
f3950a41 | 13 | u8 *txwi; |
17f1de56 FF |
14 | int size; |
15 | ||
f3950a41 LB |
16 | size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); |
17 | txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC); | |
18 | if (!txwi) | |
17f1de56 FF |
19 | return NULL; |
20 | ||
f3950a41 | 21 | addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size, |
17f1de56 | 22 | DMA_TO_DEVICE); |
f3950a41 | 23 | t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); |
17f1de56 FF |
24 | t->dma_addr = addr; |
25 | ||
26 | return t; | |
27 | } | |
28 | ||
29 | static struct mt76_txwi_cache * | |
30 | __mt76_get_txwi(struct mt76_dev *dev) | |
31 | { | |
32 | struct mt76_txwi_cache *t = NULL; | |
33 | ||
34 | spin_lock_bh(&dev->lock); | |
35 | if (!list_empty(&dev->txwi_cache)) { | |
36 | t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, | |
37 | list); | |
38 | list_del(&t->list); | |
39 | } | |
40 | spin_unlock_bh(&dev->lock); | |
41 | ||
42 | return t; | |
43 | } | |
44 | ||
/* Get a txwi cache entry: reuse a recycled one when available,
 * otherwise allocate a fresh entry.  May return NULL on allocation
 * failure.
 */
struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	return t ? t : mt76_alloc_txwi(dev);
}
55 | ||
/* Return a txwi cache entry to the per-device free list.
 *
 * Counterpart of mt76_get_txwi().  Entries are recycled, never freed
 * here: the backing memory is devm-managed and its DMA mapping stays
 * alive until mt76_tx_free().  NULL is tolerated as a no-op.
 */
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);
17f1de56 FF |
67 | |
68 | void mt76_tx_free(struct mt76_dev *dev) | |
69 | { | |
70 | struct mt76_txwi_cache *t; | |
71 | ||
72 | while ((t = __mt76_get_txwi(dev)) != NULL) | |
f3950a41 | 73 | dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, |
17f1de56 FF |
74 | DMA_TO_DEVICE); |
75 | } | |
76 | ||
77 | static int | |
78 | mt76_txq_get_qid(struct ieee80211_txq *txq) | |
79 | { | |
80 | if (!txq->sta) | |
81 | return MT_TXQ_BE; | |
82 | ||
83 | return txq->ac; | |
84 | } | |
85 | ||
49f45fa1 FF |
86 | static void |
87 | mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) | |
88 | { | |
13381dcd | 89 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
49f45fa1 | 90 | |
5155938d FF |
91 | if (!ieee80211_is_data_qos(hdr->frame_control) || |
92 | !ieee80211_is_data_present(hdr->frame_control)) | |
49f45fa1 FF |
93 | return; |
94 | ||
95 | mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; | |
96 | } | |
97 | ||
79d1c94c FF |
/* Begin a tx-status update section: initialize the caller-provided
 * completion list and take the status list lock.  Must be paired with
 * mt76_tx_status_unlock(), which reports the frames collected in @list
 * to mac80211 after the lock is dropped.
 */
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
	__acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
107 | ||
108 | void | |
109 | mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) | |
110 | __releases(&dev->status_list.unlock) | |
111 | { | |
112 | struct sk_buff *skb; | |
113 | ||
114 | spin_unlock_bh(&dev->status_list.lock); | |
115 | __release(&dev->status_list.unlock); | |
116 | ||
117 | while ((skb = __skb_dequeue(list)) != NULL) | |
118 | ieee80211_tx_status(dev->hw, skb); | |
119 | } | |
120 | EXPORT_SYMBOL_GPL(mt76_tx_status_unlock); | |
121 | ||
88046b2c | 122 | static void |
79d1c94c FF |
123 | __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, |
124 | struct sk_buff_head *list) | |
88046b2c FF |
125 | { |
126 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | |
127 | struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); | |
128 | u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE; | |
129 | ||
130 | flags |= cb->flags; | |
131 | cb->flags = flags; | |
132 | ||
133 | if ((flags & done) != done) | |
134 | return; | |
135 | ||
136 | __skb_unlink(skb, &dev->status_list); | |
137 | ||
138 | /* Tx status can be unreliable. if it fails, mark the frame as ACKed */ | |
139 | if (flags & MT_TX_CB_TXS_FAILED) { | |
140 | ieee80211_tx_info_clear_status(info); | |
141 | info->status.rates[0].idx = -1; | |
142 | info->flags |= IEEE80211_TX_STAT_ACK; | |
143 | } | |
144 | ||
79d1c94c | 145 | __skb_queue_tail(list, skb); |
88046b2c FF |
146 | } |
147 | ||
/* Mark the tx status report as received for @skb.  The frame is only
 * released to @list once the DMA completion has been flagged as well
 * (see __mt76_tx_status_skb_done()).
 * Caller holds dev->status_list.lock via mt76_tx_status_lock().
 */
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
155 | ||
/* Register @skb for hardware tx-status tracking.
 *
 * Stores tracking state in the skb control block and queues the frame
 * on dev->status_list under its lock.
 *
 * Returns the packet id to program into the tx descriptor:
 *   MT_PACKET_ID_NO_ACK - no status possible (no wcid, or NO_ACK frame)
 *   MT_PACKET_ID_NO_SKB - status not requested; frame is not tracked
 *   other               - valid id; frame is now on the status list
 */
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	/* only track frames for which mac80211 asked for a report */
	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	/* advance the per-wcid packet id, skipping the reserved values */
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
193 | ||
/* Look up the tracked frame matching @pktid (and @wcid, when non-NULL)
 * on dev->status_list.
 *
 * While walking the list, every frame older than
 * MT_TX_STATUS_SKB_TIMEOUT is completed as failed (which reports it as
 * ACKed, see __mt76_tx_status_skb_done()) and moved to @list.  Passing
 * a negative @pktid flushes all matching frames this way regardless of
 * age.
 *
 * Caller holds dev->status_list.lock via mt76_tx_status_lock().
 * Returns the matching skb, or NULL if none was found.
 */
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		/* not flushing and not yet timed out: leave it queued */
		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
220 | ||
79d1c94c FF |
/* Complete timed-out entries on the status list.  With @flush set,
 * complete every tracked frame immediately (all frames when @wcid is
 * NULL, otherwise only that station's frames) regardless of age.
 */
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
231 | ||
88046b2c FF |
232 | void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb) |
233 | { | |
79d1c94c FF |
234 | struct sk_buff_head list; |
235 | ||
88046b2c FF |
236 | if (!skb->prev) { |
237 | ieee80211_free_txskb(dev->hw, skb); | |
238 | return; | |
239 | } | |
240 | ||
79d1c94c FF |
241 | mt76_tx_status_lock(dev, &list); |
242 | __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list); | |
243 | mt76_tx_status_unlock(dev, &list); | |
88046b2c FF |
244 | } |
245 | EXPORT_SYMBOL_GPL(mt76_tx_complete_skb); | |
246 | ||
17f1de56 FF |
/* Transmit one frame outside the mac80211 txq scheduler path.
 *
 * Selects the hardware queue from the skb's queue mapping, fills in
 * rate information unless the wcid uses a fixed rate, keeps the
 * aggregation sequence tracking up to date for QoS data, then queues
 * and kicks the frame.  Stops the corresponding mac80211 queue when
 * the hardware queue is nearly full.
 */
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	/* queues at/after MT_TXQ_PSD are internal; fall back to BE */
	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	/* ask mac80211 for a rate only if no fixed rate is programmed */
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		/* keep the BlockAck window tracking for this TID in sync */
		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = dev->q_tx[qid].q;

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	/* throttle mac80211 once fewer than 8 descriptors remain */
	if (q->queued > q->ndesc - 8 && !q->stopped) {
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
292 | ||
293 | static struct sk_buff * | |
294 | mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps) | |
295 | { | |
296 | struct ieee80211_txq *txq = mtxq_to_txq(mtxq); | |
297 | struct sk_buff *skb; | |
298 | ||
299 | skb = skb_dequeue(&mtxq->retry_q); | |
300 | if (skb) { | |
301 | u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; | |
302 | ||
303 | if (ps && skb_queue_empty(&mtxq->retry_q)) | |
304 | ieee80211_sta_set_buffered(txq->sta, tid, false); | |
305 | ||
306 | return skb; | |
307 | } | |
308 | ||
309 | skb = ieee80211_tx_dequeue(dev->hw, txq); | |
310 | if (!skb) | |
311 | return NULL; | |
312 | ||
313 | return skb; | |
314 | } | |
315 | ||
17f1de56 FF |
/* Queue one powersave-delivery frame on the PSD hardware queue.
 *
 * Flags the frame as a PS response, requests a tx-status report and the
 * EOSP indication on the final frame of the service period, and manages
 * the more-data bit.  Caller holds the PSD queue lock and kicks the
 * queue afterwards.
 */
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}
331 | ||
/* mac80211 callback: release up to @nframes buffered frames for the
 * TIDs set in @tids after a PS-poll / uAPSD trigger from @sta.
 *
 * Queuing of each frame is deferred by one iteration so that the final
 * frame of the service period can be flagged as EOSP before it is
 * handed to hardware.  If nothing was buffered, mac80211 is told the
 * service period ended immediately.
 */
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			/* queue the previous frame; the one in hand may
			 * still turn out to be the last of the period
			 */
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		/* nothing buffered: end the service period right away */
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
378 | ||
/* Dequeue a burst of frames from @mtxq and queue them on the hardware.
 *
 * The rate chosen for the first frame is applied to the whole burst.
 * Bursts are capped at 16 frames for A-MPDU traffic and 3 otherwise;
 * a rate-sampling (probe) frame is always sent on its own.  A frame
 * whose aggregation state or probe flag differs from the burst is
 * pushed onto the software retry queue for the next round.
 *
 * Returns the number of frames queued, 0 if the txq was empty or the
 * station is in powersave, or a negative error code from the queue op.
 * Caller holds the hardware queue lock.
 */
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
		    struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	/* powersave stations are served via the PSD release path */
	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	/* first frame fixes the rate for the rest of the burst */
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		/* probe frames must not be followed in the same burst */
		if (probe)
			break;

		if (test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		/* aggregation state flip or a new probe frame ends the
		 * burst; keep the frame for the next scheduling round
		 */
		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		/* remember which sw queue to credit on completion of the
		 * last descriptor of this burst
		 */
		hwq->entry[idx].qid = sq - dev->q_tx;
		hwq->entry[idx].schedule = true;
		sq->swq_queued++;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
465 | ||
/* Run one scheduling round over the mac80211 txqs feeding hardware
 * queue @qid, sending a burst from each until the software queue limit
 * (4 pending bursts) is hit, the device is resetting, or mac80211 has
 * no more txqs to offer.
 *
 * Returns the total number of frames queued, or -EBUSY on reset.
 * Called between ieee80211_txq_schedule_start()/end().
 */
static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *hwq = sq->q;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&hwq->lock);
	while (1) {
		if (sq->swq_queued >= 4)
			break;

		if (test_bit(MT76_RESET, &dev->state)) {
			ret = -EBUSY;
			break;
		}

		txq = ieee80211_next_txq(dev->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		/* NOTE(review): a powersave station's txq is skipped
		 * without ieee80211_return_txq() — confirm this is the
		 * intended interaction with the mac80211 scheduler
		 */
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			/* sending the BAR may sleep on some drivers, so
			 * drop the queue lock around it
			 */
			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
		}

		ret += mt76_txq_send_burst(dev, sq, mtxq);
		/* re-enqueue the txq if frames remain on the retry queue */
		ieee80211_return_txq(dev->hw, txq,
				     !skb_queue_empty(&mtxq->retry_q));
	}
	spin_unlock_bh(&hwq->lock);

	return ret;
}
516 | ||
90fdc171 | 517 | void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid) |
17f1de56 | 518 | { |
90fdc171 | 519 | struct mt76_sw_queue *sq = &dev->q_tx[qid]; |
17f1de56 FF |
520 | int len; |
521 | ||
90fdc171 FF |
522 | if (qid >= 4) |
523 | return; | |
524 | ||
525 | if (sq->swq_queued >= 4) | |
526 | return; | |
527 | ||
1d868b70 | 528 | rcu_read_lock(); |
17f1de56 | 529 | |
90fdc171 FF |
530 | do { |
531 | ieee80211_txq_schedule_start(dev->hw, qid); | |
532 | len = mt76_txq_schedule_list(dev, qid); | |
533 | ieee80211_txq_schedule_end(dev->hw, qid); | |
17f1de56 | 534 | } while (len > 0); |
90fdc171 | 535 | |
1d868b70 | 536 | rcu_read_unlock(); |
17f1de56 FF |
537 | } |
538 | EXPORT_SYMBOL_GPL(mt76_txq_schedule); | |
539 | ||
/* Run the tx scheduler on every data (AC) queue, i.e. queue ids 0
 * through MT_TXQ_BK inclusive.
 */
void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
548 | ||
c325c9c7 LB |
/* Tasklet entry point: re-run the tx scheduler on all data queues.
 * @data carries the struct mt76_dev pointer cast to unsigned long, as
 * required by the tasklet API.
 */
void mt76_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;

	mt76_txq_schedule_all(dev);
}
555 | ||
17f1de56 FF |
/* Arrange for a BlockAck Request to be sent (if @send_bar) on every
 * TID of @sta that has an active aggregation session, by flagging each
 * of the station's txqs under its hardware queue lock.  Used when the
 * station enters powersave or is torn down.
 */
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		hwq = mtxq->swq->q;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
578 | ||
/* mac80211 wake_tx_queue callback: defer actual scheduling to the tx
 * tasklet.  Ignored while the device is not running.
 */
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;

	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
		return;

	tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
589 | ||
/* Tear down the driver state of a mac80211 txq: free any frames still
 * sitting on the software retry queue.  A NULL @txq is a no-op.
 */
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);
604 | ||
/* Initialize the driver-private part of a mac80211 txq: an empty
 * software retry queue and a pointer to the software queue matching
 * the txq's access class.
 */
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

	skb_queue_head_init(&mtxq->retry_q);

	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
1d0496c6 SG |
614 | |
615 | u8 mt76_ac_to_hwq(u8 ac) | |
616 | { | |
617 | static const u8 wmm_queue_map[] = { | |
618 | [IEEE80211_AC_BE] = 0, | |
619 | [IEEE80211_AC_BK] = 1, | |
620 | [IEEE80211_AC_VI] = 2, | |
621 | [IEEE80211_AC_VO] = 3, | |
622 | }; | |
623 | ||
624 | if (WARN_ON(ac >= IEEE80211_NUM_ACS)) | |
625 | return 0; | |
626 | ||
627 | return wmm_queue_map[ac]; | |
628 | } | |
629 | EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); |