/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"

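/*
 * Allocate a TX descriptor (txwi) buffer together with its
 * mt76_txwi_cache bookkeeping entry in a single devres allocation and
 * map the descriptor for DMA. The cache entry lives directly behind
 * the hardware descriptor.
 */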
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

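/* Pop a previously released txwi entry from the free list, if one is
 * available. */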
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

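/* Unmap all cached txwi entries on teardown; the buffers themselves are
 * devres managed and are freed together with the device. */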
void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
}

static enum mt76_txq_id
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

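/*
 * TX status tracking: frames that need a status report are queued on
 * dev->status_list under status_list.lock. Completed frames are moved
 * to a caller-provided local list while the lock is held and are only
 * reported to mac80211 after the lock has been dropped.
 */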
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                    __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
        __acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                      __releases(&dev->status_list.lock)
{
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);
        __release(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL)
                ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

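/*
 * Assign a packet id to a frame that needs a TX status report and add it
 * to the status list. Frames that do not need a report get the special
 * MT_PACKET_ID_NO_ACK / MT_PACKET_ID_NO_SKB ids instead.
 */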
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

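/*
 * Look up the queued frame matching a wcid/packet id pair. Entries older
 * than MT_TX_STATUS_SKB_TIMEOUT (or all matching entries, when called
 * with pktid < 0) are completed as failed along the way.
 */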
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 &&
                    !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

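/* DMA completion path: frames without status tracking are freed right
 * away, tracked frames are completed once both the DMA done and the TX
 * status done flags have been seen. */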
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct sk_buff_head list;

        if (!skb->prev) {
                ieee80211_free_txskb(dev->hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

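/*
 * Directly transmit a frame handed over by mac80211: pick the hardware
 * queue from the skb queue mapping, fill in rates if the wcid does not
 * carry fixed rate info, track the aggregation ssn for QoS data and stop
 * the queue when it is close to full.
 */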
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
                struct ieee80211_txq *txq;
                struct mt76_txq *mtxq;
                u8 tid;

                tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
                txq = sta->txq[tid];
                mtxq = (struct mt76_txq *) txq->drv_priv;

                if (mtxq->aggr)
                        mt76_check_agg_ssn(mtxq, skb);
        }

        q = dev->q_tx[qid].q;

        spin_lock_bh(&q->lock);
        dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

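/* Dequeue the next frame for a txq, preferring the driver-internal retry
 * queue over the mac80211 txq. */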
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        skb = ieee80211_tx_dequeue(dev->hw, txq);
        if (!skb)
                return NULL;

        return skb;
}

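/* Queue a power-save response frame on the PSD hardware queue, setting
 * the more-data bit and, for the last frame, the EOSP/status-request
 * flags. */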
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_dev *dev = hw->priv;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(dev, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

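/*
 * Transmit a burst of frames from one txq: up to 16 frames when the
 * first frame is part of an A-MPDU, 3 otherwise, reusing the rate of
 * the first frame for the rest of the burst.
 */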
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
                    struct mt76_txq *mtxq, bool *empty)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
                *empty = true;
                return 0;
        }

        skb = mt76_txq_dequeue(dev, mtxq, false);
        if (!skb) {
                *empty = true;
                return 0;
        }

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
                    test_bit(MT76_RESET, &dev->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(dev, mtxq, false);
                if (!skb) {
                        *empty = true;
                        break;
                }

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
                                                   txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->entry[idx].qid = sq - dev->q_tx;
                hwq->entry[idx].schedule = true;
                sq->swq_queued++;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

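/* Iterate over the txqs that mac80211 schedules for this hardware queue
 * and send a burst from each, until the software queue limit is reached
 * or mac80211 has no more txqs to hand out. */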
static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&hwq->lock);
        while (1) {
                bool empty = false;

                if (sq->swq_queued >= 4)
                        break;

                if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
                    test_bit(MT76_RESET, &dev->state)) {
                        ret = -EBUSY;
                        break;
                }

                txq = ieee80211_next_txq(dev->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                }

                ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
                if (skb_queue_empty(&mtxq->retry_q))
                        empty = true;
                ieee80211_return_txq(dev->hw, txq, !empty);
        }
        spin_unlock_bh(&hwq->lock);

        return ret;
}

void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        int len;

        if (qid >= 4)
                return;

        if (sq->swq_queued >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(dev->hw, qid);
                len = mt76_txq_schedule_list(dev, qid);
                ieee80211_txq_schedule_end(dev->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                hwq = mtxq->swq->q;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_dev *dev = hw->priv;

        if (!test_bit(MT76_STATE_RUNNING, &dev->state))
                return;

        tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *) txq->drv_priv;

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
                ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

        skb_queue_head_init(&mtxq->retry_q);

        mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

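/* Map a mac80211 access category to the corresponding hardware queue
 * index (BE=0, BK=1, VI=2, VO=3). */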
u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);