drivers/net/wireless/mediatek/mt76/mt7615/mac.c
1// SPDX-License-Identifier: ISC
2/* Copyright (C) 2019 MediaTek Inc.
3 *
4 * Author: Ryder Lee <ryder.lee@mediatek.com>
5 * Roy Luo <royluo@google.com>
6 * Felix Fietkau <nbd@nbd.name>
7 * Lorenzo Bianconi <lorenzo@kernel.org>
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/timekeeping.h>
12#include "mt7615.h"
13#include "../dma.h"
14#include "mac.h"
15
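/*
 * to_rssi() converts a per-chain RCPI field from the RX vector into a
 * signed dBm value: rssi = RCPI / 2 - 110, so e.g. an RCPI of 60 maps
 * to -80 dBm.
 */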
16static inline s8 to_rssi(u32 field, u32 rxv)
17{
18 return (FIELD_GET(field, rxv) - 220) / 2;
19}
20
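/*
 * Look up the mt76_wcid for the WLAN index carried in the RX descriptor.
 * Unicast frames use the station entry directly; group-addressed frames
 * from a known station return the per-vif wcid instead, presumably so the
 * frame is accounted to the owning interface.
 */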
21static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
22 u8 idx, bool unicast)
23{
24 struct mt7615_sta *sta;
25 struct mt76_wcid *wcid;
26
27 if (idx >= ARRAY_SIZE(dev->mt76.wcid))
28 return NULL;
29
30 wcid = rcu_dereference(dev->mt76.wcid[idx]);
31 if (unicast || !wcid)
32 return wcid;
33
34 if (!wcid->sta)
35 return NULL;
36
37 sta = container_of(wcid, struct mt7615_sta, wcid);
38 if (!sta->vif)
39 return NULL;
40
41 return &sta->vif->sta.wcid;
42}
43
44void mt7615_mac_reset_counters(struct mt7615_dev *dev)
45{
46 int i;
47
48 for (i = 0; i < 4; i++)
49 mt76_rr(dev, MT_TX_AGG_CNT(i));
50
51 memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
52
53 /* TODO: add DBDC support */
54
55 /* reset airtime counters */
56 mt76_rr(dev, MT_MIB_SDR9(0));
57 mt76_rr(dev, MT_MIB_SDR36(0));
58 mt76_rr(dev, MT_MIB_SDR37(0));
59 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
60 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
61}
62
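/*
 * Parse the receive descriptor in front of the frame. The first four
 * dwords are always present; the optional groups signalled in rxd[0]
 * (groups 4, 1, 2 and 3) are skipped or decoded as needed. Group 1
 * carries the received IV when hardware decryption stripped it, group 3
 * is the RX vector with rate, bandwidth and per-chain RCPI.
 */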
63int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
64{
65 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
66 struct ieee80211_supported_band *sband;
67 struct ieee80211_hdr *hdr;
68 __le32 *rxd = (__le32 *)skb->data;
69 u32 rxd0 = le32_to_cpu(rxd[0]);
70 u32 rxd1 = le32_to_cpu(rxd[1]);
71 u32 rxd2 = le32_to_cpu(rxd[2]);
72 bool unicast, remove_pad, insert_ccmp_hdr = false;
73 int i, idx;
74
75 if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
76 return -EINVAL;
77
78 memset(status, 0, sizeof(*status));
79
80 unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
81 idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
82 status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
83
84 if (status->wcid) {
85 struct mt7615_sta *msta;
86
87 msta = container_of(status->wcid, struct mt7615_sta, wcid);
88 spin_lock_bh(&dev->sta_poll_lock);
89 if (list_empty(&msta->poll_list))
90 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
91 spin_unlock_bh(&dev->sta_poll_lock);
92 }
93
94 /* TODO: properly support DBDC */
95 status->freq = dev->mt76.chandef.chan->center_freq;
96 status->band = dev->mt76.chandef.chan->band;
97 if (status->band == NL80211_BAND_5GHZ)
98 sband = &dev->mt76.sband_5g.sband;
99 else
100 sband = &dev->mt76.sband_2g.sband;
101
102 if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
103 status->flag |= RX_FLAG_FAILED_FCS_CRC;
104
105 if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
106 status->flag |= RX_FLAG_MMIC_ERROR;
107
108 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
109 !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
110 status->flag |= RX_FLAG_DECRYPTED;
111 status->flag |= RX_FLAG_IV_STRIPPED;
112 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
113 }
114
115 if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
116 MT_RXD2_NORMAL_NON_AMPDU))) {
117 status->flag |= RX_FLAG_AMPDU_DETAILS;
118
119 /* all subframes of an A-MPDU have the same timestamp */
120 if (dev->rx_ampdu_ts != rxd[12]) {
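			/* bump the reference, skipping 0 on wraparound */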
121 if (!++dev->mt76.ampdu_ref)
122 dev->mt76.ampdu_ref++;
123 }
124 dev->rx_ampdu_ts = rxd[12];
125
126 status->ampdu_ref = dev->mt76.ampdu_ref;
127 }
128
129 remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
130
131 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
132 return -EINVAL;
133
134 if (!sband->channels)
135 return -EINVAL;
136
137 rxd += 4;
138 if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
139 rxd += 4;
140 if ((u8 *)rxd - skb->data >= skb->len)
141 return -EINVAL;
142 }
143
144 if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
145 u8 *data = (u8 *)rxd;
146
147 if (status->flag & RX_FLAG_DECRYPTED) {
148 status->iv[0] = data[5];
149 status->iv[1] = data[4];
150 status->iv[2] = data[3];
151 status->iv[3] = data[2];
152 status->iv[4] = data[1];
153 status->iv[5] = data[0];
154
155 insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
156 }
157 rxd += 4;
158 if ((u8 *)rxd - skb->data >= skb->len)
159 return -EINVAL;
160 }
161
162 if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
163 rxd += 2;
164 if ((u8 *)rxd - skb->data >= skb->len)
165 return -EINVAL;
166 }
167
168 if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
169 u32 rxdg0 = le32_to_cpu(rxd[0]);
170 u32 rxdg1 = le32_to_cpu(rxd[1]);
171 u32 rxdg3 = le32_to_cpu(rxd[3]);
172 u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
173 bool cck = false;
174
175 i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
176 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
177 case MT_PHY_TYPE_CCK:
178 cck = true;
179 /* fall through */
180 case MT_PHY_TYPE_OFDM:
181 i = mt76_get_rate(&dev->mt76, sband, i, cck);
182 break;
183 case MT_PHY_TYPE_HT_GF:
184 case MT_PHY_TYPE_HT:
185 status->encoding = RX_ENC_HT;
186 if (i > 31)
187 return -EINVAL;
188 break;
189 case MT_PHY_TYPE_VHT:
190 status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
191 status->encoding = RX_ENC_VHT;
192 break;
193 default:
194 return -EINVAL;
195 }
196 status->rate_idx = i;
197
198 switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
199 case MT_PHY_BW_20:
200 break;
201 case MT_PHY_BW_40:
202 status->bw = RATE_INFO_BW_40;
203 break;
204 case MT_PHY_BW_80:
205 status->bw = RATE_INFO_BW_80;
206 break;
207 case MT_PHY_BW_160:
208 status->bw = RATE_INFO_BW_160;
209 break;
210 default:
211 return -EINVAL;
212 }
213
214 if (rxdg0 & MT_RXV1_HT_SHORT_GI)
215 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
216 if (rxdg0 & MT_RXV1_HT_AD_CODE)
217 status->enc_flags |= RX_ENC_FLAG_LDPC;
218
219 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
220
221 status->chains = dev->mt76.antenna_mask;
222 status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
223 status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
224 status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
225 status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
226 status->signal = status->chain_signal[0];
227
228 for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
229 if (!(status->chains & BIT(i)))
230 continue;
231
232 status->signal = max(status->signal,
233 status->chain_signal[i]);
234 }
235
236 rxd += 6;
237 if ((u8 *)rxd - skb->data >= skb->len)
238 return -EINVAL;
239 }
240
241 skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
242
243 if (insert_ccmp_hdr) {
244 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
245
246 mt76_insert_ccmp_hdr(skb, key_id);
247 }
248
249 hdr = (struct ieee80211_hdr *)skb->data;
250 if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
251 return 0;
252
253 status->aggr = unicast &&
254 !ieee80211_is_qos_nullfunc(hdr->frame_control);
255 status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
256 status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
257
258 return 0;
259}
260
261void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
262{
263}
264
265void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
266 struct mt76_queue_entry *e)
267{
268 if (!e->txwi) {
269 dev_kfree_skb_any(e->skb);
270 return;
271 }
272
273 /* error path */
274 if (e->skb == DMA_DUMMY_DATA) {
275 struct mt76_txwi_cache *t;
276 struct mt7615_dev *dev;
277 struct mt7615_txp *txp;
278
279 dev = container_of(mdev, struct mt7615_dev, mt76);
280 txp = mt7615_txwi_to_txp(mdev, e->txwi);
281
282 spin_lock_bh(&dev->token_lock);
283 t = idr_remove(&dev->token, le16_to_cpu(txp->token));
284 spin_unlock_bh(&dev->token_lock);
285 e->skb = t ? t->skb : NULL;
286 }
287
288 if (e->skb)
289 mt76_tx_complete_skb(mdev, e->skb);
290}
291
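/*
 * Encode a mac80211 tx_rate into the hardware rate value used by both
 * the TX descriptor and the WTBL rate sets: PHY mode, NSS - 1, MCS/rate
 * index and the STBC flag. For legacy rates the mode and index come from
 * the packed hw_value of the band's bitrate table.
 */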
292static u16
293mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
294 const struct ieee80211_tx_rate *rate,
295 bool stbc, u8 *bw)
296{
297 u8 phy, nss, rate_idx;
298 u16 rateval = 0;
299
300 *bw = 0;
301
302 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
303 rate_idx = ieee80211_rate_get_vht_mcs(rate);
304 nss = ieee80211_rate_get_vht_nss(rate);
305 phy = MT_PHY_TYPE_VHT;
306 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
307 *bw = 1;
308 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
309 *bw = 2;
310 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
311 *bw = 3;
312 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
313 rate_idx = rate->idx;
314 nss = 1 + (rate->idx >> 3);
315 phy = MT_PHY_TYPE_HT;
316 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
317 phy = MT_PHY_TYPE_HT_GF;
318 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
319 *bw = 1;
320 } else {
321 const struct ieee80211_rate *r;
322 int band = dev->mt76.chandef.chan->band;
323 u16 val;
324
325 nss = 1;
326 r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
327 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
328 val = r->hw_value_short;
329 else
330 val = r->hw_value;
331
332 phy = val >> 8;
333 rate_idx = val & 0xff;
334 }
335
336 if (stbc && nss == 1) {
337 nss++;
338 rateval |= MT_TX_RATE_STBC;
339 }
340
341 rateval |= (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
342 FIELD_PREP(MT_TX_RATE_MODE, phy) |
343 FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
344
345 return rateval;
346}
347
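/*
 * Fill the TX descriptor (TXD0-TXD7) in front of the frame: queue and
 * length in TXD0, wcid and header format in TXD1, frame type and BA
 * control in TXD2, protection and sequence number in TXD3, TX status
 * reporting/PID in TXD5, and an optional fixed rate in TXD6 when rate
 * control probing is not in use.
 */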
348int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
349 struct sk_buff *skb, struct mt76_wcid *wcid,
350 struct ieee80211_sta *sta, int pid,
351 struct ieee80211_key_conf *key)
352{
353 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
354 struct ieee80211_tx_rate *rate = &info->control.rates[0];
355 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
356 bool multicast = is_multicast_ether_addr(hdr->addr1);
357 struct ieee80211_vif *vif = info->control.vif;
358 int tx_count = 8;
359 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
360 __le16 fc = hdr->frame_control;
361 u16 seqno = 0;
362 u32 val;
363
364 if (vif) {
365 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
366
367 omac_idx = mvif->omac_idx;
368 wmm_idx = mvif->wmm_idx;
369 }
370
371 if (sta) {
372 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
373
374 tx_count = msta->rate_count;
375 }
376
377 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
378 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
379
380 if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
381 q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
382 skb_get_queue_mapping(skb);
383 p_fmt = MT_TX_TYPE_CT;
384 } else if (ieee80211_is_beacon(fc)) {
385 q_idx = MT_LMAC_BCN0;
386 p_fmt = MT_TX_TYPE_FW;
387 } else {
388 q_idx = MT_LMAC_ALTX0;
389 p_fmt = MT_TX_TYPE_CT;
390 }
391
392 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
393 FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
394 FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
395 txwi[0] = cpu_to_le32(val);
396
397 val = MT_TXD1_LONG_FORMAT |
398 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
399 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
400 FIELD_PREP(MT_TXD1_HDR_INFO,
401 ieee80211_get_hdrlen_from_skb(skb) / 2) |
402 FIELD_PREP(MT_TXD1_TID,
403 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
404 FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
405 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
406 txwi[1] = cpu_to_le32(val);
407
408 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
409 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
410 FIELD_PREP(MT_TXD2_MULTICAST, multicast);
411 if (key) {
412 if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
413 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
414 val |= MT_TXD2_BIP;
415 txwi[3] = 0;
416 } else {
417 txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
418 }
419 } else {
420 txwi[3] = 0;
421 }
422 txwi[2] = cpu_to_le32(val);
423
424 if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
425 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
426
427 txwi[4] = 0;
428 txwi[6] = 0;
429
430 if (rate->idx >= 0 && rate->count &&
431 !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
432 bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
433 u8 bw;
434 u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
435
436 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
437
438 val = MT_TXD6_FIXED_BW |
439 FIELD_PREP(MT_TXD6_BW, bw) |
440 FIELD_PREP(MT_TXD6_TX_RATE, rateval);
441 txwi[6] |= cpu_to_le32(val);
442
443 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
444 txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
445
446 if (info->flags & IEEE80211_TX_CTL_LDPC)
447 txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
448
449 if (!(rate->flags & (IEEE80211_TX_RC_MCS |
450 IEEE80211_TX_RC_VHT_MCS)))
451 txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
452
453 tx_count = rate->count;
454 }
455
456 if (!ieee80211_is_beacon(fc)) {
457 val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
458 FIELD_PREP(MT_TXD5_PID, pid);
459 txwi[5] = cpu_to_le32(val);
460 } else {
461 txwi[5] = 0;
462 /* use maximum tx count for beacons */
463 tx_count = 0x1f;
464 }
465
466 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
467 if (ieee80211_is_data_qos(hdr->frame_control)) {
468 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
469 val |= MT_TXD3_SN_VALID;
470 } else if (ieee80211_is_back_req(hdr->frame_control)) {
471 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
472
473 seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
474 val |= MT_TXD3_SN_VALID;
475 }
476 val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
477
478 txwi[3] |= cpu_to_le32(val);
479
480 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
481 txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
482
483 txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
484 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
485
486 return 0;
487}
488
489void mt7615_txp_skb_unmap(struct mt76_dev *dev,
490 struct mt76_txwi_cache *t)
491{
492 struct mt7615_txp *txp;
493 int i;
494
495 txp = mt7615_txwi_to_txp(dev, t);
496 for (i = 1; i < txp->nbuf; i++)
497 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
498 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
499}
500
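/*
 * Each station owns one hardware wireless table (WTBL) entry of
 * MT_WTBL_ENTRY_SIZE bytes starting at MT_WTBL_BASE; the helpers below
 * use this mapping to read and update per-station state such as airtime
 * counters, rate sets and keys.
 */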
501static u32 mt7615_mac_wtbl_addr(int wcid)
502{
503 return MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
504}
505
506bool mt7615_mac_wtbl_update(struct mt7615_dev *dev, int idx, u32 mask)
507{
508 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
509 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
510
511 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
512 0, 5000);
513}
514
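/*
 * Drain the station poll list and read the per-AC TX/RX airtime counters
 * from each station's WTBL entry (four register pairs starting at word
 * 19). The deltas are reported to mac80211 through
 * ieee80211_sta_register_airtime(); when a counter approaches overflow
 * (bit 30 set) the hardware counters are cleared.
 */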
515void mt7615_mac_sta_poll(struct mt7615_dev *dev)
516{
517 static const u8 ac_to_tid[4] = {
518 [IEEE80211_AC_BE] = 0,
519 [IEEE80211_AC_BK] = 1,
520 [IEEE80211_AC_VI] = 4,
521 [IEEE80211_AC_VO] = 6
522 };
523 static const u8 hw_queue_map[] = {
524 [IEEE80211_AC_BK] = 0,
525 [IEEE80211_AC_BE] = 1,
526 [IEEE80211_AC_VI] = 2,
527 [IEEE80211_AC_VO] = 3,
528 };
529 struct ieee80211_sta *sta;
530 struct mt7615_sta *msta;
531 u32 addr, tx_time[4], rx_time[4];
532 int i;
533
534 rcu_read_lock();
535
536 while (true) {
537 bool clear = false;
538
539 spin_lock_bh(&dev->sta_poll_lock);
540 if (list_empty(&dev->sta_poll_list)) {
541 spin_unlock_bh(&dev->sta_poll_lock);
542 break;
543 }
544 msta = list_first_entry(&dev->sta_poll_list,
545 struct mt7615_sta, poll_list);
546 list_del_init(&msta->poll_list);
547 spin_unlock_bh(&dev->sta_poll_lock);
548
549 addr = mt7615_mac_wtbl_addr(msta->wcid.idx) + 19 * 4;
550
551 for (i = 0; i < 4; i++, addr += 8) {
552 u32 tx_last = msta->airtime_ac[i];
553 u32 rx_last = msta->airtime_ac[i + 4];
554
555 msta->airtime_ac[i] = mt76_rr(dev, addr);
556 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
557 tx_time[i] = msta->airtime_ac[i] - tx_last;
558 rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
559
560 if ((tx_last | rx_last) & BIT(30))
561 clear = true;
562 }
563
564 if (clear) {
565 mt7615_mac_wtbl_update(dev, msta->wcid.idx,
566 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
567 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
568 }
569
570 if (!msta->wcid.sta)
571 continue;
572
573 sta = container_of((void *)msta, struct ieee80211_sta,
574 drv_priv);
575 for (i = 0; i < 4; i++) {
576 u32 tx_cur = tx_time[i];
577 u32 rx_cur = rx_time[hw_queue_map[i]];
578 u8 tid = ac_to_tid[i];
579
580 if (!tx_cur && !rx_cur)
581 continue;
582
583 ieee80211_sta_register_airtime(sta, tid, tx_cur,
584 rx_cur);
585 }
586 }
587
588 rcu_read_unlock();
589}
590
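/*
 * Program a new rate set (up to four fallback rates plus an optional
 * probe rate) into the station's WTBL through the RIUCR registers. Two
 * rate sets are kept in software and toggled on each update; the LPON
 * timestamp of the switch is stored in rate_set_tsf so that later TX
 * status events can be matched against the set that was active when the
 * frame was queued.
 */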
591void mt7615_mac_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
592 struct ieee80211_tx_rate *probe_rate,
593 struct ieee80211_tx_rate *rates)
594{
595 struct ieee80211_tx_rate *ref;
596 int wcid = sta->wcid.idx;
597 u32 addr = mt7615_mac_wtbl_addr(wcid);
598 bool stbc = false;
599 int n_rates = sta->n_rates;
600 u8 bw, bw_prev, bw_idx = 0;
601 u16 val[4];
602 u16 probe_val;
603 u32 w5, w27;
604 bool rateset;
605 int i, k;
606
607 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
608 return;
609
610 for (i = n_rates; i < 4; i++)
611 rates[i] = rates[n_rates - 1];
612
613 rateset = !(sta->rate_set_tsf & BIT(0));
614 memcpy(sta->rateset[rateset].rates, rates,
615 sizeof(sta->rateset[rateset].rates));
616 if (probe_rate) {
617 sta->rateset[rateset].probe_rate = *probe_rate;
618 ref = &sta->rateset[rateset].probe_rate;
619 } else {
620 sta->rateset[rateset].probe_rate.idx = -1;
621 ref = &sta->rateset[rateset].rates[0];
622 }
623
624 rates = sta->rateset[rateset].rates;
625 for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
626 /*
627 * We don't support switching between short and long GI
628 * within the rate set. For accurate tx status reporting, we
629 * need to make sure that flags match.
630 * For improved performance, avoid duplicate entries by
631 * decrementing the MCS index if necessary
632 */
633 if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
634 rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
635
636 for (k = 0; k < i; k++) {
637 if (rates[i].idx != rates[k].idx)
638 continue;
639 if ((rates[i].flags ^ rates[k].flags) &
640 (IEEE80211_TX_RC_40_MHZ_WIDTH |
641 IEEE80211_TX_RC_80_MHZ_WIDTH |
642 IEEE80211_TX_RC_160_MHZ_WIDTH))
643 continue;
644
645 if (!rates[i].idx)
646 continue;
647
648 rates[i].idx--;
649 }
650 }
651
652 val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
653 bw_prev = bw;
654
655 if (probe_rate) {
656 probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
657 if (bw)
658 bw_idx = 1;
659 else
660 bw_prev = 0;
661 } else {
662 probe_val = val[0];
663 }
664
665 val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
666 if (bw_prev) {
667 bw_idx = 3;
668 bw_prev = bw;
669 }
670
671 val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
672 if (bw_prev) {
673 bw_idx = 5;
674 bw_prev = bw;
675 }
676
677 val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
678 if (bw_prev)
679 bw_idx = 7;
680
681 w27 = mt76_rr(dev, addr + 27 * 4);
682 w27 &= ~MT_WTBL_W27_CC_BW_SEL;
683 w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
684
685 w5 = mt76_rr(dev, addr + 5 * 4);
686 w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
687 MT_WTBL_W5_MPDU_OK_COUNT |
688 MT_WTBL_W5_MPDU_FAIL_COUNT |
689 MT_WTBL_W5_RATE_IDX);
690 w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
691 FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
692
693 mt76_wr(dev, MT_WTBL_RIUCR0, w5);
694
695 mt76_wr(dev, MT_WTBL_RIUCR1,
696 FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
697 FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
698 FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
699
700 mt76_wr(dev, MT_WTBL_RIUCR2,
701 FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
702 FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
703 FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
704 FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
705
706 mt76_wr(dev, MT_WTBL_RIUCR3,
707 FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
708 FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
709 FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
710
711 mt76_wr(dev, MT_WTBL_UPDATE,
712 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
713 MT_WTBL_UPDATE_RATE_UPDATE |
714 MT_WTBL_UPDATE_TX_COUNT_CLEAR);
715
716 mt76_wr(dev, addr + 27 * 4, w27);
717
718 mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
719 sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
720
721 if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
722 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
723
724 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
725 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
726}
727
728static enum mt7615_cipher_type
729mt7615_mac_get_cipher(int cipher)
730{
731 switch (cipher) {
732 case WLAN_CIPHER_SUITE_WEP40:
733 return MT_CIPHER_WEP40;
734 case WLAN_CIPHER_SUITE_WEP104:
735 return MT_CIPHER_WEP104;
736 case WLAN_CIPHER_SUITE_TKIP:
737 return MT_CIPHER_TKIP;
738 case WLAN_CIPHER_SUITE_AES_CMAC:
739 return MT_CIPHER_BIP_CMAC_128;
740 case WLAN_CIPHER_SUITE_CCMP:
741 return MT_CIPHER_AES_CCMP;
742 case WLAN_CIPHER_SUITE_CCMP_256:
743 return MT_CIPHER_CCMP_256;
744 case WLAN_CIPHER_SUITE_GCMP:
745 return MT_CIPHER_GCMP;
746 case WLAN_CIPHER_SUITE_GCMP_256:
747 return MT_CIPHER_GCMP_256;
748 case WLAN_CIPHER_SUITE_SMS4:
749 return MT_CIPHER_WAPI;
750 default:
751 return MT_CIPHER_NONE;
752 }
753}
754
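/*
 * Write the key material into the WTBL key area at word 30. TKIP RX/TX
 * MIC halves are swapped to match the hardware layout, and a BIP (CMAC)
 * key is stored in the second 16-byte half so that it can coexist with a
 * pairwise key in the first half.
 */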
755static int
756mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
757 struct ieee80211_key_conf *key,
758 enum mt7615_cipher_type cipher,
759 enum set_key_cmd cmd)
760{
761 u32 addr = mt7615_mac_wtbl_addr(wcid->idx) + 30 * 4;
762 u8 data[32] = {};
763
764 if (key->keylen > sizeof(data))
765 return -EINVAL;
766
767 mt76_rr_copy(dev, addr, data, sizeof(data));
768 if (cmd == SET_KEY) {
769 if (cipher == MT_CIPHER_TKIP) {
770 /* Rx/Tx MIC keys are swapped */
771 memcpy(data + 16, key->key + 24, 8);
772 memcpy(data + 24, key->key + 16, 8);
773 }
774 if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
775 memmove(data + 16, data, 16);
776 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
777 memcpy(data, key->key, key->keylen);
778 else if (cipher == MT_CIPHER_BIP_CMAC_128)
779 memcpy(data + 16, key->key, 16);
780 } else {
781 if (wcid->cipher & ~BIT(cipher)) {
782 if (cipher != MT_CIPHER_BIP_CMAC_128)
783 memmove(data, data + 16, 16);
784 memset(data + 16, 0, 16);
785 } else {
786 memset(data, 0, sizeof(data));
787 }
788 }
789 mt76_wr_copy(dev, addr, data, sizeof(data));
790
791 return 0;
792}
793
794static int
795mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
796 enum mt7615_cipher_type cipher, int keyidx,
797 enum set_key_cmd cmd)
798{
799 u32 addr = mt7615_mac_wtbl_addr(wcid->idx), w0, w1;
800
801 if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
802 return -ETIMEDOUT;
803
804 w0 = mt76_rr(dev, addr);
805 w1 = mt76_rr(dev, addr + 4);
806 if (cmd == SET_KEY) {
807 w0 |= MT_WTBL_W0_RX_KEY_VALID |
808 FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
809 cipher == MT_CIPHER_BIP_CMAC_128);
810 if (cipher != MT_CIPHER_BIP_CMAC_128 ||
811 !wcid->cipher)
812 w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
813 } else {
814 if (!(wcid->cipher & ~BIT(cipher)))
815 w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
816 MT_WTBL_W0_KEY_IDX);
817 if (cipher == MT_CIPHER_BIP_CMAC_128)
818 w0 &= ~MT_WTBL_W0_RX_IK_VALID;
819 }
820 mt76_wr(dev, MT_WTBL_RICR0, w0);
821 mt76_wr(dev, MT_WTBL_RICR1, w1);
822
823 if (!mt7615_mac_wtbl_update(dev, wcid->idx,
824 MT_WTBL_UPDATE_RXINFO_UPDATE))
825 return -ETIMEDOUT;
826
827 return 0;
828}
829
830static void
831mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
832 enum mt7615_cipher_type cipher,
833 enum set_key_cmd cmd)
834{
835 u32 addr = mt7615_mac_wtbl_addr(wcid->idx);
836
837 if (cmd == SET_KEY) {
838 if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
839 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
840 FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
841 } else {
842 if (cipher != MT_CIPHER_BIP_CMAC_128 &&
843 wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
844 mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
845 FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
846 MT_CIPHER_BIP_CMAC_128));
847 else if (!(wcid->cipher & ~BIT(cipher)))
848 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
849 }
850}
851
852int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
853 struct mt76_wcid *wcid,
854 struct ieee80211_key_conf *key,
855 enum set_key_cmd cmd)
856{
857 enum mt7615_cipher_type cipher;
858 int err;
859
860 cipher = mt7615_mac_get_cipher(key->cipher);
861 if (cipher == MT_CIPHER_NONE)
862 return -EOPNOTSUPP;
863
864 spin_lock_bh(&dev->mt76.lock);
865
866 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
867 err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
868 if (err < 0)
869 goto out;
870
871 err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
872 cmd);
873 if (err < 0)
874 goto out;
875
876 if (cmd == SET_KEY)
877 wcid->cipher |= BIT(cipher);
878 else
879 wcid->cipher &= ~BIT(cipher);
880
881out:
882 spin_unlock_bh(&dev->mt76.lock);
883
884 return err;
885}
886
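/*
 * Prepare a frame for the cut-through TX path: the TXD is followed by a
 * TXP buffer descriptor listing the remaining DMA fragments, only the
 * first MT_CT_PARSE_LEN bytes of the frame are handed to the firmware
 * for parsing, and a token allocated from dev->token ties the frame to
 * the TX-free event that signals completion.
 */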
887int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
888 enum mt76_txq_id qid, struct mt76_wcid *wcid,
889 struct ieee80211_sta *sta,
890 struct mt76_tx_info *tx_info)
891{
892 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
893 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
894 struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
895 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
896 struct ieee80211_key_conf *key = info->control.hw_key;
897 struct ieee80211_vif *vif = info->control.vif;
898 int i, pid, id, nbuf = tx_info->nbuf - 1;
899 u8 *txwi = (u8 *)txwi_ptr;
900 struct mt76_txwi_cache *t;
901 struct mt7615_txp *txp;
902
903 if (!wcid)
904 wcid = &dev->mt76.global_wcid;
905
906 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
907
908 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
909 spin_lock_bh(&dev->mt76.lock);
910 mt7615_mac_set_rates(dev, msta, &info->control.rates[0],
911 msta->rates);
912 msta->rate_probe = true;
913 spin_unlock_bh(&dev->mt76.lock);
914 }
915
916 mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
917 pid, key);
918
919 txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
920 for (i = 0; i < nbuf; i++) {
921 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
922 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
923 }
924 txp->nbuf = nbuf;
925
926 /* pass partial skb header to fw */
927 tx_info->buf[1].len = MT_CT_PARSE_LEN;
928 tx_info->nbuf = MT_CT_DMA_BUF_NUM;
929
930 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
931
932 if (!key)
933 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
934
935 if (ieee80211_is_mgmt(hdr->frame_control))
936 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
937
938 if (vif) {
939 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
940
941 txp->bss_idx = mvif->idx;
942 }
943
944 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
945 t->skb = tx_info->skb;
946
947 spin_lock_bh(&dev->token_lock);
948 id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
949 spin_unlock_bh(&dev->token_lock);
950 if (id < 0)
951 return id;
952
953 txp->token = cpu_to_le16(id);
954 txp->rept_wds_wcid = 0xff;
955 tx_info->skb = DMA_DUMMY_DATA;
956
957 return 0;
958}
959
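/*
 * Decode a TX status (TXS) event into mac80211 rate/retry information.
 * The TXS timestamp is compared against rate_set_tsf to pick the rate
 * set that was active when the frame was queued, retry counts are then
 * distributed over the rates of that set, and the final rate reported by
 * hardware is translated back into a mac80211 rate index and flags.
 */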
960static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
961 struct ieee80211_tx_info *info, __le32 *txs_data)
962{
963 struct ieee80211_supported_band *sband;
964 struct mt7615_rate_set *rs;
965 int first_idx = 0, last_idx;
966 int i, idx, count;
967 bool fixed_rate, ack_timeout;
968 bool probe, ampdu, cck = false;
969 bool rs_idx;
970 u32 rate_set_tsf;
971 u32 final_rate, final_rate_flags, final_nss, txs;
972
973 fixed_rate = info->status.rates[0].count;
974 probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
975
976 txs = le32_to_cpu(txs_data[1]);
977 ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
978
979 txs = le32_to_cpu(txs_data[3]);
980 count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
981 last_idx = FIELD_GET(MT_TXS3_LAST_TX_RATE, txs);
982
983 txs = le32_to_cpu(txs_data[0]);
984 final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
985 ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
986
987 if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
988 return false;
989
990 if (txs & MT_TXS0_QUEUE_TIMEOUT)
991 return false;
992
993 if (!ack_timeout)
994 info->flags |= IEEE80211_TX_STAT_ACK;
995
996 info->status.ampdu_len = 1;
997 info->status.ampdu_ack_len = !!(info->flags &
998 IEEE80211_TX_STAT_ACK);
999
1000 if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
1001 info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
1002
1003 first_idx = max_t(int, 0, last_idx - (count + 1) / MT7615_RATE_RETRY);
1004
1005 if (fixed_rate && !probe) {
1006 info->status.rates[0].count = count;
1007 i = 0;
1008 goto out;
1009 }
1010
1011 rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
1012 rs_idx = !((u32)(FIELD_GET(MT_TXS4_F0_TIMESTAMP, le32_to_cpu(txs_data[4])) -
1013 rate_set_tsf) < 1000000);
1014 rs_idx ^= rate_set_tsf & BIT(0);
1015 rs = &sta->rateset[rs_idx];
1016
1017 if (!first_idx && rs->probe_rate.idx >= 0) {
1018 info->status.rates[0] = rs->probe_rate;
1019
1020 spin_lock_bh(&dev->mt76.lock);
1021 if (sta->rate_probe) {
1022 mt7615_mac_set_rates(dev, sta, NULL, sta->rates);
1023 sta->rate_probe = false;
1024 }
1025 spin_unlock_bh(&dev->mt76.lock);
1026 } else {
1027 info->status.rates[0] = rs->rates[first_idx / 2];
1028 }
1029 info->status.rates[0].count = 0;
1030
1031 for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
1032 struct ieee80211_tx_rate *cur_rate;
1033 int cur_count;
1034
1035 cur_rate = &rs->rates[idx / 2];
1036 cur_count = min_t(int, MT7615_RATE_RETRY, count);
1037 count -= cur_count;
1038
1039 if (idx && (cur_rate->idx != info->status.rates[i].idx ||
1040 cur_rate->flags != info->status.rates[i].flags)) {
1041 i++;
1042 if (i == ARRAY_SIZE(info->status.rates)) {
1043 i--;
1044 break;
1045 }
1046
1047 info->status.rates[i] = *cur_rate;
1048 info->status.rates[i].count = 0;
1049 }
1050
1051 info->status.rates[i].count += cur_count;
1052 }
1053
1054out:
1055 final_rate_flags = info->status.rates[i].flags;
1056
1057 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
1058 case MT_PHY_TYPE_CCK:
1059 cck = true;
1060 /* fall through */
1061 case MT_PHY_TYPE_OFDM:
1062 if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
1063 sband = &dev->mt76.sband_5g.sband;
1064 else
1065 sband = &dev->mt76.sband_2g.sband;
1066 final_rate &= MT_TX_RATE_IDX;
1067 final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
1068 cck);
1069 final_rate_flags = 0;
1070 break;
1071 case MT_PHY_TYPE_HT_GF:
1072 case MT_PHY_TYPE_HT:
1073 final_rate_flags |= IEEE80211_TX_RC_MCS;
1074 final_rate &= MT_TX_RATE_IDX;
1075 if (final_rate > 31)
1076 return false;
1077 break;
1078 case MT_PHY_TYPE_VHT:
1079 final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
1080
1081 if ((final_rate & MT_TX_RATE_STBC) && final_nss)
1082 final_nss--;
1083
1084 final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
1085 final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
1086 break;
1087 default:
1088 return false;
1089 }
1090
1091 info->status.rates[i].idx = final_rate;
1092 info->status.rates[i].flags = final_rate_flags;
1093
1094 return true;
1095}
1096
1097static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
1098 struct mt7615_sta *sta, int pid,
1099 __le32 *txs_data)
1100{
1101 struct mt76_dev *mdev = &dev->mt76;
1102 struct sk_buff_head list;
1103 struct sk_buff *skb;
1104
1105 if (pid < MT_PACKET_ID_FIRST)
1106 return false;
1107
1108 mt76_tx_status_lock(mdev, &list);
1109 skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
1110 if (skb) {
1111 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1112
1113 if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
1114 ieee80211_tx_info_clear_status(info);
1115 info->status.rates[0].idx = -1;
1116 }
1117
1118 mt76_tx_status_skb_done(mdev, skb, &list);
1119 }
1120 mt76_tx_status_unlock(mdev, &list);
1121
1122 return !!skb;
1123}
1124
1125void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
1126{
1127 struct ieee80211_tx_info info = {};
1128 struct ieee80211_sta *sta = NULL;
1129 struct mt7615_sta *msta = NULL;
1130 struct mt76_wcid *wcid;
1131 __le32 *txs_data = data;
1132 u32 txs;
1133 u8 wcidx;
1134 u8 pid;
1135
1136 txs = le32_to_cpu(txs_data[0]);
1137 pid = FIELD_GET(MT_TXS0_PID, txs);
1138 txs = le32_to_cpu(txs_data[2]);
1139 wcidx = FIELD_GET(MT_TXS2_WCID, txs);
1140
1141 if (pid == MT_PACKET_ID_NO_ACK)
1142 return;
1143
1144 if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
1145 return;
1146
1147 rcu_read_lock();
1148
1149 wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1150 if (!wcid)
1151 goto out;
1152
1153 msta = container_of(wcid, struct mt7615_sta, wcid);
1154 sta = wcid_to_sta(wcid);
1155
1156 spin_lock_bh(&dev->sta_poll_lock);
1157 if (list_empty(&msta->poll_list))
1158 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1159 spin_unlock_bh(&dev->sta_poll_lock);
1160
1161 if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
1162 goto out;
1163
1164 if (wcidx >= MT7615_WTBL_STA || !sta)
1165 goto out;
1166
1167 if (mt7615_fill_txs(dev, msta, &info, txs_data))
1168 ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
1169
1170out:
1171 rcu_read_unlock();
1172}
1173
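/*
 * A TX-free event carries a list of MSDU tokens. Each token is looked up
 * in the dev->token IDR to recover the txwi cache entry created in
 * mt7615_tx_prepare_skb(), its DMA fragments are unmapped and the skb is
 * completed.
 */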
1174void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
1175{
1176 struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
1177 struct mt76_dev *mdev = &dev->mt76;
1178 struct mt76_txwi_cache *txwi;
1179 u8 i, count;
1180
1181 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
1182 for (i = 0; i < count; i++) {
1183 spin_lock_bh(&dev->token_lock);
1184 txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
1185 spin_unlock_bh(&dev->token_lock);
1186
1187 if (!txwi)
1188 continue;
1189
1190 mt7615_txp_skb_unmap(mdev, txwi);
1191 if (txwi->skb) {
1192 mt76_tx_complete_skb(mdev, txwi->skb);
1193 txwi->skb = NULL;
1194 }
1195
1196 mt76_put_txwi(mdev, txwi);
1197 }
1198 dev_kfree_skb(skb);
1199}
1200
1201static void
1202mt7615_mac_set_default_sensitivity(struct mt7615_dev *dev)
1203{
1204 mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1205 MT_WF_PHY_B0_PD_OFDM_MASK,
1206 MT_WF_PHY_B0_PD_OFDM(0x13c));
1207 mt76_rmw(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
1208 MT_WF_PHY_B1_PD_OFDM_MASK,
1209 MT_WF_PHY_B1_PD_OFDM(0x13c));
1210
1211 mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
1212 MT_WF_PHY_B0_PD_CCK_MASK,
1213 MT_WF_PHY_B0_PD_CCK(0x92));
1214 mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
1215 MT_WF_PHY_B1_PD_CCK_MASK,
1216 MT_WF_PHY_B1_PD_CCK(0x92));
1217
1218 dev->ofdm_sensitivity = -98;
1219 dev->cck_sensitivity = -110;
1220 dev->last_cca_adj = jiffies;
1221}
1222
1223void mt7615_mac_set_scs(struct mt7615_dev *dev, bool enable)
1224{
1225 mutex_lock(&dev->mt76.mutex);
1226
1227 if (dev->scs_en == enable)
1228 goto out;
1229
1230 if (enable) {
1231 /* DBDC not supported */
1232 mt76_set(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1233 MT_WF_PHY_B0_PD_BLK);
1234 if (is_mt7622(&dev->mt76)) {
1235 mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7 << 8);
1236 mt76_set(dev, MT_MIB_M0_MISC_CR, 0x7);
1237 }
1238 } else {
1239 mt76_clear(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1240 MT_WF_PHY_B0_PD_BLK);
1241 mt76_clear(dev, MT_WF_PHY_B1_MIN_PRI_PWR,
1242 MT_WF_PHY_B1_PD_BLK);
1243 }
1244
1245 mt7615_mac_set_default_sensitivity(dev);
1246 dev->scs_en = enable;
1247
1248out:
1249 mutex_unlock(&dev->mt76.mutex);
1250}
1251
1252void mt7615_mac_cca_stats_reset(struct mt7615_dev *dev)
1253{
1254 mt76_clear(dev, MT_WF_PHY_R0_B0_PHYMUX_5, GENMASK(22, 20));
1255 mt76_set(dev, MT_WF_PHY_R0_B0_PHYMUX_5, BIT(22) | BIT(20));
1256}
1257
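/*
 * Smart carrier sense: false CCA events (PD count minus MDRDY count) and
 * the RTS error rate decide whether to raise or lower the OFDM/CCK
 * energy-detect thresholds in 2 dB steps, bounded by the defaults
 * (-98/-110 dBm) and the minimum average RSSI of associated stations.
 * The register encodings used below are val = sensitivity * 2 + 512 for
 * OFDM and val = sensitivity + 256 for CCK.
 */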
1258static void
1259mt7615_mac_adjust_sensitivity(struct mt7615_dev *dev,
1260 u32 rts_err_rate, bool ofdm)
1261{
1262 int false_cca = ofdm ? dev->false_cca_ofdm : dev->false_cca_cck;
1263 u16 def_th = ofdm ? -98 : -110;
1264 bool update = false;
1265 s8 *sensitivity;
1266 int signal;
1267
1268 sensitivity = ofdm ? &dev->ofdm_sensitivity : &dev->cck_sensitivity;
1269 signal = mt76_get_min_avg_rssi(&dev->mt76);
1270 if (!signal) {
1271 mt7615_mac_set_default_sensitivity(dev);
1272 return;
1273 }
1274
1275 signal = min(signal, -72);
1276 if (false_cca > 500) {
1277 if (rts_err_rate > MT_FRAC(40, 100))
1278 return;
1279
1280 /* decrease coverage */
1281 if (*sensitivity == def_th && signal > -90) {
1282 *sensitivity = -90;
1283 update = true;
1284 } else if (*sensitivity + 2 < signal) {
1285 *sensitivity += 2;
1286 update = true;
1287 }
1288 } else if ((false_cca > 0 && false_cca < 50) ||
1289 rts_err_rate > MT_FRAC(60, 100)) {
1290 /* increase coverage */
1291 if (*sensitivity - 2 >= def_th) {
1292 *sensitivity -= 2;
1293 update = true;
1294 }
1295 }
1296
1297 if (*sensitivity > signal) {
1298 *sensitivity = signal;
1299 update = true;
1300 }
1301
1302 if (update) {
1303 u16 val;
1304
1305 if (ofdm) {
1306 /* DBDC not supported */
1307 val = *sensitivity * 2 + 512;
1308 mt76_rmw(dev, MT_WF_PHY_B0_MIN_PRI_PWR,
1309 MT_WF_PHY_B0_PD_OFDM_MASK,
1310 MT_WF_PHY_B0_PD_OFDM(val));
1311 } else {
1312 val = *sensitivity + 256;
1313 mt76_rmw(dev, MT_WF_PHY_B0_RXTD_CCK_PD,
1314 MT_WF_PHY_B0_PD_CCK_MASK,
1315 MT_WF_PHY_B0_PD_CCK(val));
1316 mt76_rmw(dev, MT_WF_PHY_B1_RXTD_CCK_PD,
1317 MT_WF_PHY_B1_PD_CCK_MASK,
1318 MT_WF_PHY_B1_PD_CCK(val));
1319 }
1320 dev->last_cca_adj = jiffies;
1321 }
1322}
1323
1324static void
1325mt7615_mac_scs_check(struct mt7615_dev *dev)
1326{
1327 u32 val, rts_cnt = 0, rts_retries_cnt = 0, rts_err_rate = 0;
1328 u32 mdrdy_cck, mdrdy_ofdm, pd_cck, pd_ofdm;
1329 int i;
1330
1331 if (!dev->scs_en)
1332 return;
1333
1334 for (i = 0; i < 4; i++) {
1335 u32 data;
1336
1337 val = mt76_rr(dev, MT_MIB_MB_SDR0(i));
1338 data = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1339 if (data > rts_retries_cnt) {
1340 rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1341 rts_retries_cnt = data;
1342 }
1343 }
1344
1345 val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS0);
1346 pd_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_CCK, val);
1347 pd_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_PD_OFDM, val);
1348
1349 val = mt76_rr(dev, MT_WF_PHY_R0_B0_PHYCTRL_STS5);
1350 mdrdy_cck = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_CCK, val);
1351 mdrdy_ofdm = FIELD_GET(MT_WF_PHYCTRL_STAT_MDRDY_OFDM, val);
1352
1353 dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
1354 dev->false_cca_cck = pd_cck - mdrdy_cck;
1355 mt7615_mac_cca_stats_reset(dev);
1356
1357 if (rts_cnt + rts_retries_cnt)
1358 rts_err_rate = MT_FRAC(rts_retries_cnt,
1359 rts_cnt + rts_retries_cnt);
1360
1361 /* cck */
1362 mt7615_mac_adjust_sensitivity(dev, rts_err_rate, false);
1363 /* ofdm */
1364 mt7615_mac_adjust_sensitivity(dev, rts_err_rate, true);
1365
1366 if (time_after(jiffies, dev->last_cca_adj + 10 * HZ))
1367 mt7615_mac_set_default_sensitivity(dev);
1368}
1369
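/*
 * Accumulate channel survey data: busy, TX, RX and OBSS airtime are read
 * from the MIB/RMAC counters and added to the mt76 channel state, after
 * which the OBSS airtime counter is cleared.
 */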
1370void mt7615_update_channel(struct mt76_dev *mdev)
1371{
1372 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
1373 struct mt76_channel_state *state;
1374 u64 busy_time, tx_time, rx_time, obss_time;
1375
1376 /* TODO: add DBDC support */
1377 busy_time = mt76_get_field(dev, MT_MIB_SDR9(0),
1378 MT_MIB_SDR9_BUSY_MASK);
1379 tx_time = mt76_get_field(dev, MT_MIB_SDR36(0),
1380 MT_MIB_SDR36_TXTIME_MASK);
1381 rx_time = mt76_get_field(dev, MT_MIB_SDR37(0),
1382 MT_MIB_SDR37_RXTIME_MASK);
1383 obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_TIME5,
1384 MT_MIB_OBSSTIME_MASK);
1385
1386 state = mdev->chan_state;
1387 state->cc_busy += busy_time;
1388 state->cc_tx += tx_time;
1389 state->cc_rx += rx_time + obss_time;
1390 state->cc_bss_rx += rx_time;
1391
1392 /* reset obss airtime */
1393 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
1394}
1395
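/*
 * Periodic housekeeping: update the channel survey, run the smart
 * carrier sense check every fifth invocation, fold the per-queue
 * MT_TX_AGG_CNT counters into mt76 aggr_stats, and flush stale TX status
 * entries before rescheduling.
 */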
1396void mt7615_mac_work(struct work_struct *work)
1397{
1398 struct mt7615_dev *dev;
1399 int i, idx;
1400
1401 dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
1402 mac_work.work);
1403
1404 mutex_lock(&dev->mt76.mutex);
1405 mt76_update_survey(&dev->mt76);
1406 if (++dev->mac_work_count == 5) {
1407 mt7615_mac_scs_check(dev);
1408 dev->mac_work_count = 0;
1409 }
1410
1411 for (i = 0, idx = 0; i < 4; i++) {
1412 u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
1413
1414 dev->mt76.aggr_stats[idx++] += val & 0xffff;
1415 dev->mt76.aggr_stats[idx++] += val >> 16;
1416 }
1417 mutex_unlock(&dev->mt76.mutex);
1418
1419 mt76_tx_status_check(&dev->mt76, NULL, false);
1420 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
1421 MT7615_WATCHDOG_TIME);
1422}
1423
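/*
 * DFS handling is largely offloaded to the MCU through RDD commands: a
 * detector is started on RDD0 (and on RDD1 as well for 160/80+80 MHz
 * channels), CAC is started for radar channels that are not yet marked
 * available, and the detectors are stopped again when leaving a DFS
 * channel.
 */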
1424int mt7615_dfs_stop_radar_detector(struct mt7615_dev *dev)
1425{
1426 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1427 int err;
1428
1429 err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD0,
1430 MT_RX_SEL0, 0);
1431 if (err < 0)
1432 return err;
1433
1434 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1435 chandef->width == NL80211_CHAN_WIDTH_80P80)
1436 err = mt7615_mcu_rdd_cmd(dev, RDD_STOP, MT_HW_RDD1,
1437 MT_RX_SEL0, 0);
1438 return err;
1439}
1440
1441static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
1442{
1443 int err;
1444
1445 err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
1446 if (err < 0)
1447 return err;
1448
1449 return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
1450 MT_RX_SEL0, 1);
1451}
1452
1453int mt7615_dfs_start_radar_detector(struct mt7615_dev *dev)
1454{
1455 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1456 int err;
1457
1458 /* start CAC */
1459 err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, MT_HW_RDD0,
1460 MT_RX_SEL0, 0);
1461 if (err < 0)
1462 return err;
1463
1464 /* TODO: DBDC support */
1465
1466 err = mt7615_dfs_start_rdd(dev, MT_HW_RDD0);
1467 if (err < 0)
1468 return err;
1469
1470 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1471 chandef->width == NL80211_CHAN_WIDTH_80P80) {
1472 err = mt7615_dfs_start_rdd(dev, MT_HW_RDD1);
1473 if (err < 0)
1474 return err;
1475 }
1476
1477 return 0;
1478}
1479
1480int mt7615_dfs_init_radar_detector(struct mt7615_dev *dev)
1481{
1482 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
1483 int err;
1484
1485 if (dev->mt76.region == NL80211_DFS_UNSET)
1486 return 0;
1487
1488 if (test_bit(MT76_SCANNING, &dev->mt76.state))
1489 return 0;
1490
1491 if (dev->dfs_state == chandef->chan->dfs_state)
1492 return 0;
1493
1494 dev->dfs_state = chandef->chan->dfs_state;
1495
1496 if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
1497 if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
1498 return mt7615_dfs_start_radar_detector(dev);
1499 else
1500 return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, MT_HW_RDD0,
1501 MT_RX_SEL0, 0);
1502 } else {
1503 err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START,
1504 MT_HW_RDD0, MT_RX_SEL0, 0);
1505 if (err < 0)
1506 return err;
1507
1508 return mt7615_dfs_stop_radar_detector(dev);
1509 }
1510}