1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
7 #include <net/mac80211.h>
 * Newer versions of the ADD_STA command added new fields at the end of the
15 * structure, so sending the size of the relevant API's structure is enough to
16 * support both API versions.
18 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
20 if (iwl_mvm_has_new_rx_api(mvm) ||
21 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
22 return sizeof(struct iwl_mvm_add_sta_cmd);
24 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
27 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
28 enum nl80211_iftype iftype)
33 BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
34 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
36 lockdep_assert_held(&mvm->mutex);
	/* d0i3/d3 assumes the AP's sta_id (of the station vif) is 0. Reserve it. */
39 if (iftype != NL80211_IFTYPE_STATION)
40 reserved_ids = BIT(0);
42 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
43 for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
44 if (BIT(sta_id) & reserved_ids)
47 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
48 lockdep_is_held(&mvm->mutex)))
51 return IWL_MVM_INVALID_STA;
54 /* send station add/update command to firmware */
55 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
56 bool update, unsigned int flags)
58 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
59 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
60 .sta_id = mvm_sta->sta_id,
61 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
62 .add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
66 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
70 u32 agg_size = 0, mpdu_dens = 0;
72 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
73 add_sta_cmd.station_type = mvm_sta->sta_type;
75 if (!update || (flags & STA_MODIFY_QUEUES)) {
76 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
78 if (!iwl_mvm_has_new_tx_api(mvm)) {
79 add_sta_cmd.tfd_queue_msk =
80 cpu_to_le32(mvm_sta->tfd_queue_msk);
82 if (flags & STA_MODIFY_QUEUES)
83 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
85 WARN_ON(flags & STA_MODIFY_QUEUES);
89 switch (sta->deflink.bandwidth) {
90 case IEEE80211_STA_RX_BW_320:
91 case IEEE80211_STA_RX_BW_160:
92 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
94 case IEEE80211_STA_RX_BW_80:
95 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
97 case IEEE80211_STA_RX_BW_40:
98 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
100 case IEEE80211_STA_RX_BW_20:
101 if (sta->deflink.ht_cap.ht_supported)
102 add_sta_cmd.station_flags |=
103 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
107 switch (sta->deflink.rx_nss) {
109 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
112 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
115 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
119 switch (sta->deflink.smps_mode) {
120 case IEEE80211_SMPS_AUTOMATIC:
121 case IEEE80211_SMPS_NUM_MODES:
124 case IEEE80211_SMPS_STATIC:
126 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
127 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
129 case IEEE80211_SMPS_DYNAMIC:
130 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
132 case IEEE80211_SMPS_OFF:
137 if (sta->deflink.ht_cap.ht_supported) {
138 add_sta_cmd.station_flags_msk |=
139 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
140 STA_FLG_AGG_MPDU_DENS_MSK);
142 mpdu_dens = sta->deflink.ht_cap.ampdu_density;
145 if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
146 add_sta_cmd.station_flags_msk |=
147 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
148 STA_FLG_AGG_MPDU_DENS_MSK);
150 mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
151 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
152 agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
153 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
154 } else if (sta->deflink.vht_cap.vht_supported) {
155 agg_size = sta->deflink.vht_cap.cap &
156 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
158 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
159 } else if (sta->deflink.ht_cap.ht_supported) {
160 agg_size = sta->deflink.ht_cap.ampdu_factor;
163 /* D6.0 10.12.2 A-MPDU length limit rules
164 * A STA indicates the maximum length of the A-MPDU preEOF padding
165 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
166 * Exponent field in its HT Capabilities, VHT Capabilities,
167 * and HE 6 GHz Band Capabilities elements (if present) and the
168 * Maximum AMPDU Length Exponent Extension field in its HE
169 * Capabilities element
171 if (sta->deflink.he_cap.has_he)
172 agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
173 IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
175 /* Limit to max A-MPDU supported by FW */
176 if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
177 agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
178 STA_FLG_MAX_AGG_SIZE_SHIFT);
180 add_sta_cmd.station_flags |=
181 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
182 add_sta_cmd.station_flags |=
183 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
184 if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
185 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
188 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
190 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
191 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
192 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
193 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
194 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
195 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
196 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
197 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
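		/*
		 * The uapsd_acs field carries both the trigger-enabled and the
		 * delivery-enabled AC bitmaps (one per nibble); with U-APSD
		 * they are the same set, so mirror the low nibble into the
		 * high one.
		 */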
198 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
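		/*
		 * max_sp is the WMM max service period in units of 2 frames;
		 * 0 means "deliver all buffered frames", which the firmware
		 * represents as 128.
		 */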
199 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
202 status = ADD_STA_SUCCESS;
203 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
204 iwl_mvm_add_sta_cmd_size(mvm),
205 &add_sta_cmd, &status);
209 switch (status & IWL_ADD_STA_STATUS_MASK) {
210 case ADD_STA_SUCCESS:
211 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
215 IWL_ERR(mvm, "ADD_STA failed\n");
222 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
224 struct iwl_mvm_baid_data *data =
225 from_timer(data, t, session_timer);
226 struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
227 struct iwl_mvm_baid_data *ba_data;
228 struct ieee80211_sta *sta;
229 struct iwl_mvm_sta *mvm_sta;
230 unsigned long timeout;
234 ba_data = rcu_dereference(*rcu_ptr);
236 if (WARN_ON(!ba_data))
239 if (!ba_data->timeout)
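	/*
	 * Tear the session down only if nothing was received for twice the
	 * negotiated BlockAck timeout; otherwise re-arm the timer for the
	 * remaining time.
	 */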
242 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
243 if (time_is_after_jiffies(timeout)) {
244 mod_timer(&ba_data->session_timer, timeout);
249 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
252 * sta should be valid unless the following happens:
253 * The firmware asserts which triggers a reconfig flow, but
254 * the reconfig fails before we set the pointer to sta into
255 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
 * A-MPDU and hence the timer continues to run. Then, the
257 * timer expires and sta is NULL.
262 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
263 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
264 sta->addr, ba_data->tid);
269 /* Disable aggregations for a bitmap of TIDs for a given station */
270 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
271 unsigned long disable_agg_tids,
274 struct iwl_mvm_add_sta_cmd cmd = {};
275 struct ieee80211_sta *sta;
276 struct iwl_mvm_sta *mvmsta;
280 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
283 sta_id = mvm->queue_info[queue].ra_sta_id;
287 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
289 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
294 mvmsta = iwl_mvm_sta_from_mac80211(sta);
296 mvmsta->tid_disable_agg |= disable_agg_tids;
298 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
299 cmd.sta_id = mvmsta->sta_id;
300 cmd.add_modify = STA_MODE_MODIFY;
301 cmd.modify_mask = STA_MODIFY_QUEUES;
302 if (disable_agg_tids)
303 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
306 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
307 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
311 /* Notify FW of queue removal from the STA queues */
312 status = ADD_STA_SUCCESS;
313 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
314 iwl_mvm_add_sta_cmd_size(mvm),
318 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
319 int sta_id, u16 *queueptr, u8 tid)
321 int queue = *queueptr;
322 struct iwl_scd_txq_cfg_cmd cmd = {
324 .action = SCD_CFG_DISABLE_QUEUE,
328 lockdep_assert_held(&mvm->mutex);
330 if (iwl_mvm_has_new_tx_api(mvm)) {
331 if (mvm->sta_remove_requires_queue_remove) {
332 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
333 SCD_QUEUE_CONFIG_CMD);
334 struct iwl_scd_queue_cfg_cmd remove_cmd = {
335 .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
336 .u.remove.tid = cpu_to_le32(tid),
337 .u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
340 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
347 iwl_trans_txq_free(mvm->trans, queue);
348 *queueptr = IWL_MVM_INVALID_QUEUE;
353 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
356 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
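	/*
	 * If other TIDs still use this queue, keep it enabled and only update
	 * the firmware; fully disable it only when the last TID is removed.
	 */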
358 cmd.action = mvm->queue_info[queue].tid_bitmap ?
359 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
360 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
361 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
363 IWL_DEBUG_TX_QUEUES(mvm,
364 "Disabling TXQ #%d tids=0x%x\n",
366 mvm->queue_info[queue].tid_bitmap);
368 /* If the queue is still enabled - nothing left to do in this func */
369 if (cmd.action == SCD_CFG_ENABLE_QUEUE)
372 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
373 cmd.tid = mvm->queue_info[queue].txq_tid;
375 /* Make sure queue info is correct even though we overwrite it */
376 WARN(mvm->queue_info[queue].tid_bitmap,
377 "TXQ #%d info out-of-sync - tids=0x%x\n",
378 queue, mvm->queue_info[queue].tid_bitmap);
380 /* If we are here - the queue is freed and we can zero out these vals */
381 mvm->queue_info[queue].tid_bitmap = 0;
384 struct iwl_mvm_txq *mvmtxq =
385 iwl_mvm_txq_from_tid(sta, tid);
387 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
388 list_del_init(&mvmtxq->list);
	/* Regardless of whether this is a reserved TXQ for a STA - mark it as false */
392 mvm->queue_info[queue].reserved = false;
394 iwl_trans_txq_disable(mvm->trans, queue, false);
395 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
396 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
399 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
404 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
406 struct ieee80211_sta *sta;
407 struct iwl_mvm_sta *mvmsta;
408 unsigned long tid_bitmap;
409 unsigned long agg_tids = 0;
413 lockdep_assert_held(&mvm->mutex);
415 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
418 sta_id = mvm->queue_info[queue].ra_sta_id;
419 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
421 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
422 lockdep_is_held(&mvm->mutex));
424 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
427 mvmsta = iwl_mvm_sta_from_mac80211(sta);
429 spin_lock_bh(&mvmsta->lock);
430 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
431 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
432 agg_tids |= BIT(tid);
434 spin_unlock_bh(&mvmsta->lock);
440 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA agreement, and
442 * doesn't disable the queue
444 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
446 struct ieee80211_sta *sta;
447 struct iwl_mvm_sta *mvmsta;
448 unsigned long tid_bitmap;
449 unsigned long disable_agg_tids = 0;
453 lockdep_assert_held(&mvm->mutex);
455 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
458 sta_id = mvm->queue_info[queue].ra_sta_id;
459 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
463 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
465 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
470 mvmsta = iwl_mvm_sta_from_mac80211(sta);
472 spin_lock_bh(&mvmsta->lock);
473 /* Unmap MAC queues and TIDs from this queue */
474 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
475 struct iwl_mvm_txq *mvmtxq =
476 iwl_mvm_txq_from_tid(sta, tid);
478 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
479 disable_agg_tids |= BIT(tid);
480 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
482 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
483 list_del_init(&mvmtxq->list);
486 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
487 spin_unlock_bh(&mvmsta->lock);
492 * The TX path may have been using this TXQ_ID from the tid_data,
493 * so make sure it's no longer running so that we can safely reuse
494 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
495 * above, but nothing guarantees we've stopped using them. Thus,
496 * without this, we could get to iwl_mvm_disable_txq() and remove
497 * the queue while still sending frames to it.
	synchronize_net();

	return disable_agg_tids;
504 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
505 struct ieee80211_sta *old_sta,
508 struct iwl_mvm_sta *mvmsta;
510 unsigned long disable_agg_tids = 0;
512 u16 queue_tmp = queue;
515 lockdep_assert_held(&mvm->mutex);
517 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
520 sta_id = mvm->queue_info[queue].ra_sta_id;
521 tid = mvm->queue_info[queue].txq_tid;
523 same_sta = sta_id == new_sta_id;
525 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
526 if (WARN_ON(!mvmsta))
529 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
530 /* Disable the queue */
531 if (disable_agg_tids)
532 iwl_mvm_invalidate_sta_queue(mvm, queue,
533 disable_agg_tids, false);
535 ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
538 "Failed to free inactive queue %d (ret=%d)\n",
544 /* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
551 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
552 unsigned long tfd_queue_mask, u8 ac)
555 u8 ac_to_queue[IEEE80211_NUM_ACS];
559 * This protects us against grabbing a queue that's being reconfigured
560 * by the inactivity checker.
562 lockdep_assert_held(&mvm->mutex);
564 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
567 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
569 /* See what ACs the existing queues for this STA have */
570 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
571 /* Only DATA queues can be shared */
572 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
573 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
576 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
580 * The queue to share is chosen only from DATA queues as follows (in
 * descending priority):
 * 1. An AC_BE queue
 * 2. Same AC queue
 * 3. Highest AC queue that is lower than new AC
 * 4. Any existing AC (there always is at least 1 DATA queue)
588 /* Priority 1: An AC_BE queue */
589 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
590 queue = ac_to_queue[IEEE80211_AC_BE];
591 /* Priority 2: Same AC queue */
592 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
593 queue = ac_to_queue[ac];
594 /* Priority 3a: If new AC is VO and VI exists - use VI */
595 else if (ac == IEEE80211_AC_VO &&
596 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
597 queue = ac_to_queue[IEEE80211_AC_VI];
598 /* Priority 3b: No BE so only AC less than the new one is BK */
599 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
600 queue = ac_to_queue[IEEE80211_AC_BK];
601 /* Priority 4a: No BE nor BK - use VI if exists */
602 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
603 queue = ac_to_queue[IEEE80211_AC_VI];
604 /* Priority 4b: No BE, BK nor VI - use VO if exists */
605 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
606 queue = ac_to_queue[IEEE80211_AC_VO];
608 /* Make sure queue found (or not) is legal */
609 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
610 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
611 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
612 IWL_ERR(mvm, "No DATA queues available to share\n");
619 /* Re-configure the SCD for a queue that has already been configured */
620 static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
621 int sta_id, int tid, int frame_limit, u16 ssn)
623 struct iwl_scd_txq_cfg_cmd cmd = {
625 .action = SCD_CFG_ENABLE_QUEUE,
626 .window = frame_limit,
628 .ssn = cpu_to_le16(ssn),
630 .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
631 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
636 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
639 if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
640 "Trying to reconfig unallocated queue %d\n", queue))
643 IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
645 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
646 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
653 * If a given queue has a higher AC than the TID stream that is being compared
654 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case; otherwise - if no redirection is required - it does nothing,
656 * unless the %force param is true.
658 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
659 int ac, int ssn, unsigned int wdg_timeout,
660 bool force, struct iwl_mvm_txq *txq)
662 struct iwl_scd_txq_cfg_cmd cmd = {
664 .action = SCD_CFG_DISABLE_QUEUE,
669 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
673 * If the AC is lower than current one - FIFO needs to be redirected to
674 * the lowest one of the streams in the queue. Check if this is needed
676 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
677 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
678 * we need to check if the numerical value of X is LARGER than of Y.
680 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
681 IWL_DEBUG_TX_QUEUES(mvm,
682 "No redirection needed on TXQ #%d\n",
687 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
688 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
689 cmd.tid = mvm->queue_info[queue].txq_tid;
690 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
692 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
693 queue, iwl_mvm_ac_to_tx_fifo[ac]);
695 /* Stop the queue and wait for it to empty */
696 set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
698 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
700 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
706 /* Before redirecting the queue we need to de-activate it */
707 iwl_trans_txq_disable(mvm->trans, queue, false);
708 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
710 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
713 /* Make sure the SCD wrptr is correctly set before reconfiguring */
714 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
716 /* Update the TID "owner" of the queue */
717 mvm->queue_info[queue].txq_tid = tid;
719 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
721 /* Redirect to lower AC */
722 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
723 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
725 /* Update AC marking of the queue */
726 mvm->queue_info[queue].mac80211_ac = ac;
729 * Mark queue as shared in transport if shared
730 * Note this has to be done after queue enablement because enablement
 * can also set this value, and there is no indication there for shared queues
735 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
738 /* Continue using the queue */
739 clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
744 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
749 lockdep_assert_held(&mvm->mutex);
751 if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
752 "max queue %d >= num_of_queues (%d)", maxq,
753 mvm->trans->trans_cfg->base_params->num_of_queues))
754 maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
756 /* This should not be hit with new TX path */
757 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
760 /* Start by looking for a free queue */
761 for (i = minq; i <= maxq; i++)
762 if (mvm->queue_info[i].tid_bitmap == 0 &&
763 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
769 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
770 u8 sta_id, u8 tid, unsigned int timeout)
774 if (tid == IWL_MAX_TID_COUNT) {
776 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
777 mvm->trans->cfg->min_txq_size);
779 struct ieee80211_sta *sta;
782 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
784 /* this queue isn't used for traffic (cab_queue) */
785 if (IS_ERR_OR_NULL(sta)) {
786 size = IWL_MGMT_QUEUE_SIZE;
787 } else if (sta->deflink.he_cap.has_he) {
788 /* support for 256 ba size */
789 size = IWL_DEFAULT_QUEUE_SIZE_HE;
791 size = IWL_DEFAULT_QUEUE_SIZE;
797 /* take the min with bc tbl entries allowed */
798 size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
	/* size needs to be a power of 2 for calculating the read/write pointers */
801 size = rounddown_pow_of_two(size);
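	/*
	 * Try to allocate the queue, retrying with a smaller size (see the
	 * loop condition below) if the transport cannot provide one of the
	 * requested size.
	 */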
804 queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
808 IWL_DEBUG_TX_QUEUES(mvm,
809 "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
810 size, sta_id, tid, queue);
812 } while (queue < 0 && size >= 16);
817 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
823 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
824 struct ieee80211_sta *sta, u8 ac,
827 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
828 struct iwl_mvm_txq *mvmtxq =
829 iwl_mvm_txq_from_tid(sta, tid);
830 unsigned int wdg_timeout =
831 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
834 lockdep_assert_held(&mvm->mutex);
836 IWL_DEBUG_TX_QUEUES(mvm,
837 "Allocating queue for sta %d on tid %d\n",
838 mvmsta->sta_id, tid);
839 queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
843 mvmtxq->txq_id = queue;
844 mvm->tvqm_info[queue].txq_tid = tid;
845 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
847 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
849 spin_lock_bh(&mvmsta->lock);
850 mvmsta->tid_data[tid].txq_id = queue;
851 spin_unlock_bh(&mvmsta->lock);
856 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
857 struct ieee80211_sta *sta,
858 int queue, u8 sta_id, u8 tid)
860 bool enable_queue = true;
862 /* Make sure this TID isn't already enabled */
863 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
864 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
869 /* Update mappings and refcounts */
870 if (mvm->queue_info[queue].tid_bitmap)
871 enable_queue = false;
873 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
874 mvm->queue_info[queue].ra_sta_id = sta_id;
877 if (tid != IWL_MAX_TID_COUNT)
878 mvm->queue_info[queue].mac80211_ac =
879 tid_to_mac80211_ac[tid];
881 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
883 mvm->queue_info[queue].txq_tid = tid;
887 struct iwl_mvm_txq *mvmtxq =
888 iwl_mvm_txq_from_tid(sta, tid);
890 mvmtxq->txq_id = queue;
893 IWL_DEBUG_TX_QUEUES(mvm,
894 "Enabling TXQ #%d tids=0x%x\n",
895 queue, mvm->queue_info[queue].tid_bitmap);
900 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
902 const struct iwl_trans_txq_scd_cfg *cfg,
903 unsigned int wdg_timeout)
905 struct iwl_scd_txq_cfg_cmd cmd = {
907 .action = SCD_CFG_ENABLE_QUEUE,
908 .window = cfg->frame_limit,
909 .sta_id = cfg->sta_id,
910 .ssn = cpu_to_le16(ssn),
911 .tx_fifo = cfg->fifo,
912 .aggregate = cfg->aggregate,
917 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
920 /* Send the enabling command if we need to */
921 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);
929 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
930 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
935 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
937 struct iwl_scd_txq_cfg_cmd cmd = {
939 .action = SCD_CFG_UPDATE_QUEUE_TID,
942 unsigned long tid_bitmap;
945 lockdep_assert_held(&mvm->mutex);
947 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
950 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
952 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
955 /* Find any TID for queue */
956 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
958 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
960 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
962 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
967 mvm->queue_info[queue].txq_tid = tid;
968 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
972 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
974 struct ieee80211_sta *sta;
975 struct iwl_mvm_sta *mvmsta;
978 unsigned long tid_bitmap;
979 unsigned int wdg_timeout;
983 /* queue sharing is disabled on new TX path */
984 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
987 lockdep_assert_held(&mvm->mutex);
989 sta_id = mvm->queue_info[queue].ra_sta_id;
990 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
992 /* Find TID for queue, and make sure it is the only one on the queue */
993 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
994 if (tid_bitmap != BIT(tid)) {
995 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
1000 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
1003 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1004 lockdep_is_held(&mvm->mutex));
1006 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
1009 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1010 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1012 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1014 ret = iwl_mvm_redirect_queue(mvm, queue, tid,
1015 tid_to_mac80211_ac[tid], ssn,
1017 iwl_mvm_txq_from_tid(sta, tid));
1019 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1023 /* If aggs should be turned back on - do it */
1024 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1025 struct iwl_mvm_add_sta_cmd cmd = {0};
1027 mvmsta->tid_disable_agg &= ~BIT(tid);
1029 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1030 cmd.sta_id = mvmsta->sta_id;
1031 cmd.add_modify = STA_MODE_MODIFY;
1032 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1033 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1034 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1036 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1037 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1039 IWL_DEBUG_TX_QUEUES(mvm,
1040 "TXQ #%d is now aggregated again\n",
		/* Mark queue internally as aggregating again */
1044 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1048 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1052 * Remove inactive TIDs of a given queue.
1053 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
1056 * Returns %true if all TIDs were removed and the queue could be reused.
1058 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1059 struct iwl_mvm_sta *mvmsta, int queue,
1060 unsigned long tid_bitmap,
1061 unsigned long *unshare_queues,
1062 unsigned long *changetid_queues)
1066 lockdep_assert_held(&mvmsta->lock);
1067 lockdep_assert_held(&mvm->mutex);
1069 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1072 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1073 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1074 /* If some TFDs are still queued - don't mark TID as inactive */
1075 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1076 tid_bitmap &= ~BIT(tid);
1078 /* Don't mark as inactive any TID that has an active BA */
1079 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1080 tid_bitmap &= ~BIT(tid);
1083 /* If all TIDs in the queue are inactive - return it can be reused */
1084 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1085 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1090 * If we are here, this is a shared queue and not all TIDs timed-out.
1091 * Remove the ones that did.
1093 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1096 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1097 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1099 q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1102 * We need to take into account a situation in which a TXQ was
1103 * allocated to TID x, and then turned shared by adding TIDs y
1104 * and z. If TID x becomes inactive and is removed from the TXQ,
1105 * ownership must be given to one of the remaining TIDs.
1106 * This is mainly because if TID x continues - a new queue can't
1107 * be allocated for it as long as it is an owner of another TXQ.
1109 * Mark this queue in the right bitmap, we'll send the command
1110 * to the firmware later.
1112 if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1113 set_bit(queue, changetid_queues);
1115 IWL_DEBUG_TX_QUEUES(mvm,
1116 "Removing inactive TID %d from shared Q:%d\n",
1120 IWL_DEBUG_TX_QUEUES(mvm,
1121 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1122 mvm->queue_info[queue].tid_bitmap);
1125 * There may be different TIDs with the same mac queues, so make
1126 * sure all TIDs have existing corresponding mac queues enabled
1128 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1130 /* If the queue is marked as shared - "unshare" it */
1131 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1132 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1133 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1135 set_bit(queue, unshare_queues);
1142 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be reused.
1145 * This function is also invoked as a sort of clean-up task,
1146 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1148 * Returns the queue number, or -ENOSPC.
1150 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1152 unsigned long now = jiffies;
1153 unsigned long unshare_queues = 0;
1154 unsigned long changetid_queues = 0;
1155 int i, ret, free_queue = -ENOSPC;
1156 struct ieee80211_sta *queue_owner = NULL;
1158 lockdep_assert_held(&mvm->mutex);
1160 if (iwl_mvm_has_new_tx_api(mvm))
1165 /* we skip the CMD queue below by starting at 1 */
1166 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1168 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1169 struct ieee80211_sta *sta;
1170 struct iwl_mvm_sta *mvmsta;
1173 unsigned long inactive_tid_bitmap = 0;
1174 unsigned long queue_tid_bitmap;
1176 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1177 if (!queue_tid_bitmap)
1180 /* If TXQ isn't in active use anyway - nothing to do here... */
1181 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1182 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1185 /* Check to see if there are inactive TIDs on this queue */
1186 for_each_set_bit(tid, &queue_tid_bitmap,
1187 IWL_MAX_TID_COUNT + 1) {
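			/*
			 * A TID counts as inactive if it has not seen a frame
			 * for at least IWL_MVM_DQA_QUEUE_TIMEOUT.
			 */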
1188 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1189 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1192 inactive_tid_bitmap |= BIT(tid);
1195 /* If all TIDs are active - finish check on this queue */
1196 if (!inactive_tid_bitmap)
		 * If we are here - the queue hadn't been served recently and is in use
1204 sta_id = mvm->queue_info[i].ra_sta_id;
1205 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1208 * If the STA doesn't exist anymore, it isn't an error. It could
1209 * be that it was removed since getting the queues, and in this
1210 * case it should've inactivated its queues anyway.
1212 if (IS_ERR_OR_NULL(sta))
1215 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1217 spin_lock_bh(&mvmsta->lock);
1218 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1219 inactive_tid_bitmap,
1222 if (ret && free_queue < 0) {
1226 /* only unlock sta lock - we still need the queue info lock */
1227 spin_unlock_bh(&mvmsta->lock);
	/* Reconfigure queues requiring reconfiguration */
1232 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1233 iwl_mvm_unshare_queue(mvm, i);
1234 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1235 iwl_mvm_change_queue_tid(mvm, i);
1239 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1240 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1249 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1250 struct ieee80211_sta *sta, u8 ac, int tid)
1252 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1253 struct iwl_trans_txq_scd_cfg cfg = {
1254 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1255 .sta_id = mvmsta->sta_id,
1257 .frame_limit = IWL_FRAME_LIMIT,
1259 unsigned int wdg_timeout =
1260 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1263 unsigned long disable_agg_tids = 0;
1264 enum iwl_mvm_agg_state queue_state;
1265 bool shared_queue = false, inc_ssn;
1267 unsigned long tfd_queue_mask;
1270 lockdep_assert_held(&mvm->mutex);
1272 if (iwl_mvm_has_new_tx_api(mvm))
1273 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1275 spin_lock_bh(&mvmsta->lock);
1276 tfd_queue_mask = mvmsta->tfd_queue_msk;
1277 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1278 spin_unlock_bh(&mvmsta->lock);
1280 if (tid == IWL_MAX_TID_COUNT) {
1281 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1282 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1283 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1284 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1285 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1288 /* If no such queue is found, we'll use a DATA queue instead */
1291 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1292 (mvm->queue_info[mvmsta->reserved_queue].status ==
1293 IWL_MVM_QUEUE_RESERVED)) {
1294 queue = mvmsta->reserved_queue;
1295 mvm->queue_info[queue].reserved = true;
1296 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1300 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1301 IWL_MVM_DQA_MIN_DATA_QUEUE,
1302 IWL_MVM_DQA_MAX_DATA_QUEUE);
1304 /* try harder - perhaps kill an inactive queue */
1305 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1308 /* No free queue - we'll have to share */
1310 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1312 shared_queue = true;
1313 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1318 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1319 * to make sure no one else takes it.
1320 * This will allow avoiding re-acquiring the lock at the end of the
1321 * configuration. On error we'll mark it back as free.
1323 if (queue > 0 && !shared_queue)
1324 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1326 /* This shouldn't happen - out of queues */
1327 if (WARN_ON(queue <= 0)) {
1328 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1334 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregated.
	 * Mark all DATA queues as allowing to be aggregated at some point
1339 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1340 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1342 IWL_DEBUG_TX_QUEUES(mvm,
1343 "Allocating %squeue #%d to sta %d on tid %d\n",
1344 shared_queue ? "shared " : "", queue,
1345 mvmsta->sta_id, tid);
1348 /* Disable any open aggs on this queue */
1349 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1351 if (disable_agg_tids) {
1352 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1354 iwl_mvm_invalidate_sta_queue(mvm, queue,
1355 disable_agg_tids, false);
1359 inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1362 * Mark queue as shared in transport if shared
1363 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for shared queues
1368 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1370 spin_lock_bh(&mvmsta->lock);
1372 * This looks racy, but it is not. We have only one packet for
1373 * this ra/tid in our Tx path since we stop the Qdisc when we
1374 * need to allocate a new TFD queue.
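	/*
	 * If the transport had to bump the SSN when enabling the queue,
	 * advance the driver's sequence number as well so both stay in sync.
	 */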
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
1380 mvmsta->tid_data[tid].txq_id = queue;
1381 mvmsta->tfd_queue_msk |= BIT(queue);
1382 queue_state = mvmsta->tid_data[tid].state;
1384 if (mvmsta->reserved_queue == queue)
1385 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1386 spin_unlock_bh(&mvmsta->lock);
1388 if (!shared_queue) {
1389 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1393 /* If we need to re-enable aggregations... */
1394 if (queue_state == IWL_AGG_ON) {
1395 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1400 /* Redirect queue, if needed */
1401 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1403 iwl_mvm_txq_from_tid(sta, tid));
1412 iwl_mvm_disable_txq(mvm, sta, mvmsta->sta_id, &queue_tmp, tid);
1417 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1419 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1422 mutex_lock(&mvm->mutex);
1424 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1426 while (!list_empty(&mvm->add_stream_txqs)) {
1427 struct iwl_mvm_txq *mvmtxq;
1428 struct ieee80211_txq *txq;
1431 mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1432 struct iwl_mvm_txq, list);
		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
1438 tid = IWL_MAX_TID_COUNT;
1441 * We can't really do much here, but if this fails we can't
1442 * transmit anyway - so just don't transmit the frame etc.
1443 * and let them back up ... we've tried our best to allocate
1444 * a queue in the function itself.
1446 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1447 list_del_init(&mvmtxq->list);
1451 list_del_init(&mvmtxq->list);
1453 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1457 mutex_unlock(&mvm->mutex);
1460 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1461 struct ieee80211_sta *sta,
1462 enum nl80211_iftype vif_type)
1464 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1467 /* queue reserving is disabled on new TX path */
1468 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1471 /* run the general cleanup/unsharing of queues */
1472 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1474 /* Make sure we have free resources for this STA */
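	/*
	 * A non-TDLS client on a station interface can take the dedicated
	 * BSS client queue if it is still free; otherwise fall back to any
	 * free DATA queue.
	 */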
1475 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1476 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1477 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1478 IWL_MVM_QUEUE_FREE))
1479 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1481 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1482 IWL_MVM_DQA_MIN_DATA_QUEUE,
1483 IWL_MVM_DQA_MAX_DATA_QUEUE);
1485 /* try again - this time kick out a queue if needed */
1486 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1488 IWL_ERR(mvm, "No available queues for new station\n");
1492 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1494 mvmsta->reserved_queue = queue;
1496 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1497 queue, mvmsta->sta_id);
1503 * In DQA mode, after a HW restart the queues should be allocated as before, in
1504 * order to avoid race conditions when there are shared queues. This function
1505 * does the re-mapping and queue allocation.
1507 * Note that re-enabling aggregations isn't done in this function.
1509 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1510 struct ieee80211_sta *sta)
1512 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1514 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1516 struct iwl_trans_txq_scd_cfg cfg = {
1517 .sta_id = mvm_sta->sta_id,
1518 .frame_limit = IWL_FRAME_LIMIT,
1521 /* Make sure reserved queue is still marked as such (if allocated) */
1522 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1523 mvm->queue_info[mvm_sta->reserved_queue].status =
1524 IWL_MVM_QUEUE_RESERVED;
1526 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1527 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1528 int txq_id = tid_data->txq_id;
1531 if (txq_id == IWL_MVM_INVALID_QUEUE)
1534 ac = tid_to_mac80211_ac[i];
1536 if (iwl_mvm_has_new_tx_api(mvm)) {
1537 IWL_DEBUG_TX_QUEUES(mvm,
1538 "Re-mapping sta %d tid %d\n",
1539 mvm_sta->sta_id, i);
1540 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1543 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
1549 tid_data->txq_id = txq_id;
1552 * Since we don't set the seq number after reset, and HW
1553 * sets it now, FW reset will cause the seq num to start
1554 * at 0 again, so driver will need to update it
1555 * internally as well, so it keeps in sync with real val
1557 tid_data->seq_number = 0;
1559 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1562 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1563 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1565 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1567 IWL_DEBUG_TX_QUEUES(mvm,
1568 "Re-mapping sta %d tid %d to queue %d\n",
1569 mvm_sta->sta_id, i, txq_id);
1571 iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1572 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1577 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1578 struct iwl_mvm_int_sta *sta,
1580 u16 mac_id, u16 color)
1582 struct iwl_mvm_add_sta_cmd cmd;
1584 u32 status = ADD_STA_SUCCESS;
1586 lockdep_assert_held(&mvm->mutex);
1588 memset(&cmd, 0, sizeof(cmd));
1589 cmd.sta_id = sta->sta_id;
1591 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
1592 sta->type == IWL_STA_AUX_ACTIVITY)
1593 cmd.mac_id_n_color = cpu_to_le32(mac_id);
1595 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1598 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1599 cmd.station_type = sta->type;
1601 if (!iwl_mvm_has_new_tx_api(mvm))
1602 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
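	/* Internal stations never use TX aggregation, so disable it on all TIDs */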
1603 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1606 memcpy(cmd.addr, addr, ETH_ALEN);
1608 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1609 iwl_mvm_add_sta_cmd_size(mvm),
1614 switch (status & IWL_ADD_STA_STATUS_MASK) {
1615 case ADD_STA_SUCCESS:
1616 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1620 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1627 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1628 struct ieee80211_vif *vif,
1629 struct ieee80211_sta *sta)
1631 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1632 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1633 struct iwl_mvm_rxq_dup_data *dup_data;
1635 bool sta_update = false;
1636 unsigned int sta_flags = 0;
1638 lockdep_assert_held(&mvm->mutex);
1640 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1641 sta_id = iwl_mvm_find_free_sta_id(mvm,
1642 ieee80211_vif_type_p2p(vif));
1644 sta_id = mvm_sta->sta_id;
1646 if (sta_id == IWL_MVM_INVALID_STA)
1649 spin_lock_init(&mvm_sta->lock);
1651 /* if this is a HW restart re-alloc existing queues */
1652 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1653 struct iwl_mvm_int_sta tmp_sta = {
1655 .type = mvm_sta->sta_type,
1659 * First add an empty station since allocating
1660 * a queue requires a valid station
1662 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1663 mvmvif->id, mvmvif->color);
1667 iwl_mvm_realloc_queues_after_restart(mvm, sta);
1669 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1673 mvm_sta->sta_id = sta_id;
1674 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1677 if (!mvm->trans->trans_cfg->gen2)
1678 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1680 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1681 mvm_sta->tx_protection = 0;
1682 mvm_sta->tt_tx_protection = false;
1683 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1685 /* HW restart, don't assume the memory has been zeroed */
1686 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1687 mvm_sta->tfd_queue_msk = 0;
1689 /* for HW restart - reset everything but the sequence number */
1690 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1691 u16 seq = mvm_sta->tid_data[i].seq_number;
1692 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1693 mvm_sta->tid_data[i].seq_number = seq;
1696 * Mark all queues for this STA as unallocated and defer TX
1697 * frames until the queue is allocated
1699 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1702 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1703 struct iwl_mvm_txq *mvmtxq =
1704 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1706 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1707 INIT_LIST_HEAD(&mvmtxq->list);
1708 atomic_set(&mvmtxq->tx_request, 0);
1711 mvm_sta->agg_tids = 0;
1713 if (iwl_mvm_has_new_rx_api(mvm) &&
1714 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1717 dup_data = kcalloc(mvm->trans->num_rx_queues,
1718 sizeof(*dup_data), GFP_KERNEL);
1722 * Initialize all the last_seq values to 0xffff which can never
1723 * compare equal to the frame's seq_ctrl in the check in
1724 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1725 * number and fragmented packets don't reach that function.
1727 * This thus allows receiving a packet with seqno 0 and the
1728 * retry bit set as the very first packet on a new TID.
1730 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1731 memset(dup_data[q].last_seq, 0xff,
1732 sizeof(dup_data[q].last_seq));
1733 mvm_sta->dup_data = dup_data;
1736 if (!iwl_mvm_has_new_tx_api(mvm)) {
1737 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1738 ieee80211_vif_type_p2p(vif));
1744 * if rs is registered with mac80211, then "add station" will be handled
1745 * via the corresponding ops, otherwise need to notify rate scaling here
1747 if (iwl_mvm_has_tlc_offload(mvm))
1748 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1750 spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1752 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1755 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}
1768 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1776 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1779 struct iwl_mvm_add_sta_cmd cmd = {};
1783 lockdep_assert_held(&mvm->mutex);
1785 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1786 cmd.sta_id = mvmsta->sta_id;
1787 cmd.add_modify = STA_MODE_MODIFY;
1788 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1789 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1791 status = ADD_STA_SUCCESS;
1792 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1793 iwl_mvm_add_sta_cmd_size(mvm),
1798 switch (status & IWL_ADD_STA_STATUS_MASK) {
1799 case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1805 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1814 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (sanity only).
1818 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1820 struct ieee80211_sta *sta;
1821 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1826 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1827 lockdep_is_held(&mvm->mutex));
1829 /* Note: internal stations are marked as error values */
1831 IWL_ERR(mvm, "Invalid station id\n");
1835 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1836 sizeof(rm_sta_cmd), &rm_sta_cmd);
1838 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1845 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1846 struct ieee80211_vif *vif,
1847 struct ieee80211_sta *sta)
1849 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1852 lockdep_assert_held(&mvm->mutex);
1854 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1855 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1858 iwl_mvm_disable_txq(mvm, sta, mvm_sta->sta_id,
1859 &mvm_sta->tid_data[i].txq_id, i);
1860 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1863 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1864 struct iwl_mvm_txq *mvmtxq =
1865 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1867 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1868 list_del_init(&mvmtxq->list);
1872 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1873 struct iwl_mvm_sta *mvm_sta)
1877 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1881 spin_lock_bh(&mvm_sta->lock);
1882 txq_id = mvm_sta->tid_data[i].txq_id;
1883 spin_unlock_bh(&mvm_sta->lock);
1885 if (txq_id == IWL_MVM_INVALID_QUEUE)
1888 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1896 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1897 struct ieee80211_vif *vif,
1898 struct ieee80211_sta *sta)
1900 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1901 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1902 u8 sta_id = mvm_sta->sta_id;
1905 lockdep_assert_held(&mvm->mutex);
1907 if (iwl_mvm_has_new_rx_api(mvm))
1908 kfree(mvm_sta->dup_data);
1910 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1914 /* flush its queues here since we are freeing mvm_sta */
1915 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
1918 if (iwl_mvm_has_new_tx_api(mvm)) {
1919 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1921 u32 q_mask = mvm_sta->tfd_queue_msk;
1923 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1929 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1931 iwl_mvm_disable_sta_queues(mvm, vif, sta);
1933 /* If there is a TXQ still marked as reserved - free it */
1934 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1935 u8 reserved_txq = mvm_sta->reserved_queue;
1936 enum iwl_mvm_queue_status *status;
1939 * If no traffic has gone through the reserved TXQ - it
1940 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1941 * should be manually marked as free again
1943 status = &mvm->queue_info[reserved_txq].status;
1944 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1945 (*status != IWL_MVM_QUEUE_FREE),
1946 "sta_id %d reserved txq %d status %d",
1947 sta_id, reserved_txq, *status))
1950 *status = IWL_MVM_QUEUE_FREE;
1953 if (vif->type == NL80211_IFTYPE_STATION &&
1954 mvmvif->ap_sta_id == sta_id) {
1955 /* if associated - we can't remove the AP STA now */
1959 /* first remove remaining keys */
1960 iwl_mvm_sec_key_remove_ap(mvm, vif);
1962 /* unassoc - go ahead - remove the AP STA now */
1963 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1967 * This shouldn't happen - the TDLS channel switch should be canceled
1968 * before the STA is removed.
1970 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1971 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1972 cancel_delayed_work(&mvm->tdls_cs.dwork);
1976 * Make sure that the tx response code sees the station as -EBUSY and
1977 * calls the drain worker.
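	/*
	 * Taking and immediately releasing the lock acts as a barrier: any
	 * TX response handler that grabbed the lock before this point has
	 * finished by the time we proceed to remove the station.
	 */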
1979 spin_lock_bh(&mvm_sta->lock);
1980 spin_unlock_bh(&mvm_sta->lock);
1982 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1983 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1988 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1989 struct ieee80211_vif *vif,
1992 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1994 lockdep_assert_held(&mvm->mutex);
1996 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2000 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2001 struct iwl_mvm_int_sta *sta,
2002 u32 qmask, enum nl80211_iftype iftype,
2003 enum iwl_sta_type type)
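	/*
	 * Across a HW restart, keep the station ID that was already assigned
	 * so the queues can be re-mapped consistently; otherwise pick a free one.
	 */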
2005 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2006 sta->sta_id == IWL_MVM_INVALID_STA) {
2007 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2008 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2012 sta->tfd_queue_msk = qmask;
2015 /* put a non-NULL value so iterating over the stations won't stop */
2016 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2020 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2022 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2023 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2024 sta->sta_id = IWL_MVM_INVALID_STA;
2027 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2030 unsigned int wdg_timeout =
2031 mvm->trans->trans_cfg->base_params->wd_timeout;
2032 struct iwl_trans_txq_scd_cfg cfg = {
2035 .tid = IWL_MAX_TID_COUNT,
2037 .frame_limit = IWL_FRAME_LIMIT,
2040 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2042 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2045 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2047 unsigned int wdg_timeout =
2048 mvm->trans->trans_cfg->base_params->wd_timeout;
2050 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2052 return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2056 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2057 int maccolor, u8 *addr,
2058 struct iwl_mvm_int_sta *sta,
2059 u16 *queue, int fifo)
2063 /* Map queue to fifo - needs to happen before adding station */
2064 if (!iwl_mvm_has_new_tx_api(mvm))
2065 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2067 ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2069 if (!iwl_mvm_has_new_tx_api(mvm))
2070 iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
	 * For 22000 firmware and on we cannot add a queue to a station unknown
2077 * to firmware so enable queue here - after the station was added
2079 if (iwl_mvm_has_new_tx_api(mvm)) {
2082 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2084 iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2094 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2098 lockdep_assert_held(&mvm->mutex);
2100 /* Allocate aux station and assign to it the aux queue */
2101 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2102 NL80211_IFTYPE_UNSPECIFIED,
2103 IWL_STA_AUX_ACTIVITY);
	 * In CDB NICs we need to specify which lmac to use for aux activity;
	 * the mac_id argument is reused to pass lmac_id to the function
2111 ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2112 &mvm->aux_sta, &mvm->aux_queue,
2113 IWL_MVM_TX_FIFO_MCAST);
2115 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2122 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2124 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2126 lockdep_assert_held(&mvm->mutex);
2128 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2129 NULL, &mvm->snif_sta,
2131 IWL_MVM_TX_FIFO_BE);
2134 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2138 lockdep_assert_held(&mvm->mutex);
2140 if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2143 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
2144 &mvm->snif_queue, IWL_MAX_TID_COUNT);
2145 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2147 IWL_WARN(mvm, "Failed sending remove station\n");
2152 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2156 lockdep_assert_held(&mvm->mutex);
2158 if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2161 iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
2162 &mvm->aux_queue, IWL_MAX_TID_COUNT);
2163 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2165 IWL_WARN(mvm, "Failed sending remove station\n");
2166 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2171 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2173 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2177 * Send the add station command for the vif's broadcast station.
2178 * Assumes that the station was already allocated.
2180 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
2184 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2186 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2187 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2188 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2189 const u8 *baddr = _baddr;
2192 unsigned int wdg_timeout =
2193 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2194 struct iwl_trans_txq_scd_cfg cfg = {
2195 .fifo = IWL_MVM_TX_FIFO_VO,
2196 .sta_id = mvmvif->bcast_sta.sta_id,
2197 .tid = IWL_MAX_TID_COUNT,
2199 .frame_limit = IWL_FRAME_LIMIT,
2202 lockdep_assert_held(&mvm->mutex);
2204 if (!iwl_mvm_has_new_tx_api(mvm)) {
2205 if (vif->type == NL80211_IFTYPE_AP ||
2206 vif->type == NL80211_IFTYPE_ADHOC) {
2207 queue = mvm->probe_queue;
2208 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2209 queue = mvm->p2p_dev_queue;
2211 WARN(1, "Missing required TXQ for adding bcast STA\n");
2215 bsta->tfd_queue_msk |= BIT(queue);
2217 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2220 if (vif->type == NL80211_IFTYPE_ADHOC)
2221 baddr = vif->bss_conf.bssid;
2223 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2226 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2227 mvmvif->id, mvmvif->color);
2232 * For 22000 firmware and onward we cannot add a queue to a station unknown
2233 * to the firmware, so enable the queue here - after the station was added
2235 if (iwl_mvm_has_new_tx_api(mvm)) {
2236 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2240 iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2244 if (vif->type == NL80211_IFTYPE_AP ||
2245 vif->type == NL80211_IFTYPE_ADHOC)
2246 mvm->probe_queue = queue;
2247 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2248 mvm->p2p_dev_queue = queue;
2254 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2255 struct ieee80211_vif *vif)
2257 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2258 u16 *queueptr, queue;
2260 lockdep_assert_held(&mvm->mutex);
2262 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2264 switch (vif->type) {
2265 case NL80211_IFTYPE_AP:
2266 case NL80211_IFTYPE_ADHOC:
2267 queueptr = &mvm->probe_queue;
2269 case NL80211_IFTYPE_P2P_DEVICE:
2270 queueptr = &mvm->p2p_dev_queue;
2273 WARN(1, "Can't free bcast queue on vif type %d\n",
2279 iwl_mvm_disable_txq(mvm, NULL, mvmvif->bcast_sta.sta_id,
2280 queueptr, IWL_MAX_TID_COUNT);
2281 if (iwl_mvm_has_new_tx_api(mvm))
2284 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2285 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2288 /* Send the FW a request to remove the station from its internal data
2289 * structures, but DO NOT remove the entry from the local data structures. */
2290 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2295 lockdep_assert_held(&mvm->mutex);
2297 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2299 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2301 IWL_WARN(mvm, "Failed sending remove station\n");
2305 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2307 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2309 lockdep_assert_held(&mvm->mutex);
2311 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2312 ieee80211_vif_type_p2p(vif),
2313 IWL_STA_GENERAL_PURPOSE);
2316 /* Allocate a new station entry for the broadcast station to the given vif,
2317 * and send it to the FW.
2318 * Note that each P2P mac should have its own broadcast station.
2320 * @mvm: the mvm component
2321 * @vif: the interface to which the broadcast station is added
2322 * @bsta: the broadcast station to add. */
2323 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2325 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2326 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2329 lockdep_assert_held(&mvm->mutex);
2331 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2335 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2338 iwl_mvm_dealloc_int_sta(mvm, bsta);
2343 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2345 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2347 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2351 * Send the FW a request to remove the station from its internal data
2352 * structures, and in addition remove it from the local data structure.
2354 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2358 lockdep_assert_held(&mvm->mutex);
2360 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2362 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2368 * Allocate a new station entry for the multicast station to the given vif,
2369 * and send it to the FW.
2370 * Note that each AP/GO mac should have its own multicast station.
2372 * @mvm: the mvm component
2373 * @vif: the interface to which the multicast station is added
2375 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2377 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2378 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2379 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2380 const u8 *maddr = _maddr;
2381 struct iwl_trans_txq_scd_cfg cfg = {
2382 .fifo = vif->type == NL80211_IFTYPE_AP ?
2383 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2384 .sta_id = msta->sta_id,
2387 .frame_limit = IWL_FRAME_LIMIT,
2389 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2392 lockdep_assert_held(&mvm->mutex);
2394 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2395 vif->type != NL80211_IFTYPE_ADHOC))
2399 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2400 * invalid, so make sure we use the queue we want.
2401 * Note that this is done here as we want to avoid making DQA
2402 * changes in the mac80211 layer.
2404 if (vif->type == NL80211_IFTYPE_ADHOC)
2405 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2408 * While in previous FWs we had to exclude the cab queue from the TFD queue
2409 * mask, now it is needed like any other queue.
2411 if (!iwl_mvm_has_new_tx_api(mvm) &&
2412 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2413 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2415 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2417 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2418 mvmvif->id, mvmvif->color);
2423 * Enable cab queue after the ADD_STA command is sent.
2424 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2425 * command with unknown station id, and for FW that doesn't support
2426 * station API since the cab queue is not included in the tfd_queue_msk.
2429 if (iwl_mvm_has_new_tx_api(mvm)) {
2430 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2437 mvmvif->cab_queue = queue;
2438 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2439 IWL_UCODE_TLV_API_STA_TYPE))
2440 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2445 iwl_mvm_dealloc_int_sta(mvm, msta);
2449 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2450 struct ieee80211_key_conf *keyconf,
2454 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2455 struct iwl_mvm_add_sta_key_cmd cmd;
2457 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2458 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2463 /* This is a valid situation for GTK removal */
2464 if (sta_id == IWL_MVM_INVALID_STA)
2467 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2468 STA_KEY_FLG_KEYID_MSK);
2469 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2470 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2473 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2476 * The fields assigned here are in the same location at the start
2477 * of the command, so we can do this union trick.
2479 u.cmd.common.key_flags = key_flags;
2480 u.cmd.common.key_offset = keyconf->hw_key_idx;
2481 u.cmd.common.sta_id = sta_id;
2483 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2485 status = ADD_STA_SUCCESS;
2486 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2490 case ADD_STA_SUCCESS:
2491 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2495 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2503 * Send the FW a request to remove the station from its internal data
2504 * structures, and in addition remove it from the local data structure.
2506 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2508 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2511 lockdep_assert_held(&mvm->mutex);
2513 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2515 iwl_mvm_disable_txq(mvm, NULL, mvmvif->mcast_sta.sta_id,
2516 &mvmvif->cab_queue, 0);
2518 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2520 IWL_WARN(mvm, "Failed sending remove station\n");
2525 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2527 struct iwl_mvm_delba_data notif = {
2531 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2532 ¬if, sizeof(notif));
2535 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2536 struct iwl_mvm_baid_data *data)
2540 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2542 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2544 struct iwl_mvm_reorder_buffer *reorder_buf =
2545 &data->reorder_buf[i];
2546 struct iwl_mvm_reorder_buf_entry *entries =
2547 &data->entries[i * data->entries_per_queue];
2549 spin_lock_bh(&reorder_buf->lock);
2550 if (likely(!reorder_buf->num_stored)) {
2551 spin_unlock_bh(&reorder_buf->lock);
2556 * This shouldn't happen in regular DELBA since the internal
2557 * delBA notification should trigger a release of all frames in
2558 * the reorder buffer.
2562 for (j = 0; j < reorder_buf->buf_size; j++)
2563 __skb_queue_purge(&entries[j].e.frames);
2565 * Prevent timer re-arm. This prevents a very far-fetched case
2566 * where we timed out on the notification. There may be prior
2567 * RX frames pending in the RX queue before the notification
2568 * that might get processed between now and the actual deletion
2569 * and we would re-arm the timer although we are deleting the reorder buffer.
2572 reorder_buf->removed = true;
2573 spin_unlock_bh(&reorder_buf->lock);
2574 del_timer_sync(&reorder_buf->reorder_timer);
2578 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2579 struct iwl_mvm_baid_data *data,
2580 u16 ssn, u16 buf_size)
2584 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2585 struct iwl_mvm_reorder_buffer *reorder_buf =
2586 &data->reorder_buf[i];
2587 struct iwl_mvm_reorder_buf_entry *entries =
2588 &data->entries[i * data->entries_per_queue];
2591 reorder_buf->num_stored = 0;
2592 reorder_buf->head_sn = ssn;
2593 reorder_buf->buf_size = buf_size;
2594 /* rx reorder timer */
2595 timer_setup(&reorder_buf->reorder_timer,
2596 iwl_mvm_reorder_timer_expired, 0);
2597 spin_lock_init(&reorder_buf->lock);
2598 reorder_buf->mvm = mvm;
2599 reorder_buf->queue = i;
2600 reorder_buf->valid = false;
2601 for (j = 0; j < reorder_buf->buf_size; j++)
2602 __skb_queue_head_init(&entries[j].e.frames);
2606 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2607 struct iwl_mvm_sta *mvm_sta,
2608 bool start, int tid, u16 ssn,
2611 struct iwl_mvm_add_sta_cmd cmd = {
2612 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2613 .sta_id = mvm_sta->sta_id,
2614 .add_modify = STA_MODE_MODIFY,
2620 cmd.add_immediate_ba_tid = tid;
2621 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2622 cmd.rx_ba_window = cpu_to_le16(buf_size);
2623 cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2625 cmd.remove_immediate_ba_tid = tid;
2626 cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2629 status = ADD_STA_SUCCESS;
2630 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2631 iwl_mvm_add_sta_cmd_size(mvm),
2636 switch (status & IWL_ADD_STA_STATUS_MASK) {
2637 case ADD_STA_SUCCESS:
2638 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2639 start ? "start" : "stopp");
2640 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2641 !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2643 return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2644 case ADD_STA_IMMEDIATE_BA_FAILURE:
2645 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2648 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2649 start ? "start" : "stopp", status);
2654 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2655 struct iwl_mvm_sta *mvm_sta,
2656 bool start, int tid, u16 ssn,
2657 u16 buf_size, int baid)
2659 struct iwl_rx_baid_cfg_cmd cmd = {
2660 .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2661 cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2663 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2666 BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2669 cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2670 cmd.alloc.tid = tid;
2671 cmd.alloc.ssn = cpu_to_le16(ssn);
2672 cmd.alloc.win_size = cpu_to_le16(buf_size);
2674 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2675 cmd.remove_v1.baid = cpu_to_le32(baid);
2676 BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2678 cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2679 cmd.remove.tid = cpu_to_le32(tid);
2682 ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2688 /* ignore firmware baid on remove */
2692 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2693 start ? "start" : "stopp");
2695 if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2701 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2702 bool start, int tid, u16 ssn, u16 buf_size,
2705 if (fw_has_capa(&mvm->fw->ucode_capa,
2706 IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2707 return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
2708 tid, ssn, buf_size, baid);
2710 return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
2711 tid, ssn, buf_size);
2714 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2715 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2717 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2718 struct iwl_mvm_baid_data *baid_data = NULL;
2720 u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2723 lockdep_assert_held(&mvm->mutex);
2725 if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2726 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2730 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2731 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2733 /* sparse doesn't like the __align() so don't check */
2736 * The division below will be OK if either the cache line size
2737 * can be divided by the entry size (ALIGN will round up) or if
2738 * the entry size can be divided by the cache line size, in
2739 * which case the ALIGN() will do nothing.
2741 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2742 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2746 * Upward align the reorder buffer size to fill an entire cache
2747 * line for each queue, to avoid sharing cache lines between
2750 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2753 * Allocate here so if allocation fails we can bail out early
2754 * before starting the BA session in the firmware
2756 baid_data = kzalloc(sizeof(*baid_data) +
2757 mvm->trans->num_rx_queues *
2764 * This division is why we need the above BUILD_BUG_ON(),
2765 * if that doesn't hold then this will not be right.
2767 baid_data->entries_per_queue =
2768 reorder_buf_size / sizeof(baid_data->entries[0]);
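/*
 * Illustrative sketch with hypothetical numbers (not taken from the actual
 * structures): if each entry were 32 bytes and SMP_CACHE_BYTES were 128, a
 * 10-frame window would give 10 * 32 = 320 bytes per queue, ALIGN(320, 128)
 * rounds that up to 384, and entries_per_queue becomes 384 / 32 = 12, so
 * each queue's slice of the entries array ends on a cache-line boundary.
 */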
2771 if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2772 baid = mvm_sta->tid_to_baid[tid];
2774 /* we don't really need it in this case */
2778 /* Don't send command to remove (start=0) BAID during restart */
2779 if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2780 baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
2789 mvm->rx_ba_sessions++;
2791 if (!iwl_mvm_has_new_rx_api(mvm))
2794 baid_data->baid = baid;
2795 baid_data->timeout = timeout;
2796 baid_data->last_rx = jiffies;
2797 baid_data->rcu_ptr = &mvm->baid_map[baid];
2798 timer_setup(&baid_data->session_timer,
2799 iwl_mvm_rx_agg_session_expired, 0);
2800 baid_data->mvm = mvm;
2801 baid_data->tid = tid;
2802 baid_data->sta_id = mvm_sta->sta_id;
2804 mvm_sta->tid_to_baid[tid] = baid;
2806 mod_timer(&baid_data->session_timer,
2807 TU_TO_EXP_TIME(timeout * 2));
2809 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2811 * protect the BA data with RCU to cover a case where our
2812 * internal RX sync mechanism times out (not that it's
2813 * supposed to happen) and we would free the session data while
2814 * RX is still being processed in parallel
2816 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2817 mvm_sta->sta_id, tid, baid);
2818 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2819 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2821 baid = mvm_sta->tid_to_baid[tid];
2823 if (mvm->rx_ba_sessions > 0)
2824 /* check that restart flow didn't zero the counter */
2825 mvm->rx_ba_sessions--;
2826 if (!iwl_mvm_has_new_rx_api(mvm))
2829 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2832 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2833 if (WARN_ON(!baid_data))
2836 /* synchronize all rx queues so we can safely delete */
2837 iwl_mvm_free_reorder(mvm, baid_data);
2838 timer_shutdown_sync(&baid_data->session_timer);
2839 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2840 kfree_rcu(baid_data, rcu_head);
2841 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2844 * After we've deleted it, do another queue sync
2845 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2846 * running it won't find a new session in the old
2847 * BAID. It can find the NULL pointer for the BAID,
2848 * but we must not have it find a different session.
2850 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2860 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2861 int tid, u8 queue, bool start)
2863 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2864 struct iwl_mvm_add_sta_cmd cmd = {};
2868 lockdep_assert_held(&mvm->mutex);
2871 mvm_sta->tfd_queue_msk |= BIT(queue);
2872 mvm_sta->tid_disable_agg &= ~BIT(tid);
2874 /* In DQA-mode the queue isn't removed on agg termination */
2875 mvm_sta->tid_disable_agg |= BIT(tid);
2878 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2879 cmd.sta_id = mvm_sta->sta_id;
2880 cmd.add_modify = STA_MODE_MODIFY;
2881 if (!iwl_mvm_has_new_tx_api(mvm))
2882 cmd.modify_mask = STA_MODIFY_QUEUES;
2883 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2884 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2885 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2887 status = ADD_STA_SUCCESS;
2888 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2889 iwl_mvm_add_sta_cmd_size(mvm),
2894 switch (status & IWL_ADD_STA_STATUS_MASK) {
2895 case ADD_STA_SUCCESS:
2899 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2900 start ? "start" : "stopp", status);
2907 const u8 tid_to_mac80211_ac[] = {
2916 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2919 static const u8 tid_to_ucode_ac[] = {
2930 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2931 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2933 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2934 struct iwl_mvm_tid_data *tid_data;
2939 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2942 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2943 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2945 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2946 mvmsta->tid_data[tid].state);
2950 lockdep_assert_held(&mvm->mutex);
2952 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2953 iwl_mvm_has_new_tx_api(mvm)) {
2954 u8 ac = tid_to_mac80211_ac[tid];
2956 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2961 spin_lock_bh(&mvmsta->lock);
2964 * Note the possible cases:
2965 * 1. An enabled TXQ - TXQ needs to become agg'ed
2966 * 2. The TXQ hasn't yet been enabled, so find a free one and mark it as reserved
2969 txq_id = mvmsta->tid_data[tid].txq_id;
2970 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2971 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2972 IWL_MVM_DQA_MIN_DATA_QUEUE,
2973 IWL_MVM_DQA_MAX_DATA_QUEUE);
2975 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2981 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2982 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2983 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2985 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2986 tid, IWL_MAX_HW_QUEUES - 1);
2989 } else if (unlikely(mvm->queue_info[txq_id].status ==
2990 IWL_MVM_QUEUE_SHARED)) {
2992 IWL_DEBUG_TX_QUEUES(mvm,
2993 "Can't start tid %d agg on shared queue!\n",
2998 IWL_DEBUG_TX_QUEUES(mvm,
2999 "AGG for tid %d will be on queue #%d\n",
3002 tid_data = &mvmsta->tid_data[tid];
3003 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3004 tid_data->txq_id = txq_id;
3005 *ssn = tid_data->ssn;
3007 IWL_DEBUG_TX_QUEUES(mvm,
3008 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3009 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
3010 tid_data->next_reclaimed);
3013 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need to
3014 * align the wrap-around of the ssn in order to compare relevant values.
3016 normalized_ssn = tid_data->ssn;
3017 if (mvm->trans->trans_cfg->gen2)
3018 normalized_ssn &= 0xff;
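/*
 * Worked example with hypothetical values: on gen2, an ssn of 0x134 is
 * masked down to 0x34, so it compares equal to a next_reclaimed of 0x34
 * even though the full sequence numbers differ by one 8-bit wrap.
 */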
3020 if (normalized_ssn == tid_data->next_reclaimed) {
3021 tid_data->state = IWL_AGG_STARTING;
3022 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3024 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3025 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3029 spin_unlock_bh(&mvmsta->lock);
3034 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3035 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3038 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3039 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3040 unsigned int wdg_timeout =
3041 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3043 bool alloc_queue = true;
3044 enum iwl_mvm_queue_status queue_status;
3047 struct iwl_trans_txq_scd_cfg cfg = {
3048 .sta_id = mvmsta->sta_id,
3050 .frame_limit = buf_size,
3055 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3056 * manager, so this function should never be called in this case.
3058 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3061 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3062 != IWL_MAX_TID_COUNT);
3064 spin_lock_bh(&mvmsta->lock);
3065 ssn = tid_data->ssn;
3066 queue = tid_data->txq_id;
3067 tid_data->state = IWL_AGG_ON;
3068 mvmsta->agg_tids |= BIT(tid);
3069 tid_data->ssn = 0xffff;
3070 tid_data->amsdu_in_ampdu_allowed = amsdu;
3071 spin_unlock_bh(&mvmsta->lock);
3073 if (iwl_mvm_has_new_tx_api(mvm)) {
3075 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3076 * would have failed, so if we are here there is no need to allocate a queue.
3078 * However, if the aggregation size is different from the default
3079 * size, the scheduler should be reconfigured.
3080 * We cannot do this with the new TX API, so return unsupported
3081 * for now, until it is offloaded to firmware.
3082 * Note that if SCD default value changes - this condition
3083 * should be updated as well.
3085 if (buf_size < IWL_FRAME_LIMIT)
3088 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3094 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3096 queue_status = mvm->queue_info[queue].status;
3098 /* Maybe there is no need to even alloc a queue... */
3099 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3100 alloc_queue = false;
3103 * Only reconfigure the SCD for the queue if the window size has
3104 * changed from the current one (i.e. become smaller)
3106 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3108 * If reconfiguring an existing queue, it first must be drained
3111 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3115 "Error draining queue before reconfig\n");
3119 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3120 mvmsta->sta_id, tid,
3124 "Error reconfiguring TXQ #%d\n", queue);
3130 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3133 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3134 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3135 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3140 /* No need to mark as reserved */
3141 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3145 * Even though in theory the peer could have different
3146 * aggregation reorder buffer sizes for different sessions,
3147 * our ucode doesn't allow for that and has a global limit
3148 * for each station. Therefore, use the minimum of all the
3149 * aggregation sessions and our default value.
3151 mvmsta->max_agg_bufsize =
3152 min(mvmsta->max_agg_bufsize, buf_size);
3153 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3155 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3158 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3161 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3162 struct iwl_mvm_sta *mvmsta,
3163 struct iwl_mvm_tid_data *tid_data)
3165 u16 txq_id = tid_data->txq_id;
3167 lockdep_assert_held(&mvm->mutex);
3169 if (iwl_mvm_has_new_tx_api(mvm))
3173 * The TXQ is marked as reserved only if no traffic came through yet
3174 * This means no traffic has been sent on this TID (agg'd or not), so
3175 * we no longer have use for the queue. It hasn't even been
3176 * allocated through iwl_mvm_enable_txq, so we can just mark it back as free.
3179 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3180 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3181 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3185 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3186 struct ieee80211_sta *sta, u16 tid)
3188 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3189 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3194 * If mac80211 is cleaning its state, then say that we finished since
3195 * our state has been cleared anyway.
3197 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3198 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3202 spin_lock_bh(&mvmsta->lock);
3204 txq_id = tid_data->txq_id;
3206 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3207 mvmsta->sta_id, tid, txq_id, tid_data->state);
3209 mvmsta->agg_tids &= ~BIT(tid);
3211 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3213 switch (tid_data->state) {
3215 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3217 IWL_DEBUG_TX_QUEUES(mvm,
3218 "ssn = %d, next_recl = %d\n",
3219 tid_data->ssn, tid_data->next_reclaimed);
3221 tid_data->ssn = 0xffff;
3222 tid_data->state = IWL_AGG_OFF;
3223 spin_unlock_bh(&mvmsta->lock);
3225 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3227 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3229 case IWL_AGG_STARTING:
3230 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3232 * The agg session has been stopped before it was set up. This
3233 * can happen when the AddBA timer times out for example.
3236 /* No barriers since we are under mutex */
3237 lockdep_assert_held(&mvm->mutex);
3239 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3240 tid_data->state = IWL_AGG_OFF;
3245 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3246 mvmsta->sta_id, tid, tid_data->state);
3248 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3252 spin_unlock_bh(&mvmsta->lock);
3257 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3258 struct ieee80211_sta *sta, u16 tid)
3260 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3261 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3263 enum iwl_mvm_agg_state old_state;
3266 * First set the agg state to OFF to avoid calling
3267 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3269 spin_lock_bh(&mvmsta->lock);
3270 txq_id = tid_data->txq_id;
3271 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3272 mvmsta->sta_id, tid, txq_id, tid_data->state);
3273 old_state = tid_data->state;
3274 tid_data->state = IWL_AGG_OFF;
3275 mvmsta->agg_tids &= ~BIT(tid);
3276 spin_unlock_bh(&mvmsta->lock);
3278 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3280 if (old_state >= IWL_AGG_ON) {
3281 iwl_mvm_drain_sta(mvm, mvmsta, true);
3283 if (iwl_mvm_has_new_tx_api(mvm)) {
3284 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3286 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3287 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3289 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3290 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3291 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3294 iwl_mvm_drain_sta(mvm, mvmsta, false);
3296 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3302 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3304 int i, max = -1, max_offs = -1;
3306 lockdep_assert_held(&mvm->mutex);
3308 /* Pick the unused key offset with the highest 'deleted'
3309 * counter. Every time a key is deleted, all the counters
3310 * are incremented and the one that was just deleted is
3311 * reset to zero. Thus, the highest counter is the one
3312 * that was deleted longest ago. Pick that one.
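/* For example (hypothetical counter values): if offsets 2 and 5 are both
 * unused with fw_key_deleted[2] == 7 and fw_key_deleted[5] == 3, offset 2
 * is picked since it was freed longest ago.
 */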
3314 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3315 if (test_bit(i, mvm->fw_key_table))
3317 if (mvm->fw_key_deleted[i] > max) {
3318 max = mvm->fw_key_deleted[i];
3324 return STA_KEY_IDX_INVALID;
3329 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3330 struct ieee80211_vif *vif,
3331 struct ieee80211_sta *sta)
3333 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3336 return iwl_mvm_sta_from_mac80211(sta);
3339 * The device expects GTKs for station interfaces to be
3340 * installed as GTKs for the AP station. If we have no
3341 * station ID, then use AP's station ID.
3343 if (vif->type == NL80211_IFTYPE_STATION &&
3344 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3345 u8 sta_id = mvmvif->ap_sta_id;
3347 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3348 lockdep_is_held(&mvm->mutex));
3351 * It is possible that the 'sta' parameter is NULL,
3352 * for example when a GTK is removed - the sta_id will then
3353 * be the AP ID, and no station was passed by mac80211.
3355 if (IS_ERR_OR_NULL(sta))
3358 return iwl_mvm_sta_from_mac80211(sta);
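/*
 * Compare two packet numbers byte-wise, treating the byte at the highest
 * index as the most significant: returns 1 if pn1 > pn2, -1 if pn1 < pn2
 * and 0 if they are equal (memcmp-like semantics for this byte order).
 */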
3364 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3368 for (i = len - 1; i >= 0; i--) {
3369 if (pn1[i] > pn2[i])
3371 if (pn1[i] < pn2[i])
3378 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3380 struct ieee80211_key_conf *key, bool mcast,
3381 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3382 u8 key_offset, bool mfp)
3385 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3386 struct iwl_mvm_add_sta_key_cmd cmd;
3394 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3395 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3396 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3399 if (sta_id == IWL_MVM_INVALID_STA)
3402 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3403 STA_KEY_FLG_KEYID_MSK;
3404 key_flags = cpu_to_le16(keyidx);
3405 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3407 switch (key->cipher) {
3408 case WLAN_CIPHER_SUITE_TKIP:
3409 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3411 memcpy((void *)&u.cmd.tx_mic_key,
3412 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3415 memcpy((void *)&u.cmd.rx_mic_key,
3416 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3418 pn = atomic64_read(&key->tx_pn);
3421 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3422 for (i = 0; i < 5; i++)
3423 u.cmd_v1.tkip_rx_ttak[i] =
3424 cpu_to_le16(tkip_p1k[i]);
3426 memcpy(u.cmd.common.key, key->key, key->keylen);
3428 case WLAN_CIPHER_SUITE_CCMP:
3429 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3430 memcpy(u.cmd.common.key, key->key, key->keylen);
3432 pn = atomic64_read(&key->tx_pn);
3434 case WLAN_CIPHER_SUITE_WEP104:
3435 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3437 case WLAN_CIPHER_SUITE_WEP40:
3438 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3439 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3441 case WLAN_CIPHER_SUITE_GCMP_256:
3442 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3444 case WLAN_CIPHER_SUITE_GCMP:
3445 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3446 memcpy(u.cmd.common.key, key->key, key->keylen);
3448 pn = atomic64_read(&key->tx_pn);
3451 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3452 memcpy(u.cmd.common.key, key->key, key->keylen);
3456 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3458 key_flags |= cpu_to_le16(STA_KEY_MFP);
3460 u.cmd.common.key_offset = key_offset;
3461 u.cmd.common.key_flags = key_flags;
3462 u.cmd.common.sta_id = sta_id;
3464 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3469 for (; i < IEEE80211_NUM_TIDS; i++) {
3470 struct ieee80211_key_seq seq = {};
3471 u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3473 /* there's a hole at 2/3 in FW format depending on version */
3474 int hole = api_ver >= 3 ? 0 : 2;
3476 ieee80211_get_key_rx_seq(key, i, &seq);
3478 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3479 rx_pn[0] = seq.tkip.iv16;
3480 rx_pn[1] = seq.tkip.iv16 >> 8;
3481 rx_pn[2 + hole] = seq.tkip.iv32;
3482 rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3483 rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3484 rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3485 } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3487 rx_pn_len = seq.hw.seq_len;
3489 rx_pn[0] = seq.ccmp.pn[0];
3490 rx_pn[1] = seq.ccmp.pn[1];
3491 rx_pn[2 + hole] = seq.ccmp.pn[2];
3492 rx_pn[3 + hole] = seq.ccmp.pn[3];
3493 rx_pn[4 + hole] = seq.ccmp.pn[4];
3494 rx_pn[5 + hole] = seq.ccmp.pn[5];
3497 if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3499 memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3504 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3505 size = sizeof(u.cmd);
3507 size = sizeof(u.cmd_v1);
3510 status = ADD_STA_SUCCESS;
3511 if (cmd_flags & CMD_ASYNC)
3512 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3515 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3519 case ADD_STA_SUCCESS:
3520 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3524 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3531 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3532 struct ieee80211_key_conf *keyconf,
3533 u8 sta_id, bool remove_key)
3535 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3537 /* verify the key details match the required command's expectations */
3538 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3539 (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3540 keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3541 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3542 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3543 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3546 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3547 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3550 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3551 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3554 /* This is a valid situation for IGTK */
3555 if (sta_id == IWL_MVM_INVALID_STA)
3558 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3560 struct ieee80211_key_seq seq;
3563 switch (keyconf->cipher) {
3564 case WLAN_CIPHER_SUITE_AES_CMAC:
3565 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3567 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3568 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3569 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3575 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3576 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3577 igtk_cmd.ctrl_flags |=
3578 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3579 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3580 pn = seq.aes_cmac.pn;
3581 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3582 ((u64) pn[4] << 8) |
3583 ((u64) pn[3] << 16) |
3584 ((u64) pn[2] << 24) |
3585 ((u64) pn[1] << 32) |
3586 ((u64) pn[0] << 40));
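/* Illustrative values only: an IPN of 00:00:00:00:01:02 (pn[0]..pn[5])
 * assembles to a 48-bit counter of 0x0102 before the cpu_to_le64()
 * conversion, since pn[0] lands in the most significant byte.
 */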
3589 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3590 remove_key ? "removing" : "installing",
3591 keyconf->keyidx >= 6 ? "B" : "",
3592 keyconf->keyidx, igtk_cmd.sta_id);
3594 if (!iwl_mvm_has_new_rx_api(mvm)) {
3595 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3596 .ctrl_flags = igtk_cmd.ctrl_flags,
3597 .key_id = igtk_cmd.key_id,
3598 .sta_id = igtk_cmd.sta_id,
3599 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3602 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3603 ARRAY_SIZE(igtk_cmd_v1.igtk));
3604 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3605 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3607 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3608 sizeof(igtk_cmd), &igtk_cmd);
3612 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3613 struct ieee80211_vif *vif,
3614 struct ieee80211_sta *sta)
3616 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3621 if (vif->type == NL80211_IFTYPE_STATION &&
3622 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3623 u8 sta_id = mvmvif->ap_sta_id;
3624 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3625 lockdep_is_held(&mvm->mutex));
3633 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3634 struct ieee80211_vif *vif,
3635 struct ieee80211_sta *sta,
3636 struct ieee80211_key_conf *keyconf,
3641 struct ieee80211_key_seq seq;
3647 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3649 sta_id = mvm_sta->sta_id;
3651 } else if (vif->type == NL80211_IFTYPE_AP &&
3652 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3653 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3655 sta_id = mvmvif->mcast_sta.sta_id;
3657 IWL_ERR(mvm, "Failed to find station id\n");
3661 if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3662 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3663 /* get phase 1 key from mac80211 */
3664 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3665 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3667 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3668 seq.tkip.iv32, p1k, 0, key_offset,
3672 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3673 0, NULL, 0, key_offset, mfp);
3676 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3677 struct ieee80211_vif *vif,
3678 struct ieee80211_sta *sta,
3679 struct ieee80211_key_conf *keyconf,
3682 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3683 struct iwl_mvm_sta *mvm_sta;
3684 u8 sta_id = IWL_MVM_INVALID_STA;
3686 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3688 lockdep_assert_held(&mvm->mutex);
3690 if (vif->type != NL80211_IFTYPE_AP ||
3691 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3692 /* Get the station id from the mvm local station table */
3693 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3695 IWL_ERR(mvm, "Failed to find station\n");
3698 sta_id = mvm_sta->sta_id;
3701 * It is possible that the 'sta' parameter is NULL, and thus
3702 * there is a need to retrieve the sta from the local station table.
3706 sta = rcu_dereference_protected(
3707 mvm->fw_id_to_mac_id[sta_id],
3708 lockdep_is_held(&mvm->mutex));
3709 if (IS_ERR_OR_NULL(sta)) {
3710 IWL_ERR(mvm, "Invalid station id\n");
3715 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3718 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3720 sta_id = mvmvif->mcast_sta.sta_id;
3723 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3724 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3725 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3726 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3730 /* If the key_offset is not pre-assigned, we need to find a
3731 * new offset to use. In normal cases, the offset is not
3732 * pre-assigned, but during HW_RESTART we want to reuse the
3733 * same indices, so we pass them when this function is called.
3735 * In D3 entry, we need to hardcode the indices (because the
3736 * firmware hardcodes the PTK offset to 0). In this case, we
3737 * need to make sure we don't overwrite the hw_key_idx in the
3738 * keyconf structure, because otherwise we cannot configure
3739 * the original ones back when resuming.
3741 if (key_offset == STA_KEY_IDX_INVALID) {
3742 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3743 if (key_offset == STA_KEY_IDX_INVALID)
3745 keyconf->hw_key_idx = key_offset;
3748 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3753 * For WEP, the same key is used for multicast and unicast. Upload it
3754 * again, using the same key offset, and now pointing the other one
3755 * to the same key slot (offset).
3756 * If this fails, remove the original as well.
3758 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3759 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3761 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3762 key_offset, !mcast);
3764 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3769 __set_bit(key_offset, mvm->fw_key_table);
3772 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3773 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3774 sta ? sta->addr : zero_addr, ret);
3778 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3779 struct ieee80211_vif *vif,
3780 struct ieee80211_sta *sta,
3781 struct ieee80211_key_conf *keyconf)
3783 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3784 struct iwl_mvm_sta *mvm_sta;
3785 u8 sta_id = IWL_MVM_INVALID_STA;
3788 lockdep_assert_held(&mvm->mutex);
3790 /* Get the station from the mvm local station table */
3791 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3793 sta_id = mvm_sta->sta_id;
3794 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3795 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3798 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3799 keyconf->keyidx, sta_id);
3801 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3802 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3803 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3804 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3806 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3807 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3808 keyconf->hw_key_idx);
3812 /* track which key was deleted last */
3813 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3814 if (mvm->fw_key_deleted[i] < U8_MAX)
3815 mvm->fw_key_deleted[i]++;
3817 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3819 if (sta && !mvm_sta) {
3820 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3824 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3828 /* delete WEP key twice to get rid of (now useless) offset */
3829 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3830 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3831 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3836 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3837 struct ieee80211_vif *vif,
3838 struct ieee80211_key_conf *keyconf,
3839 struct ieee80211_sta *sta, u32 iv32,
3842 struct iwl_mvm_sta *mvm_sta;
3843 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3844 bool mfp = sta ? sta->mfp : false;
3848 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3849 if (WARN_ON_ONCE(!mvm_sta))
3851 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3852 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3859 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3860 struct ieee80211_sta *sta)
3862 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3863 struct iwl_mvm_add_sta_cmd cmd = {
3864 .add_modify = STA_MODE_MODIFY,
3865 .sta_id = mvmsta->sta_id,
3866 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3867 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3871 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3872 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3874 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3877 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3878 struct ieee80211_sta *sta,
3879 enum ieee80211_frame_release_type reason,
3880 u16 cnt, u16 tids, bool more_data,
3881 bool single_sta_queue)
3883 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3884 struct iwl_mvm_add_sta_cmd cmd = {
3885 .add_modify = STA_MODE_MODIFY,
3886 .sta_id = mvmsta->sta_id,
3887 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3888 .sleep_tx_count = cpu_to_le16(cnt),
3889 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3892 unsigned long _tids = tids;
3894 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3895 * Note that this field is reserved and unused by firmware not
3896 * supporting GO uAPSD, so it's safe to always do this.
3898 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3899 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3901 /* If we're releasing frames from aggregation or dqa queues then check
3902 * if all the queues that we're releasing frames from, combined, have:
3903 * - more frames than the service period, in which case more_data needs to be set
3905 * - fewer than 'cnt' frames, in which case we need to adjust the
3906 * firmware command (but do that unconditionally)
3908 if (single_sta_queue) {
3909 int remaining = cnt;
3912 spin_lock_bh(&mvmsta->lock);
3913 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3914 struct iwl_mvm_tid_data *tid_data;
3917 tid_data = &mvmsta->tid_data[tid];
3919 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3920 if (n_queued > remaining) {
3925 remaining -= n_queued;
3927 sleep_tx_count = cnt - remaining;
3928 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3929 mvmsta->sleep_tx_count = sleep_tx_count;
3930 spin_unlock_bh(&mvmsta->lock);
3932 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
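/*
 * Illustrative sketch with hypothetical numbers: with cnt == 4 and two
 * awake TIDs holding 2 and 1 queued frames, remaining ends up as 1 and
 * sleep_tx_count = 4 - 1 = 3, so the command only asks the firmware to
 * release frames that actually exist.
 */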
3933 if (WARN_ON(cnt - remaining == 0)) {
3934 ieee80211_sta_eosp(sta);
3939 /* Note: this is ignored by firmware not supporting GO uAPSD */
3941 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3943 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3944 mvmsta->next_status_eosp = true;
3945 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3947 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3950 /* block the Tx queues until the FW has updated the sleep Tx count */
3951 iwl_trans_block_txq_ptrs(mvm->trans, true);
3953 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3954 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3955 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3957 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3960 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3961 struct iwl_rx_cmd_buffer *rxb)
3963 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3964 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3965 struct ieee80211_sta *sta;
3966 u32 sta_id = le32_to_cpu(notif->sta_id);
3968 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3972 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3973 if (!IS_ERR_OR_NULL(sta))
3974 ieee80211_sta_eosp(sta);
3978 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3979 struct iwl_mvm_sta *mvmsta, bool disable)
3981 struct iwl_mvm_add_sta_cmd cmd = {
3982 .add_modify = STA_MODE_MODIFY,
3983 .sta_id = mvmsta->sta_id,
3984 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3985 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3986 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3990 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3991 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3993 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3996 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3997 struct ieee80211_sta *sta,
4000 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4002 spin_lock_bh(&mvm_sta->lock);
4004 if (mvm_sta->disable_tx == disable) {
4005 spin_unlock_bh(&mvm_sta->lock);
4009 mvm_sta->disable_tx = disable;
4012 * If sta PS state is handled by mac80211, tell it to start/stop
4013 * queuing tx for this station.
4015 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4016 ieee80211_sta_block_awake(mvm->hw, sta, disable);
4018 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4020 spin_unlock_bh(&mvm_sta->lock);
4023 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4024 struct iwl_mvm_vif *mvmvif,
4025 struct iwl_mvm_int_sta *sta,
4028 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4029 struct iwl_mvm_add_sta_cmd cmd = {
4030 .add_modify = STA_MODE_MODIFY,
4031 .sta_id = sta->sta_id,
4032 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4033 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4034 .mac_id_n_color = cpu_to_le32(id),
4038 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4039 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4041 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4044 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4045 struct iwl_mvm_vif *mvmvif,
4048 struct ieee80211_sta *sta;
4049 struct iwl_mvm_sta *mvm_sta;
4054 /* Block/unblock all the stations of the given mvmvif */
4055 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4056 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4057 if (IS_ERR_OR_NULL(sta))
4060 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4061 if (mvm_sta->mac_id_n_color !=
4062 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4065 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4070 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4073 /* Need to block/unblock also multicast station */
4074 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4075 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4076 &mvmvif->mcast_sta, disable);
4079 * Only unblock the broadcast station (FW blocks it for immediate
4080 * quiet, not the driver)
4082 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4083 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4084 &mvmvif->bcast_sta, disable);
4087 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4089 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4090 struct iwl_mvm_sta *mvmsta;
4094 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4097 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4102 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4104 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4107 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need to
4108 * align the wrap-around of the ssn in order to compare relevant values.
4110 if (mvm->trans->trans_cfg->gen2)
4113 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4116 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4117 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4118 u8 *key, u32 key_len)
4122 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4123 struct ieee80211_key_conf *keyconf;
4125 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4126 NL80211_IFTYPE_UNSPECIFIED,
4131 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4133 IWL_MVM_TX_FIFO_BE);
4137 keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4143 keyconf->cipher = cipher;
4144 memcpy(keyconf->key, key, key_len);
4145 keyconf->keylen = key_len;
4147 ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4148 0, NULL, 0, 0, true);
4152 iwl_mvm_dealloc_int_sta(mvm, sta);
4156 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4157 struct ieee80211_vif *vif,
4160 struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4161 .mac_id = cpu_to_le32(mac_id),
4165 ret = iwl_mvm_send_cmd_pdu(mvm,
4166 WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4168 sizeof(cancel_channel_switch_cmd),
4169 &cancel_channel_switch_cmd);
4171 IWL_ERR(mvm, "Failed to cancel the channel switch\n");