/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

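	/*
	 * Note: the maximum A-MPDU size is passed to the firmware as a
	 * length exponent, taken from the VHT cap when VHT is supported
	 * and from the HT A-MPDU factor otherwise; in 802.11 both encode
	 * a maximum of 2^(13 + exponent) - 1 bytes.
	 */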
	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
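		/*
		 * uapsd_acs carries both the trigger-enabled and the
		 * delivery-enabled AC sets, one per nibble; here they are
		 * identical, so the low nibble is simply duplicated into
		 * the high one.  The service period length is counted in
		 * frames (twice the WMM max-SP value), with max_sp == 0
		 * ("deliver all frames") encoded as 128 for the firmware.
		 */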
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

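/*
 * Tear down a TX queue: on the new (gen2) TX path the queue is simply
 * handed back to the transport, while on the legacy path the TID is
 * unmapped first and the queue is only disabled in the scheduler once
 * no TIDs remain mapped to it.
 */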
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

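/*
 * Collect, as a bitmap, the TIDs on @queue that currently have an
 * aggregation session open; callers pass the result to
 * iwl_mvm_invalidate_sta_queue() to tear those sessions down first.
 */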
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

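/*
 * Give an inactive queue a new life: unmap whatever is still using it
 * (tearing down any aggregations it carried) so that it can be
 * reassigned, possibly to a different station (@new_sta_id).
 */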
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

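/*
 * On the new (gen2/TVQM) TX path queues are allocated on demand by the
 * firmware, so this only asks the transport for a queue of a suitable
 * size.  IWL_MAX_TID_COUNT denotes non-QoS/management traffic and is
 * remapped to the internal management TID with its own queue size.
 */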
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

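/*
 * Every shared queue has a single "owner" TID as far as the scheduler
 * is concerned; when the owner leaves the queue, the firmware must be
 * told which of the remaining TIDs owns it now, otherwise the departing
 * TID could never be allocated a new queue while still owning this one.
 */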
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}

	rcu_read_unlock();

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

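/*
 * Worker behind mac80211's wake_tx_queue: TXQs that don't have a
 * hardware queue yet are parked on mvm->add_stream_txqs; allocate a
 * queue for each of them here, under the mvm mutex, and then kick
 * transmission on the now-backed TXQ.
 */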
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

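/*
 * Add an "internal" station (auxiliary, broadcast, multicast, ...) to
 * the firmware.  Such stations have no mac80211 counterpart, so the
 * command is built directly from the iwl_mvm_int_sta descriptor; note
 * that tid_disable_tx is set for all TIDs, i.e. no aggregation.
 */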
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
1561
8ca151b5
JB
1562int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1563 struct ieee80211_vif *vif,
1564 struct ieee80211_sta *sta)
1565{
1566 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9d8ce6af 1567 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
a571f5f6 1568 struct iwl_mvm_rxq_dup_data *dup_data;
8ca151b5 1569 int i, ret, sta_id;
732d06e9
ST
1570 bool sta_update = false;
1571 unsigned int sta_flags = 0;
8ca151b5
JB
1572
1573 lockdep_assert_held(&mvm->mutex);
1574
1575 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
b92e661b
EP
1576 sta_id = iwl_mvm_find_free_sta_id(mvm,
1577 ieee80211_vif_type_p2p(vif));
8ca151b5
JB
1578 else
1579 sta_id = mvm_sta->sta_id;
1580
0ae98812 1581 if (sta_id == IWL_MVM_INVALID_STA)
8ca151b5
JB
1582 return -ENOSPC;
1583
1584 spin_lock_init(&mvm_sta->lock);
1585
c8f54701
JB
1586 /* if this is a HW restart re-alloc existing queues */
1587 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
732d06e9
ST
1588 struct iwl_mvm_int_sta tmp_sta = {
1589 .sta_id = sta_id,
1590 .type = mvm_sta->sta_type,
1591 };
1592
1593 /*
1594 * First add an empty station since allocating
1595 * a queue requires a valid station
1596 */
1597 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1598 mvmvif->id, mvmvif->color);
1599 if (ret)
1600 goto err;
1601
cfbc6c4c 1602 iwl_mvm_realloc_queues_after_restart(mvm, sta);
732d06e9
ST
1603 sta_update = true;
1604 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
8d98ae6e
LK
1605 goto update_fw;
1606 }
1607
8ca151b5
JB
1608 mvm_sta->sta_id = sta_id;
1609 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1610 mvmvif->color);
1611 mvm_sta->vif = vif;
a58bb468
LK
1612 if (!mvm->trans->cfg->gen2)
1613 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1614 else
1615 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
9ee718aa
EL
1616 mvm_sta->tx_protection = 0;
1617 mvm_sta->tt_tx_protection = false;
ced19f26 1618 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
8ca151b5
JB
1619
1620 /* HW restart, don't assume the memory has been zeroed */
69191afe 1621 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
8ca151b5 1622 mvm_sta->tfd_queue_msk = 0;
a0f6bf2a 1623
6d9d32b8 1624 /* for HW restart - reset everything but the sequence number */
24afba76 1625 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
6d9d32b8
JB
1626 u16 seq = mvm_sta->tid_data[i].seq_number;
1627 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1628 mvm_sta->tid_data[i].seq_number = seq;
24afba76 1629
24afba76
LK
1630 /*
1631 * Mark all queues for this STA as unallocated and defer TX
1632 * frames until the queue is allocated
1633 */
6862fcee 1634 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
6d9d32b8 1635 }
cfbc6c4c
SS
1636
1637 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1638 struct iwl_mvm_txq *mvmtxq =
1639 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1640
1641 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1642 INIT_LIST_HEAD(&mvmtxq->list);
fba8248e 1643 atomic_set(&mvmtxq->tx_request, 0);
cfbc6c4c
SS
1644 }
1645
efed6640 1646 mvm_sta->agg_tids = 0;
8ca151b5 1647
a571f5f6
SS
1648 if (iwl_mvm_has_new_rx_api(mvm) &&
1649 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
92c4dca6
JB
1650 int q;
1651
a571f5f6 1652 dup_data = kcalloc(mvm->trans->num_rx_queues,
92c4dca6 1653 sizeof(*dup_data), GFP_KERNEL);
1654 if (!dup_data)
1655 return -ENOMEM;
1656 /*
1657 * Initialize all the last_seq values to 0xffff which can never
1658 * compare equal to the frame's seq_ctrl in the check in
1659 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1660 * number and fragmented packets don't reach that function.
1661 *
1662 * This thus allows receiving a packet with seqno 0 and the
1663 * retry bit set as the very first packet on a new TID.
1664 */
1665 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1666 memset(dup_data[q].last_seq, 0xff,
1667 sizeof(dup_data[q].last_seq));
1668 mvm_sta->dup_data = dup_data;
1669 }
1670
c8f54701 1671 if (!iwl_mvm_has_new_tx_api(mvm)) {
1672 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1673 ieee80211_vif_type_p2p(vif));
1674 if (ret)
1675 goto err;
1676 }
1677
1678 /*
1679 * if rs is registered with mac80211, then "add station" will be handled
1680 * via the corresponding ops, otherwise need to notify rate scaling here
1681 */
4243edb4 1682 if (iwl_mvm_has_tlc_offload(mvm))
1683 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1684
1685 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1686
8d98ae6e 1687update_fw:
732d06e9 1688 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
8ca151b5 1689 if (ret)
a0f6bf2a 1690 goto err;
8ca151b5 1691
1692 if (vif->type == NL80211_IFTYPE_STATION) {
1693 if (!sta->tdls) {
0ae98812 1694 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1695 mvmvif->ap_sta_id = sta_id;
1696 } else {
0ae98812 1697 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1698 }
1699 }
1700
1701 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1702
1703 return 0;
1704
1705err:
a0f6bf2a 1706 return ret;
1707}
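The 0xffff sentinel used for dup_data above deserves a short illustration. Below is a minimal user-space sketch of the comparison a helper like iwl_mvm_is_dup() performs (simplified and hypothetical; the real check also consults per-RX-queue state and the retry flag): because the low 4 bits of a non-fragmented frame's seq_ctrl carry the fragment number and are therefore 0, a stored 0xffff can never match, so even a retry of seqno 0 is accepted as the first frame on a TID.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TIDS 8

/* sentinel-initialized per-TID "last seen seq_ctrl" (illustrative layout) */
static uint16_t last_seq[NUM_TIDS];

static void init_dup_state(void)
{
	for (int i = 0; i < NUM_TIDS; i++)
		last_seq[i] = 0xffff;	/* can never equal a real seq_ctrl */
}

static bool is_dup(int tid, uint16_t seq_ctrl, bool retry)
{
	if (retry && seq_ctrl == last_seq[tid])
		return true;		/* retransmission of the last frame */
	last_seq[tid] = seq_ctrl;
	return false;
}

int main(void)
{
	init_dup_state();
	printf("%d\n", is_dup(0, 0x0000, true));	/* 0: first frame accepted */
	printf("%d\n", is_dup(0, 0x0000, true));	/* 1: now a duplicate */
	return 0;
}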
1708
1709int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1710 bool drain)
1711{
f9dc0004 1712 struct iwl_mvm_add_sta_cmd cmd = {};
1713 int ret;
1714 u32 status;
1715
1716 lockdep_assert_held(&mvm->mutex);
1717
1718 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1719 cmd.sta_id = mvmsta->sta_id;
1720 cmd.add_modify = STA_MODE_MODIFY;
1721 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1722 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1723
1724 status = ADD_STA_SUCCESS;
1725 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1726 iwl_mvm_add_sta_cmd_size(mvm),
f9dc0004 1727 &cmd, &status);
1728 if (ret)
1729 return ret;
1730
837c4da9 1731 switch (status & IWL_ADD_STA_STATUS_MASK) {
1732 case ADD_STA_SUCCESS:
1733 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1734 mvmsta->sta_id);
1735 break;
1736 default:
1737 ret = -EIO;
1738 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1739 mvmsta->sta_id);
1740 break;
1741 }
1742
1743 return ret;
1744}
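The drain flag is one half of a small protocol; the removal path further down pairs it with a flush and a wait. A condensed sketch of that ordering (error handling elided; this merely restates the sequence iwl_mvm_rm_sta() follows, it is not a new helper in this file):

/* Teardown ordering used by iwl_mvm_rm_sta() below:
 * 1) drain=true  - firmware stops scheduling new frames for the station
 * 2) flush       - pending frames on its queues are dropped
 * 3) wait        - until the transport reports the queues empty
 * 4) drain=false - clear the flag before the station is removed
 */
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (!ret)
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
if (!ret)
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
					     mvm_sta->tfd_queue_msk);
if (!ret)
	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);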
1745
1746/*
1747 * Remove a station from the FW table. Before sending the command to remove
1748 * the station validate that the station is indeed known to the driver (sanity
1749 * only).
1750 */
1751static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1752{
1753 struct ieee80211_sta *sta;
1754 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1755 .sta_id = sta_id,
1756 };
1757 int ret;
1758
1759 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1760 lockdep_is_held(&mvm->mutex));
1761
1762 /* Note: internal stations are marked as error values */
1763 if (!sta) {
1764 IWL_ERR(mvm, "Invalid station id\n");
1765 return -EINVAL;
1766 }
1767
a1022927 1768 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1769 sizeof(rm_sta_cmd), &rm_sta_cmd);
1770 if (ret) {
1771 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1772 return ret;
1773 }
1774
1775 return 0;
1776}
1777
1778static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1779 struct ieee80211_vif *vif,
cfbc6c4c 1780 struct ieee80211_sta *sta)
24afba76 1781{
cfbc6c4c 1782 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1783 int i;
1784
1785 lockdep_assert_held(&mvm->mutex);
1786
1787 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
6862fcee 1788 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1789 continue;
1790
1791 iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
1792 0);
6862fcee 1793 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
24afba76 1794 }
1795
1796 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1797 struct iwl_mvm_txq *mvmtxq =
1798 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1799
1800 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1801 }
1802}
1803
1804int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1805 struct iwl_mvm_sta *mvm_sta)
1806{
bec9522a 1807 int i;
1808
1809 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1810 u16 txq_id;
bec9522a 1811 int ret;
1812
1813 spin_lock_bh(&mvm_sta->lock);
1814 txq_id = mvm_sta->tid_data[i].txq_id;
1815 spin_unlock_bh(&mvm_sta->lock);
1816
1817 if (txq_id == IWL_MVM_INVALID_QUEUE)
1818 continue;
1819
1820 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1821 if (ret)
bec9522a 1822 return ret;
1823 }
1824
bec9522a 1825 return 0;
1826}
1827
1828int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1829 struct ieee80211_vif *vif,
1830 struct ieee80211_sta *sta)
1831{
1832 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9d8ce6af 1833 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
94c3e614 1834 u8 sta_id = mvm_sta->sta_id;
1835 int ret;
1836
1837 lockdep_assert_held(&mvm->mutex);
1838
1839 if (iwl_mvm_has_new_rx_api(mvm))
1840 kfree(mvm_sta->dup_data);
1841
1842 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1843 if (ret)
1844 return ret;
d6d517b7 1845
1846 /* flush its queues here since we are freeing mvm_sta */
1847 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1848 if (ret)
1849 return ret;
1850 if (iwl_mvm_has_new_tx_api(mvm)) {
1851 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1852 } else {
1853 u32 q_mask = mvm_sta->tfd_queue_msk;
56214749 1854
1855 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1856 q_mask);
1857 }
1858 if (ret)
1859 return ret;
1860
1861 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
a0315dea 1862
cfbc6c4c 1863 iwl_mvm_disable_sta_queues(mvm, vif, sta);
1864
1865 /* If there is a TXQ still marked as reserved - free it */
1866 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1867 u8 reserved_txq = mvm_sta->reserved_queue;
1868 enum iwl_mvm_queue_status *status;
a0315dea 1869
1870 /*
1871 * If no traffic has gone through the reserved TXQ - it
1872 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1873 * should be manually marked as free again
1874 */
1875 status = &mvm->queue_info[reserved_txq].status;
1876 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1877 (*status != IWL_MVM_QUEUE_FREE),
1878 "sta_id %d reserved txq %d status %d",
f3f240f9 1879 sta_id, reserved_txq, *status))
c8f54701 1880 return -EINVAL;
a0315dea 1881
c8f54701 1882 *status = IWL_MVM_QUEUE_FREE;
1883 }
1884
1885 if (vif->type == NL80211_IFTYPE_STATION &&
1886 mvmvif->ap_sta_id == sta_id) {
1887 /* if associated - we can't remove the AP STA now */
1888 if (vif->bss_conf.assoc)
1889 return ret;
8ca151b5 1890
1891 /* unassoc - go ahead - remove the AP STA now */
1892 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
37577fe2 1893
1894 /* clear d0i3_ap_sta_id if no longer relevant */
1895 if (mvm->d0i3_ap_sta_id == sta_id)
1896 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1897 }
1898
1899 /*
1900 * This shouldn't happen - the TDLS channel switch should be canceled
1901 * before the STA is removed.
1902 */
94c3e614 1903 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
0ae98812 1904 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1905 cancel_delayed_work(&mvm->tdls_cs.dwork);
1906 }
1907
1908 /*
1909 * Make sure that the tx response code sees the station as -EBUSY and
1910 * calls the drain worker.
1911 */
1912 spin_lock_bh(&mvm_sta->lock);
c8f54701 1913 spin_unlock_bh(&mvm_sta->lock);
94c3e614 1914
1915 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1916 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1917
1918 return ret;
1919}
1920
1921int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1922 struct ieee80211_vif *vif,
1923 u8 sta_id)
1924{
1925 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1926
1927 lockdep_assert_held(&mvm->mutex);
1928
c531c771 1929 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1930 return ret;
1931}
1932
1933int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1934 struct iwl_mvm_int_sta *sta,
1935 u32 qmask, enum nl80211_iftype iftype,
1936 enum iwl_sta_type type)
8ca151b5 1937{
1938 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1939 sta->sta_id == IWL_MVM_INVALID_STA) {
b92e661b 1940 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
0ae98812 1941 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1942 return -ENOSPC;
1943 }
1944
1945 sta->tfd_queue_msk = qmask;
ced19f26 1946 sta->type = type;
1947
1948 /* put a non-NULL value so iterating over the stations won't stop */
1949 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1950 return 0;
1951}
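The ERR_PTR(-EINVAL) above reserves the fw_id_to_mac_id slot without pretending a mac80211 station lives there. A sketch of the convention a table walker must follow (a hypothetical loop, not a function from this file): NULL means free, an error pointer means internal station, anything else is safe to dereference.

/* hypothetical iteration over the station table under mvm->mutex */
for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
	struct ieee80211_sta *sta;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
					lockdep_is_held(&mvm->mutex));
	if (!sta)		/* free slot */
		continue;
	if (IS_ERR(sta))	/* internal station: occupied, but no
				 * mac80211 counterpart to dereference */
		continue;
	/* ... operate on the real station ... */
}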
1952
26d6c16b 1953void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
8ca151b5 1954{
c531c771 1955 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
8ca151b5 1956 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
0ae98812 1957 sta->sta_id = IWL_MVM_INVALID_STA;
1958}
1959
1960static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1961 u8 sta_id, u8 fifo)
8ca151b5 1962{
1963 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1964 mvm->cfg->base_params->wd_timeout :
1965 IWL_WATCHDOG_DISABLED;
8ca151b5 1966
310181ec 1967 if (iwl_mvm_has_new_tx_api(mvm)) {
b13f43a4 1968 int tvqm_queue =
cfbc6c4c 1969 iwl_mvm_tvqm_enable_txq(mvm, sta_id,
1970 IWL_MAX_TID_COUNT,
1971 wdg_timeout);
1972 *queue = tvqm_queue;
c8f54701 1973 } else {
28d0793e 1974 struct iwl_trans_txq_scd_cfg cfg = {
1975 .fifo = fifo,
1976 .sta_id = sta_id,
1977 .tid = IWL_MAX_TID_COUNT,
1978 .aggregate = false,
1979 .frame_limit = IWL_FRAME_LIMIT,
1980 };
1981
cfbc6c4c 1982 iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
28d0793e 1983 }
c5a719ee 1984}
28d0793e 1985
1986int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1987{
1988 int ret;
1989
1990 lockdep_assert_held(&mvm->mutex);
8ca151b5 1991
1992 /* Allocate aux station and assign to it the aux queue */
1993 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1994 NL80211_IFTYPE_UNSPECIFIED,
1995 IWL_STA_AUX_ACTIVITY);
8ca151b5 1996 if (ret)
1997 return ret;
1998
1999 /* Map Aux queue to fifo - needs to happen before adding Aux station */
2000 if (!iwl_mvm_has_new_tx_api(mvm))
2001 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2002 mvm->aux_sta.sta_id,
2003 IWL_MVM_TX_FIFO_MCAST);
2004
2005 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
2006 MAC_INDEX_AUX, 0);
2007 if (ret) {
8ca151b5 2008 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2009 return ret;
2010 }
2011
2012 /*
2f7a3863 2013 * For 22000 firmware and on we cannot add queue to a station unknown
2014 * to firmware so enable queue here - after the station was added
2015 */
2016 if (iwl_mvm_has_new_tx_api(mvm))
2017 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
2018 mvm->aux_sta.sta_id,
2019 IWL_MVM_TX_FIFO_MCAST);
2020
2021 return 0;
2022}
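The two calls to iwl_mvm_enable_aux_snif_queue() above encode an ordering constraint: legacy SCD firmware needs the queue/FIFO mapping in place before ADD_STA, while TVQM firmware allocates queues itself and rejects a request for a station it does not yet know. A condensed sketch of the pattern (variable names generic; iwl_mvm_add_snif_sta() below repeats it with the sniffer queue):

/* condensed from the aux/snif flows; not a new helper in this file */
if (!iwl_mvm_has_new_tx_api(mvm))		/* legacy SCD: queue first */
	iwl_mvm_enable_aux_snif_queue(mvm, &queue, sta_id, fifo);

ret = iwl_mvm_add_int_sta_common(mvm, int_sta, addr, mac_id, color);
if (ret)
	return ret;

if (iwl_mvm_has_new_tx_api(mvm))		/* TVQM: station first */
	iwl_mvm_enable_aux_snif_queue(mvm, &queue, sta_id, fifo);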
2023
2024int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2025{
2026 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
b13f43a4 2027 int ret;
2028
2029 lockdep_assert_held(&mvm->mutex);
2030
2031 /* Map snif queue to fifo - must happen before adding snif station */
2032 if (!iwl_mvm_has_new_tx_api(mvm))
2033 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2034 mvm->snif_sta.sta_id,
2035 IWL_MVM_TX_FIFO_BE);
2036
2037 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
0e39eb03 2038 mvmvif->id, 0);
2039 if (ret)
2040 return ret;
2041
2042 /*
2043 * For 22000 firmware and on we cannot add queue to a station unknown
2044 * to firmware so enable queue here - after the station was added
2045 */
2046 if (iwl_mvm_has_new_tx_api(mvm))
2047 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
2048 mvm->snif_sta.sta_id,
2049 IWL_MVM_TX_FIFO_BE);
2050
2051 return 0;
2052}
2053
2054int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2055{
2056 int ret;
2057
2058 lockdep_assert_held(&mvm->mutex);
2059
cfbc6c4c 2060 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
2061 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2062 if (ret)
2063 IWL_WARN(mvm, "Failed sending remove station\n");
2064
2065 return ret;
2066}
2067
2068void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2069{
2070 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2071}
2072
2073void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
2074{
2075 lockdep_assert_held(&mvm->mutex);
2076
2077 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2078}
2079
2080/*
2081 * Send the add station command for the vif's broadcast station.
2082 * Assumes that the station was already allocated.
2083 *
2084 * @mvm: the mvm component
2085 * @vif: the interface to which the broadcast station is added
2087 */
013290aa 2088int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2089{
2090 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
013290aa 2091 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
5023d966 2092 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
a4243402 2093 const u8 *baddr = _baddr;
7daa7624 2094 int queue;
df88c08d 2095 int ret;
2096 unsigned int wdg_timeout =
2097 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2098 struct iwl_trans_txq_scd_cfg cfg = {
2099 .fifo = IWL_MVM_TX_FIFO_VO,
2100 .sta_id = mvmvif->bcast_sta.sta_id,
2101 .tid = IWL_MAX_TID_COUNT,
2102 .aggregate = false,
2103 .frame_limit = IWL_FRAME_LIMIT,
2104 };
2105
2106 lockdep_assert_held(&mvm->mutex);
2107
c8f54701 2108 if (!iwl_mvm_has_new_tx_api(mvm)) {
2109 if (vif->type == NL80211_IFTYPE_AP ||
2110 vif->type == NL80211_IFTYPE_ADHOC)
49f71713 2111 queue = mvm->probe_queue;
df88c08d 2112 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
49f71713 2113 queue = mvm->p2p_dev_queue;
df88c08d 2114 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
2115 return -EINVAL;
2116
df88c08d 2117 bsta->tfd_queue_msk |= BIT(queue);
c5a719ee 2118
cfbc6c4c 2119 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2120 }
2121
2122 if (vif->type == NL80211_IFTYPE_ADHOC)
2123 baddr = vif->bss_conf.bssid;
2124
0ae98812 2125 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2126 return -ENOSPC;
2127
2128 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2129 mvmvif->id, mvmvif->color);
2130 if (ret)
2131 return ret;
2132
2133 /*
2f7a3863 2134 * For 22000 firmware and on we cannot add queue to a station unknown
c5a719ee 2135 * to firmware so enable queue here - after the station was added
df88c08d 2136 */
310181ec 2137 if (iwl_mvm_has_new_tx_api(mvm)) {
cfbc6c4c 2138 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2139 IWL_MAX_TID_COUNT,
2140 wdg_timeout);
2141
2142 if (vif->type == NL80211_IFTYPE_AP ||
2143 vif->type == NL80211_IFTYPE_ADHOC)
2144 mvm->probe_queue = queue;
2145 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2146 mvm->p2p_dev_queue = queue;
310181ec 2147 }
2148
2149 return 0;
2150}
2151
2152static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2153 struct ieee80211_vif *vif)
2154{
2155 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
d167e81a 2156 int queue;
2157
2158 lockdep_assert_held(&mvm->mutex);
2159
2160 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
2161
2162 switch (vif->type) {
2163 case NL80211_IFTYPE_AP:
2164 case NL80211_IFTYPE_ADHOC:
2165 queue = mvm->probe_queue;
2166 break;
2167 case NL80211_IFTYPE_P2P_DEVICE:
2168 queue = mvm->p2p_dev_queue;
2169 break;
2170 default:
2171 WARN(1, "Can't free bcast queue on vif type %d\n",
2172 vif->type);
2173 return;
2174 }
2175
cfbc6c4c 2176 iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
2177 if (iwl_mvm_has_new_tx_api(mvm))
2178 return;
2179
2180 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2181 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2182}
2183
2184/* Send the FW a request to remove the station from its internal data
2185 * structures, but DO NOT remove the entry from the local data structures. */
013290aa 2186int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
8ca151b5 2187{
013290aa 2188 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2189 int ret;
2190
2191 lockdep_assert_held(&mvm->mutex);
2192
c8f54701 2193 iwl_mvm_free_bcast_sta_queues(mvm, vif);
df88c08d 2194
013290aa 2195 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2196 if (ret)
2197 IWL_WARN(mvm, "Failed sending remove station\n");
2198 return ret;
2199}
2200
2201int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2202{
2203 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2204
2205 lockdep_assert_held(&mvm->mutex);
2206
c8f54701 2207 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2208 ieee80211_vif_type_p2p(vif),
2209 IWL_STA_GENERAL_PURPOSE);
2210}
2211
2212/* Allocate a new station entry for the broadcast station to the given vif,
2213 * and send it to the FW.
2214 * Note that each P2P mac should have its own broadcast station.
2215 *
2216 * @mvm: the mvm component
2217 * @vif: the interface to which the broadcast station is added
2218 */
d197358b 2219int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2220{
2221 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
013290aa 2222 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2223 int ret;
2224
2225 lockdep_assert_held(&mvm->mutex);
2226
013290aa 2227 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2228 if (ret)
2229 return ret;
2230
013290aa 2231 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2232
2233 if (ret)
2234 iwl_mvm_dealloc_int_sta(mvm, bsta);
013290aa 2235
2236 return ret;
2237}
2238
2239void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2240{
2241 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2242
2243 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2244}
2245
2246/*
2247 * Send the FW a request to remove the station from its internal data
2248 * structures, and in addition remove it from the local data structure.
2249 */
d197358b 2250int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2251{
2252 int ret;
2253
2254 lockdep_assert_held(&mvm->mutex);
2255
2256 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2257
2258 iwl_mvm_dealloc_bcast_sta(mvm, vif);
8ca151b5 2259
2260 return ret;
2261}
2262
2263/*
2264 * Allocate a new station entry for the multicast station to the given vif,
2265 * and send it to the FW.
2266 * Note that each AP/GO mac should have its own multicast station.
2267 *
2268 * @mvm: the mvm component
2269 * @vif: the interface to which the multicast station is added
2270 */
2271int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2272{
2273 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2274 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2275 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2276 const u8 *maddr = _maddr;
2277 struct iwl_trans_txq_scd_cfg cfg = {
2278 .fifo = IWL_MVM_TX_FIFO_MCAST,
2279 .sta_id = msta->sta_id,
6508de03 2280 .tid = 0,
2281 .aggregate = false,
2282 .frame_limit = IWL_FRAME_LIMIT,
2283 };
2284 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2285 int ret;
2286
2287 lockdep_assert_held(&mvm->mutex);
2288
2289 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2290 vif->type != NL80211_IFTYPE_ADHOC))
2291 return -ENOTSUPP;
2292
2293 /*
2294 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2295 * invalid, so make sure we use the queue we want.
2296 * Note that this is done here as we want to avoid making DQA
2297 * changes in mac80211 layer.
2298 */
2299 if (vif->type == NL80211_IFTYPE_ADHOC)
2300 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
fc07bd8c 2301
2302 /*
2303 * While in previous FWs we had to exclude cab queue from TFD queue
2304 * mask, now it is needed like any other queue.
2305 */
2306 if (!iwl_mvm_has_new_tx_api(mvm) &&
2307 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2308 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2309 timeout);
2310 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
ced19f26 2311 }
2312 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2313 mvmvif->id, mvmvif->color);
2314 if (ret) {
2315 iwl_mvm_dealloc_int_sta(mvm, msta);
2316 return ret;
2317 }
2318
2319 /*
2320 * Enable cab queue after the ADD_STA command is sent.
2f7a3863 2321 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2322 * command with unknown station id, and for FW that doesn't support
2323 * station API since the cab queue is not included in the
2324 * tfd_queue_mask.
26d6c16b 2325 */
310181ec 2326 if (iwl_mvm_has_new_tx_api(mvm)) {
cfbc6c4c 2327 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
6508de03 2328 0,
310181ec 2329 timeout);
e2af3fab 2330 mvmvif->cab_queue = queue;
ced19f26 2331 } else if (!fw_has_api(&mvm->fw->ucode_capa,
fc07bd8c 2332 IWL_UCODE_TLV_API_STA_TYPE))
2333 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2334 timeout);
26d6c16b 2335
2336 if (mvmvif->ap_wep_key) {
2337 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2338
2339 if (key_offset == STA_KEY_IDX_INVALID)
2340 return -ENOSPC;
2341
2342 __set_bit(key_offset, mvm->fw_key_table);
2343
2344 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
28916a16 2345 mvmvif->ap_wep_key, true, 0, NULL, 0,
2346 key_offset, 0);
2347 if (ret)
2348 return ret;
2349 }
2350
2351 return 0;
2352}
2353
2354static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2355 struct ieee80211_key_conf *keyconf,
2356 bool mcast)
2357{
2358 union {
2359 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2360 struct iwl_mvm_add_sta_key_cmd cmd;
2361 } u = {};
2362 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2363 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2364 __le16 key_flags;
2365 int ret, size;
2366 u32 status;
2367
2368 /* This is a valid situation for GTK removal */
2369 if (sta_id == IWL_MVM_INVALID_STA)
2370 return 0;
2371
2372 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2373 STA_KEY_FLG_KEYID_MSK);
2374 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2375 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2376
2377 if (mcast)
2378 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2379
2380 /*
2381 * The fields assigned here are in the same location at the start
2382 * of the command, so we can do this union trick.
2383 */
2384 u.cmd.common.key_flags = key_flags;
2385 u.cmd.common.key_offset = keyconf->hw_key_idx;
2386 u.cmd.common.sta_id = sta_id;
2387
2388 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2389
2390 status = ADD_STA_SUCCESS;
2391 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2392 &status);
2393
2394 switch (status) {
2395 case ADD_STA_SUCCESS:
2396 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2397 break;
2398 default:
2399 ret = -EIO;
2400 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2401 break;
2402 }
2403
2404 return ret;
2405}
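The union trick above works only because both command versions begin with the same common header, so a single write through u.cmd.common is valid regardless of which size goes on the wire. A toy standalone illustration under that assumption (struct layouts here are hypothetical, not the real firmware ABI):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* hypothetical layouts mimicking cmd_v1/cmd with a shared prefix */
struct key_common { uint16_t key_flags; uint8_t key_offset, sta_id; };
struct key_cmd_v1 { struct key_common common; uint8_t tkip_ttak[10]; };
struct key_cmd_v2 { struct key_common common; uint64_t tx_pn; };

static size_t build_key_cmd(void *out, int new_api, uint8_t sta_id)
{
	union {
		struct key_cmd_v1 cmd_v1;
		struct key_cmd_v2 cmd;
	} u = {};
	size_t size;

	/* common fields sit at offset 0 of both members, so one
	 * assignment serves either command version */
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
	memcpy(out, &u, size);	/* only 'size' bytes go to the firmware */
	return size;
}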
2406
2407/*
2408 * Send the FW a request to remove the station from its internal data
2409 * structures, and in addition remove it from the local data structure.
2410 */
2411int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2412{
2413 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2414 int ret;
2415
2416 lockdep_assert_held(&mvm->mutex);
2417
2418 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2419
cfbc6c4c 2420 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
26d6c16b 2421
2422 if (mvmvif->ap_wep_key) {
2423 int i;
2424
2425 if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
2426 mvm->fw_key_table)) {
2427 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
2428 mvmvif->ap_wep_key->hw_key_idx);
2429 return -ENOENT;
2430 }
2431
2432 /* track which key was deleted last */
2433 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2434 if (mvm->fw_key_deleted[i] < U8_MAX)
2435 mvm->fw_key_deleted[i]++;
2436 }
2437 mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
2438 ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2439 mvmvif->ap_wep_key, true);
2440 if (ret)
2441 return ret;
2442 }
2443
2444 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2445 if (ret)
2446 IWL_WARN(mvm, "Failed sending remove station\n");
2447
2448 return ret;
2449}
2450
2451#define IWL_MAX_RX_BA_SESSIONS 16
2452
b915c101 2453static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
10b2b201 2454{
2455 struct iwl_mvm_delba_notif notif = {
2456 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2457 .metadata.sync = 1,
2458 .delba.baid = baid,
10b2b201 2459 };
2460 iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
2461};
10b2b201 2462
2463static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2464 struct iwl_mvm_baid_data *data)
2465{
2466 int i;
2467
2468 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2469
2470 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2471 int j;
2472 struct iwl_mvm_reorder_buffer *reorder_buf =
2473 &data->reorder_buf[i];
2474 struct iwl_mvm_reorder_buf_entry *entries =
2475 &data->entries[i * data->entries_per_queue];
b915c101 2476
2477 spin_lock_bh(&reorder_buf->lock);
2478 if (likely(!reorder_buf->num_stored)) {
2479 spin_unlock_bh(&reorder_buf->lock);
b915c101 2480 continue;
0690405f 2481 }
2482
2483 /*
2484 * This shouldn't happen in regular DELBA since the internal
2485 * delBA notification should trigger a release of all frames in
2486 * the reorder buffer.
2487 */
2488 WARN_ON(1);
2489
2490 for (j = 0; j < reorder_buf->buf_size; j++)
dfdddd92 2491 __skb_queue_purge(&entries[j].e.frames);
2492 /*
2493 * Prevent timer re-arm. This prevents a very far-fetched case
2494 * where we timed out on the notification. There may be prior
2495 * RX frames pending in the RX queue before the notification
2496 * that might get processed between now and the actual deletion
2497 * and we would re-arm the timer although we are deleting the
2498 * reorder buffer.
2499 */
2500 reorder_buf->removed = true;
2501 spin_unlock_bh(&reorder_buf->lock);
2502 del_timer_sync(&reorder_buf->reorder_timer);
2503 }
2504}
2505
2506static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
b915c101 2507 struct iwl_mvm_baid_data *data,
514c3069 2508 u16 ssn, u16 buf_size)
2509{
2510 int i;
2511
2512 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2513 struct iwl_mvm_reorder_buffer *reorder_buf =
2514 &data->reorder_buf[i];
2515 struct iwl_mvm_reorder_buf_entry *entries =
2516 &data->entries[i * data->entries_per_queue];
2517 int j;
2518
2519 reorder_buf->num_stored = 0;
2520 reorder_buf->head_sn = ssn;
2521 reorder_buf->buf_size = buf_size;
0690405f 2522 /* rx reorder timer */
2523 timer_setup(&reorder_buf->reorder_timer,
2524 iwl_mvm_reorder_timer_expired, 0);
2525 spin_lock_init(&reorder_buf->lock);
2526 reorder_buf->mvm = mvm;
b915c101 2527 reorder_buf->queue = i;
5d43eab6 2528 reorder_buf->valid = false;
b915c101 2529 for (j = 0; j < reorder_buf->buf_size; j++)
dfdddd92 2530 __skb_queue_head_init(&entries[j].e.frames);
b915c101 2531 }
2532}
2533
8ca151b5 2534int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
514c3069 2535 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
8ca151b5 2536{
9d8ce6af 2537 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
f9dc0004 2538 struct iwl_mvm_add_sta_cmd cmd = {};
10b2b201 2539 struct iwl_mvm_baid_data *baid_data = NULL;
2540 int ret;
2541 u32 status;
2542
2543 lockdep_assert_held(&mvm->mutex);
2544
2545 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2546 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2547 return -ENOSPC;
2548 }
2549
10b2b201 2550 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2551 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2552
2553 /* sparse doesn't like the __align() so don't check */
2554#ifndef __CHECKER__
2555 /*
2556 * The division below will be OK if either the cache line size
2557 * can be divided by the entry size (ALIGN will round up) or if
2558 * the entry size can be divided by the cache line size, in
2559 * which case the ALIGN() will do nothing.
2560 */
2561 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2562 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2563#endif
2564
2565 /*
2566 * Upward align the reorder buffer size to fill an entire cache
2567 * line for each queue, to avoid sharing cache lines between
2568 * different queues.
2569 */
2570 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2571
2572 /*
2573 * Allocate here so if allocation fails we can bail out early
2574 * before starting the BA session in the firmware
2575 */
2576 baid_data = kzalloc(sizeof(*baid_data) +
2577 mvm->trans->num_rx_queues *
dfdddd92 2578 reorder_buf_size,
b915c101 2579 GFP_KERNEL);
2580 if (!baid_data)
2581 return -ENOMEM;
2582
2583 /*
2584 * This division is why we need the above BUILD_BUG_ON(),
2585 * if that doesn't hold then this will not be right.
2586 */
2587 baid_data->entries_per_queue =
2588 reorder_buf_size / sizeof(baid_data->entries[0]);
2589 }
2590
2591 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2592 cmd.sta_id = mvm_sta->sta_id;
2593 cmd.add_modify = STA_MODE_MODIFY;
2594 if (start) {
2595 cmd.add_immediate_ba_tid = (u8) tid;
2596 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
514c3069 2597 cmd.rx_ba_window = cpu_to_le16(buf_size);
2598 } else {
2599 cmd.remove_immediate_ba_tid = (u8) tid;
2600 }
2601 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2602 STA_MODIFY_REMOVE_BA_TID;
2603
2604 status = ADD_STA_SUCCESS;
2605 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2606 iwl_mvm_add_sta_cmd_size(mvm),
f9dc0004 2607 &cmd, &status);
8ca151b5 2608 if (ret)
10b2b201 2609 goto out_free;
8ca151b5 2610
837c4da9 2611 switch (status & IWL_ADD_STA_STATUS_MASK) {
8ca151b5 2612 case ADD_STA_SUCCESS:
2613 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2614 start ? "start" : "stopp");
2615 break;
2616 case ADD_STA_IMMEDIATE_BA_FAILURE:
2617 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2618 ret = -ENOSPC;
2619 break;
2620 default:
2621 ret = -EIO;
2622 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2623 start ? "start" : "stopp", status);
2624 break;
2625 }
2626
2627 if (ret)
2628 goto out_free;
2629
2630 if (start) {
2631 u8 baid;
2632
2633 mvm->rx_ba_sessions++;
2634
2635 if (!iwl_mvm_has_new_rx_api(mvm))
2636 return 0;
2637
2638 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2639 ret = -EINVAL;
2640 goto out_free;
2641 }
2642 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2643 IWL_ADD_STA_BAID_SHIFT);
2644 baid_data->baid = baid;
2645 baid_data->timeout = timeout;
2646 baid_data->last_rx = jiffies;
2647 baid_data->rcu_ptr = &mvm->baid_map[baid];
2648 timer_setup(&baid_data->session_timer,
2649 iwl_mvm_rx_agg_session_expired, 0);
2650 baid_data->mvm = mvm;
2651 baid_data->tid = tid;
2652 baid_data->sta_id = mvm_sta->sta_id;
2653
2654 mvm_sta->tid_to_baid[tid] = baid;
2655 if (timeout)
2656 mod_timer(&baid_data->session_timer,
2657 TU_TO_EXP_TIME(timeout * 2));
2658
3f1c4c58 2659 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2660 /*
2661 * protect the BA data with RCU to cover a case where our
2662 * internal RX sync mechanism will timeout (not that it's
2663 * supposed to happen) and we will free the session data while
2664 * RX is being processed in parallel
2665 */
2666 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2667 mvm_sta->sta_id, tid, baid);
2668 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2669 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
60dec523 2670 } else {
2671 u8 baid = mvm_sta->tid_to_baid[tid];
2672
2673 if (mvm->rx_ba_sessions > 0)
2674 /* check that restart flow didn't zero the counter */
2675 mvm->rx_ba_sessions--;
2676 if (!iwl_mvm_has_new_rx_api(mvm))
2677 return 0;
2678
2679 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2680 return -EINVAL;
2681
2682 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2683 if (WARN_ON(!baid_data))
2684 return -EINVAL;
2685
2686 /* synchronize all rx queues so we can safely delete */
b915c101 2687 iwl_mvm_free_reorder(mvm, baid_data);
10b2b201 2688 del_timer_sync(&baid_data->session_timer);
2689 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2690 kfree_rcu(baid_data, rcu_head);
35263a03 2691 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
113a0447 2692 }
10b2b201 2693 return 0;
113a0447 2694
2695out_free:
2696 kfree(baid_data);
2697 return ret;
2698}
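The alignment arithmetic in iwl_mvm_sta_rx_agg() is easy to check with concrete numbers. A worked example, assuming a 64-byte cache line and a 16-byte reorder entry (both sizes are illustrative; the real values come from SMP_CACHE_BYTES and the entry struct):

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

/* illustrative sizes only */
unsigned int entry = 16, cache_line = 64, buf_size = 10;

unsigned int raw  = buf_size * entry;		/* 160 bytes             */
unsigned int size = ALIGN_UP(raw, cache_line);	/* rounded up to 192     */
unsigned int entries_per_queue = size / entry;	/* 12: two padding slots
						 * keep each queue's
						 * entries on their own
						 * cache lines           */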
2699
2700int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2701 int tid, u8 queue, bool start)
8ca151b5 2702{
9d8ce6af 2703 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
f9dc0004 2704 struct iwl_mvm_add_sta_cmd cmd = {};
2705 int ret;
2706 u32 status;
2707
2708 lockdep_assert_held(&mvm->mutex);
2709
2710 if (start) {
2711 mvm_sta->tfd_queue_msk |= BIT(queue);
2712 mvm_sta->tid_disable_agg &= ~BIT(tid);
2713 } else {
cf961e16 2714 /* In DQA-mode the queue isn't removed on agg termination */
2715 mvm_sta->tid_disable_agg |= BIT(tid);
2716 }
2717
2718 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2719 cmd.sta_id = mvm_sta->sta_id;
2720 cmd.add_modify = STA_MODE_MODIFY;
2721 if (!iwl_mvm_has_new_tx_api(mvm))
2722 cmd.modify_mask = STA_MODIFY_QUEUES;
2723 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2724 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2725 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2726
2727 status = ADD_STA_SUCCESS;
2728 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2729 iwl_mvm_add_sta_cmd_size(mvm),
f9dc0004 2730 &cmd, &status);
2731 if (ret)
2732 return ret;
2733
837c4da9 2734 switch (status & IWL_ADD_STA_STATUS_MASK) {
2735 case ADD_STA_SUCCESS:
2736 break;
2737 default:
2738 ret = -EIO;
2739 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2740 start ? "start" : "stopp", status);
2741 break;
2742 }
2743
2744 return ret;
2745}
2746
b797e3fb 2747const u8 tid_to_mac80211_ac[] = {
2748 IEEE80211_AC_BE,
2749 IEEE80211_AC_BK,
2750 IEEE80211_AC_BK,
2751 IEEE80211_AC_BE,
2752 IEEE80211_AC_VI,
2753 IEEE80211_AC_VI,
2754 IEEE80211_AC_VO,
2755 IEEE80211_AC_VO,
9794c64f 2756 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2757};
2758
2759static const u8 tid_to_ucode_ac[] = {
2760 AC_BE,
2761 AC_BK,
2762 AC_BK,
2763 AC_BE,
2764 AC_VI,
2765 AC_VI,
2766 AC_VO,
2767 AC_VO,
2768};
2769
2770int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2771 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2772{
5b577a90 2773 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
8ca151b5 2774 struct iwl_mvm_tid_data *tid_data;
dd32162d 2775 u16 normalized_ssn;
b0d795a9 2776 u16 txq_id;
4ecafae9 2777 int ret;
2778
2779 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2780 return -EINVAL;
2781
2782 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2783 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2784 IWL_ERR(mvm,
2785 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2786 mvmsta->tid_data[tid].state);
2787 return -ENXIO;
2788 }
2789
2790 lockdep_assert_held(&mvm->mutex);
2791
2792 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2793 iwl_mvm_has_new_tx_api(mvm)) {
2794 u8 ac = tid_to_mac80211_ac[tid];
2795
2796 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2797 if (ret)
2798 return ret;
2799 }
2800
2801 spin_lock_bh(&mvmsta->lock);
2802
2803 /* possible race condition - we entered D0i3 while starting agg */
2804 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2805 spin_unlock_bh(&mvmsta->lock);
2806 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2807 return -EIO;
2808 }
2809
2810 /*
2811 * Note the possible cases:
2812 * 1. An enabled TXQ - TXQ needs to become agg'ed
2813 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2814 * it as reserved
2815 */
2816 txq_id = mvmsta->tid_data[tid].txq_id;
4a6d2e52 2817 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2818 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2819 IWL_MVM_DQA_MIN_DATA_QUEUE,
2820 IWL_MVM_DQA_MAX_DATA_QUEUE);
2821 if (ret < 0) {
cf961e16 2822 IWL_ERR(mvm, "Failed to allocate agg queue\n");
f3f240f9 2823 goto out;
2824 }
2825
2826 txq_id = ret;
2827
2828 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2829 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2830 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2831 ret = -ENXIO;
2832 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2833 tid, IWL_MAX_HW_QUEUES - 1);
2834 goto out;
2835
2836 } else if (unlikely(mvm->queue_info[txq_id].status ==
2837 IWL_MVM_QUEUE_SHARED)) {
2838 ret = -ENXIO;
2839 IWL_DEBUG_TX_QUEUES(mvm,
2840 "Can't start tid %d agg on shared queue!\n",
2841 tid);
f3f240f9 2842 goto out;
4ecafae9 2843 }
9f9af3d7 2844
2845 IWL_DEBUG_TX_QUEUES(mvm,
2846 "AGG for tid %d will be on queue #%d\n",
2847 tid, txq_id);
2848
8ca151b5 2849 tid_data = &mvmsta->tid_data[tid];
9a886586 2850 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2851 tid_data->txq_id = txq_id;
2852 *ssn = tid_data->ssn;
2853
2854 IWL_DEBUG_TX_QUEUES(mvm,
2855 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2856 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2857 tid_data->next_reclaimed);
2858
dd32162d 2859 /*
2f7a3863 2860 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
2861 * to align the wrap around of ssn so we compare relevant values.
2862 */
2863 normalized_ssn = tid_data->ssn;
2864 if (mvm->trans->cfg->gen2)
2865 normalized_ssn &= 0xff;
2866
2867 if (normalized_ssn == tid_data->next_reclaimed) {
2868 tid_data->state = IWL_AGG_STARTING;
2869 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2870 } else {
2871 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2872 }
2873
2874 ret = 0;
2875
9f9af3d7 2876out:
2877 spin_unlock_bh(&mvmsta->lock);
2878
4ecafae9 2879 return ret;
2880}
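The 8-bit normalization in iwl_mvm_sta_tx_agg_start() matters only near the counter wrap. A concrete case on gen2 hardware, where the firmware tracks next_reclaimed in 8 bits: a full-width ssn of 0x1ff and a next_reclaimed of 0xff denote the same position once the ssn is masked, so the session can start immediately instead of waiting for a full-width match that would never come.

/* e.g. tid_data->ssn = 0x1ff, tid_data->next_reclaimed = 0xff */
u16 normalized_ssn = tid_data->ssn;
if (mvm->trans->cfg->gen2)
	normalized_ssn &= 0xff;		/* 0x1ff -> 0xff */

if (normalized_ssn == tid_data->next_reclaimed)
	tid_data->state = IWL_AGG_STARTING;	/* queue already drained */
else
	tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;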
2881
2882int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
514c3069 2883 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bb81bb68 2884 bool amsdu)
8ca151b5 2885{
5b577a90 2886 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
8ca151b5 2887 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2888 unsigned int wdg_timeout =
2889 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
eea76c36 2890 int queue, ret;
cf961e16 2891 bool alloc_queue = true;
9f9af3d7 2892 enum iwl_mvm_queue_status queue_status;
2893 u16 ssn;
2894
2895 struct iwl_trans_txq_scd_cfg cfg = {
2896 .sta_id = mvmsta->sta_id,
2897 .tid = tid,
2898 .frame_limit = buf_size,
2899 .aggregate = true,
2900 };
2901
2902 /*
2903 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
2904 * manager, so this function should never be called in this case.
2905 */
4243edb4 2906 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2907 return -EINVAL;
2908
2909 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2910 != IWL_MAX_TID_COUNT);
2911
2912 spin_lock_bh(&mvmsta->lock);
2913 ssn = tid_data->ssn;
2914 queue = tid_data->txq_id;
2915 tid_data->state = IWL_AGG_ON;
efed6640 2916 mvmsta->agg_tids |= BIT(tid);
8ca151b5 2917 tid_data->ssn = 0xffff;
bb81bb68 2918 tid_data->amsdu_in_ampdu_allowed = amsdu;
2919 spin_unlock_bh(&mvmsta->lock);
2920
2921 if (iwl_mvm_has_new_tx_api(mvm)) {
2922 /*
2923 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2924 * would have failed, so if we are here there is no need to
2925 * allocate a queue.
2926 * However, if aggregation size is different than the default
2927 * size, the scheduler should be reconfigured.
2928 * We cannot do this with the new TX API, so return unsupported
2929 * for now, until it is offloaded to firmware.
2930 * Note that if SCD default value changes - this condition
2931 * should be updated as well.
34e10860 2932 */
0ec9257b 2933 if (buf_size < IWL_FRAME_LIMIT)
2934 return -ENOTSUPP;
2935
2936 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2937 if (ret)
2938 return -EIO;
2939 goto out;
2940 }
2941
eea76c36 2942 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
8ca151b5 2943
9f9af3d7 2944 queue_status = mvm->queue_info[queue].status;
9f9af3d7 2945
2946 /* Maybe there is no need to even alloc a queue... */
2947 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2948 alloc_queue = false;
cf961e16 2949
2950 /*
2951 * Only reconfig the SCD for the queue if the window size has
2952 * changed from current (become smaller)
2953 */
0ec9257b 2954 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
cf961e16 2955 /*
2956 * If reconfiguring an existing queue, it first must be
2957 * drained
cf961e16 2958 */
2959 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2960 BIT(queue));
2961 if (ret) {
2962 IWL_ERR(mvm,
2963 "Error draining queue before reconfig\n");
2964 return ret;
2965 }
cf961e16 2966
2967 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2968 mvmsta->sta_id, tid,
2969 buf_size, ssn);
2970 if (ret) {
2971 IWL_ERR(mvm,
2972 "Error reconfiguring TXQ #%d\n", queue);
2973 return ret;
2974 }
2975 }
2976
2977 if (alloc_queue)
cfbc6c4c 2978 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
cf961e16 2979 &cfg, wdg_timeout);
fa7878e7 2980
2981 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2982 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2983 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2984 if (ret)
2985 return -EIO;
2986 }
8ca151b5 2987
4ecafae9 2988 /* No need to mark as reserved */
cf961e16 2989 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
4ecafae9 2990
34e10860 2991out:
2992 /*
2993 * Even though in theory the peer could have different
2994 * aggregation reorder buffer sizes for different sessions,
2995 * our ucode doesn't allow for that and has a global limit
2996 * for each station. Therefore, use the minimum of all the
2997 * aggregation sessions and our default value.
2998 */
2999 mvmsta->max_agg_bufsize =
3000 min(mvmsta->max_agg_bufsize, buf_size);
ecaf71de 3001 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
8ca151b5 3002
3003 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3004 sta->addr, tid);
3005
ecaf71de 3006 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
3007}
3008
3009static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3010 struct iwl_mvm_sta *mvmsta,
4b387906 3011 struct iwl_mvm_tid_data *tid_data)
34e10860 3012{
3013 u16 txq_id = tid_data->txq_id;
3014
3015 lockdep_assert_held(&mvm->mutex);
3016
3017 if (iwl_mvm_has_new_tx_api(mvm))
3018 return;
3019
3020 /*
3021 * The TXQ is marked as reserved only if no traffic has come through yet.
3022 * This means no traffic has been sent on this TID (agg'd or not), so
3023 * we no longer have use for the queue. Since it hasn't even been
3024 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3025 * free.
3026 */
4b387906 3027 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
34e10860 3028 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3029 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3030 }
3031}
3032
3033int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3034 struct ieee80211_sta *sta, u16 tid)
3035{
5b577a90 3036 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3037 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3038 u16 txq_id;
3039 int err;
3040
3041 /*
3042 * If mac80211 is cleaning its state, then say that we finished since
3043 * our state has been cleared anyway.
3044 */
3045 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3046 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3047 return 0;
3048 }
3049
3050 spin_lock_bh(&mvmsta->lock);
3051
3052 txq_id = tid_data->txq_id;
3053
3054 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3055 mvmsta->sta_id, tid, txq_id, tid_data->state);
3056
3057 mvmsta->agg_tids &= ~BIT(tid);
3058
4b387906 3059 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
4ecafae9 3060
3061 switch (tid_data->state) {
3062 case IWL_AGG_ON:
9a886586 3063 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3064
3065 IWL_DEBUG_TX_QUEUES(mvm,
3066 "ssn = %d, next_recl = %d\n",
3067 tid_data->ssn, tid_data->next_reclaimed);
3068
8ca151b5 3069 tid_data->ssn = 0xffff;
f7f89e7b 3070 tid_data->state = IWL_AGG_OFF;
3071 spin_unlock_bh(&mvmsta->lock);
3072
3073 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3074
3075 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
f7f89e7b 3076 return 0;
3077 case IWL_AGG_STARTING:
3078 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3079 /*
3080 * The agg session has been stopped before it was set up. This
3081 * can happen when the AddBA timer times out for example.
3082 */
3083
3084 /* No barriers since we are under mutex */
3085 lockdep_assert_held(&mvm->mutex);
3086
3087 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3088 tid_data->state = IWL_AGG_OFF;
3089 err = 0;
3090 break;
3091 default:
3092 IWL_ERR(mvm,
3093 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3094 mvmsta->sta_id, tid, tid_data->state);
3095 IWL_ERR(mvm,
3096 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3097 err = -EINVAL;
3098 }
3099
3100 spin_unlock_bh(&mvmsta->lock);
3101
3102 return err;
3103}
3104
3105int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3106 struct ieee80211_sta *sta, u16 tid)
3107{
5b577a90 3108 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3109 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3110 u16 txq_id;
b6658ff8 3111 enum iwl_mvm_agg_state old_state;
3112
3113 /*
3114 * First set the agg state to OFF to avoid calling
3115 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3116 */
3117 spin_lock_bh(&mvmsta->lock);
3118 txq_id = tid_data->txq_id;
3119 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3120 mvmsta->sta_id, tid, txq_id, tid_data->state);
b6658ff8 3121 old_state = tid_data->state;
e3d9e7ce 3122 tid_data->state = IWL_AGG_OFF;
efed6640 3123 mvmsta->agg_tids &= ~BIT(tid);
3124 spin_unlock_bh(&mvmsta->lock);
3125
4b387906 3126 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
4ecafae9 3127
b6658ff8 3128 if (old_state >= IWL_AGG_ON) {
fe92e32a 3129 iwl_mvm_drain_sta(mvm, mvmsta, true);
d6d517b7 3130
3131 if (iwl_mvm_has_new_tx_api(mvm)) {
3132 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3133 BIT(tid), 0))
3134 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
d6d517b7 3135 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3136 } else {
3137 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
3138 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
d6d517b7 3139 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
d167e81a 3140 }
d6d517b7 3141
fe92e32a 3142 iwl_mvm_drain_sta(mvm, mvmsta, false);
b6658ff8 3143
f7f89e7b 3144 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
b6658ff8 3145 }
e3d9e7ce 3146
3147 return 0;
3148}
3149
3150static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3151{
2dc2a15e 3152 int i, max = -1, max_offs = -1;
3153
3154 lockdep_assert_held(&mvm->mutex);
3155
3156 /* Pick the unused key offset with the highest 'deleted'
3157 * counter. Every time a key is deleted, all the counters
3158 * are incremented and the one that was just deleted is
3159 * reset to zero. Thus, the highest counter is the one
3160 * that was deleted longest ago. Pick that one.
3161 */
3162 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3163 if (test_bit(i, mvm->fw_key_table))
3164 continue;
3165 if (mvm->fw_key_deleted[i] > max) {
3166 max = mvm->fw_key_deleted[i];
3167 max_offs = i;
3168 }
3169 }
8ca151b5 3170
2dc2a15e 3171 if (max_offs < 0)
3172 return STA_KEY_IDX_INVALID;
3173
2dc2a15e 3174 return max_offs;
3175}
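The "deleted longest ago" policy relies on the counter maintenance done at key removal time (see the fw_key_deleted loop in iwl_mvm_rm_mcast_sta() above). A small standalone sketch of the two halves working together (array size and names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define NUM_KEYS 16

static bool    key_in_use[NUM_KEYS];
static uint8_t key_deleted_age[NUM_KEYS];

/* on delete: age every counter, reset the freed slot to "youngest" */
static void key_deleted(int idx)
{
	for (int i = 0; i < NUM_KEYS; i++)
		if (key_deleted_age[i] < UINT8_MAX)
			key_deleted_age[i]++;
	key_deleted_age[idx] = 0;
	key_in_use[idx] = false;
}

/* on allocate: prefer the free slot unused for the longest time, so a
 * stale index possibly lingering in hw/fw state is least likely to clash */
static int pick_key_offset(void)
{
	int max = -1, max_offs = -1;

	for (int i = 0; i < NUM_KEYS; i++) {
		if (key_in_use[i])
			continue;
		if (key_deleted_age[i] > max) {
			max = key_deleted_age[i];
			max_offs = i;
		}
	}
	return max_offs;	/* -1: table full */
}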
3176
3177static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3178 struct ieee80211_vif *vif,
3179 struct ieee80211_sta *sta)
8ca151b5 3180{
5b530e95 3181 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
8ca151b5 3182
3183 if (sta)
3184 return iwl_mvm_sta_from_mac80211(sta);
3185
3186 /*
3187 * The device expects GTKs for station interfaces to be
3188 * installed as GTKs for the AP station. If we have no
3189 * station ID, then use AP's station ID.
3190 */
3191 if (vif->type == NL80211_IFTYPE_STATION &&
0ae98812 3192 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3193 u8 sta_id = mvmvif->ap_sta_id;
3194
3195 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3196 lockdep_is_held(&mvm->mutex));
3197
3198 /*
3199 * It is possible that the 'sta' parameter is NULL,
3200 * for example when a GTK is removed - the sta_id will then
3201 * be the AP ID, and no station was passed by mac80211.
3202 */
3203 if (IS_ERR_OR_NULL(sta))
3204 return NULL;
3205
3206 return iwl_mvm_sta_from_mac80211(sta);
9513c5e1 3207 }
8ca151b5 3208
5f7a1847 3209 return NULL;
3210}
3211
3212static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
85aeb58c 3213 u32 sta_id,
45c458b4 3214 struct ieee80211_key_conf *key, bool mcast,
d6ee54a9 3215 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
4883145a 3216 u8 key_offset, bool mfp)
8ca151b5 3217{
3218 union {
3219 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3220 struct iwl_mvm_add_sta_key_cmd cmd;
3221 } u = {};
f9dc0004 3222 __le16 key_flags;
3223 int ret;
3224 u32 status;
8ca151b5 3225 u16 keyidx;
3226 u64 pn = 0;
3227 int i, size;
3228 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3229 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
8ca151b5 3230
3231 if (sta_id == IWL_MVM_INVALID_STA)
3232 return -EINVAL;
3233
45c458b4 3234 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3235 STA_KEY_FLG_KEYID_MSK;
3236 key_flags = cpu_to_le16(keyidx);
3237 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3238
45c458b4 3239 switch (key->cipher) {
3240 case WLAN_CIPHER_SUITE_TKIP:
3241 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3242 if (new_api) {
3243 memcpy((void *)&u.cmd.tx_mic_key,
3244 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3245 IWL_MIC_KEY_SIZE);
3246
3247 memcpy((void *)&u.cmd.rx_mic_key,
3248 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3249 IWL_MIC_KEY_SIZE);
3250 pn = atomic64_read(&key->tx_pn);
3251
3252 } else {
3253 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3254 for (i = 0; i < 5; i++)
3255 u.cmd_v1.tkip_rx_ttak[i] =
3256 cpu_to_le16(tkip_p1k[i]);
3257 }
3258 memcpy(u.cmd.common.key, key->key, key->keylen);
3259 break;
3260 case WLAN_CIPHER_SUITE_CCMP:
3261 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3262 memcpy(u.cmd.common.key, key->key, key->keylen);
3263 if (new_api)
3264 pn = atomic64_read(&key->tx_pn);
8ca151b5 3265 break;
3266 case WLAN_CIPHER_SUITE_WEP104:
3267 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
aa0cb08b 3268 /* fall through */
3269 case WLAN_CIPHER_SUITE_WEP40:
3270 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
45c458b4 3271 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
ba3943b0 3272 break;
3273 case WLAN_CIPHER_SUITE_GCMP_256:
3274 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3275 /* fall through */
3276 case WLAN_CIPHER_SUITE_GCMP:
3277 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3278 memcpy(u.cmd.common.key, key->key, key->keylen);
3279 if (new_api)
3280 pn = atomic64_read(&key->tx_pn);
2a53d166 3281 break;
8ca151b5 3282 default:
e36e5433 3283 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
45c458b4 3284 memcpy(u.cmd.common.key, key->key, key->keylen);
3285 }
3286
ba3943b0 3287 if (mcast)
8ca151b5 3288 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3289 if (mfp)
3290 key_flags |= cpu_to_le16(STA_KEY_MFP);
8ca151b5 3291
3292 u.cmd.common.key_offset = key_offset;
3293 u.cmd.common.key_flags = key_flags;
85aeb58c 3294 u.cmd.common.sta_id = sta_id;
3295
3296 if (new_api) {
3297 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3298 size = sizeof(u.cmd);
3299 } else {
3300 size = sizeof(u.cmd_v1);
3301 }
3302
3303 status = ADD_STA_SUCCESS;
a1022927 3304 if (cmd_flags & CMD_ASYNC)
3305 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3306 &u.cmd);
a1022927 3307 else
3308 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3309 &u.cmd, &status);
3310
3311 switch (status) {
3312 case ADD_STA_SUCCESS:
3313 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3314 break;
3315 default:
3316 ret = -EIO;
3317 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3318 break;
3319 }
3320
3321 return ret;
3322}
3323
3324static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3325 struct ieee80211_key_conf *keyconf,
3326 u8 sta_id, bool remove_key)
3327{
3328 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3329
3330 /* verify the key details match the required command's expectations */
3331 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3332 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
3333 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3334 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3335 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3336 return -EINVAL;
3337
3338 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3339 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3340 return -EINVAL;
3341
3342 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3343 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3344
3345 if (remove_key) {
3346 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3347 } else {
3348 struct ieee80211_key_seq seq;
3349 const u8 *pn;
3350
aa950524
AB
3351 switch (keyconf->cipher) {
3352 case WLAN_CIPHER_SUITE_AES_CMAC:
3353 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3354 break;
8e160ab8
AB
3355 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3356 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3357 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3358 break;
aa950524
AB
3359 default:
3360 return -EINVAL;
3361 }
3362
8e160ab8
AB
3363 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3364 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3365 igtk_cmd.ctrl_flags |=
3366 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
8ca151b5
JB
3367 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3368 pn = seq.aes_cmac.pn;
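		/*
		 * mac80211 stores the IPN most-significant-byte first, while
		 * the firmware expects a little-endian 48-bit counter, so the
		 * bytes are reversed while packing them into the u64.
		 */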
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

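/*
 * Pick the MAC address used for TKIP phase-1 key derivation: the station's
 * own address when one is given, the AP station's address on a client
 * interface, and NULL otherwise.
 */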
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

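	/*
	 * TKIP is the only cipher needing extra material here: the current
	 * IV32 and the phase-1 (TTAK) key, which mac80211 derives from the
	 * temporal key, the transmitter's MAC address and the IV32.
	 */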
	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * The 'sta' parameter may be NULL, in which case the station
		 * needs to be looked up in the local station table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/*
	 * Track which key offset was deleted last: age every counter and
	 * reset the one for the offset just freed, so that
	 * iwl_mvm_set_fw_key_idx() can prefer the least recently freed
	 * offset and avoid immediately reusing a just-deleted slot.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

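/*
 * Called through mac80211's update_tkip_key callback when the IV32 rolls
 * over and a fresh phase-1 key must be programmed.  This can run in RX
 * context, hence the RCU protection and the CMD_ASYNC send.
 */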
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

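/*
 * The station came out of power save: clear STA_FLG_PS so the firmware
 * resumes transmitting frames to it.
 */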
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues, then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
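		/*
		 * Releasing zero frames makes no sense; warn and tell
		 * mac80211 that the service period ended right away.
		 */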
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW has updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

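/*
 * Firmware notification that a service period ended (all requested frames
 * were released); relay it to mac80211 via ieee80211_sta_eosp().
 */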
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also the multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

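/*
 * While a channel switch is in progress, TX towards the AP must stop;
 * treat the AP as absent by marking its station as TX-disabled until the
 * switch completes.
 */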
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

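/*
 * Number of frames still pending on a TID, computed as the SN distance
 * between the driver's current sequence number and the last reclaimed frame.
 */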
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need to
	 * align the wrap-around of the SSN to compare the relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}