1 /******************************************************************************
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
39 #include "iwl-helpers.h"
40 #include "iwl-agn-hw.h"
42 #include "iwl-trans.h"
45 * mac80211 queues, ACs, hardware queues, FIFOs.
47 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 * Mac80211 uses the following numbers, which we get from it
50 * by way of skb_get_queue_mapping(skb):
58 * Regular (not A-MPDU) frames are put into hardware queues corresponding
59 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
60 * own queue per aggregation session (RA/TID combination), such queues are
61 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
62 * order to map frames to the right queue, we also need an AC->hw queue
63 * mapping. This is implemented here.
65 * Due to the way hw queues are set up (by the hw specific modules like
66 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
70 static const u8 tid_to_ac[] = {
81 static inline int get_ac_from_tid(u16 tid)
83 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
84 return tid_to_ac[tid];
86 /* no support for TIDs 8-15 yet */
90 static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
95 /* no support for TIDs 8-15 yet */
99 static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
102 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
103 (IWLAGN_FIRST_AMPDU_QUEUE +
104 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
106 "queue number out of range: %d, must be %d to %d\n",
107 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
108 IWLAGN_FIRST_AMPDU_QUEUE +
109 priv->cfg->base_params->num_of_ampdu_queues - 1);
113 /* Modify device's station table to Tx this TID */
114 return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
117 static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
118 struct ieee80211_tx_info *info,
119 __le16 fc, __le32 *tx_flags)
121 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
122 info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
123 info->flags & IEEE80211_TX_CTL_AMPDU)
124 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
128 * handle build REPLY_TX command notification.
/*
 * iwlagn_tx_cmd_build_basic - fill the common REPLY_TX command fields:
 * ACK/TSF/BA flags, station id, TID, sequence-control handling and the
 * power-management frame timeout.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* request an ACK unless mac80211 marked the frame no-ack */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		/* probe responses get their timestamp filled by the device */
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		/* BlockAck requests need an immediate BA response */
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		/* connection-critical frames must not be pre-empted by BT */
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: TID comes from the QoS control field */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc frames get a slightly longer PM frame timeout */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
		/* data frames: no PM frame timeout */
		tx_cmd->timeout.pm_frame_timeout = 0;

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
189 #define RTS_DFAULT_RETRY_LIMIT 60
/*
 * iwlagn_tx_cmd_build_rate - set the retry limits and the rate/antenna
 * (rate_n_flags) field of the TX command for this frame.
 */
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets; never above the data retry limit */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * is out of range.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
		/* toggle antenna for load balancing across all valid ones */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
/*
 * iwlagn_tx_cmd_build_hwcrypto - program the hardware-encryption fields
 * (cipher type and key material) of the TX command from the hw_key that
 * mac80211 attached to this frame.
 */
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			/* CCMP inside an A-MPDU needs its own flag */
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* TKIP phase-2 key is derived per packet */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");

	case WLAN_CIPHER_SUITE_WEP104:
		/* WEP104 additionally sets the 128-bit key flag, then
		 * shares the WEP40 setup below */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key is stored offset by 3 bytes in the key array */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);

		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
315 * start REPLY_TX command process
/*
 * iwlagn_tx_skb - start the REPLY_TX command process for a single frame.
 *
 * Selects the RXON context, destination station and TX queue for the
 * skb, builds the TX command (basic flags, hw crypto, rate) and hands
 * it to the transport layer.  Frames are dropped (goto drop_*) on
 * rfkill, invalid station, or transport failure.
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	/* default to the BSS context; may be overridden below */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_tx_cmd *tx_cmd;
	/*
	 * If the frame needs to go out off-channel, then
	 * we'll have put the PAN context to that channel,
	 * so make the frame go out there.
	 */
	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	else if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		/* radio is switched off - nothing can be transmitted */
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;

		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			goto drop_unlock_priv;

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
		/* otherwise map the mac80211 queue to a hardware AC queue */
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
			goto drop_unlock_sta;

		/* driver keeps the sequence counter for QoS data frames */
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;

	tx_cmd = trans_get_tx_cmd(priv, txq_id);
	if (unlikely(!tx_cmd))
		goto drop_unlock_sta;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	if (trans_tx(priv, skb, tx_cmd, txq_id, fc, is_agg, ctx))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc)) {
		/* account the frame; advance seq only on the final fragment */
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	spin_unlock(&priv->sta_lock);

	spin_unlock_irqrestore(&priv->lock, flags);
490 * Find first available (lowest unused) Tx Queue, mark it "active".
491 * Called only when finding queue for aggregation.
492 * Should never return anything < 7, because they should already
493 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
495 static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
499 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
500 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
/*
 * iwlagn_tx_agg_start - start a TX aggregation (BA) session for <sta, tid>.
 *
 * Allocates a free aggregation queue, records the starting sequence
 * number in *ssn, and either completes the ADDBA handshake immediately
 * (HW queue already empty) or defers it until the queue drains
 * (IWL_EMPTYING_HW_QUEUE_ADDBA).
 */
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
	if (unlikely(tid >= MAX_TID_COUNT))

	/* refuse to start a session that is already starting/running */
	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");

	txq_id = iwlagn_txq_ctx_activate_free(priv);
		IWL_ERR(priv, "No free aggregation queue available\n");

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	/* BA window starts at the next sequence number for this TID */
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.tx_fifo = tx_fifo;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		/* queue already drained: aggregation can start right away */
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		/* wait for the queue to empty before turning aggregation on */
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	spin_unlock_irqrestore(&priv->sta_lock, flags);
/*
 * iwlagn_tx_agg_stop - tear down the TX aggregation session for <sta, tid>.
 *
 * If the aggregation HW queue still holds frames, only marks the session
 * IWL_EMPTYING_HW_QUEUE_DELBA and lets the reclaim path finish the
 * teardown; otherwise disables the queue and notifies mac80211.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;

	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	/* sequence number of the first frame after the BA window */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");

		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* defer the teardown until the reclaim path drains it */
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_HT(priv, "HW queue is empty\n");

	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	trans_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
/*
 * iwlagn_txq_check_empty - called from the reclaim path; if an ADDBA or
 * DELBA flow was waiting for the HW queue to drain, finish it now.
 *
 * Caller must hold priv->sta_lock (asserted below).
 */
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			/* deferred teardown: disable queue, tell mac80211 */
			trans_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			/* deferred session start: aggregation is now on */
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
683 static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
684 struct iwl_rxon_context *ctx,
687 struct ieee80211_sta *sta;
688 struct iwl_station_priv *sta_priv;
691 sta = ieee80211_find_sta(ctx->vif, addr1);
693 sta_priv = (void *)sta->drv_priv;
694 /* avoid atomic ops if this isn't a client */
695 if (sta_priv->client &&
696 atomic_dec_return(&sta_priv->pending_frames) == 0)
697 ieee80211_sta_block_awake(priv->hw, sta, false);
702 static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
705 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
708 iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
710 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
/*
 * iwlagn_tx_queue_reclaim - free TFDs of queue txq_id up to (but not
 * including) index, reporting each frame's status to mac80211, and
 * return the number of reclaimed QoS-data frames.
 */
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	struct ieee80211_hdr *hdr;

	/* reject indices outside the ring or not currently in use */
	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);

	/* walk read_ptr forward until it reaches index, freeing each TFD */
	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))

		/* queues >= first-AMPDU-queue carry aggregated traffic */
		iwlagn_tx_status(priv, tx_info,
				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

		iwlagn_txq_inval_byte_cnt_tbl(priv, txq);

		iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
753 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
755 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
756 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
						struct iwl_ht_agg *agg,
						struct iwl_compressed_ba_resp *ba_resp)

	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	/* an unexpected BA (e.g. after a session raced closed) is only
	 * worth an error message, not processing */
	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);

	/*
	 * Check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap
	 */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
	sent_bitmap = bitmap & agg->bitmap;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
			ba_resp->txed, ba_resp->txed_2_done);

	/* Find the first ACKed frame to store the TX status */
	while (sent_bitmap && !(sent_bitmap & 1)) {
		agg->start_idx = (agg->start_idx + 1) & 0xff;

	/* report aggregated TX status on the first ACKed frame's skb */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = ba_resp->txed_2_done;
	info->status.ampdu_len = ba_resp->txed;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
823 * translate ucode response to mac80211 tx status control values
825 void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
826 struct ieee80211_tx_info *info)
828 struct ieee80211_tx_rate *r = &info->control.rates[0];
830 info->antenna_sel_tx =
831 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
832 if (rate_n_flags & RATE_MCS_HT_MSK)
833 r->flags |= IEEE80211_TX_RC_MCS;
834 if (rate_n_flags & RATE_MCS_GF_MSK)
835 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
836 if (rate_n_flags & RATE_MCS_HT40_MSK)
837 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
838 if (rate_n_flags & RATE_MCS_DUP_MSK)
839 r->flags |= IEEE80211_TX_RC_DUP_DATA;
840 if (rate_n_flags & RATE_MCS_SGI_MSK)
841 r->flags |= IEEE80211_TX_RC_SHORT_GI;
842 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
846 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
848 * Handles block-acknowledge notification from device, which reports success
849 * of frames sent via aggregation.
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	/* guard against a bogus queue number from the device */
	if (scd_flow >= priv->hw_params.max_txq_num) {
			"BUG_ON scd_flow is bigger than number of queues\n");

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   (u8 *) &ba_resp->sta_addr_lo32,
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		/* wake the software queue once enough room is available,
		 * unless we're still draining it for a DELBA */
		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);

	spin_unlock_irqrestore(&priv->sta_lock, flags);
936 #ifdef CONFIG_IWLWIFI_DEBUG
937 const char *iwl_get_tx_fail_reason(u32 status)
939 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
940 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
942 switch (status & TX_STATUS_MSK) {
943 case TX_STATUS_SUCCESS:
945 TX_STATUS_POSTPONE(DELAY);
946 TX_STATUS_POSTPONE(FEW_BYTES);
947 TX_STATUS_POSTPONE(BT_PRIO);
948 TX_STATUS_POSTPONE(QUIET_PERIOD);
949 TX_STATUS_POSTPONE(CALC_TTAK);
950 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
951 TX_STATUS_FAIL(SHORT_LIMIT);
952 TX_STATUS_FAIL(LONG_LIMIT);
953 TX_STATUS_FAIL(FIFO_UNDERRUN);
954 TX_STATUS_FAIL(DRAIN_FLOW);
955 TX_STATUS_FAIL(RFKILL_FLUSH);
956 TX_STATUS_FAIL(LIFE_EXPIRE);
957 TX_STATUS_FAIL(DEST_PS);
958 TX_STATUS_FAIL(HOST_ABORTED);
959 TX_STATUS_FAIL(BT_RETRY);
960 TX_STATUS_FAIL(STA_INVALID);
961 TX_STATUS_FAIL(FRAG_DROPPED);
962 TX_STATUS_FAIL(TID_DISABLE);
963 TX_STATUS_FAIL(FIFO_FLUSHED);
964 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
965 TX_STATUS_FAIL(PASSIVE_NO_RX);
966 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
971 #undef TX_STATUS_FAIL
972 #undef TX_STATUS_POSTPONE
974 #endif /* CONFIG_IWLWIFI_DEBUG */