/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wl12xx.h"
#include "debug.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"
#include "event.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
	if (id >= ACX_TX_DESCRIPTORS)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

static void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response, so it won't get de-authed by the FW
	 * for transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps, single_sta;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_sta = (wl->active_sta_count == 1);

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}

u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			 struct sk_buff *skb)
{
	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

	if (control->control.sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)
				control->control.sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_mgmt(hdr->frame_control))
			return wlvif->ap.global_hlid;
		else
			return wlvif->ap.bcast_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
		return wl->system_hlid;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);

	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
	    !ieee80211_is_auth(hdr->frame_control) &&
	    !ieee80211_is_assoc_req(hdr->frame_control))
		return wlvif->sta.hlid;
	else
		return wlvif->dev_hlid;
}

static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
						 unsigned int packet_length)
{
	if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
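
/*
 * Note on the helper above: ALIGN() rounds up to the next multiple of the
 * given granularity. For example, with a 256-byte bus block size (the value
 * mentioned for wl128x over SDIO in wl1271_prepare_tx_frame below), a
 * 1400-byte packet is padded to 1536 bytes; with the small word-alignment
 * fallback it only needs rounding up to a multiple of 4.
 */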

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 len;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks = wl->tx_spare_blocks;
	bool is_dummy = false;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	/* approximate the number of blocks required for this packet
	   in the firmware */
	len = wl12xx_calc_packet_alignment(wl, total_len);

	/* in case of a dummy packet, use default amount of spare mem blocks */
	if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
		is_dummy = true;
		spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
	}

	total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
		       spare_blocks;
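	/*
	 * The expression above is a ceiling division: it is equivalent to
	 * DIV_ROUND_UP(len, TX_HW_BLOCK_SIZE) plus the spare blocks reserved
	 * for this packet, so a partially used firmware memory block is
	 * counted as a whole block.
	 */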

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		/* HW descriptor fields change between wl127x and wl128x */
		if (wl->chip.id == CHIP_ID_1283_PG20) {
			desc->wl128x_mem.total_mem_blocks = total_blocks;
		} else {
			desc->wl127x_mem.extra_blocks = spare_blocks;
			desc->wl127x_mem.total_mem_blocks = total_blocks;
		}

		ret = 0;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (!is_dummy && wlvif &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    test_bit(hlid, wlvif->ap.sta_hlid_map))
			wl->links[hlid].allocated_pkts++;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int aligned_len, ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that differs from the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		/* configure the tx attributes */
		tx_attr = wlvif->session_counter <<
			  TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (control->control.sta)
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid)
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	desc->reserved = 0;

	aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);

	if (wl->chip.id == CHIP_ID_1283_PG20) {
		desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
		desc->length = cpu_to_le16(aligned_len >> 2);

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d",
			     hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl128x_mem.total_mem_blocks);
	} else {
		int pad;

		/* Store the aligned length in terms of words */
		desc->length = cpu_to_le16(aligned_len >> 2);

		/* calculate number of padding bytes */
		pad = aligned_len - skb->len;
		tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
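		/*
		 * Example, assuming the 4-byte WL1271_TX_ALIGN_TO granularity
		 * used on wl127x: an 82-byte frame is rounded up to 84 bytes,
		 * so pad == 2 and the firmware knows how many trailing pad
		 * bytes of the last word to discard.
		 */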

		wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
			     "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
			     hlid, tx_attr,
			     le16_to_cpu(desc->length),
			     le16_to_cpu(desc->life_time),
			     desc->wl127x_mem.total_mem_blocks);
	}

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;
	bool is_dummy;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	/* TODO: handle dummy packets on multi-vifs */
	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}
	}

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wl12xx_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rates are indicated on bits 16 - 23 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
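	/*
	 * After the shift above, bit 0 of rate_set corresponds to MCS0
	 * (bit 16 of the original rate set, since the first loop already
	 * shifted out the legacy-rate bits), so the loop below can walk
	 * MCS0..MCS7 directly.
	 */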

	for (bit = 0; bit < 8; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	unsigned long flags;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (test_bit(i, &wl->stopped_queues_map) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			spin_lock_irqsave(&wl->wl_lock, flags);
			ieee80211_wake_queue(wl->hw,
					     wl1271_tx_get_mac80211_queue(i));
			clear_bit(i, &wl->stopped_queues_map);
			spin_unlock_irqrestore(&wl->wl_lock, flags);
		}
	}
}

static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the fewest allocated packets for it
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (!skb_queue_empty(&queues[ac]) &&
		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	if (q == -1)
		return NULL;

	return &queues[q];
}

static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
					      struct wl1271_link *lnk)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct sk_buff_head *queue;

	queue = wl1271_select_queue(wl, lnk->tx_queue);
	if (!queue)
		return NULL;

	skb = skb_dequeue(queue);
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb)
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

	/* do a new pass over the wlvif list */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

			/*
			 * No need to continue after last_wlvif. The previous
			 * pass should have found it.
			 */
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
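		/*
		 * Rewinding last_tx_hlid by one (modulo WL12XX_MAX_LINKS)
		 * makes the next dequeue round start at this same link, so
		 * the packet we just put back is picked up first. E.g. if
		 * WL12XX_MAX_LINKS were 12 and hlid were 0, last_tx_hlid
		 * becomes 11 and the next scan (which starts at
		 * last_tx_hlid + 1) begins again at link 0.
		 */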
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	int hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;

		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return;

	while ((skb = wl1271_skb_dequeue(wl))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				     buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
			     buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_tx_work_locked(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN &&
	    rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX)
		flags |= IEEE80211_TX_RC_MCS;
	if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
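		/*
		 * Example: if the firmware LSB wrapped from 0xfe to 0x02,
		 * (0x02 - 0xfe) & 0xff == 4, so the running sequence counter
		 * still advances by the correct amount across the wrap.
		 */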
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
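		/*
		 * tx_results_count is a free-running counter, so masking it
		 * with TX_HW_RESULT_QUEUE_LEN_MASK wraps it into the circular
		 * result queue shared with the firmware (this assumes the
		 * queue length is a power of two and the mask is length - 1).
		 */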

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}

void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			wl1271_free_sta(wl, wlvif, i);
		else
			wlvif->sta.ba_rx_bitmap = 0;

		wl->links[i].allocated_pkts = 0;
		wl->links[i].prev_freed_pkts = 0;
	}
	wlvif->last_tx_hlid = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

/* TX flush timeout, in microseconds */
#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;
	int i;

	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));
		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	wl1271_warning("Unable to flush all TX buffers, timed out.");

	/* forcibly flush all Tx buffers on our queues */
	mutex_lock(&wl->mutex);
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);
	mutex_unlock(&wl->mutex);
}

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;
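	/*
	 * __ffs() returns the index of the lowest set bit, so this picks the
	 * slowest enabled rate in the set; e.g. for rate_set == 0xc (bits 2
	 * and 3 set) the result is BIT(2).
	 */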
	return BIT(__ffs(rate_set));
}