3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
/* How many times to retry the chip-wakeup/boot sequence before giving up. */
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. A value of -1 (or NULL for the string)
 * means "not set on the kernel command line"; in that case the driver's
 * compiled-in defaults are kept (see wlcore_adjust_conf()).
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/*
 * Forward declarations for helpers defined later in this file and used
 * by the recovery / teardown paths above their definitions.
 */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the firmware that the STA link is now fully connected/authorized.
 * Only valid on a station-type vif that is already associated; the
 * STA_STATE_SENT flag guards against sending the command twice.
 * NOTE(review): this extract omits lines (braces, early-return values,
 * error handling) present in the full source — do not assume completeness.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change notifier: cache the new DFS region on the
 * device and push the updated regulatory domain to the firmware.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
/*
 * Enable/disable RX streaming in the firmware via ACX and mirror the
 * result in the vif's RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex.
 * NOTE(review): the enable/disable branch structure is not fully visible
 * in this extract.
 */
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has been changed or rx_streaming should be disabled
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: enable RX streaming for a vif, then arm the inactivity
 * timer that will later schedule the disable work. Bails out early when
 * streaming is already on, the STA is not associated, or streaming is
 * not wanted (neither "always" nor soft-gemini coexistence active).
 * Wakes the chip from ELP around the firmware command.
 */
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
/*
 * Work item: disable RX streaming for a vif (queued by the inactivity
 * timer). No-op if streaming was never started. Wakes the chip from ELP
 * around the firmware command and lets it sleep again afterwards.
 */
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
/*
 * RX-streaming inactivity timer callback (timer context — cannot take
 * wl->mutex here, so it only queues the disable work item).
 */
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
/*
 * Re-arm the TX watchdog: cancel any pending expiry and reschedule it a
 * full tx_watchdog_timeout from now. Skipped when no TX blocks are
 * allocated in the FW (nothing to watch).
 */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Work item: push a station rate-control update to the hardware layer.
 * Skipped unless the core is fully up (WLCORE_STATE_ON); wakes the chip
 * from ELP around the HW call.
 */
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
/*
 * TX watchdog expiry handler. If no TX completion has been seen for a
 * whole watchdog period, decide whether that is benign (ROC in
 * progress, scan in progress, or an AP buffering frames for sleeping
 * stations — in those cases just re-arm the timer) or a genuine FW TX
 * stall, in which case firmware recovery is triggered.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
/* None of the benign explanations apply — assume the FW is stuck. */
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Apply the optional module parameters (fwlog_mem_blocks, fwlog_param,
 * bug_on_recovery, no_recovery) on top of the compiled-in configuration.
 * Parameters left at their sentinel (-1 / NULL) leave the defaults alone.
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
/* Out-of-range request: keep the default and warn. */
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* fwlog_param selects the firmware-logger mode/output. */
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate per-link power-save state on an AP interface based on the
 * FW's PS bitmap and the number of packets pending in FW for the link:
 * end high-level PS when the STA is awake or has few pending packets,
 * start it when the STA sleeps with many pending packets (unless it is
 * the only connected link).
 */
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW link-PS bitmap from the status block and run
 * PS regulation for every station link on this AP vif.
 */
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts);
/*
 * Read the raw FW status block from the chip, convert it to host
 * format, and reconcile the driver's TX accounting with it:
 * per-queue and per-link freed-packet counters (8-bit wrap-safe),
 * total freed blocks (32-bit wrap-safe), watchdog re-arm/cancel,
 * available-block watermark, AP link PS status, and the host-chipset
 * time offset.
 * NOTE(review): several lines (declarations, error checks, closing
 * braces) are missing from this extract relative to the full source.
 */
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues: hand deferred RX frames to mac80211
 * and report TX status for deferred transmitted skbs.
 */
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item: repeatedly flush the deferred RX/TX queues until the
 * deferred RX queue stays empty.
 */
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on interrupt-servicing iterations per invocation. */
526 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Core interrupt servicing loop, called with wl->mutex held. Reads the
 * FW status, then dispatches on the pending interrupt bits: HW/SW
 * watchdog (trigger recovery), RX data (receive frames, push TX, reap
 * TX completions, bound the deferred queues), and event mailboxes A/B.
 * Loops up to WL1271_IRQ_MAX_LOOPS times, except on edge-triggered IRQ
 * platforms where only one pass is allowed.
 * NOTE(review): error-handling lines and loop-exit paths are missing
 * from this extract.
 */
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup, defers the work
 * while suspended (marks it pending, disables the IRQ and raises a PM
 * wakeup event), otherwise cancels redundant TX work, runs the locked
 * servicing loop, queues recovery on failure, and re-queues TX work if
 * frames are still pending.
 */
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work")
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
/*
 * Accumulator for the active-interface iteration below; tracks whether
 * the interface of interest (cur_vif) is among the running ones.
 */
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
709 bool cur_vif_running;
/*
 * Iterator callback for ieee80211_iterate_active_interfaces(): flags
 * cur_vif as running when encountered (and, in the full source, counts
 * the interfaces).
 */
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Populate *data by iterating all active mac80211 interfaces, noting
 * whether cur_vif is currently running.
 */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
/*
 * Select the firmware image (PLT, multi-role, or single-role based on
 * the cached vif count), fetch it via request_firmware(), and cache a
 * vmalloc'd copy in wl->fw / wl->fw_len. A no-op when the requested
 * type is already loaded. fw_type is reset to NONE before the copy so a
 * partial load is never mistaken for a valid image.
 */
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = request_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
/*
 * Kick off firmware recovery: move the core to RESTARTING, mark
 * recovery in progress, wake the chip, mask further interrupts and
 * queue the recovery work. The state check avoids queuing recovery
 * recursively while one is already underway.
 */
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to the driver's fwlog
 * buffer, clamped so the buffer never exceeds PAGE_SIZE. Returns the
 * number of bytes actually copied (in the full source).
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
/*
 * After a firmware panic, walk the FW log's linked list of memory
 * blocks on-chip and copy their contents into the host fwlog buffer,
 * then wake any reader waiting on the sysfs fwlog entry. Temporarily
 * switches the chip partition to address each block and restores the
 * previous partition at the end.
 * NOTE(review): error-path lines and the closing of the do/while loop
 * are missing from this extract.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
/* In continuous mode skip the per-block RX descriptor as well. */
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
/*
 * Preserve a station's freed-packet (sequence number) counter across a
 * recovery: copy the link's total into the station's private data and,
 * while recovery is in progress, pad it to cover packets transmitted
 * but not yet reflected in FW status (larger pad for GEM ciphering).
 */
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Lookup the ieee80211_sta for (vif, addr) and, if found, save its
 * freed-packet counter via wlcore_save_freed_pkts(). Warns on an
 * invalid hlid or an all-zero MAC address.
 */
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log recovery diagnostics: FW version, the FW program counter and the
 * raw interrupt status, plus a running recovery count. Switches to the
 * BOOT partition to read the registers and restores the WORK partition
 * afterwards.
 */
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Firmware recovery work item. Unless the recovery was intended (e.g.
 * a deliberate FW change), dump the FW panic log and diagnostics;
 * honor the bug_on_recovery and no_recovery configuration knobs. Then
 * stop the TX queues, save per-STA sequence counters, tear down every
 * vif, stop the core, and ask mac80211 to restart the hardware.
 */
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control register. */
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
/*
 * Power the chip on: settle delay, power-on, another settle delay, I/O
 * reset, switch to the BOOT partition, then wake the FW from ELP.
 * Powers back off on failure (error label at the bottom).
 */
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
1093 wl1271_power_off(wl);
/*
 * Full chip wake-up sequence: power on, configure the bus block size
 * (dropping the TX blocksize-align quirk when unsupported), allocate
 * the status buffers, and fetch the appropriate firmware image.
 */
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl);
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line test) mode: only legal from the OFF state.
 * Retries the chip wakeup up to WL1271_BOOT_RETRIES times; skips
 * plt_init for the "chip awake" PLT mode. On success, marks the core
 * ON and exports the HW/FW versions to wiphy. On total failure the PLT
 * mode is reset to OFF.
 */
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
1138 static const char* const PLT_MODE[] = {
1147 mutex_lock(&wl->mutex);
1149 wl1271_notice("power up");
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1158 /* Indicate to lower levels that we are now in PLT mode */
1160 wl->plt_mode = plt_mode;
1164 ret = wl12xx_chip_wakeup(wl, true);
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1177 wl->chip.fw_ver_str);
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
1187 wl1271_power_off(wl);
1191 wl->plt_mode = PLT_OFF;
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1196 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * before the state changes so the handler cannot race with teardown;
 * pending works are flushed/cancelled outside the mutex, and finally
 * the device is powered off and the state reset to OFF / PLT_OFF.
 */
1201 int wl1271_plt_stop(struct wl1271 *wl)
1205 wl1271_notice("power down");
1208 * Interrupts must be disabled before setting the state to OFF.
1209 * Otherwise, the interrupt handler might be called and exit without
1210 * reading the interrupt status.
1212 wlcore_disable_interrupts(wl);
1213 mutex_lock(&wl->mutex);
1215 mutex_unlock(&wl->mutex);
1218 * This will not necessarily enable interrupts as interrupts
1219 * may have been disabled when op_stop was called. It will,
1220 * however, balance the above call to disable_interrupts().
1222 wlcore_enable_interrupts(wl);
1224 wl1271_error("cannot power down because not in PLT "
1225 "state: %d", wl->state);
1230 mutex_unlock(&wl->mutex);
1232 wl1271_flush_deferred_work(wl);
1233 cancel_work_sync(&wl->netstack_work);
1234 cancel_work_sync(&wl->recovery_work);
1235 cancel_delayed_work_sync(&wl->elp_work);
1236 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1238 mutex_lock(&wl->mutex);
1239 wl1271_power_off(wl);
1241 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 wl->state = WLCORE_STATE_OFF;
1244 wl->plt_mode = PLT_OFF;
1246 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Resolves the vif/queue/hlid for the skb and
 * enqueues it on the per-link TX queue. Frames with no vif, an invalid
 * link, or a fully-stopped queue are dropped (watermark stops are
 * soft, so those frames pass). When a vif's queue crosses the high
 * watermark the queue is stopped, and TX work is scheduled unless the
 * FW is busy or TX handling is already pending in the IRQ path.
 */
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 struct ieee80211_tx_control *control,
1254 struct sk_buff *skb)
1256 struct wl1271 *wl = hw->priv;
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ieee80211_vif *vif = info->control.vif;
1259 struct wl12xx_vif *wlvif = NULL;
1260 unsigned long flags;
1265 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 ieee80211_free_txskb(hw, skb);
1270 wlvif = wl12xx_vif_to_data(vif);
1271 mapping = skb_get_queue_mapping(skb);
1272 q = wl1271_tx_get_queue(mapping);
1274 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1276 spin_lock_irqsave(&wl->wl_lock, flags);
1279 * drop the packet if the link is invalid or the queue is stopped
1280 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 * allow these packets through.
1283 if (hlid == WL12XX_INVALID_LINK_ID ||
1284 (!test_bit(hlid, wlvif->links_map)) ||
1285 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 ieee80211_free_txskb(hw, skb);
1293 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1295 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1297 wl->tx_queue_count[q]++;
1298 wlvif->tx_queue_count[q]++;
1301 * The workqueue is slow to process the tx_queue and we need stop
1302 * the queue here, otherwise the queue will get too long.
1304 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 wlcore_stop_queue_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 * The chip specific setup must run before the first TX packet -
1314 * before that, the tx_work will not be initialized!
1317 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 ieee80211_queue_work(wl->hw, &wl->tx_work);
1322 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it runs
 * low on RX memory blocks. Skipped if one is already pending. If the
 * FW TX path is idle the TX work runs inline; otherwise the threaded
 * IRQ handler will schedule it.
 */
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1327 unsigned long flags;
1330 /* no need to queue a new dummy packet if one is already pending */
1331 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1334 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1336 spin_lock_irqsave(&wl->wl_lock, flags);
1337 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 wl->tx_queue_count[q]++;
1339 spin_unlock_irqrestore(&wl->wl_lock, flags);
1341 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 return wlcore_tx_work_locked(wl);
1346 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 * interrupt handler function
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet once at init time: a zeroed QoS-less NULLFUNC
 * data frame (ToDS) padded to TOTAL_TX_DUMMY_PACKET_SIZE, with room
 * reserved up front for the TX HW descriptor. Marked with the
 * management TID and queue 0.
 */
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1370 wl1271_warning("Failed to allocate a dummy packet skb");
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * wl1271_validate_wowlan_pattern - check a WoWLAN pattern against FW limits.
 * Walks the pattern mask counting contiguous masked-byte segments ("fields"
 * in FW terms) and their serialized size, rejecting patterns that exceed
 * WL1271_RX_FILTER_MAX_FIELDS or WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 * NOTE(review): numbered listing with branch/return lines elided.
 */
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1399 int num_fields = 0, in_field = 0, fields_size = 0;
1400 int i, pattern_len = 0;
/* a pattern without a mask cannot be translated into FW fields */
1403 wl1271_warning("No mask in WoWLAN pattern");
1408 * The pattern is broken up into segments of bytes at different offsets
1409 * that need to be checked by the FW filter. Each segment is called
1410 * a field in the FW API. We verify that the total number of fields
1411 * required for this pattern won't exceed FW limits (8)
1412 * as well as the total fields buffer won't exceed the FW limit.
1413 * Note that if there's a pattern which crosses Ethernet/IP header
1414 * boundary a new field is required.
1416 for (i = 0; i < p->pattern_len; i++) {
1417 if (test_bit(i, (unsigned long *)p->mask)) {
/* crossing the Ethernet header boundary forces a field split */
1422 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1424 fields_size += pattern_len +
1425 RX_FILTER_FIELD_OVERHEAD;
1433 fields_size += pattern_len +
1434 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1441 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 wl1271_warning("RX Filter too complex. Too many segments");
1450 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 wl1271_warning("RX filter pattern is too big");
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 u16 offset, u8 flags,
1478 const u8 *pattern, u8 len)
1480 struct wl12xx_rx_filter_field *field;
1482 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 wl1271_warning("Max fields per RX filter. can't alloc another");
1487 field = &filter->fields[filter->num_fields];
1489 field->pattern = kzalloc(len, GFP_KERNEL);
1490 if (!field->pattern) {
1491 wl1271_warning("Failed to allocate RX filter pattern");
1495 filter->num_fields++;
1497 field->offset = cpu_to_le16(offset);
1498 field->flags = flags;
1500 memcpy(field->pattern, pattern, len);
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1507 int i, fields_size = 0;
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 struct wl12xx_rx_filter_field *field;
1523 for (i = 0; i < filter->num_fields; i++) {
1524 field = (struct wl12xx_rx_filter_field *)buf;
1526 field->offset = filter->fields[i].offset;
1527 field->flags = filter->fields[i].flags;
1528 field->len = filter->fields[i].len;
1530 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 buf += sizeof(struct wl12xx_rx_filter_field) -
1532 sizeof(u8 *) + field->len;
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
/*
 * Converts a validated cfg80211 WoWLAN pattern into a wl12xx RX filter:
 * each run of contiguous masked bytes becomes one filter field, split at
 * the Ethernet/IP header boundary, with offsets rebased per header.
 * NOTE(review): numbered listing with lines elided (i/j/len bookkeeping
 * and error-path returns are not shown).
 */
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 struct wl12xx_rx_filter **f)
1545 struct wl12xx_rx_filter *filter;
1549 filter = wl1271_rx_filter_alloc();
1551 wl1271_warning("Failed to alloc rx filter");
/* skip unmasked bytes, then extend the segment over masked ones */
1557 while (i < p->pattern_len) {
1558 if (!test_bit(i, (unsigned long *)p->mask)) {
1563 for (j = i; j < p->pattern_len; j++) {
1564 if (!test_bit(j, (unsigned long *)p->mask))
/* force a split where the segment crosses the Ethernet header */
1567 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1574 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1576 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1582 ret = wl1271_rx_filter_alloc_field(filter,
1585 &p->pattern[i], len);
/* matching frames should wake the host */
1592 filter->action = FILTER_SIGNAL;
/* error path: release the partially-built filter */
1598 wl1271_rx_filter_free(filter);
/*
 * wl1271_configure_wowlan - program FW RX filtering from WoWLAN config.
 * With no patterns (or wow->any), filtering is disabled and all FW
 * filters are cleared.  Otherwise all patterns are validated first, the
 * FW state is reset, each pattern is converted and enabled as a filter,
 * and the default action is set to drop non-matching frames.
 * NOTE(review): numbered listing with goto/out lines elided.
 */
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 struct cfg80211_wowlan *wow)
/* no patterns or wake-on-anything: turn filtering off entirely */
1609 if (!wow || wow->any || !wow->n_patterns) {
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1615 ret = wl1271_rx_filter_clear_all(wl);
1622 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1625 /* Validate all incoming patterns before clearing current FW state */
1626 for (i = 0; i < wow->n_patterns; i++) {
1627 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1629 wl1271_warning("Bad wowlan pattern %d", i);
/* reset FW filter state before installing the new set */
1634 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 ret = wl1271_rx_filter_clear_all(wl);
1642 /* Translate WoWLAN patterns into filters */
1643 for (i = 0; i < wow->n_patterns; i++) {
1644 struct cfg80211_pkt_pattern *p;
1645 struct wl12xx_rx_filter *filter = NULL;
1647 p = &wow->patterns[i];
1649 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1651 wl1271_warning("Failed to create an RX filter from "
1652 "wowlan pattern %d", i);
1656 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the local filter is always freed */
1658 wl1271_rx_filter_free(filter);
/* anything not matching an installed filter is dropped while suspended */
1663 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * wl1271_configure_suspend_sta - prepare a station vif for suspend:
 * install WoWLAN filters and, when the suspend-time wake-up parameters
 * differ from the runtime ones, reprogram the FW wake-up conditions.
 * NOTE(review): numbered listing with early-return/out lines elided.
 */
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
/* nothing to configure for an unassociated station */
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1678 ret = wl1271_configure_wowlan(wl, wow)
;
/* skip the ACX when suspend settings equal the runtime settings */
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * wl1271_configure_suspend_ap - prepare an AP vif for suspend: enable
 * beacon filtering (no host wake-ups per beacon) and install the WoWLAN
 * RX filters.  NOTE(review): numbered listing with goto/out lines elided.
 */
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
/* nothing to do unless the AP role has actually started */
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 ret = wl1271_configure_wowlan(wl, wow);
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * wl1271_configure_resume - undo the suspend-time configuration of a vif:
 * drop the WoWLAN filters, restore runtime wake-up conditions for
 * stations, and disable beacon filtering for APs.
 * NOTE(review): numbered listing with early-return lines elided.
 */
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1735 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* only station and AP roles were touched at suspend time */
1738 if ((!is_ap) && (!is_sta))
1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow config removes the filters installed at suspend */
1745 wl1271_configure_wowlan(wl, NULL);
/* restore runtime wake-up conditions only if suspend changed them */
1748 if ((wl->conf.conn.suspend_wake_up_event ==
1749 wl->conf.conn.wake_up_event) &&
1750 (wl->conf.conn.suspend_listen_interval ==
1751 wl->conf.conn.listen_interval))
1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 wl->conf.conn.wake_up_event,
1756 wl->conf.conn.listen_interval);
1759 wl1271_error("resume: wake up conditions failed: %d",
/* AP: re-enable delivery of beacons to the host */
1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * wl1271_op_suspend - mac80211 suspend callback (WoW path).
 * Configures every vif for wake-on-wireless, quiets FW notifications,
 * then flushes all pending work and disarms the TX watchdog so no
 * driver activity runs while the host sleeps.
 * NOTE(review): numbered listing with error/out lines elided; the exact
 * error-path ordering is not fully visible here.
 */
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 struct cfg80211_wowlan *wow)
1770 struct wl1271 *wl = hw->priv;
1771 struct wl12xx_vif *wlvif;
1774 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 /* we want to perform the recovery before suspending */
1778 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 wl1271_warning("postponing suspend to perform recovery");
/* drain the TX path before reconfiguring the FW */
1783 wl1271_tx_flush(wl);
1785 mutex_lock(&wl->mutex);
1787 ret = wl1271_ps_elp_wakeup(wl);
1791 wl->wow_enabled = true;
1792 wl12xx_for_each_wlvif(wl, wlvif) {
1793 ret = wl1271_configure_suspend(wl, wlvif, wow);
1795 mutex_unlock(&wl->mutex);
1796 wl1271_warning("couldn't prepare device to suspend");
1801 /* disable fast link flow control notifications from FW */
1802 ret = wlcore_hw_interrupt_notify(wl, false);
1806 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1807 ret = wlcore_hw_rx_ba_filter(wl,
1808 !!wl->conf.conn.suspend_rx_ba_activity);
1813 wl1271_ps_elp_sleep(wl);
1814 mutex_unlock(&wl->mutex);
1817 wl1271_warning("couldn't prepare device to suspend");
1821 /* flush any remaining work */
1822 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1825 * disable and re-enable interrupts in order to flush
1828 wlcore_disable_interrupts(wl);
1831 * set suspended flag to avoid triggering a new threaded_irq
1832 * work. no need for spinlock as interrupts are disabled.
1834 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1836 wlcore_enable_interrupts(wl);
1837 flush_work(&wl->tx_work);
1838 flush_delayed_work(&wl->elp_work);
1841 * Cancel the watchdog even if above tx_flush failed. We will detect
1842 * it on resume anyway.
1844 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * wl1271_op_resume - mac80211 resume callback (WoW path).
 * Re-enables deferred irq work (running any work postponed while
 * suspended), restores per-vif runtime configuration, re-enables FW
 * notifications and rearms the TX watchdog on first TX.
 * NOTE(review): numbered listing with out-label lines elided.
 */
1849 static int wl1271_op_resume(struct ieee80211_hw *hw)
1851 struct wl1271 *wl = hw->priv;
1852 struct wl12xx_vif *wlvif;
1853 unsigned long flags;
1854 bool run_irq_work = false, pending_recovery;
1857 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1859 WARN_ON(!wl->wow_enabled);
1862 * re-enable irq_work enqueuing, and call irq_work directly if
1863 * there is a pending work.
1865 spin_lock_irqsave(&wl->wl_lock, flags);
1866 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1867 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1868 run_irq_work = true;
1869 spin_unlock_irqrestore(&wl->wl_lock, flags);
1871 mutex_lock(&wl->mutex);
1873 /* test the recovery flag before calling any SDIO functions */
1874 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1878 wl1271_debug(DEBUG_MAC80211,
1879 "run postponed irq_work directly");
1881 /* don't talk to the HW if recovery is pending */
1882 if (!pending_recovery) {
1883 ret = wlcore_irq_locked(wl);
/* irq handling failure: schedule a full HW recovery */
1885 wl12xx_queue_recovery_work(wl);
1888 wlcore_enable_interrupts(wl);
1891 if (pending_recovery) {
1892 wl1271_warning("queuing forgotten recovery on resume");
1893 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1897 ret = wl1271_ps_elp_wakeup(wl);
1901 wl12xx_for_each_wlvif(wl, wlvif) {
1902 wl1271_configure_resume(wl, wlvif);
/* re-enable fast link flow control notifications from FW */
1905 ret = wlcore_hw_interrupt_notify(wl, true);
1909 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1910 ret = wlcore_hw_rx_ba_filter(wl, false);
1915 wl1271_ps_elp_sleep(wl);
1918 wl->wow_enabled = false;
1921 * Set a flag to re-init the watchdog on the first Tx after resume.
1922 * That way we avoid possible conditions where Tx-complete interrupts
1923 * fail to arrive and we perform a spurious recovery.
1925 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1926 mutex_unlock(&wl->mutex);
1932 static int wl1271_op_start(struct ieee80211_hw *hw)
1934 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1937 * We have to delay the booting of the hardware because
1938 * we need to know the local MAC address before downloading and
1939 * initializing the firmware. The MAC address cannot be changed
1940 * after boot, and without the proper MAC address, the firmware
1941 * will not function properly.
1943 * The MAC address is first known when the corresponding interface
1944 * is added. That is where we will initialize the hardware.
/*
 * wlcore_op_stop_locked - power the chip down and reset driver state.
 * Caller holds wl->mutex.  The mutex is dropped temporarily while
 * synchronizing interrupts and cancelling work items (they may take the
 * mutex themselves), then re-taken for the state reset.
 * NOTE(review): numbered listing with brace/return lines elided.
 */
1950 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just balance a recovery-time interrupt-disable and leave */
1954 if (wl->state == WLCORE_STATE_OFF) {
1955 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1957 wlcore_enable_interrupts(wl);
1963 * this must be before the cancel_work calls below, so that the work
1964 * functions don't perform further work.
1966 wl->state = WLCORE_STATE_OFF;
1969 * Use the nosync variant to disable interrupts, so the mutex could be
1970 * held while doing so without deadlocking.
1972 wlcore_disable_interrupts_nosync(wl);
1974 mutex_unlock(&wl->mutex);
1976 wlcore_synchronize_interrupts(wl);
/* don't cancel the recovery work from inside a recovery */
1977 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1978 cancel_work_sync(&wl->recovery_work);
1979 wl1271_flush_deferred_work(wl);
1980 cancel_delayed_work_sync(&wl->scan_complete_work);
1981 cancel_work_sync(&wl->netstack_work);
1982 cancel_work_sync(&wl->tx_work);
1983 cancel_delayed_work_sync(&wl->elp_work);
1984 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1986 /* let's notify MAC80211 about the remaining pending TX frames */
1987 mutex_lock(&wl->mutex);
1988 wl12xx_tx_reset(wl);
1990 wl1271_power_off(wl);
1992 * In case a recovery was scheduled, interrupts were disabled to avoid
1993 * an interrupt storm. Now that the power is down, it is safe to
1994 * re-enable interrupts to balance the disable depth
1996 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1997 wlcore_enable_interrupts(wl);
/* reset all volatile driver state back to power-on defaults */
1999 wl->band = IEEE80211_BAND_2GHZ;
2002 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2003 wl->channel_type = NL80211_CHAN_NO_HT;
2004 wl->tx_blocks_available = 0;
2005 wl->tx_allocated_blocks = 0;
2006 wl->tx_results_count = 0;
2007 wl->tx_packets_count = 0;
2008 wl->time_offset = 0;
2009 wl->ap_fw_ps_map = 0;
2011 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2012 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2013 memset(wl->links_map, 0, sizeof(wl->links_map));
2014 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2015 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2016 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2017 wl->active_sta_count = 0;
2018 wl->active_link_count = 0;
2020 /* The system link is always allocated */
2021 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2022 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2023 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2026 * this is performed after the cancel_work calls and the associated
2027 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2028 * get executed before all these vars have been reset.
2032 wl->tx_blocks_freed = 0;
2034 for (i = 0; i < NUM_TX_QUEUES; i++) {
2035 wl->tx_pkts_freed[i] = 0;
2036 wl->tx_allocated_pkts[i] = 0;
2039 wl1271_debugfs_reset(wl);
/* free FW-status and TX-result buffers; re-allocated on next boot */
2041 kfree(wl->raw_fw_status);
2042 wl->raw_fw_status = NULL;
2043 kfree(wl->fw_status);
2044 wl->fw_status = NULL;
2045 kfree(wl->tx_res_if);
2046 wl->tx_res_if = NULL;
2047 kfree(wl->target_mem_map);
2048 wl->target_mem_map = NULL;
2051 * FW channels must be re-calibrated after recovery,
2052 * save current Reg-Domain channel configuration and clear it.
2054 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2055 sizeof(wl->reg_ch_conf_pending));
2056 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2059 static void wlcore_op_stop(struct ieee80211_hw *hw)
2061 struct wl1271 *wl = hw->priv;
2063 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2065 mutex_lock(&wl->mutex);
2067 wlcore_op_stop_locked(wl);
2069 mutex_unlock(&wl->mutex);
/*
 * wlcore_channel_switch_work - timeout handler for a pending channel
 * switch.  If the switch is still marked in progress when this fires,
 * report failure to mac80211 and tell the FW to abort the switch.
 * NOTE(review): numbered listing with out-label lines elided.
 */
2072 static void wlcore_channel_switch_work(struct work_struct *work)
2074 struct delayed_work *dwork;
2076 struct ieee80211_vif *vif;
2077 struct wl12xx_vif *wlvif;
/* recover the owning wlvif from the embedded delayed_work */
2080 dwork = container_of(work, struct delayed_work, work);
2081 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2084 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2086 mutex_lock(&wl->mutex);
2088 if (unlikely(wl->state != WLCORE_STATE_ON))
2091 /* check the channel switch is still ongoing */
2092 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2095 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = the switch did not complete successfully */
2096 ieee80211_chswitch_done(vif, false);
2098 ret = wl1271_ps_elp_wakeup(wl);
2102 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2104 wl1271_ps_elp_sleep(wl);
2106 mutex_unlock(&wl->mutex);
/*
 * wlcore_connection_loss_work - delayed worker that reports beacon/
 * connection loss to mac80211 if the station is still associated when
 * the grace period expires.
 * NOTE(review): numbered listing with out-label lines elided.
 */
2109 static void wlcore_connection_loss_work(struct work_struct *work)
2111 struct delayed_work *dwork;
2113 struct ieee80211_vif *vif;
2114 struct wl12xx_vif *wlvif;
2116 dwork = container_of(work, struct delayed_work, work);
2117 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2120 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2122 mutex_lock(&wl->mutex);
2124 if (unlikely(wl->state != WLCORE_STATE_ON))
2127 /* Call mac80211 connection loss */
/* already disassociated: nothing to report */
2128 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2131 vif = wl12xx_wlvif_to_vif(wlvif);
2132 ieee80211_connection_loss(vif);
2134 mutex_unlock(&wl->mutex);
/*
 * wlcore_pending_auth_complete_work - delayed worker that tears down the
 * remain-on-channel kept open for a pending station authentication once
 * the ROC timeout has genuinely elapsed.
 * NOTE(review): numbered listing with out-label lines elided.
 */
2137 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2139 struct delayed_work *dwork;
2141 struct wl12xx_vif *wlvif;
2142 unsigned long time_spare;
2145 dwork = container_of(work, struct delayed_work, work);
2146 wlvif = container_of(dwork, struct wl12xx_vif,
2147 pending_auth_complete_work);
2150 mutex_lock(&wl->mutex);
2152 if (unlikely(wl->state != WLCORE_STATE_ON))
2156 * Make sure a second really passed since the last auth reply. Maybe
2157 * a second auth reply arrived while we were stuck on the mutex.
2158 * Check for a little less than the timeout to protect from scheduler
/* 50ms slack guards against scheduler jitter around the timeout */
2161 time_spare = jiffies +
2162 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2163 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2166 ret = wl1271_ps_elp_wakeup(wl);
2170 /* cancel the ROC if active */
2171 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2173 wl1271_ps_elp_sleep(wl);
2175 mutex_unlock(&wl->mutex);
2178 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2180 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2181 WL12XX_MAX_RATE_POLICIES);
2182 if (policy >= WL12XX_MAX_RATE_POLICIES)
2185 __set_bit(policy, wl->rate_policies_map);
2190 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2192 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2195 __clear_bit(*idx, wl->rate_policies_map);
2196 *idx = WL12XX_MAX_RATE_POLICIES;
2199 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2201 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2202 WLCORE_MAX_KLV_TEMPLATES);
2203 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2206 __set_bit(policy, wl->klv_templates_map);
2211 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2213 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2216 __clear_bit(*idx, wl->klv_templates_map);
2217 *idx = WLCORE_MAX_KLV_TEMPLATES;
2220 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2222 switch (wlvif->bss_type) {
2223 case BSS_TYPE_AP_BSS:
2225 return WL1271_ROLE_P2P_GO;
2227 return WL1271_ROLE_AP;
2229 case BSS_TYPE_STA_BSS:
2231 return WL1271_ROLE_P2P_CL;
2233 return WL1271_ROLE_STA;
2236 return WL1271_ROLE_IBSS;
2239 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2241 return WL12XX_INVALID_ROLE_TYPE;
/*
 * wl12xx_init_vif_data - reset a wl12xx_vif to its pre-start defaults.
 * Derives the bss type from the mac80211 vif type, allocates the rate
 * policies / KLV template appropriate for the role, copies global config
 * into the per-vif fields and wires up the vif's work items and timer.
 * NOTE(review): numbered listing with brace/break/return lines elided.
 */
2244 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2246 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2249 /* clear everything but the persistent data */
2250 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2252 switch (ieee80211_vif_type_p2p(vif)) {
2253 case NL80211_IFTYPE_P2P_CLIENT:
/* fallthrough into the station case; a P2P client is a STA role */
2256 case NL80211_IFTYPE_STATION:
2257 wlvif->bss_type = BSS_TYPE_STA_BSS;
2259 case NL80211_IFTYPE_ADHOC:
2260 wlvif->bss_type = BSS_TYPE_IBSS;
2262 case NL80211_IFTYPE_P2P_GO:
/* fallthrough into the AP case; a P2P GO is an AP role */
2265 case NL80211_IFTYPE_AP:
2266 wlvif->bss_type = BSS_TYPE_AP_BSS;
2269 wlvif->bss_type = MAX_BSS_TYPE;
2273 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2274 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2275 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2277 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2278 wlvif->bss_type == BSS_TYPE_IBSS) {
2279 /* init sta/ibss data */
2280 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2281 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2282 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2283 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2284 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2285 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2286 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2287 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP role: broadcast/global links plus per-AC unicast rate policies */
2290 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2291 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2292 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2293 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2294 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2295 wl12xx_allocate_rate_policy(wl,
2296 &wlvif->ap.ucast_rate_idx[i]);
2297 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2299 * TODO: check if basic_rate shouldn't be
2300 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2301 * instead (the same thing for STA above).
2303 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2304 /* TODO: this seems to be used only for STA, check it */
2305 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2308 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2309 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2310 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2313 * mac80211 configures some values globally, while we treat them
2314 * per-interface. thus, on init, we have to copy them from wl
2316 wlvif->band = wl->band;
2317 wlvif->channel = wl->channel;
2318 wlvif->power_level = wl->power_level;
2319 wlvif->channel_type = wl->channel_type;
/* per-vif workers: rx streaming, rate-control updates, CS/loss/auth timers */
2321 INIT_WORK(&wlvif->rx_streaming_enable_work,
2322 wl1271_rx_streaming_enable_work);
2323 INIT_WORK(&wlvif->rx_streaming_disable_work,
2324 wl1271_rx_streaming_disable_work);
2325 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2326 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2327 wlcore_channel_switch_work);
2328 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2329 wlcore_connection_loss_work);
2330 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2331 wlcore_pending_auth_complete_work);
2332 INIT_LIST_HEAD(&wlvif->list);
2334 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2335 (unsigned long) wlvif);
/*
 * wl12xx_init_fw - boot the chip and firmware, retrying up to
 * WL1271_BOOT_RETRIES times.  On success, publishes hw/fw versions to
 * wiphy, prunes 5GHz channels when 11a is unsupported, and moves the
 * driver to WLCORE_STATE_ON.
 * NOTE(review): numbered listing; the retry-loop head, power-on calls
 * and error-path lines are elided.
 */
2339 static int wl12xx_init_fw(struct wl1271 *wl)
2341 int retries = WL1271_BOOT_RETRIES;
2342 bool booted = false;
2343 struct wiphy *wiphy = wl->hw->wiphy;
2348 ret = wl12xx_chip_wakeup(wl, false);
2352 ret = wl->ops->boot(wl);
2356 ret = wl1271_hw_init(wl);
/* boot-failure path: drain pending IRQ work before retrying */
2364 mutex_unlock(&wl->mutex);
2365 /* Unlocking the mutex in the middle of handling is
2366 inherently unsafe. In this case we deem it safe to do,
2367 because we need to let any possibly pending IRQ out of
2368 the system (and while we are WLCORE_STATE_OFF the IRQ
2369 work function will not do anything.) Also, any other
2370 possible concurrent operations will fail due to the
2371 current state, hence the wl1271 struct should be safe. */
2372 wlcore_disable_interrupts(wl);
2373 wl1271_flush_deferred_work(wl);
2374 cancel_work_sync(&wl->netstack_work);
2375 mutex_lock(&wl->mutex);
2377 wl1271_power_off(wl);
2381 wl1271_error("firmware boot failed despite %d retries",
2382 WL1271_BOOT_RETRIES);
2386 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2388 /* update hw/fw version info in wiphy struct */
2389 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy may leave fw_version unterminated if
 * fw_ver_str is >= sizeof(fw_version) - presumed bounded upstream */
2390 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2391 sizeof(wiphy->fw_version));
2394 * Now we know if 11a is supported (info from the NVS), so disable
2395 * 11a channels if not supported
2397 if (!wl->enable_11a)
2398 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2400 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2401 wl->enable_11a ? "" : "not ");
2403 wl->state = WLCORE_STATE_ON;
2408 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2410 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2414 * Check whether a fw switch (i.e. moving from one loaded
2415 * fw to another) is needed. This function is also responsible
2416 * for updating wl->last_vif_count, so it must be called before
2417 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2420 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2421 struct vif_counter_data vif_counter_data,
2424 enum wl12xx_fw_type current_fw = wl->fw_type;
2425 u8 vif_count = vif_counter_data.counter;
2427 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2430 /* increase the vif count if this is a new vif */
2431 if (add && !vif_counter_data.cur_vif_running)
2434 wl->last_vif_count = vif_count;
2436 /* no need for fw change if the device is OFF */
2437 if (wl->state == WLCORE_STATE_OFF)
2440 /* no need for fw change if a single fw is used */
2441 if (!wl->mr_fw_name)
2444 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2446 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2453 * Enter "forced psm". Make sure the sta is in psm against the ap,
2454 * to make the fw switch a bit more disconnection-persistent.
2456 static void wl12xx_force_active_psm(struct wl1271 *wl)
2458 struct wl12xx_vif *wlvif;
2460 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2461 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2465 struct wlcore_hw_queue_iter_data {
2466 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2468 struct ieee80211_vif *vif;
2469 /* is the current vif among those iterated */
2473 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2474 struct ieee80211_vif *vif)
2476 struct wlcore_hw_queue_iter_data *iter_data = data;
2478 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2481 if (iter_data->cur_running || vif == iter_data->vif) {
2482 iter_data->cur_running = true;
2486 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * wlcore_allocate_hw_queue_base - pick the mac80211 hw-queue base for a
 * vif.  Reuses the vif's existing base when mac80211 already runs it
 * (resume/recovery), otherwise claims the first free base from the map
 * built by wlcore_hw_queue_iter.  AP vifs also get a CAB queue from the
 * reserved tail range.
 * NOTE(review): numbered listing with return/brace lines elided.
 */
2489 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2490 struct wl12xx_vif *wlvif)
2492 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2493 struct wlcore_hw_queue_iter_data iter_data = {};
2496 iter_data.vif = vif;
2498 /* mark all bits taken by active interfaces */
2499 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2500 IEEE80211_IFACE_ITER_RESUME_ALL,
2501 wlcore_hw_queue_iter, &iter_data);
2503 /* the current vif is already running in mac80211 (resume/recovery) */
2504 if (iter_data.cur_running) {
2505 wlvif->hw_queue_base = vif->hw_queue[0];
2506 wl1271_debug(DEBUG_MAC80211,
2507 "using pre-allocated hw queue base %d",
2508 wlvif->hw_queue_base);
2510 /* interface type might have changed type */
2511 goto adjust_cab_queue;
2514 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2515 WLCORE_NUM_MAC_ADDRESSES);
2516 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2519 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2520 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2521 wlvif->hw_queue_base);
2523 for (i = 0; i < NUM_TX_QUEUES; i++) {
2524 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2525 /* register hw queues in mac80211 */
2526 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2530 /* the last places are reserved for cab queues per interface */
2531 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2532 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2533 wlvif->hw_queue_base / NUM_TX_QUEUES;
/* non-AP vifs have no content-after-beacon queue */
2535 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * wl1271_op_add_interface - mac80211 add_interface callback.
 * Initializes per-vif state, allocates hw queues, switches fw image if
 * the new vif count requires it (via an intended recovery), boots the
 * fw on first interface (MAC address only known here), and enables the
 * FW role.  NOTE(review): numbered listing with goto/out/unlock lines
 * elided; error-path ordering is not fully visible.
 */
2540 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2541 struct ieee80211_vif *vif)
2543 struct wl1271 *wl = hw->priv;
2544 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2545 struct vif_counter_data vif_count;
2550 wl1271_error("Adding Interface not allowed while in PLT mode");
/* driver-side beacon filtering and CQM RSSI are handled in FW */
2554 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2555 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2557 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2558 ieee80211_vif_type_p2p(vif), vif->addr);
2560 wl12xx_get_vif_count(hw, vif, &vif_count);
2562 mutex_lock(&wl->mutex);
2563 ret = wl1271_ps_elp_wakeup(wl);
2568 * in some very corner case HW recovery scenarios its possible to
2569 * get here before __wl1271_op_remove_interface is complete, so
2570 * opt out if that is the case.
2572 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2573 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2579 ret = wl12xx_init_vif_data(wl, vif);
2584 role_type = wl12xx_get_role_type(wl, wlvif);
2585 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2590 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* single-role <-> multi-role fw switch via an intended recovery */
2594 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2595 wl12xx_force_active_psm(wl);
2596 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2597 mutex_unlock(&wl->mutex);
2598 wl1271_recovery_work(&wl->recovery_work);
2603 * TODO: after the nvs issue will be solved, move this block
2604 * to start(), and make sure here the driver is ON.
2606 if (wl->state == WLCORE_STATE_OFF) {
2608 * we still need this in order to configure the fw
2609 * while uploading the nvs
2611 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2613 ret = wl12xx_init_fw(wl);
2618 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2619 role_type, &wlvif->role_id);
2623 ret = wl1271_init_vif_specific(wl, vif);
2627 list_add(&wlvif->list, &wl->wlvif_list);
2628 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2630 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2635 wl1271_ps_elp_sleep(wl);
2637 mutex_unlock(&wl->mutex);
/*
 * __wl1271_op_remove_interface - tear down a vif.
 * Caller holds wl->mutex.  Aborts any scan owned by this vif, disables
 * its FW roles (unless a recovery is in progress), resets TX state,
 * frees its rate policies / klv template / keys, and finally drops the
 * mutex to cancel the vif's work items synchronously.
 * NOTE(review): numbered listing with goto/out/brace lines elided.
 */
2642 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2643 struct ieee80211_vif *vif,
2644 bool reset_tx_queues)
2646 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2648 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2650 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2652 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2655 /* because of hardware recovery, we may get here twice */
2656 if (wl->state == WLCORE_STATE_OFF)
2659 wl1271_info("down");
/* abort a scan owned by this vif before tearing it down */
2661 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2662 wl->scan_wlvif == wlvif) {
2664 * Rearm the tx watchdog just before idling scan. This
2665 * prevents just-finished scans from triggering the watchdog
2667 wl12xx_rearm_tx_watchdog_locked(wl);
2669 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2670 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2671 wl->scan_wlvif = NULL;
2672 wl->scan.req = NULL;
/* true = scan was aborted */
2673 ieee80211_scan_completed(wl->hw, true);
2676 if (wl->sched_vif == wlvif)
2677 wl->sched_vif = NULL;
2679 if (wl->roc_vif == vif) {
2681 ieee80211_remain_on_channel_expired(wl->hw);
/* during recovery the FW is gone - skip all role-disable commands */
2684 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2685 /* disable active roles */
2686 ret = wl1271_ps_elp_wakeup(wl);
2690 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2691 wlvif->bss_type == BSS_TYPE_IBSS) {
2692 if (wl12xx_dev_role_started(wlvif))
2693 wl12xx_stop_dev(wl, wlvif);
2696 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2700 wl1271_ps_elp_sleep(wl);
2703 wl12xx_tx_reset_wlvif(wl, wlvif);
2705 /* clear all hlids (except system_hlid) */
2706 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2708 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2709 wlvif->bss_type == BSS_TYPE_IBSS) {
2710 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2711 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2712 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2713 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2714 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2716 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2717 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2718 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2719 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2720 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2721 wl12xx_free_rate_policy(wl,
2722 &wlvif->ap.ucast_rate_idx[i]);
2723 wl1271_free_ap_keys(wl, wlvif);
2726 dev_kfree_skb(wlvif->probereq);
2727 wlvif->probereq = NULL;
2728 if (wl->last_wlvif == wlvif)
2729 wl->last_wlvif = NULL;
2730 list_del(&wlvif->list);
2731 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2732 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2733 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2741 * Last AP, have more stations. Configure sleep auth according to STA.
2742 * Don't do this on unintended recovery.
2744 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2745 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2748 if (wl->ap_count == 0 && is_ap) {
2749 /* mask ap events */
2750 wl->event_mask &= ~wl->ap_event_mask;
2751 wl1271_event_unmask(wl);
2754 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2755 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2756 /* Configure for power according to debugfs */
2757 if (sta_auth != WL1271_PSM_ILLEGAL)
2758 wl1271_acx_sleep_auth(wl, sta_auth);
2759 /* Configure for ELP power saving */
2761 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex so the vif's workers can be cancelled synchronously */
2765 mutex_unlock(&wl->mutex);
2767 del_timer_sync(&wlvif->rx_streaming_timer);
2768 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2769 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2770 cancel_work_sync(&wlvif->rc_update_work);
2771 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2772 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2773 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2775 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface callback: tear down @vif under wl->mutex.
 * Bails out if the hw is already off or the vif was never initialized;
 * otherwise verifies the vif is still tracked, removes it, and queues an
 * intended firmware recovery when the remaining vif mix requires a
 * different firmware.
 * NOTE(review): several lines (gotos, braces) are missing from this
 * listing; comments describe only the visible flow.
 */
2778 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2779 struct ieee80211_vif *vif)
2781 struct wl1271 *wl = hw->priv;
2782 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2783 struct wl12xx_vif *iter;
2784 struct vif_counter_data vif_count;
2786 wl12xx_get_vif_count(hw, vif, &vif_count);
2787 mutex_lock(&wl->mutex);
2789 if (wl->state == WLCORE_STATE_OFF ||
2790 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2794 * wl->vif can be null here if someone shuts down the interface
2795 * just when hardware recovery has been started.
2797 wl12xx_for_each_wlvif(wl, iter) {
2801 __wl1271_op_remove_interface(wl, vif, true);
/* if the loop exits without removing, the vif was not in wl's list */
2804 WARN_ON(iter != wlvif);
2805 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2806 wl12xx_force_active_psm(wl);
2807 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2808 wl12xx_queue_recovery_work(wl);
2811 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface callback: implemented as remove + re-add.
 * The VIF_CHANGE_IN_PROGRESS flag brackets the sequence so other paths
 * can tell this removal is temporary.
 */
2814 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2815 struct ieee80211_vif *vif,
2816 enum nl80211_iftype new_type, bool p2p)
2818 struct wl1271 *wl = hw->priv;
2821 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2822 wl1271_op_remove_interface(hw, vif);
/* mutate the vif type before re-adding it with the new role */
2824 vif->type = new_type;
2826 ret = wl1271_op_add_interface(hw, vif);
2828 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the JOIN (role start) for a STA or IBSS vif. JOIN clears the
 * chipset's WPA/WPA2 keys, so encryption_type is reset here; callers are
 * expected to re-install keys afterwards (e.g. on roaming).
 */
2832 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2835 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2838 * One of the side effects of the JOIN command is that is clears
2839 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2840 * to a WPA/WPA2 access point will therefore kill the data-path.
2841 * Currently the only valid scenario for JOIN during association
2842 * is on roaming, in which case we will also be given new keys.
2843 * Keep the below message for now, unless it starts bothering
2844 * users who really like to roam a lot :)
2846 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2847 wl1271_info("JOIN while associated.")
2849 /* clear encryption type */
2850 wlvif->encryption_type = KEY_NONE;
2853 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2855 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2857 * TODO: this is an ugly workaround for wl12xx fw
2858 * bug - we are not able to tx/rx after the first
2859 * start_sta, so make dummy start+stop calls,
2860 * and then call start_sta again.
2861 * this should be fixed in the fw.
2863 wl12xx_cmd_role_start_sta(wl, wlvif);
2864 wl12xx_cmd_role_stop_sta(wl, wlvif);
2867 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a frame in @skb (IEs start at @offset) and
 * cache it in wlvif->ssid / wlvif->ssid_len. Errors out when no SSID IE
 * is present or its length exceeds IEEE80211_MAX_SSID_LEN.
 */
2873 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2877 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2881 wl1271_error("No SSID in IEs!");
2886 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2887 wl1271_error("SSID is too long!");
/* ptr+2 skips the 2-byte IE header (id, length) to the SSID payload */
2891 wlvif->ssid_len = ssid_len;
2892 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate the vif's SSID from the AP probe-request template that
 * mac80211 keeps for the current association. Only supported for STA
 * vifs; other bss types are rejected up front.
 */
2896 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2898 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2899 struct sk_buff *skb;
2902 /* we currently only support setting the ssid from the ap probe req */
2903 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2906 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs start after the fixed probe-request header */
2910 ieoffset = offsetof(struct ieee80211_mgmt,
2911 u.probe_req.variable);
2912 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply post-association state to firmware for a STA vif: cache AID,
 * channel type, beacon interval and WMM state, then (re)configure
 * ps-poll/probe-req templates, connection monitoring, keep-alive, and
 * rate policies. Command order matters (see keep-alive comment below).
 */
2918 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2919 struct ieee80211_bss_conf *bss_conf,
2925 wlvif->aid = bss_conf->aid;
2926 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2927 wlvif->beacon_int = bss_conf->beacon_int;
2928 wlvif->wmm_enabled = bss_conf->qos;
2930 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2933 * with wl1271, we don't need to update the
2934 * beacon_int and dtim_period, because the firmware
2935 * updates it by itself when the first beacon is
2936 * received after a join.
2938 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2943 * Get a template for hardware connection maintenance
2945 dev_kfree_skb(wlvif->probereq);
2946 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2949 ieoffset = offsetof(struct ieee80211_mgmt,
2950 u.probe_req.variable);
2951 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2953 /* enable the connection monitoring feature */
2954 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2959 * The join command disable the keep-alive mode, shut down its process,
2960 * and also clear the template config, so we need to reset it all after
2961 * the join. The acx_aid starts the keep-alive process, and the order
2962 * of the commands below is relevant.
2964 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2968 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2972 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2976 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2977 wlvif->sta.klv_template_id,
2978 ACX_KEEP_ALIVE_TPL_VALID);
2983 * The default fw psm configuration is AUTO, while mac80211 default
2984 * setting is off (ACTIVE), so sync the fw with the correct value.
2986 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2992 wl1271_tx_enabled_rates_get(wl,
2995 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear association/IBSS-joined state, free the
 * probe-request template, disable connection monitoring, keep-alive and
 * beacon filtering, abort any in-progress channel switch, and invalidate
 * the keep-alive template.
 */
3003 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3006 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3008 /* make sure we are connected (sta) joined */
3010 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3013 /* make sure we are joined (ibss) */
3015 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3019 /* use defaults when not associated */
3022 /* free probe-request template */
3023 dev_kfree_skb(wlvif->probereq);
3024 wlvif->probereq = NULL;
3026 /* disable connection monitor features */
3027 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3031 /* Disable the keep-alive feature */
3032 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3036 /* disable beacon filtering */
3037 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3042 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3043 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* tell mac80211 the channel switch did not complete */
3045 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3046 ieee80211_chswitch_done(vif, false);
3047 cancel_delayed_work(&wlvif->channel_switch_work);
3050 /* invalidate keep-alive template */
3051 wl1271_acx_keep_alive_config(wl, wlvif,
3052 wlvif->sta.klv_template_id,
3053 ACX_KEEP_ALIVE_TPL_INVALID);
3058 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3060 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3061 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track mac80211 idle transitions for a STA vif via WLVIF_FLAG_ACTIVE
 * (note the flag is the inverse of "idle"). On entering non-idle,
 * sched_scan is stopped because the firmware only supports it while
 * idle. No-op when the state is unchanged.
 */
3064 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3067 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3069 if (idle == cur_idle)
3073 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3075 /* The current firmware only supports sched_scan in idle */
3076 if (wl->sched_vif == wlvif)
3077 wl->ops->sched_scan_stop(wl, wlvif);
3079 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply the relevant parts of a hw config change to one vif. Currently
 * only the tx power level is pushed to firmware, and only when it
 * actually changed; the cached value is updated on success.
 */
3083 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3084 struct ieee80211_conf *conf, u32 changed)
3088 if (conf->power_level != wlvif->power_level) {
3089 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3093 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config callback: cache the global power level, then wake the
 * chip and fan the change out to every vif via wl12xx_config_vif().
 * Skips firmware access entirely when the hw is not in state ON.
 */
3099 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3101 struct wl1271 *wl = hw->priv;
3102 struct wl12xx_vif *wlvif;
3103 struct ieee80211_conf *conf = &hw->conf;
3106 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3108 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3110 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3113 mutex_lock(&wl->mutex);
3115 if (changed & IEEE80211_CONF_CHANGE_POWER)
3116 wl->power_level = conf->power_level;
3118 if (unlikely(wl->state != WLCORE_STATE_ON))
3121 ret = wl1271_ps_elp_wakeup(wl);
3125 /* configure each interface */
3126 wl12xx_for_each_wlvif(wl, wlvif) {
3127 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3133 wl1271_ps_elp_sleep(wl);
3136 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in wl1271_op_prepare_multicast() and
 * consumed (then freed) by wl1271_op_configure_filter(); it travels
 * between the two packed into the u64 "multicast" cookie.
 * NOTE(review): the 'enabled' and 'mc_list_length' members referenced by
 * those callbacks are not visible in this listing — confirm in header.
 */
3141 struct wl1271_filter_params {
3144 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast callback: snapshot the hw multicast list
 * into a freshly allocated wl1271_filter_params (GFP_ATOMIC — may run in
 * atomic context) and return it cast into the u64 cookie. When the list
 * exceeds ACX_MC_ADDRESS_GROUP_MAX the filter is marked disabled instead
 * of copying. Ownership passes to wl1271_op_configure_filter().
 */
3147 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3148 struct netdev_hw_addr_list *mc_list)
3150 struct wl1271_filter_params *fp;
3151 struct netdev_hw_addr *ha;
3153 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3155 wl1271_error("Out of memory setting filters.");
3159 /* update multicast filtering parameters */
3160 fp->mc_list_length = 0;
3161 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3162 fp->enabled = false;
3165 netdev_hw_addr_list_for_each(ha, mc_list) {
3166 memcpy(fp->mc_list[fp->mc_list_length],
3167 ha->addr, ETH_ALEN);
3168 fp->mc_list_length++;
3172 return (u64)(unsigned long)fp;
/* RX filter flags this driver can honour; anything else is masked out
 * in wl1271_op_configure_filter(). (Listing truncated — more FIF_* flags
 * belong to this macro in the full source.) */
3175 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3178 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter callback: mask the requested filters down
 * to what the hw supports, then program the multicast group address
 * table per non-AP vif (disabled under FIF_ALLMULTI). The fp cookie
 * allocated by prepare_multicast is consumed here.
 */
3182 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3183 unsigned int changed,
3184 unsigned int *total, u64 multicast)
3186 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3187 struct wl1271 *wl = hw->priv;
3188 struct wl12xx_vif *wlvif;
3192 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3193 " total %x", changed, *total);
3195 mutex_lock(&wl->mutex);
3197 *total &= WL1271_SUPPORTED_FILTERS;
3198 changed &= WL1271_SUPPORTED_FILTERS;
3200 if (unlikely(wl->state != WLCORE_STATE_ON))
3203 ret = wl1271_ps_elp_wakeup(wl);
3207 wl12xx_for_each_wlvif(wl, wlvif) {
3208 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3209 if (*total & FIF_ALLMULTI)
3210 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3214 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3217 fp->mc_list_length);
3224 * the fw doesn't provide an api to configure the filters. instead,
3225 * the filters configuration is based on the active roles / ROC
3230 wl1271_ps_elp_sleep(wl);
3233 mutex_unlock(&wl->mutex);
/*
 * Stash an AP key in wlvif->ap.recorded_keys so it can be programmed
 * into firmware later (once the AP role is started — see
 * wl1271_ap_init_hwenc). Rejects oversized keys, duplicate key ids, and
 * a full table. The entry is kfree()d by wl1271_free_ap_keys().
 */
3237 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3238 u8 id, u8 key_type, u8 key_size,
3239 const u8 *key, u8 hlid, u32 tx_seq_32,
3242 struct wl1271_ap_key *ap_key;
3245 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3247 if (key_size > MAX_KEY_SIZE)
3251 * Find next free entry in ap_keys. Also check we are not replacing
3254 for (i = 0; i < MAX_NUM_KEYS; i++) {
3255 if (wlvif->ap.recorded_keys[i] == NULL)
3258 if (wlvif->ap.recorded_keys[i]->id == id) {
3259 wl1271_warning("trying to record key replacement");
3264 if (i == MAX_NUM_KEYS)
3267 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3272 ap_key->key_type = key_type;
3273 ap_key->key_size = key_size;
3274 memcpy(ap_key->key, key, key_size);
3275 ap_key->hlid = hlid;
3276 ap_key->tx_seq_32 = tx_seq_32;
3277 ap_key->tx_seq_16 = tx_seq_16;
3279 wlvif->ap.recorded_keys[i] = ap_key;
3283 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3287 for (i = 0; i < MAX_NUM_KEYS; i++) {
3288 kfree(wlvif->ap.recorded_keys[i]);
3289 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Program every key recorded before AP start into firmware. Keys with an
 * invalid hlid are installed on the broadcast link. If any WEP key was
 * installed, the default WEP key index is also pushed. The recorded-key
 * table is freed on the way out.
 */
3293 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3296 struct wl1271_ap_key *key;
3297 bool wep_key_added = false;
3299 for (i = 0; i < MAX_NUM_KEYS; i++) {
3301 if (wlvif->ap.recorded_keys[i] == NULL)
3304 key = wlvif->ap.recorded_keys[i];
3306 if (hlid == WL12XX_INVALID_LINK_ID)
3307 hlid = wlvif->ap.bcast_hlid;
3309 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3310 key->id, key->key_type,
3311 key->key_size, key->key,
3312 hlid, key->tx_seq_32,
3317 if (key->key_type == KEY_WEP)
3318 wep_key_added = true;
3321 if (wep_key_added) {
3322 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3323 wlvif->ap.bcast_hlid);
3329 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key add/remove dispatcher. AP vifs: resolve the target hlid
 * (per-station or broadcast); before AP start, keys are only recorded
 * for later installation. STA vifs: resolve the peer address and filter
 * out removals firmware cannot honour (unicast keys, or an already
 * deleted hlid) before issuing the set-key command.
 */
3333 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3334 u16 action, u8 id, u8 key_type,
3335 u8 key_size, const u8 *key, u32 tx_seq_32,
3336 u16 tx_seq_16, struct ieee80211_sta *sta)
3339 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3342 struct wl1271_station *wl_sta;
3346 wl_sta = (struct wl1271_station *)sta->drv_priv;
3347 hlid = wl_sta->hlid;
3349 hlid = wlvif->ap.bcast_hlid;
3352 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3354 * We do not support removing keys after AP shutdown.
3355 * Pretend we do to make mac80211 happy.
3357 if (action != KEY_ADD_OR_REPLACE)
3360 ret = wl1271_record_ap_key(wl, wlvif, id,
3362 key, hlid, tx_seq_32,
3365 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3366 id, key_type, key_size,
3367 key, hlid, tx_seq_32,
3375 static const u8 bcast_addr[ETH_ALEN] = {
3376 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3379 addr = sta ? sta->addr : bcast_addr;
3381 if (is_zero_ether_addr(addr)) {
3382 /* We dont support TX only encryption */
3386 /* The wl1271 does not allow to remove unicast keys - they
3387 will be cleared automatically on next CMD_JOIN. Ignore the
3388 request silently, as we dont want the mac80211 to emit
3389 an error message. */
3390 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3393 /* don't remove key if hlid was already deleted */
3394 if (action == KEY_REMOVE &&
3395 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3398 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3399 id, key_type, key_size,
3400 key, addr, tx_seq_32,
/*
 * mac80211 .set_key callback. GEM and TKIP ciphers change the firmware's
 * spare-block accounting, so for those the tx queues are stopped and
 * flushed before the hw-specific set_key op runs, and woken afterwards.
 */
3410 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3411 struct ieee80211_vif *vif,
3412 struct ieee80211_sta *sta,
3413 struct ieee80211_key_conf *key_conf)
3415 struct wl1271 *wl = hw->priv;
3417 bool might_change_spare =
3418 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3419 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3421 if (might_change_spare) {
3423 * stop the queues and flush to ensure the next packets are
3424 * in sync with FW spare block accounting
3426 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3427 wl1271_tx_flush(wl);
3430 mutex_lock(&wl->mutex);
3432 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3434 goto out_wake_queues;
3437 ret = wl1271_ps_elp_wakeup(wl);
3439 goto out_wake_queues;
3441 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3443 wl1271_ps_elp_sleep(wl);
3446 if (might_change_spare)
3447 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3449 mutex_unlock(&wl->mutex);
/*
 * Shared (exported) set-key implementation used by the chip-specific
 * set_key ops. Resolves the hlid for the key target, derives the tx
 * sequence counter from the link's freed-packet count, maps the cipher
 * to a firmware key type, then adds/replaces or removes the key. For
 * STA vifs, a change of the unicast/common key type also rebuilds the
 * ARP response template so it is encrypted correctly.
 */
3454 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3455 struct ieee80211_vif *vif,
3456 struct ieee80211_sta *sta,
3457 struct ieee80211_key_conf *key_conf)
3459 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3466 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3468 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3469 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3470 key_conf->cipher, key_conf->keyidx,
3471 key_conf->keylen, key_conf->flags);
3472 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3474 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3476 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3477 hlid = wl_sta->hlid;
3479 hlid = wlvif->ap.bcast_hlid;
3482 hlid = wlvif->sta.hlid;
/* seed the key's tx sequence number from the link's tx counter */
3484 if (hlid != WL12XX_INVALID_LINK_ID) {
3485 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3486 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3487 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3490 switch (key_conf->cipher) {
3491 case WLAN_CIPHER_SUITE_WEP40:
3492 case WLAN_CIPHER_SUITE_WEP104:
3495 key_conf->hw_key_idx = key_conf->keyidx;
3497 case WLAN_CIPHER_SUITE_TKIP:
3498 key_type = KEY_TKIP;
3499 key_conf->hw_key_idx = key_conf->keyidx;
3501 case WLAN_CIPHER_SUITE_CCMP:
3503 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3505 case WL1271_CIPHER_SUITE_GEM:
3509 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3516 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3517 key_conf->keyidx, key_type,
3518 key_conf->keylen, key_conf->key,
3519 tx_seq_32, tx_seq_16, sta);
3521 wl1271_error("Could not add or replace key");
3526 * reconfiguring arp response if the unicast (or common)
3527 * encryption key type was changed
3529 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3530 (sta || key_type == KEY_WEP) &&
3531 wlvif->encryption_type != key_type) {
3532 wlvif->encryption_type = key_type;
3533 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3535 wl1271_warning("build arp rsp failed: %d", ret);
3542 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3543 key_conf->keyidx, key_type,
3544 key_conf->keylen, key_conf->key,
3547 wl1271_error("Could not remove key");
3553 wl1271_error("Unsupported key cmd 0x%x", cmd);
3559 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key callback: cache the new default key
 * index and, when the vif currently uses WEP, push the default WEP key
 * selection to firmware. Unsetting the default key is not handled.
 */
3561 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3562 struct ieee80211_vif *vif,
3565 struct wl1271 *wl = hw->priv;
3566 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3569 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3572 /* we don't handle unsetting of default key */
3576 mutex_lock(&wl->mutex);
3578 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3583 ret = wl1271_ps_elp_wakeup(wl);
3587 wlvif->default_key = key_idx;
3589 /* the default WEP key needs to be configured at least once */
3590 if (wlvif->encryption_type == KEY_WEP) {
3591 ret = wl12xx_cmd_set_default_wep_key(wl,
3599 wl1271_ps_elp_sleep(wl);
3602 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to firmware for chips that need it
 * (WLCORE_QUIRK_REGDOMAIN_CONF). A failure to configure is treated as
 * fatal for the firmware and triggers recovery.
 */
3605 void wlcore_regdomain_config(struct wl1271 *wl)
3609 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3612 mutex_lock(&wl->mutex);
3614 if (unlikely(wl->state != WLCORE_STATE_ON))
3617 ret = wl1271_ps_elp_wakeup(wl);
3621 ret = wlcore_cmd_regdomain_config_locked(wl);
3623 wl12xx_queue_recovery_work(wl);
3627 wl1271_ps_elp_sleep(wl);
3629 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan callback: start a one-shot hw scan with the first
 * requested SSID (if any). Refused while any role is in remain-on-
 * channel. When the hw is not ON, a specific error must be returned so
 * cfg80211 does not wait for ieee80211_scan_completed (see comment).
 */
3632 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3633 struct ieee80211_vif *vif,
3634 struct ieee80211_scan_request *hw_req)
3636 struct cfg80211_scan_request *req = &hw_req->req;
3637 struct wl1271 *wl = hw->priv;
3642 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3645 ssid = req->ssids[0].ssid;
3646 len = req->ssids[0].ssid_len;
3649 mutex_lock(&wl->mutex);
3651 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3653 * We cannot return -EBUSY here because cfg80211 will expect
3654 * a call to ieee80211_scan_completed if we do - in this case
3655 * there won't be any call.
3661 ret = wl1271_ps_elp_wakeup(wl);
3665 /* fail if there is any role in ROC */
3666 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3667 /* don't allow scanning right now */
3672 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3674 wl1271_ps_elp_sleep(wl);
3676 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan callback: abort an in-flight scan in
 * firmware, reset the driver's scan bookkeeping, report the scan as
 * aborted to mac80211, and cancel the deferred completion work (outside
 * the mutex to avoid deadlocking with the work itself).
 */
3681 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3682 struct ieee80211_vif *vif)
3684 struct wl1271 *wl = hw->priv;
3685 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3688 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3690 mutex_lock(&wl->mutex);
3692 if (unlikely(wl->state != WLCORE_STATE_ON))
3695 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3698 ret = wl1271_ps_elp_wakeup(wl);
3702 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3703 ret = wl->ops->scan_stop(wl, wlvif);
3709 * Rearm the tx watchdog just before idling scan. This
3710 * prevents just-finished scans from triggering the watchdog
3712 wl12xx_rearm_tx_watchdog_locked(wl);
3714 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3715 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3716 wl->scan_wlvif = NULL;
3717 wl->scan.req = NULL;
3718 ieee80211_scan_completed(wl->hw, true);
3721 wl1271_ps_elp_sleep(wl);
3723 mutex_unlock(&wl->mutex);
3725 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start callback: delegate to the chip-specific
 * sched_scan_start op and, on success, remember which vif owns the
 * scheduled scan (only one is supported at a time).
 */
3728 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3729 struct ieee80211_vif *vif,
3730 struct cfg80211_sched_scan_request *req,
3731 struct ieee80211_scan_ies *ies)
3733 struct wl1271 *wl = hw->priv;
3734 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3739 mutex_lock(&wl->mutex);
3741 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3746 ret = wl1271_ps_elp_wakeup(wl);
3750 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3754 wl->sched_vif = wlvif;
3757 wl1271_ps_elp_sleep(wl);
3759 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop callback: wake the chip and delegate to the
 * chip-specific sched_scan_stop op under wl->mutex.
 */
3763 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3764 struct ieee80211_vif *vif)
3766 struct wl1271 *wl = hw->priv;
3767 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3770 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3772 mutex_lock(&wl->mutex);
3774 if (unlikely(wl->state != WLCORE_STATE_ON))
3777 ret = wl1271_ps_elp_wakeup(wl);
3781 wl->ops->sched_scan_stop(wl, wlvif);
3783 wl1271_ps_elp_sleep(wl);
3785 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold callback: push the global fragmentation
 * threshold to firmware via ACX; failures are logged but otherwise
 * propagated to mac80211.
 */
3790 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3792 struct wl1271 *wl = hw->priv;
3795 mutex_lock(&wl->mutex);
3797 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3802 ret = wl1271_ps_elp_wakeup(wl);
3806 ret = wl1271_acx_frag_threshold(wl, value);
3808 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3810 wl1271_ps_elp_sleep(wl);
3813 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold callback: the RTS threshold is configured
 * per role in firmware, so the new value is applied to every vif.
 */
3818 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3820 struct wl1271 *wl = hw->priv;
3821 struct wl12xx_vif *wlvif;
3824 mutex_lock(&wl->mutex);
3826 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3831 ret = wl1271_ps_elp_wakeup(wl);
3835 wl12xx_for_each_wlvif(wl, wlvif) {
3836 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3838 wl1271_warning("set rts threshold failed: %d", ret);
3840 wl1271_ps_elp_sleep(wl);
3843 mutex_unlock(&wl->mutex);
3848 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3851 const u8 *next, *end = skb->data + skb->len;
3852 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3853 skb->len - ieoffset);
3858 memmove(ie, next, end - next);
3859 skb_trim(skb, skb->len - len);
3862 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3863 unsigned int oui, u8 oui_type,
3867 const u8 *next, *end = skb->data + skb->len;
3868 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3869 skb->data + ieoffset,
3870 skb->len - ieoffset);
3875 memmove(ie, next, end - next);
3876 skb_trim(skb, skb->len - len);
/*
 * Install the usermode-provided AP probe response (from mac80211) as the
 * firmware template and mark it set so beacon-derived probe responses
 * are skipped from then on (see wlcore_set_beacon_template).
 */
3879 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3880 struct ieee80211_vif *vif)
3882 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3883 struct sk_buff *skb;
3886 skb = ieee80211_proberesp_get(wl->hw, vif);
3890 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3891 CMD_TEMPL_AP_PROBE_RESPONSE,
3900 wl1271_debug(DEBUG_AP, "probe response updated");
3901 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Build the AP probe-response template for firmware that needs the real
 * SSID substituted in (hidden-SSID case). If the vif already has an
 * SSID, the given frame is used as-is; otherwise the frame's SSID IE is
 * replaced with bss_conf->ssid by splicing head + new SSID IE + tail
 * into a stack buffer before setting the template.
 */
3907 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3908 struct ieee80211_vif *vif,
3910 size_t probe_rsp_len,
3913 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3914 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3915 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3916 int ssid_ie_offset, ie_offset, templ_len;
3919 /* no need to change probe response if the SSID is set correctly */
3920 if (wlvif->ssid_len > 0)
3921 return wl1271_cmd_template_set(wl, wlvif->role_id,
3922 CMD_TEMPL_AP_PROBE_RESPONSE,
3927 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3928 wl1271_error("probe_rsp template too big");
3932 /* start searching from IE offset */
3933 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3935 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3936 probe_rsp_len - ie_offset);
3938 wl1271_error("No SSID in beacon!");
3942 ssid_ie_offset = ptr - probe_rsp_data;
/* advance ptr past the original SSID IE (2-byte header + payload) */
3943 ptr += (ptr[1] + 2);
3945 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3947 /* insert SSID from bss_conf */
3948 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3949 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3950 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3951 bss_conf->ssid, bss_conf->ssid_len);
3952 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3954 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3955 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3956 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3958 return wl1271_cmd_template_set(wl, wlvif->role_id,
3959 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to firmware: slot time, preamble
 * type, and CTS protection. ACX failures are logged as warnings; the
 * preamble setting's return value is not checked in this listing.
 */
3965 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3966 struct ieee80211_vif *vif,
3967 struct ieee80211_bss_conf *bss_conf,
3970 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3973 if (changed & BSS_CHANGED_ERP_SLOT) {
3974 if (bss_conf->use_short_slot)
3975 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3977 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3979 wl1271_warning("Set slot time failed %d", ret);
3984 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3985 if (bss_conf->use_short_preamble)
3986 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3988 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3991 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3992 if (bss_conf->use_cts_prot)
3993 ret = wl1271_acx_cts_protect(wl, wlvif,
3996 ret = wl1271_acx_cts_protect(wl, wlvif,
3997 CTSPROTECT_DISABLE);
3999 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211 and install it as the firmware
 * beacon template; then, unless usermode already set an explicit probe
 * response, derive a probe-response template from the beacon (stripping
 * the TIM and P2P IEs and rewriting the frame control). WMM state is
 * refreshed from the beacon's Microsoft WMM vendor IE.
 */
4008 static int wlcore_set_beacon_template(struct wl1271 *wl,
4009 struct ieee80211_vif *vif,
4012 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4013 struct ieee80211_hdr *hdr;
4016 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4017 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4025 wl1271_debug(DEBUG_MASTER, "beacon updated");
4027 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4029 dev_kfree_skb(beacon);
4032 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4033 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4035 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4040 dev_kfree_skb(beacon);
/* track WMM from the beacon's Microsoft WMM vendor IE */
4044 wlvif->wmm_enabled =
4045 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4046 WLAN_OUI_TYPE_MICROSOFT_WMM,
4047 beacon->data + ieoffset,
4048 beacon->len - ieoffset);
4051 * In case we already have a probe-resp beacon set explicitly
4052 * by usermode, don't use the beacon data.
4054 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4057 /* remove TIM ie from probe response */
4058 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4061 * remove p2p ie from probe response.
4062 * the fw reponds to probe requests that don't include
4063 * the p2p ie. probe requests with p2p ie will be passed,
4064 * and will be responded by the supplicant (the spec
4065 * forbids including the p2p ie when responding to probe
4066 * requests that didn't include it).
4068 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4069 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* rewrite the beacon into a probe response frame */
4071 hdr = (struct ieee80211_hdr *) beacon->data;
4072 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4073 IEEE80211_STYPE_PROBE_RESP);
4075 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4080 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4081 CMD_TEMPL_PROBE_RESPONSE,
4086 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the AP probe-response template when usermode provided one, and
 * re-install the beacon template on BSS_CHANGED_BEACON (restarting the
 * DFS master if beaconing was previously disabled).
 */
4094 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4095 struct ieee80211_vif *vif,
4096 struct ieee80211_bss_conf *bss_conf,
4099 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4100 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4103 if (changed & BSS_CHANGED_BEACON_INT) {
4104 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4105 bss_conf->beacon_int);
4107 wlvif->beacon_int = bss_conf->beacon_int;
4110 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4111 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4113 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4116 if (changed & BSS_CHANGED_BEACON) {
4117 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4121 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4123 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4130 wl1271_error("beacon info change failed: %d", ret);
4134 /* AP mode changes */
/*
 * Apply bss_conf changes to an AP vif: rebuild rate policies and
 * templates on basic-rate changes, start/stop the AP role when
 * beaconing is enabled/disabled (including releasing a lingering ROC on
 * stop), then propagate ERP and HT information changes.
 */
4135 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4136 struct ieee80211_vif *vif,
4137 struct ieee80211_bss_conf *bss_conf,
4140 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4143 if (changed & BSS_CHANGED_BASIC_RATES) {
4144 u32 rates = bss_conf->basic_rates;
4146 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4148 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4149 wlvif->basic_rate_set);
4151 ret = wl1271_init_ap_rates(wl, wlvif);
4153 wl1271_error("AP rate policy change failed %d", ret);
4157 ret = wl1271_ap_init_templates(wl, vif);
4161 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4165 ret = wlcore_set_beacon_template(wl, vif, true);
4170 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4174 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4175 if (bss_conf->enable_beacon) {
4176 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4177 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* push keys recorded before the AP role was started */
4181 ret = wl1271_ap_init_hwenc(wl, wlvif);
4185 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4186 wl1271_debug(DEBUG_AP, "started AP");
4189 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4191 * AP might be in ROC in case we have just
4192 * sent auth reply. handle it.
4194 if (test_bit(wlvif->role_id, wl->roc_map))
4195 wl12xx_croc(wl, wlvif->role_id);
4197 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4201 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4202 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4204 wl1271_debug(DEBUG_AP, "stopped AP");
4209 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4213 /* Handle HT information change */
4214 if ((changed & BSS_CHANGED_HT) &&
4215 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4216 ret = wl1271_acx_set_ht_information(wl, wlvif,
4217 bss_conf->ht_operation_mode);
4219 wl1271_warning("Set ht information failed %d", ret);
/*
 * Handle a BSSID change on a STA vif: recompute basic/peer rate sets,
 * stop sched_scan (unsupported while connected), push rate policies,
 * rebuild null-data and QoS-null templates, set the SSID, and mark the
 * vif as in use.
 */
4228 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4229 struct ieee80211_bss_conf *bss_conf,
4235 wl1271_debug(DEBUG_MAC80211,
4236 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4237 bss_conf->bssid, bss_conf->aid,
4238 bss_conf->beacon_int,
4239 bss_conf->basic_rates, sta_rate_set);
4241 wlvif->beacon_int = bss_conf->beacon_int;
4242 rates = bss_conf->basic_rates;
4243 wlvif->basic_rate_set =
4244 wl1271_tx_enabled_rates_get(wl, rates,
4247 wl1271_tx_min_rate_get(wl,
4248 wlvif->basic_rate_set);
4252 wl1271_tx_enabled_rates_get(wl,
4256 /* we only support sched_scan while not connected */
4257 if (wl->sched_vif == wlvif)
4258 wl->ops->sched_scan_stop(wl, wlvif);
4260 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4264 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4268 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4272 wlcore_set_ssid(wl, wlvif);
4274 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, push
 * the reverted rate policies to the FW, stop the STA role if it was in
 * use, and clear the in-use flag.
 */
4279 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4283 /* revert back to minimum rates for the current band */
4284 wl1271_set_band_rate(wl, wlvif);
4285 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4287 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* only a running STA role needs an explicit role_stop */
4291 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4292 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4293 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4298 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4301 /* STA/IBSS mode changes */
/*
 * Apply mac80211 BSS configuration changes for STA/IBSS vifs: IBSS
 * join/leave, idle, CQM thresholds, BSSID set/clear, beacon filtering,
 * ERP, association state, power-save mode, HT capabilities and ARP
 * filtering. Called with wl->mutex held and the FW awake (per the
 * calling convention visible in wl1271_op_bss_info_changed below).
 */
4302 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4303 struct ieee80211_vif *vif,
4304 struct ieee80211_bss_conf *bss_conf,
4307 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4308 bool do_join = false;
4309 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4310 bool ibss_joined = false;
4311 u32 sta_rate_set = 0;
4313 struct ieee80211_sta *sta;
4314 bool sta_exists = false;
4315 struct ieee80211_sta_ht_cap sta_ht_cap;
4318 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
/* IBSS join/leave: on leave, drop assoc state and stop the STA role */
4324 if (changed & BSS_CHANGED_IBSS) {
4325 if (bss_conf->ibss_joined) {
4326 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4329 wlcore_unset_assoc(wl, wlvif);
4330 wl12xx_cmd_role_stop_sta(wl, wlvif);
4334 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4337 /* Need to update the SSID (for filtering etc) */
4338 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4341 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4342 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4343 bss_conf->enable_beacon ? "enabled" : "disabled");
4348 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4349 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: (re)arm the RSSI/SNR trigger */
4351 if (changed & BSS_CHANGED_CQM) {
4352 bool enable = false;
4353 if (bss_conf->cqm_rssi_thold)
4355 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4356 bss_conf->cqm_rssi_thold,
4357 bss_conf->cqm_rssi_hyst);
4360 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's rates and HT caps under the find_sta RCU window */
4363 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4364 BSS_CHANGED_ASSOC)) {
4366 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4368 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4370 /* save the supp_rates of the ap */
4371 sta_rate_set = sta->supp_rates[wlvif->band];
4372 if (sta->ht_cap.ht_supported)
4374 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4375 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4376 sta_ht_cap = sta->ht_cap;
/* a non-zero BSSID configures the vif, an all-zero one clears it */
4383 if (changed & BSS_CHANGED_BSSID) {
4384 if (!is_zero_ether_addr(bss_conf->bssid)) {
4385 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4390 /* Need to update the BSSID (for filtering etc) */
4393 ret = wlcore_clear_bssid(wl, wlvif);
4399 if (changed & BSS_CHANGED_IBSS) {
4400 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4401 bss_conf->ibss_joined);
4403 if (bss_conf->ibss_joined) {
4404 u32 rates = bss_conf->basic_rates;
4405 wlvif->basic_rate_set =
4406 wl1271_tx_enabled_rates_get(wl, rates,
4409 wl1271_tx_min_rate_get(wl,
4410 wlvif->basic_rate_set);
4412 /* by default, use 11b + OFDM rates */
4413 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4414 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4420 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4421 /* enable beacon filtering */
4422 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4427 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4432 ret = wlcore_join(wl, wlvif);
4434 wl1271_warning("cmd join failed %d", ret);
/* association state: set assoc, and re-send the authorized state if
 * mac80211 already authorized us (e.g. after recovery) */
4439 if (changed & BSS_CHANGED_ASSOC) {
4440 if (bss_conf->assoc) {
4441 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4446 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4447 wl12xx_set_authorized(wl, wlvif);
4449 wlcore_unset_assoc(wl, wlvif);
/* power save: forced vs. auto PS on entry, active mode on exit */
4453 if (changed & BSS_CHANGED_PS) {
4454 if ((bss_conf->ps) &&
4455 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4456 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4460 if (wl->conf.conn.forced_ps) {
4461 ps_mode = STATION_POWER_SAVE_MODE;
4462 ps_mode_str = "forced";
4464 ps_mode = STATION_AUTO_PS_MODE;
4465 ps_mode_str = "auto";
4468 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4470 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4472 wl1271_warning("enter %s ps failed %d",
4474 } else if (!bss_conf->ps &&
4475 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4476 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4478 ret = wl1271_ps_set_mode(wl, wlvif,
4479 STATION_ACTIVE_MODE);
4481 wl1271_warning("exit auto ps failed %d", ret);
4485 /* Handle new association with HT. Do this after join. */
4488 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4490 ret = wlcore_hw_set_peer_cap(wl,
4496 wl1271_warning("Set ht cap failed %d", ret);
4502 ret = wl1271_acx_set_ht_information(wl, wlvif,
4503 bss_conf->ht_operation_mode);
4505 wl1271_warning("Set ht information failed %d",
4512 /* Handle arp filtering. Done after join. */
4513 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4514 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4515 __be32 addr = bss_conf->arp_addr_list[0];
4516 wlvif->sta.qos = bss_conf->qos;
4517 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4519 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4520 wlvif->ip_addr = addr;
4522 * The template should have been configured only upon
4523 * association. however, it seems that the correct ip
4524 * isn't being set (when sending), so we have to
4525 * reconfigure the template upon every ip change.
4527 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4529 wl1271_warning("build arp rsp failed: %d", ret);
4533 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4534 (ACX_ARP_FILTER_ARP_FILTERING |
4535 ACX_ARP_FILTER_AUTO_ARP),
4539 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed callback. Performs lock-free preamble work
 * (cancelling a pending connection-loss on assoc change, flushing TX
 * before an AP stops beaconing), then under wl->mutex and with the chip
 * awake applies TX-power changes and dispatches to the AP or STA/IBSS
 * handler depending on the vif's bss_type.
 */
4550 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4551 struct ieee80211_vif *vif,
4552 struct ieee80211_bss_conf *bss_conf,
4555 struct wl1271 *wl = hw->priv;
4556 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4557 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4560 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4561 wlvif->role_id, (int)changed);
4564 * make sure to cancel pending disconnections if our association
4567 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4568 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* drain queued frames before beaconing is disabled on an AP vif */
4570 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4571 !bss_conf->enable_beacon)
4572 wl1271_tx_flush(wl);
4574 mutex_lock(&wl->mutex);
4576 if (unlikely(wl->state != WLCORE_STATE_ON))
4579 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4582 ret = wl1271_ps_elp_wakeup(wl);
/* only touch the FW when the requested power actually differs */
4586 if ((changed & BSS_CHANGED_TXPOWER) &&
4587 bss_conf->txpower != wlvif->power_level) {
4589 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4593 wlvif->power_level = bss_conf->txpower;
4597 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4599 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4601 wl1271_ps_elp_sleep(wl);
4604 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx callback. The driver tracks channel context
 * assignment per vif (see assign_vif_chanctx below), so adding a context
 * only logs it; no hardware work is done here.
 */
4607 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4608 struct ieee80211_chanctx_conf *ctx)
4610 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4611 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4612 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx callback. Counterpart of add_chanctx above:
 * purely informational, only emits a debug trace.
 */
4616 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4617 struct ieee80211_chanctx_conf *ctx)
4619 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4620 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4621 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx callback. Walks every vif bound to this
 * context and, if the RADAR flag changed on an AP vif whose channel is
 * in the DFS "usable" state, starts CAC (radar detection) on it.
 */
4624 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4625 struct ieee80211_chanctx_conf *ctx,
4628 struct wl1271 *wl = hw->priv;
4629 struct wl12xx_vif *wlvif;
4631 int channel = ieee80211_frequency_to_channel(
4632 ctx->def.chan->center_freq);
4634 wl1271_debug(DEBUG_MAC80211,
4635 "mac80211 change chanctx %d (type %d) changed 0x%x",
4636 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4638 mutex_lock(&wl->mutex);
4640 ret = wl1271_ps_elp_wakeup(wl);
4644 wl12xx_for_each_wlvif(wl, wlvif) {
4645 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs bound to a different channel context */
4648 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4654 /* start radar if needed */
4655 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4656 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4657 ctx->radar_enabled && !wlvif->radar_enabled &&
4658 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4659 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4660 wlcore_hw_set_cac(wl, wlvif, true);
4661 wlvif->radar_enabled = true;
4665 wl1271_ps_elp_sleep(wl);
4667 mutex_unlock(&wl->mutex);
/*
 * mac80211 .assign_vif_chanctx callback. Records the context's band,
 * channel and channel type on the vif, re-derives the default rates for
 * the new band, and starts CAC when radar detection is required on a
 * DFS-usable channel.
 */
4670 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4671 struct ieee80211_vif *vif,
4672 struct ieee80211_chanctx_conf *ctx)
4674 struct wl1271 *wl = hw->priv;
4675 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4676 int channel = ieee80211_frequency_to_channel(
4677 ctx->def.chan->center_freq);
4680 wl1271_debug(DEBUG_MAC80211,
4681 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4682 wlvif->role_id, channel,
4683 cfg80211_get_chandef_type(&ctx->def),
4684 ctx->radar_enabled, ctx->def.chan->dfs_state);
4686 mutex_lock(&wl->mutex);
4688 if (unlikely(wl->state != WLCORE_STATE_ON))
4691 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4694 ret = wl1271_ps_elp_wakeup(wl);
/* remember the operating channel parameters on the vif */
4698 wlvif->band = ctx->def.chan->band;
4699 wlvif->channel = channel;
4700 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4702 /* update default rates according to the band */
4703 wl1271_set_band_rate(wl, wlvif);
4705 if (ctx->radar_enabled &&
4706 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4707 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4708 wlcore_hw_set_cac(wl, wlvif, true);
4709 wlvif->radar_enabled = true;
4712 wl1271_ps_elp_sleep(wl);
4714 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx callback. Flushes pending TX, then
 * under wl->mutex stops radar detection (CAC) if it was running on this
 * vif. The reverse of wlcore_op_assign_vif_chanctx above.
 */
4719 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4720 struct ieee80211_vif *vif,
4721 struct ieee80211_chanctx_conf *ctx)
4723 struct wl1271 *wl = hw->priv;
4724 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4727 wl1271_debug(DEBUG_MAC80211,
4728 "mac80211 unassign chanctx (role %d) %d (type %d)",
4730 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4731 cfg80211_get_chandef_type(&ctx->def));
/* flush before taking the mutex — wl1271_tx_flush takes it internally */
4733 wl1271_tx_flush(wl);
4735 mutex_lock(&wl->mutex);
4737 if (unlikely(wl->state != WLCORE_STATE_ON))
4740 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4743 ret = wl1271_ps_elp_wakeup(wl);
4747 if (wlvif->radar_enabled) {
4748 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4749 wlcore_hw_set_cac(wl, wlvif, false);
4750 wlvif->radar_enabled = false;
4753 wl1271_ps_elp_sleep(wl);
4755 mutex_unlock(&wl->mutex);
/*
 * Move a single (AP) vif to a new channel context: stop CAC if active,
 * adopt the new band/channel/type, and restart CAC if the new context
 * has radar detection enabled. Only AP roles are supported (WARNs
 * otherwise), and beaconing is expected to be disabled by the caller.
 */
4758 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4759 struct wl12xx_vif *wlvif,
4760 struct ieee80211_chanctx_conf *new_ctx)
4762 int channel = ieee80211_frequency_to_channel(
4763 new_ctx->def.chan->center_freq);
4765 wl1271_debug(DEBUG_MAC80211,
4766 "switch vif (role %d) %d -> %d chan_type: %d",
4767 wlvif->role_id, wlvif->channel, channel,
4768 cfg80211_get_chandef_type(&new_ctx->def));
4770 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4773 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4775 if (wlvif->radar_enabled) {
4776 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4777 wlcore_hw_set_cac(wl, wlvif, false);
4778 wlvif->radar_enabled = false;
4781 wlvif->band = new_ctx->def.chan->band;
4782 wlvif->channel = channel;
4783 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4785 /* start radar if needed */
4786 if (new_ctx->radar_enabled) {
4787 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4788 wlcore_hw_set_cac(wl, wlvif, true);
4789 wlvif->radar_enabled = true;
/*
 * mac80211 .switch_vif_chanctx callback. Wakes the chip and applies
 * __wlcore_switch_vif_chan() to each vif in the switch request.
 */
4796 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4797 struct ieee80211_vif_chanctx_switch *vifs,
4799 enum ieee80211_chanctx_switch_mode mode)
4801 struct wl1271 *wl = hw->priv;
4804 wl1271_debug(DEBUG_MAC80211,
4805 "mac80211 switch chanctx n_vifs %d mode %d",
4808 mutex_lock(&wl->mutex);
4810 ret = wl1271_ps_elp_wakeup(wl);
4814 for (i = 0; i < n_vifs; i++) {
4815 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4817 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4822 wl1271_ps_elp_sleep(wl);
4824 mutex_unlock(&wl->mutex);
/*
 * mac80211 .conf_tx callback. Programs the AC (EDCA) parameters and the
 * per-TID configuration for one TX queue, choosing UPSD-trigger or
 * legacy power-save scheme based on the queue's UAPSD setting.
 */
4829 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4830 struct ieee80211_vif *vif, u16 queue,
4831 const struct ieee80211_tx_queue_params *params)
4833 struct wl1271 *wl = hw->priv;
4834 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4838 mutex_lock(&wl->mutex);
4840 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4843 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4845 ps_scheme = CONF_PS_SCHEME_LEGACY;
4847 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4850 ret = wl1271_ps_elp_wakeup(wl);
4855 * the txop is configured in units of 32us by the mac80211,
/* hence the << 5 when converting txop for the FW */
4858 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4859 params->cw_min, params->cw_max,
4860 params->aifs, params->txop << 5);
4864 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4865 CONF_CHANNEL_TYPE_EDCF,
4866 wl1271_tx_get_queue(queue),
4867 ps_scheme, CONF_ACK_POLICY_LEGACY,
4871 wl1271_ps_elp_sleep(wl);
4874 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf callback. Reads the current TSF from the FW via the
 * ACX interface; returns ULLONG_MAX if the chip is off or the query
 * could not be made.
 */
4879 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4880 struct ieee80211_vif *vif)
4883 struct wl1271 *wl = hw->priv;
4884 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4885 u64 mactime = ULLONG_MAX;
4888 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4890 mutex_lock(&wl->mutex);
4892 if (unlikely(wl->state != WLCORE_STATE_ON))
4895 ret = wl1271_ps_elp_wakeup(wl);
4899 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4904 wl1271_ps_elp_sleep(wl);
4907 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey callback. Reports only the currently configured
 * channel; no per-channel statistics are collected from the FW here.
 */
4911 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4912 struct survey_info *survey)
4914 struct ieee80211_conf *conf = &hw->conf;
4919 survey->channel = conf->chandef.chan;
/*
 * Allocate a host link ID (HLID) for a new AP-mode station. Fails when
 * the AP station limit or the link table is exhausted. On success the
 * HLID is stored in the station's drv_priv, its saved security sequence
 * number is restored, and the active station count is bumped.
 */
4924 static int wl1271_allocate_sta(struct wl1271 *wl,
4925 struct wl12xx_vif *wlvif,
4926 struct ieee80211_sta *sta)
4928 struct wl1271_station *wl_sta;
4932 if (wl->active_sta_count >= wl->max_ap_stations) {
4933 wl1271_warning("could not allocate HLID - too much stations");
4937 wl_sta = (struct wl1271_station *)sta->drv_priv;
4938 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4940 wl1271_warning("could not allocate HLID - too many links");
4944 /* use the previous security seq, if this is a recovery/resume */
4945 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4947 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4948 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4949 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the per-vif map and
 * the PS bookkeeping bitmaps, save its last packet number for recovery,
 * and free the link. Rearm the TX watchdog once the last station goes.
 */
4953 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4955 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4958 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4959 __clear_bit(hlid, &wl->ap_ps_map);
4960 __clear_bit(hlid, &wl->ap_fw_ps_map);
4963 * save the last used PN in the private part of ieee80211_sta,
4964 * in case of recovery/suspend
4966 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4968 wl12xx_free_link(wl, wlvif, &hlid);
4969 wl->active_sta_count--;
4972 * rearm the tx watchdog when the last STA is freed - give the FW a
4973 * chance to return STA-buffered packets before complaining.
4975 if (wl->active_sta_count == 0)
4976 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID for it, then register it with
 * the FW via cmd_add_peer. The HLID is released again if the FW command
 * fails, so no link leaks on the error path.
 */
4979 static int wl12xx_sta_add(struct wl1271 *wl,
4980 struct wl12xx_vif *wlvif,
4981 struct ieee80211_sta *sta)
4983 struct wl1271_station *wl_sta;
4987 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4989 ret = wl1271_allocate_sta(wl, wlvif, sta);
4993 wl_sta = (struct wl1271_station *)sta->drv_priv;
4994 hlid = wl_sta->hlid;
4996 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4998 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: tell the FW to drop the peer, then free the
 * station's HLID. WARNs if the HLID was never set in the vif's map.
 */
5003 static int wl12xx_sta_remove(struct wl1271 *wl,
5004 struct wl12xx_vif *wlvif,
5005 struct ieee80211_sta *sta)
5007 struct wl1271_station *wl_sta;
5010 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5012 wl_sta = (struct wl1271_station *)sta->drv_priv;
5014 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5017 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5021 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's own role, but only when no
 * other role currently holds a ROC (the roc_map must be empty).
 */
5025 static void wlcore_roc_if_possible(struct wl1271 *wl,
5026 struct wl12xx_vif *wlvif)
5028 if (find_first_bit(wl->roc_map,
5029 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5032 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5035 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5039 * when wl_sta is NULL, we treat this call as if coming from a
5040 * pending auth reply.
5041 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection on an AP vif. On entry to the
 * connecting state (in_conn=true) the first such station triggers a ROC
 * so the FW stays on-channel; on exit, once no station is connecting
 * and no auth reply is pending, the ROC is cancelled.
 */
5044 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5045 struct wl1271_station *wl_sta, bool in_conn)
5048 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first connecting entity: grab the channel if nobody has a ROC */
5051 if (!wlvif->ap_pending_auth_reply &&
5052 !wlvif->inconn_count)
5053 wlcore_roc_if_possible(wl, wlvif);
5056 wl_sta->in_connection = true;
5057 wlvif->inconn_count++;
5059 wlvif->ap_pending_auth_reply = true;
5062 if (wl_sta && !wl_sta->in_connection)
5065 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5068 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5072 wl_sta->in_connection = false;
5073 wlvif->inconn_count--;
5075 wlvif->ap_pending_auth_reply = false;
/* last connecting entity left: release the ROC if we hold one */
5078 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5079 test_bit(wlvif->role_id, wl->roc_map))
5080 wl12xx_croc(wl, wlvif->role_id);
/*
 * Apply a mac80211 station state transition to the FW. Handles AP-mode
 * add/remove/authorize, STA-mode authorize/de-authorize, saving and
 * restoring packet sequence numbers across disassoc/assoc (suspend or
 * resume), and ROC bookkeeping around connection attempts.
 */
5084 static int wl12xx_update_sta_state(struct wl1271 *wl,
5085 struct wl12xx_vif *wlvif,
5086 struct ieee80211_sta *sta,
5087 enum ieee80211_sta_state old_state,
5088 enum ieee80211_sta_state new_state)
5090 struct wl1271_station *wl_sta;
5091 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5092 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5095 wl_sta = (struct wl1271_station *)sta->drv_priv;
5097 /* Add station (AP mode) */
5099 old_state == IEEE80211_STA_NOTEXIST &&
5100 new_state == IEEE80211_STA_NONE) {
5101 ret = wl12xx_sta_add(wl, wlvif, sta);
5105 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5108 /* Remove station (AP mode) */
5110 old_state == IEEE80211_STA_NONE &&
5111 new_state == IEEE80211_STA_NOTEXIST) {
5113 wl12xx_sta_remove(wl, wlvif, sta);
5115 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5118 /* Authorize station (AP mode) */
5120 new_state == IEEE80211_STA_AUTHORIZED) {
5121 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
/* authorized peers also get their HT capabilities programmed */
5125 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5130 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5133 /* Authorize station */
5135 new_state == IEEE80211_STA_AUTHORIZED) {
5136 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5137 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize (STA mode): drop the authorized/state-sent flags */
5143 old_state == IEEE80211_STA_AUTHORIZED &&
5144 new_state == IEEE80211_STA_ASSOC) {
5145 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5146 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5149 /* save seq number on disassoc (suspend) */
5151 old_state == IEEE80211_STA_ASSOC &&
5152 new_state == IEEE80211_STA_AUTH) {
5153 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5154 wlvif->total_freed_pkts = 0;
5157 /* restore seq number on assoc (resume) */
5159 old_state == IEEE80211_STA_AUTH &&
5160 new_state == IEEE80211_STA_ASSOC) {
5161 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5164 /* clear ROCs on failure or authorization */
5166 (new_state == IEEE80211_STA_AUTHORIZED ||
5167 new_state == IEEE80211_STA_NOTEXIST)) {
5168 if (test_bit(wlvif->role_id, wl->roc_map))
5169 wl12xx_croc(wl, wlvif->role_id);
/* a new STA-mode connection attempt grabs a ROC if none is active */
5173 old_state == IEEE80211_STA_NOTEXIST &&
5174 new_state == IEEE80211_STA_NONE) {
5175 if (find_first_bit(wl->roc_map,
5176 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5177 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5178 wl12xx_roc(wl, wlvif, wlvif->role_id,
5179 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state callback. Takes wl->mutex, wakes the chip and
 * delegates to wl12xx_update_sta_state(). For downward transitions
 * the result is forced to success so mac80211's teardown can proceed
 * even if the FW command failed (see the new_state < old_state check).
 */
5185 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5186 struct ieee80211_vif *vif,
5187 struct ieee80211_sta *sta,
5188 enum ieee80211_sta_state old_state,
5189 enum ieee80211_sta_state new_state)
5191 struct wl1271 *wl = hw->priv;
5192 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5195 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5196 sta->aid, old_state, new_state);
5198 mutex_lock(&wl->mutex);
5200 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5205 ret = wl1271_ps_elp_wakeup(wl);
5209 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5211 wl1271_ps_elp_sleep(wl);
5213 mutex_unlock(&wl->mutex);
5214 if (new_state < old_state)
/*
 * mac80211 .ampdu_action callback. Manages RX block-ack sessions in the
 * FW per link (HLID): starts a session if BA is allowed, the per-chip
 * session limit is not exceeded and no session exists for the TID;
 * stops one by clearing its bit in the link's ba_bitmap. All TX AMPDU
 * actions are owned by the FW and intentionally ignored here.
 */
5219 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5220 struct ieee80211_vif *vif,
5221 enum ieee80211_ampdu_mlme_action action,
5222 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5225 struct wl1271 *wl = hw->priv;
5226 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5228 u8 hlid, *ba_bitmap;
5230 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5233 /* sanity check - the fields in FW are only 8bits wide */
5234 if (WARN_ON(tid > 0xFF))
5237 mutex_lock(&wl->mutex);
5239 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the link: own hlid for STA, peer's hlid for AP */
5244 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5245 hlid = wlvif->sta.hlid;
5246 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5247 struct wl1271_station *wl_sta;
5249 wl_sta = (struct wl1271_station *)sta->drv_priv;
5250 hlid = wl_sta->hlid;
5256 ba_bitmap = &wl->links[hlid].ba_bitmap;
5258 ret = wl1271_ps_elp_wakeup(wl);
5262 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5266 case IEEE80211_AMPDU_RX_START:
5267 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5272 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5274 wl1271_error("exceeded max RX BA sessions");
5278 if (*ba_bitmap & BIT(tid)) {
5280 wl1271_error("cannot enable RX BA session on active "
5285 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5288 *ba_bitmap |= BIT(tid);
5289 wl->ba_rx_session_count++;
5293 case IEEE80211_AMPDU_RX_STOP:
5294 if (!(*ba_bitmap & BIT(tid))) {
5296 * this happens on reconfig - so only output a debug
5297 * message for now, and don't fail the function.
5299 wl1271_debug(DEBUG_MAC80211,
5300 "no active RX BA session on tid: %d",
5306 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5309 *ba_bitmap &= ~BIT(tid);
5310 wl->ba_rx_session_count--;
5315 * BA initiator (TX) session management is handled by the FW
5316 * independently. Falling through here on purpose for all TX
5317 * AMPDU commands.
5318 case IEEE80211_AMPDU_TX_START:
5319 case IEEE80211_AMPDU_TX_STOP_CONT:
5320 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5321 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5322 case IEEE80211_AMPDU_TX_OPERATIONAL:
5327 wl1271_error("Incorrect ampdu action id=%x\n", action);
5331 wl1271_ps_elp_sleep(wl);
5334 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask callback. Caches the per-band legacy rate
 * masks on the vif; for a not-yet-associated STA also pushes new rate
 * policies to the FW immediately so probe requests use the new rates.
 */
5339 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5340 struct ieee80211_vif *vif,
5341 const struct cfg80211_bitrate_mask *mask)
5343 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5344 struct wl1271 *wl = hw->priv;
5347 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5348 mask->control[NL80211_BAND_2GHZ].legacy,
5349 mask->control[NL80211_BAND_5GHZ].legacy);
5351 mutex_lock(&wl->mutex);
/* always record the masks, even when the chip is off */
5353 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5354 wlvif->bitrate_masks[i] =
5355 wl1271_tx_enabled_rates_get(wl,
5356 mask->control[i].legacy,
5359 if (unlikely(wl->state != WLCORE_STATE_ON))
5362 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5363 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5365 ret = wl1271_ps_elp_wakeup(wl);
5369 wl1271_set_band_rate(wl, wlvif);
5371 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5372 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5374 wl1271_ps_elp_sleep(wl);
5377 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch callback (STA side). Flushes TX, asks the
 * lower driver to perform the CSA, and schedules a watchdog work that
 * declares the switch failed 5 seconds after the expected switch time.
 * If the chip is already off, the switch is completed as failed.
 */
5382 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5383 struct ieee80211_vif *vif,
5384 struct ieee80211_channel_switch *ch_switch)
5386 struct wl1271 *wl = hw->priv;
5387 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5390 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5392 wl1271_tx_flush(wl);
5394 mutex_lock(&wl->mutex);
5396 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5397 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5398 ieee80211_chswitch_done(vif, false);
5400 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5404 ret = wl1271_ps_elp_wakeup(wl);
5408 /* TODO: change mac80211 to pass vif as param */
5410 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5411 unsigned long delay_usec;
5413 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5417 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5419 /* indicate failure 5 seconds after channel switch time */
5420 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5422 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5423 usecs_to_jiffies(delay_usec) +
5424 msecs_to_jiffies(5000));
5428 wl1271_ps_elp_sleep(wl);
5431 mutex_unlock(&wl->mutex);
/*
 * Fetch the vif's current beacon from mac80211 and locate the requested
 * information element in its variable part.
 * NOTE(review): the beacon skb appears to be obtained here but its
 * release is in elided lines — presumably freed by the caller or after
 * the (elided) NULL check; confirm against the full source.
 */
5434 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5435 struct wl12xx_vif *wlvif,
5438 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5439 struct sk_buff *beacon =
5440 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5445 return cfg80211_find_ie(eid,
5446 beacon->data + ieoffset,
5447 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the Channel Switch Announcement IE
 * in the vif's current beacon. &ie[2] skips the 2-byte IE header
 * (element ID + length) to reach the channel_sw_ie payload.
 */
5450 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5454 const struct ieee80211_channel_sw_ie *ie_csa;
5456 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5460 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5461 *csa_count = ie_csa->count;
/*
 * mac80211 .channel_switch_beacon callback (AP side). Builds a channel
 * switch request from the target chandef and the CSA count found in our
 * own beacon, then hands it to the lower driver's channel_switch op.
 */
5466 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5467 struct ieee80211_vif *vif,
5468 struct cfg80211_chan_def *chandef)
5470 struct wl1271 *wl = hw->priv;
5471 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5472 struct ieee80211_channel_switch ch_switch = {
5474 .chandef = *chandef,
5478 wl1271_debug(DEBUG_MAC80211,
5479 "mac80211 channel switch beacon (role %d)",
5482 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5484 wl1271_error("error getting beacon (for CSA counter)");
5488 mutex_lock(&wl->mutex);
5490 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5495 ret = wl1271_ps_elp_wakeup(wl);
5499 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5503 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5506 wl1271_ps_elp_sleep(wl);
5508 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush callback. Delegates to wl1271_tx_flush(); the queues
 * and drop arguments are not used by this driver.
 */
5511 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5512 u32 queues, bool drop)
5514 struct wl1271 *wl = hw->priv;
5516 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel callback. Only one ROC is supported at a
 * time: returns -EBUSY-style failure if a ROC vif or any roc_map bit is
 * already set. Otherwise starts the device role on the requested
 * channel and schedules roc_complete_work to expire after 'duration'.
 */
5519 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5520 struct ieee80211_vif *vif,
5521 struct ieee80211_channel *chan,
5523 enum ieee80211_roc_type type)
5525 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5526 struct wl1271 *wl = hw->priv;
5527 int channel, ret = 0;
5529 channel = ieee80211_frequency_to_channel(chan->center_freq);
5531 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5532 channel, wlvif->role_id);
5534 mutex_lock(&wl->mutex);
5536 if (unlikely(wl->state != WLCORE_STATE_ON))
5539 /* return EBUSY if we can't ROC right now */
5540 if (WARN_ON(wl->roc_vif ||
5541 find_first_bit(wl->roc_map,
5542 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5547 ret = wl1271_ps_elp_wakeup(wl);
5551 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5556 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5557 msecs_to_jiffies(duration));
5559 wl1271_ps_elp_sleep(wl);
5561 mutex_unlock(&wl->mutex);
/*
 * Finish a remain-on-channel: stop the device role of the ROC vif.
 * Returns early (no-op) if no ROC is pending or the vif is no longer
 * initialized. Caller holds wl->mutex with the FW awake.
 */
5565 static int __wlcore_roc_completed(struct wl1271 *wl)
5567 struct wl12xx_vif *wlvif;
5570 /* already completed */
5571 if (unlikely(!wl->roc_vif))
5574 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5576 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5579 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * checks the chip is on, wakes it, completes the ROC and sleeps again.
 */
5588 static int wlcore_roc_completed(struct wl1271 *wl)
5592 wl1271_debug(DEBUG_MAC80211, "roc complete");
5594 mutex_lock(&wl->mutex);
5596 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5601 ret = wl1271_ps_elp_wakeup(wl);
5605 ret = __wlcore_roc_completed(wl);
5607 wl1271_ps_elp_sleep(wl);
5609 mutex_unlock(&wl->mutex);
/*
 * Delayed work scheduled by wlcore_op_remain_on_channel(): completes
 * the ROC when its duration expires and notifies mac80211.
 */
5614 static void wlcore_roc_complete_work(struct work_struct *work)
5616 struct delayed_work *dwork;
5620 dwork = container_of(work, struct delayed_work, work);
5621 wl = container_of(dwork, struct wl1271, roc_complete_work);
5623 ret = wlcore_roc_completed(wl);
5625 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel callback. Flushes TX, cancels the
 * pending roc_complete_work (cancel, not flush — see comment below) and
 * completes the ROC synchronously.
 */
5628 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5630 struct wl1271 *wl = hw->priv;
5632 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5635 wl1271_tx_flush(wl);
5638 * we can't just flush_work here, because it might deadlock
5639 * (as we might get called from the same workqueue)
5641 cancel_delayed_work_sync(&wl->roc_complete_work);
5642 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update callback. Only bandwidth changes are of
 * interest; since this callback runs in atomic context, the new
 * bandwidth is stashed on the vif and a work item does the FW update.
 */
5647 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5648 struct ieee80211_vif *vif,
5649 struct ieee80211_sta *sta,
5652 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5654 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5656 if (!(changed & IEEE80211_RC_BW_CHANGED))
5659 /* this callback is atomic, so schedule a new work */
5660 wlvif->rc_update_bw = sta->bandwidth;
5661 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 .get_rssi callback. Queries the FW for the average RSSI of
 * the vif's connection via the ACX interface.
 */
5664 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5665 struct ieee80211_vif *vif,
5666 struct ieee80211_sta *sta,
5669 struct wl1271 *wl = hw->priv;
5670 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5673 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5675 mutex_lock(&wl->mutex);
5677 if (unlikely(wl->state != WLCORE_STATE_ON))
5680 ret = wl1271_ps_elp_wakeup(wl);
5684 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5689 wl1271_ps_elp_sleep(wl);
5692 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx_frames_pending callback. True when frames are waiting
 * either in the driver's TX queues or still owned by the FW.
 */
5697 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5699 struct wl1271 *wl = hw->priv;
5702 mutex_lock(&wl->mutex);
5704 if (unlikely(wl->state != WLCORE_STATE_ON))
5707 /* packets are considered pending if in the TX queue or the FW */
5708 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5710 mutex_unlock(&wl->mutex);
5715 /* can't be const, mac80211 writes to this */
/* 2.4 GHz legacy rate table: 11b rates (with short-preamble variants
 * above 1 Mbps) followed by the 11g OFDM rates, mapped to FW rate bits. */
5716 static struct ieee80211_rate wl1271_rates[] = {
5718 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5719 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5721 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5722 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5723 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5725 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5726 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5727 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5729 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5730 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5731 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5733 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5734 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5736 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5737 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5739 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5740 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5742 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5743 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5745 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5746 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5748 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5749 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5751 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5752 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5754 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5755 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5758 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel list (channels 1-14); TX power is capped at the
 * chip-wide WLCORE_MAX_TXPWR and trimmed later by regulatory rules. */
5759 static struct ieee80211_channel wl1271_channels[] = {
5760 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5761 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5762 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5763 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5764 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5765 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5766 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5767 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5768 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5769 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5770 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5771 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5772 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5773 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5776 /* can't be const, mac80211 writes to this */
5777 static struct ieee80211_supported_band wl1271_band_2ghz = {
5778 .channels = wl1271_channels,
5779 .n_channels = ARRAY_SIZE(wl1271_channels),
5780 .bitrates = wl1271_rates,
5781 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5784 /* 5 GHz data rates for WL1273 */
5785 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5787 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5788 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5790 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5791 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5793 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5794 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5796 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5797 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5799 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5800 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5802 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5803 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5805 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5806 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5808 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5809 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5812 /* 5 GHz band channels for WL1273 */
5813 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5814 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5815 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5816 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5817 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5818 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5819 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5820 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5821 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5822 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5828 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5829 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5830 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5847 static struct ieee80211_supported_band wl1271_band_5ghz = {
5848 .channels = wl1271_channels_5ghz,
5849 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5850 .bitrates = wl1271_rates_5ghz,
5851 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5854 static const struct ieee80211_ops wl1271_ops = {
5855 .start = wl1271_op_start,
5856 .stop = wlcore_op_stop,
5857 .add_interface = wl1271_op_add_interface,
5858 .remove_interface = wl1271_op_remove_interface,
5859 .change_interface = wl12xx_op_change_interface,
5861 .suspend = wl1271_op_suspend,
5862 .resume = wl1271_op_resume,
5864 .config = wl1271_op_config,
5865 .prepare_multicast = wl1271_op_prepare_multicast,
5866 .configure_filter = wl1271_op_configure_filter,
5868 .set_key = wlcore_op_set_key,
5869 .hw_scan = wl1271_op_hw_scan,
5870 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5871 .sched_scan_start = wl1271_op_sched_scan_start,
5872 .sched_scan_stop = wl1271_op_sched_scan_stop,
5873 .bss_info_changed = wl1271_op_bss_info_changed,
5874 .set_frag_threshold = wl1271_op_set_frag_threshold,
5875 .set_rts_threshold = wl1271_op_set_rts_threshold,
5876 .conf_tx = wl1271_op_conf_tx,
5877 .get_tsf = wl1271_op_get_tsf,
5878 .get_survey = wl1271_op_get_survey,
5879 .sta_state = wl12xx_op_sta_state,
5880 .ampdu_action = wl1271_op_ampdu_action,
5881 .tx_frames_pending = wl1271_tx_frames_pending,
5882 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5883 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5884 .channel_switch = wl12xx_op_channel_switch,
5885 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5886 .flush = wlcore_op_flush,
5887 .remain_on_channel = wlcore_op_remain_on_channel,
5888 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5889 .add_chanctx = wlcore_op_add_chanctx,
5890 .remove_chanctx = wlcore_op_remove_chanctx,
5891 .change_chanctx = wlcore_op_change_chanctx,
5892 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5893 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5894 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5895 .sta_rc_update = wlcore_op_sta_rc_update,
5896 .get_rssi = wlcore_op_get_rssi,
5897 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5901 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5907 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5908 wl1271_error("Illegal RX rate from HW: %d", rate);
5912 idx = wl->band_rate_to_idx[band][rate];
5913 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5914 wl1271_error("Unsupported RX rate from HW: %d", rate);
5921 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5925 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5928 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5929 wl1271_warning("NIC part of the MAC address wraps around!");
5931 for (i = 0; i < wl->num_mac_addr; i++) {
5932 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5933 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5934 wl->addresses[i].addr[2] = (u8) oui;
5935 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5936 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5937 wl->addresses[i].addr[5] = (u8) nic;
5941 /* we may be one address short at the most */
5942 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5945 * turn on the LAA bit in the first address and use it as
5948 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5949 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5950 memcpy(&wl->addresses[idx], &wl->addresses[0],
5951 sizeof(wl->addresses[0]));
5953 wl->addresses[idx].addr[0] |= BIT(1);
5956 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5957 wl->hw->wiphy->addresses = wl->addresses;
5960 static int wl12xx_get_hw_info(struct wl1271 *wl)
5964 ret = wl12xx_set_power_on(wl);
5968 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5972 wl->fuse_oui_addr = 0;
5973 wl->fuse_nic_addr = 0;
5975 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5979 if (wl->ops->get_mac)
5980 ret = wl->ops->get_mac(wl);
5983 wl1271_power_off(wl);
5987 static int wl1271_register_hw(struct wl1271 *wl)
5990 u32 oui_addr = 0, nic_addr = 0;
5992 if (wl->mac80211_registered)
5995 if (wl->nvs_len >= 12) {
5996 /* NOTE: The wl->nvs->nvs element must be first, in
5997 * order to simplify the casting, we assume it is at
5998 * the beginning of the wl->nvs structure.
6000 u8 *nvs_ptr = (u8 *)wl->nvs;
6003 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6005 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6008 /* if the MAC address is zeroed in the NVS derive from fuse */
6009 if (oui_addr == 0 && nic_addr == 0) {
6010 oui_addr = wl->fuse_oui_addr;
6011 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6012 nic_addr = wl->fuse_nic_addr + 1;
6015 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6017 ret = ieee80211_register_hw(wl->hw);
6019 wl1271_error("unable to register mac80211 hw: %d", ret);
6023 wl->mac80211_registered = true;
6025 wl1271_debugfs_init(wl);
6027 wl1271_notice("loaded");
6033 static void wl1271_unregister_hw(struct wl1271 *wl)
6036 wl1271_plt_stop(wl);
6038 ieee80211_unregister_hw(wl->hw);
6039 wl->mac80211_registered = false;
6043 static int wl1271_init_ieee80211(struct wl1271 *wl)
6046 static const u32 cipher_suites[] = {
6047 WLAN_CIPHER_SUITE_WEP40,
6048 WLAN_CIPHER_SUITE_WEP104,
6049 WLAN_CIPHER_SUITE_TKIP,
6050 WLAN_CIPHER_SUITE_CCMP,
6051 WL1271_CIPHER_SUITE_GEM,
6054 /* The tx descriptor buffer */
6055 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6057 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6058 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6061 /* FIXME: find a proper value */
6062 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6064 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
6065 IEEE80211_HW_SUPPORTS_PS |
6066 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
6067 IEEE80211_HW_SUPPORTS_UAPSD |
6068 IEEE80211_HW_HAS_RATE_CONTROL |
6069 IEEE80211_HW_CONNECTION_MONITOR |
6070 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
6071 IEEE80211_HW_SPECTRUM_MGMT |
6072 IEEE80211_HW_AP_LINK_PS |
6073 IEEE80211_HW_AMPDU_AGGREGATION |
6074 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
6075 IEEE80211_HW_QUEUE_CONTROL |
6076 IEEE80211_HW_CHANCTX_STA_CSA;
6078 wl->hw->wiphy->cipher_suites = cipher_suites;
6079 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6081 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6082 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
6083 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
6084 wl->hw->wiphy->max_scan_ssids = 1;
6085 wl->hw->wiphy->max_sched_scan_ssids = 16;
6086 wl->hw->wiphy->max_match_sets = 16;
6088 * Maximum length of elements in scanning probe request templates
6089 * should be the maximum length possible for a template, without
6090 * the IEEE80211 header of the template
6092 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6093 sizeof(struct ieee80211_header);
6095 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6096 sizeof(struct ieee80211_header);
6098 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6100 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6101 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6102 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6103 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6105 /* make sure all our channels fit in the scanned_ch bitmask */
6106 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6107 ARRAY_SIZE(wl1271_channels_5ghz) >
6108 WL1271_MAX_CHANNELS);
6110 * clear channel flags from the previous usage
6111 * and restore max_power & max_antenna_gain values.
6113 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6114 wl1271_band_2ghz.channels[i].flags = 0;
6115 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6116 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6119 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6120 wl1271_band_5ghz.channels[i].flags = 0;
6121 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6122 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6126 * We keep local copies of the band structs because we need to
6127 * modify them on a per-device basis.
6129 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6130 sizeof(wl1271_band_2ghz));
6131 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6132 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6133 sizeof(*wl->ht_cap));
6134 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6135 sizeof(wl1271_band_5ghz));
6136 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6137 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6138 sizeof(*wl->ht_cap));
6140 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6141 &wl->bands[IEEE80211_BAND_2GHZ];
6142 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6143 &wl->bands[IEEE80211_BAND_5GHZ];
6146 * allow 4 queues per mac address we support +
6147 * 1 cab queue per mac + one global offchannel Tx queue
6149 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6151 /* the last queue is the offchannel queue */
6152 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6153 wl->hw->max_rates = 1;
6155 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6157 /* the FW answers probe-requests in AP-mode */
6158 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6159 wl->hw->wiphy->probe_resp_offload =
6160 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6161 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6162 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6164 /* allowed interface combinations */
6165 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6166 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6168 /* register vendor commands */
6169 wlcore_set_vendor_commands(wl->hw->wiphy);
6171 SET_IEEE80211_DEV(wl->hw, wl->dev);
6173 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6174 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6176 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6181 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6184 struct ieee80211_hw *hw;
6189 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6191 wl1271_error("could not alloc ieee80211_hw");
6197 memset(wl, 0, sizeof(*wl));
6199 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6201 wl1271_error("could not alloc wl priv");
6203 goto err_priv_alloc;
6206 INIT_LIST_HEAD(&wl->wlvif_list);
6211 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6212 * we don't allocate any additional resource here, so that's fine.
6214 for (i = 0; i < NUM_TX_QUEUES; i++)
6215 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6216 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6218 skb_queue_head_init(&wl->deferred_rx_queue);
6219 skb_queue_head_init(&wl->deferred_tx_queue);
6221 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6222 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6223 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6224 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6225 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6226 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6227 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6229 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6230 if (!wl->freezable_wq) {
6237 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6238 wl->band = IEEE80211_BAND_2GHZ;
6239 wl->channel_type = NL80211_CHAN_NO_HT;
6241 wl->sg_enabled = true;
6242 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6243 wl->recovery_count = 0;
6246 wl->ap_fw_ps_map = 0;
6248 wl->platform_quirks = 0;
6249 wl->system_hlid = WL12XX_SYSTEM_HLID;
6250 wl->active_sta_count = 0;
6251 wl->active_link_count = 0;
6253 init_waitqueue_head(&wl->fwlog_waitq);
6255 /* The system link is always allocated */
6256 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6258 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6259 for (i = 0; i < wl->num_tx_desc; i++)
6260 wl->tx_frames[i] = NULL;
6262 spin_lock_init(&wl->wl_lock);
6264 wl->state = WLCORE_STATE_OFF;
6265 wl->fw_type = WL12XX_FW_TYPE_NONE;
6266 mutex_init(&wl->mutex);
6267 mutex_init(&wl->flush_mutex);
6268 init_completion(&wl->nvs_loading_complete);
6270 order = get_order(aggr_buf_size);
6271 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6272 if (!wl->aggr_buf) {
6276 wl->aggr_buf_size = aggr_buf_size;
6278 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6279 if (!wl->dummy_packet) {
6284 /* Allocate one page for the FW log */
6285 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6288 goto err_dummy_packet;
6291 wl->mbox_size = mbox_size;
6292 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6298 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6299 if (!wl->buffer_32) {
6310 free_page((unsigned long)wl->fwlog);
6313 dev_kfree_skb(wl->dummy_packet);
6316 free_pages((unsigned long)wl->aggr_buf, order);
6319 destroy_workqueue(wl->freezable_wq);
6322 wl1271_debugfs_exit(wl);
6326 ieee80211_free_hw(hw);
6330 return ERR_PTR(ret);
6332 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6334 int wlcore_free_hw(struct wl1271 *wl)
6336 /* Unblock any fwlog readers */
6337 mutex_lock(&wl->mutex);
6338 wl->fwlog_size = -1;
6339 wake_up_interruptible_all(&wl->fwlog_waitq);
6340 mutex_unlock(&wl->mutex);
6342 wlcore_sysfs_free(wl);
6344 kfree(wl->buffer_32);
6346 free_page((unsigned long)wl->fwlog);
6347 dev_kfree_skb(wl->dummy_packet);
6348 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6350 wl1271_debugfs_exit(wl);
6354 wl->fw_type = WL12XX_FW_TYPE_NONE;
6358 kfree(wl->raw_fw_status);
6359 kfree(wl->fw_status);
6360 kfree(wl->tx_res_if);
6361 destroy_workqueue(wl->freezable_wq);
6364 ieee80211_free_hw(wl->hw);
6368 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6371 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6372 .flags = WIPHY_WOWLAN_ANY,
6373 .n_patterns = WL1271_MAX_RX_FILTERS,
6374 .pattern_min_len = 1,
6375 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6379 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6381 return IRQ_WAKE_THREAD;
6384 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6386 struct wl1271 *wl = context;
6387 struct platform_device *pdev = wl->pdev;
6388 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6389 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6390 unsigned long irqflags;
6392 irq_handler_t hardirq_fn = NULL;
6395 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6397 wl1271_error("Could not allocate nvs data");
6400 wl->nvs_len = fw->size;
6402 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6408 ret = wl->ops->setup(wl);
6412 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6414 /* adjust some runtime configuration parameters */
6415 wlcore_adjust_conf(wl);
6417 wl->irq = platform_get_irq(pdev, 0);
6418 wl->platform_quirks = pdata->platform_quirks;
6419 wl->if_ops = pdev_data->if_ops;
6421 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6422 irqflags = IRQF_TRIGGER_RISING;
6423 hardirq_fn = wlcore_hardirq;
6425 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6428 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6429 irqflags, pdev->name, wl);
6431 wl1271_error("request_irq() failed: %d", ret);
6436 ret = enable_irq_wake(wl->irq);
6438 wl->irq_wake_enabled = true;
6439 device_init_wakeup(wl->dev, 1);
6440 if (pdata->pwr_in_suspend)
6441 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6444 disable_irq(wl->irq);
6446 ret = wl12xx_get_hw_info(wl);
6448 wl1271_error("couldn't get hw info");
6452 ret = wl->ops->identify_chip(wl);
6456 ret = wl1271_init_ieee80211(wl);
6460 ret = wl1271_register_hw(wl);
6464 ret = wlcore_sysfs_init(wl);
6468 wl->initialized = true;
6472 wl1271_unregister_hw(wl);
6475 free_irq(wl->irq, wl);
6481 release_firmware(fw);
6482 complete_all(&wl->nvs_loading_complete);
6485 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6489 if (!wl->ops || !wl->ptable)
6492 wl->dev = &pdev->dev;
6494 platform_set_drvdata(pdev, wl);
6496 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6497 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6500 wl1271_error("request_firmware_nowait failed: %d", ret);
6501 complete_all(&wl->nvs_loading_complete);
6506 EXPORT_SYMBOL_GPL(wlcore_probe);
6508 int wlcore_remove(struct platform_device *pdev)
6510 struct wl1271 *wl = platform_get_drvdata(pdev);
6512 wait_for_completion(&wl->nvs_loading_complete);
6513 if (!wl->initialized)
6516 if (wl->irq_wake_enabled) {
6517 device_init_wakeup(wl->dev, 0);
6518 disable_irq_wake(wl->irq);
6520 wl1271_unregister_hw(wl);
6521 free_irq(wl->irq, wl);
6526 EXPORT_SYMBOL_GPL(wlcore_remove);
6528 u32 wl12xx_debug_level = DEBUG_NONE;
6529 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6530 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6531 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6533 module_param_named(fwlog, fwlog_param, charp, 0);
6534 MODULE_PARM_DESC(fwlog,
6535 "FW logger options: continuous, ondemand, dbgpins or disable");
6537 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6538 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6540 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6541 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6543 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6544 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6546 MODULE_LICENSE("GPL");
6547 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6548 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6549 MODULE_FIRMWARE(WL12XX_NVS_NAME);