3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
/* How many times to retry booting the firmware before giving up. */
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. The sentinel values (-1 / NULL) mean
 * "not supplied on the command line"; wlcore_adjust_conf() below only
 * overrides the platform defaults when a parameter was explicitly set.
 * NOTE(review): this extraction has dropped interior lines throughout
 * the file (braces, returns, labels); code left byte-identical.
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the firmware that a STA interface has finished associating.
 * Only valid for STA-type vifs that are marked associated; the
 * test_and_set_bit() guard ensures the peer-state command is sent to
 * the FW at most once per association.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* already sent the peer state for this association - nothing to do */
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the requested DFS region on the
 * device struct and push the updated regulatory domain to the firmware.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming in the FW (via ACX) and mirror the
 * resulting state in the vif's RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex.
 */
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has beed changed or rx_streaming should be disabled
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/*
 * Re-enable streaming only when a non-zero period is configured, the
 * STA is associated, and either "always" is set or soft-gemini (BT
 * coexistence) is active; otherwise turn streaming off.
 */
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming for an associated STA vif and arm
 * the inactivity timer that will later disable it again.
 */
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
/* bail out if already started, not associated, or not wanted */
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
/*
 * Deferred work: disable RX streaming (queued by the inactivity timer
 * below). Mirrors the enable work but with enable == false.
 */
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
/*
 * RX-streaming inactivity timer callback: runs in timer (atomic)
 * context, so it only queues the disable work rather than touching
 * the FW directly.
 */
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
/* restart the timeout window from now */
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred work: propagate a station rate-control update to the
 * chip-specific handler. Skipped unless the core is fully ON.
 */
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no FW Tx completion was seen for the
 * configured timeout. Legitimate stall reasons (ROC in progress, scan
 * in progress, AP buffering frames for sleeping stations) just re-arm
 * the watchdog; otherwise the FW is assumed stuck and recovery starts.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation found - declare the FW stuck */
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Apply the optional module parameters (declared at the top of this
 * file) on top of the platform/default configuration in wl->conf.
 * Parameters left at their sentinel values are ignored.
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
/* only accept a value within the FW-supported block range */
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* map the fwlog_param string onto mode/output settings */
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link power-save regulation for AP mode: start host-level PS for
 * a station that the FW reports asleep with many queued packets, and
 * end it when the station wakes or its queue drains.
 */
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
/* fw_ps reflects the FW's PS bit for this link (hlid) */
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW power-save bitmap from the FW status
 * and re-regulate PS for every station link on this AP vif.
 */
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
/* walk every allocated station link on this vif */
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts);
/*
 * Read and decode the FW status area: raw-read it from the chip,
 * convert to host layout, then update the driver's Tx accounting
 * (per-queue and per-link freed-packet counters, total freed blocks,
 * available blocks), re-arm/cancel the Tx watchdog accordingly,
 * refresh AP link PS status, and update the host-FW time offset.
 */
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
/* the 32-bit counter wrapped; account for the wrap */
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: received frames go up
 * the Rx path, completed transmissions are reported as Tx status.
 */
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until the Rx
 * queue stays empty (new frames may be deferred while we flush).
 */
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on interrupt-service iterations per invocation. */
526 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop; called with wl->mutex held. Reads the
 * FW status, then dispatches on the pending interrupt bits: watchdog
 * (HW/SW) triggers recovery, DATA drives the Rx/Tx paths, EVENT_A/B
 * run the event handlers. Loops while more interrupts are pending, up
 * to WL1271_IRQ_MAX_LOOPS (single pass for edge-triggered IRQs).
 */
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup if one is
 * waiting, defers the work when the device is suspended (marking it
 * pending and disabling the IRQ line), and otherwise services the
 * interrupt under wl->mutex, queueing recovery on failure and Tx work
 * if frames are still pending afterwards.
 */
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
/*
 * Accumulator for counting active interfaces; also records whether the
 * "current" vif of interest is among the running ones.
 */
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
709 bool cur_vif_running;
/* Iterator callback for ieee80211_iterate_active_interfaces(). */
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
/* iterate all active interfaces, including those not yet resumed */
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
/*
 * Pick the appropriate firmware image (PLT, multi-role, or single-role
 * based on the cached vif count), fetch it via request_firmware(), and
 * copy it into a vmalloc'ed buffer at wl->fw. No-op when the right
 * image type is already loaded.
 */
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
/* correct image already in memory - nothing to do */
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = request_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* mark the old image invalid before replacing the buffer */
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
/*
 * Kick off FW recovery: move the core to RESTARTING, wake the chip,
 * mask further interrupts and queue the recovery work. Guarded by the
 * ON-state check so a recovery in progress is not restarted.
 */
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
/* warn on unexpected (non-intended) recoveries */
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to the wl->fwlog buffer
 * (capped at PAGE_SIZE total); returns the number of bytes copied.
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
/*
 * On a FW panic, walk the firmware's linked list of log memory blocks
 * and copy their contents into the host fwlog buffer, temporarily
 * re-pointing the chip partition at each block. Wakes any reader
 * sleeping on fwlog_waitq when done and restores the old partition.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
/* continuous mode prepends an rx descriptor to each block */
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
/* let sysfs readers blocked on the fwlog entry proceed */
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
/*
 * Save a station's total freed-packets counter (used as the Tx
 * sequence-number base) into its driver-private data. During recovery
 * the counter is padded to account for packets that may have been
 * transmitted but not yet reported by the FW; GEM cipher uses a larger
 * padding value.
 */
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based variant of wlcore_save_freed_pkts(): look up the
 * mac80211 station by MAC address on the given vif and, if found,
 * save its freed-packets counter. Warns on invalid hlid/address.
 */
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state at recovery time: FW version, the FW program
 * counter and interrupt status registers (read via the BOOT partition),
 * and a running recovery count. Restores the WORK partition afterwards.
 */
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work: dump the FW panic log and recovery diagnostics (for
 * unintended recoveries), optionally BUG per module config, then tear
 * down all interfaces (preserving STA Tx sequence numbers), stop the
 * core and ask mac80211 to restart the hardware. Honors the
 * "no_recovery" configuration by leaving the FW stuck intentionally.
 */
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
/* keep Tx sequence numbers monotonic across the restart */
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP by writing WAKE_UP to the ELP control reg. */
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status and Tx-result buffers. On failure the
 * already-allocated buffers are freed (error path visible at the end).
 */
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* error path: release whatever was successfully allocated */
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
/*
 * Power-on sequence: settle delay, power on, post-power-on delay, I/O
 * reset, switch to the BOOT partition and wake the FW from ELP.
 * Powers the chip back off on failure.
 */
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1093 wl1271_power_off(wl);
/*
 * Bring the chip up for normal or PLT operation: power on, configure
 * the bus block size (clearing the alignment quirk if unsupported),
 * allocate driver buffers and fetch the appropriate firmware image.
 */
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl);
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line testing) mode: only allowed from the OFF
 * state. Wakes the chip (retrying up to WL1271_BOOT_RETRIES times),
 * runs chip-specific PLT init unless only CHIP_AWAKE was requested,
 * and records the FW version in the wiphy on success.
 */
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
/* human-readable names for the plt_mode values, used in the notice */
1138 static const char* const PLT_MODE[] = {
1147 mutex_lock(&wl->mutex);
1149 wl1271_notice("power up");
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1158 /* Indicate to lower levels that we are now in PLT mode */
1160 wl->plt_mode = plt_mode;
1164 ret = wl12xx_chip_wakeup(wl, true);
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1177 wl->chip.fw_ver_str);
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
/* retry/failure path: power off and clear PLT mode */
1187 wl1271_power_off(wl);
1191 wl->plt_mode = PLT_OFF;
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1196 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts first (so the handler cannot race
 * the state change), flush/cancel all outstanding work, then power the
 * chip off and mark the core OFF.
 */
1201 int wl1271_plt_stop(struct wl1271 *wl)
1205 wl1271_notice("power down");
1208 * Interrupts must be disabled before setting the state to OFF.
1209 * Otherwise, the interrupt handler might be called and exit without
1210 * reading the interrupt status.
1212 wlcore_disable_interrupts(wl);
1213 mutex_lock(&wl->mutex);
1215 mutex_unlock(&wl->mutex);
1218 * This will not necessarily enable interrupts as interrupts
1219 * may have been disabled when op_stop was called. It will,
1220 * however, balance the above call to disable_interrupts().
1222 wlcore_enable_interrupts(wl);
1224 wl1271_error("cannot power down because not in PLT "
1225 "state: %d", wl->state);
1230 mutex_unlock(&wl->mutex);
/* make sure no deferred/queued work runs against a powered-off chip */
1232 wl1271_flush_deferred_work(wl);
1233 cancel_work_sync(&wl->netstack_work);
1234 cancel_work_sync(&wl->recovery_work);
1235 cancel_delayed_work_sync(&wl->elp_work);
1236 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1238 mutex_lock(&wl->mutex);
1239 wl1271_power_off(wl);
1241 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 wl->state = WLCORE_STATE_OFF;
1244 wl->plt_mode = PLT_OFF;
1246 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a hardware queue and link id
 * (hlid), drops it when there is no vif / invalid link / hard-stopped
 * queue, otherwise enqueues it on the per-link queue, applies the
 * high-watermark soft stop, and schedules the Tx work unless Tx is
 * already being handled (FW busy or TX_PENDING set by the IRQ path).
 */
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 struct ieee80211_tx_control *control,
1254 struct sk_buff *skb)
1256 struct wl1271 *wl = hw->priv;
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ieee80211_vif *vif = info->control.vif;
1259 struct wl12xx_vif *wlvif = NULL;
1260 unsigned long flags;
1265 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 ieee80211_free_txskb(hw, skb);
1270 wlvif = wl12xx_vif_to_data(vif);
1271 mapping = skb_get_queue_mapping(skb);
1272 q = wl1271_tx_get_queue(mapping);
1274 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1276 spin_lock_irqsave(&wl->wl_lock, flags);
1279 * drop the packet if the link is invalid or the queue is stopped
1280 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 * allow these packets through.
1283 if (hlid == WL12XX_INVALID_LINK_ID ||
1284 (!test_bit(hlid, wlvif->links_map)) ||
1285 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 ieee80211_free_txskb(hw, skb);
1293 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1295 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1297 wl->tx_queue_count[q]++;
1298 wlvif->tx_queue_count[q]++;
1301 * The workqueue is slow to process the tx_queue and we need stop
1302 * the queue here, otherwise the queue will get too long.
1304 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 wlcore_stop_queue_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 * The chip specific setup must run before the first TX packet -
1314 * before that, the tx_work will not be initialized!
1317 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 ieee80211_queue_work(wl->hw, &wl->tx_work);
1322 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requested (it uses it to
 * reclaim RX memory blocks). Skipped if one is already pending; when
 * the FW Tx path is idle the Tx work runs inline to deliver it asap.
 */
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1327 unsigned long flags;
1330 /* no need to queue a new dummy packet if one is already pending */
1331 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1334 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1336 spin_lock_irqsave(&wl->wl_lock, flags);
1337 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 wl->tx_queue_count[q]++;
1339 spin_unlock_irqrestore(&wl->wl_lock, flags);
1341 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 return wlcore_tx_work_locked(wl);
1346 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 * interrupt handler function
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Allocate and pre-fill the dummy packet skb: a zeroed null-data frame
 * (FTYPE_DATA | STYPE_NULLFUNC | FCTL_TODS) padded out so the total,
 * including the hw Tx descriptor, equals TOTAL_TX_DUMMY_PACKET_SIZE.
 * Returns NULL on allocation failure.
 */
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1370 wl1271_warning("Failed to allocate a dummy packet skb");
/* leave headroom for the hardware Tx descriptor */
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN wake-up pattern fits within the FW RX-filter
 * limits: at most WL1271_RX_FILTER_MAX_FIELDS segments ("fields") and a
 * bounded total flattened-fields size.
 * NOTE(review): extraction dropped several lines of this function (the
 * return statements and parts of the segment-scanning state machine);
 * the visible lines are kept byte-identical.
 */
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1399 int num_fields = 0, in_field = 0, fields_size = 0;
1400 int i, pattern_len = 0;
/* a pattern without a mask cannot be segmented into filter fields */
1403 wl1271_warning("No mask in WoWLAN pattern");
1408 * The pattern is broken up into segments of bytes at different offsets
1409 * that need to be checked by the FW filter. Each segment is called
1410 * a field in the FW API. We verify that the total number of fields
1411 * required for this pattern won't exceed FW limits (8)
1412 * as well as the total fields buffer won't exceed the FW limit.
1413 * Note that if there's a pattern which crosses Ethernet/IP header
1414 * boundary a new field is required.
1416 for (i = 0; i < p->pattern_len; i++) {
1417 if (test_bit(i, (unsigned long *)p->mask)) {
/* crossing the Ethernet/IP header boundary forces a new field */
1422 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1424 fields_size += pattern_len +
1425 RX_FILTER_FIELD_OVERHEAD;
1433 fields_size += pattern_len +
1434 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1441 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 wl1271_warning("RX Filter too complex. Too many segments");
1450 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 wl1271_warning("RX filter pattern is too big");
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 u16 offset, u8 flags,
1478 const u8 *pattern, u8 len)
1480 struct wl12xx_rx_filter_field *field;
1482 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 wl1271_warning("Max fields per RX filter. can't alloc another");
1487 field = &filter->fields[filter->num_fields];
1489 field->pattern = kzalloc(len, GFP_KERNEL);
1490 if (!field->pattern) {
1491 wl1271_warning("Failed to allocate RX filter pattern");
1495 filter->num_fields++;
1497 field->offset = cpu_to_le16(offset);
1498 field->flags = flags;
1500 memcpy(field->pattern, pattern, len);
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1507 int i, fields_size = 0;
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 struct wl12xx_rx_filter_field *field;
1523 for (i = 0; i < filter->num_fields; i++) {
1524 field = (struct wl12xx_rx_filter_field *)buf;
1526 field->offset = filter->fields[i].offset;
1527 field->flags = filter->fields[i].flags;
1528 field->len = filter->fields[i].len;
1530 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 buf += sizeof(struct wl12xx_rx_filter_field) -
1532 sizeof(u8 *) + field->len;
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN packet pattern into an RX filter
 * (allocated here, returned through @f; caller frees with
 * wl1271_rx_filter_free()).  Masked byte runs become filter fields,
 * split at the Ethernet/IP header boundary with the matching
 * ETHERNET_HEADER / IP_HEADER flag.
 * NOTE(review): extraction dropped lines of this function (loop
 * increments, error paths and the final returns); visible lines are
 * kept byte-identical.
 */
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 struct wl12xx_rx_filter **f)
1545 struct wl12xx_rx_filter *filter;
1549 filter = wl1271_rx_filter_alloc();
1551 wl1271_warning("Failed to alloc rx filter");
/* scan for the next masked (significant) byte run */
1557 while (i < p->pattern_len) {
1558 if (!test_bit(i, (unsigned long *)p->mask)) {
1563 for (j = i; j < p->pattern_len; j++) {
1564 if (!test_bit(j, (unsigned long *)p->mask))
/* a run crossing the ETH/IP boundary is split into two fields */
1567 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1574 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields use offsets relative to the IP header start */
1576 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1582 ret = wl1271_rx_filter_alloc_field(filter,
1585 &p->pattern[i], len);
/* matching packets wake the host rather than being dropped */
1592 filter->action = FILTER_SIGNAL;
/* error path: free the partially built filter */
1598 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters for WoWLAN suspend.  With no patterns (or
 * wow->any) all filters are cleared and the default action stays
 * FILTER_SIGNAL; otherwise each validated pattern becomes an enabled
 * filter and the default action becomes FILTER_DROP so only matching
 * packets wake the host.
 * NOTE(review): extraction dropped the error-check/goto lines between
 * the visible calls; visible lines are kept byte-identical.
 */
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 struct cfg80211_wowlan *wow)
/* no wake-up patterns: reset filtering to signal-everything */
1609 if (!wow || wow->any || !wow->n_patterns) {
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1615 ret = wl1271_rx_filter_clear_all(wl);
1622 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1625 /* Validate all incoming patterns before clearing current FW state */
1626 for (i = 0; i < wow->n_patterns; i++) {
1627 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1629 wl1271_warning("Bad wowlan pattern %d", i);
1634 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 ret = wl1271_rx_filter_clear_all(wl);
1642 /* Translate WoWLAN patterns into filters */
1643 for (i = 0; i < wow->n_patterns; i++) {
1644 struct cfg80211_pkt_pattern *p;
1645 struct wl12xx_rx_filter *filter = NULL;
1647 p = &wow->patterns[i];
1649 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1651 wl1271_warning("Failed to create an RX filter from "
1652 "wowlan pattern %d", i);
1656 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the local filter is always freed */
1658 wl1271_rx_filter_free(filter);
/* non-matching packets are dropped while suspended */
1663 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: program WoWLAN filters and,
 * if the suspend wake-up conditions differ from the runtime ones,
 * switch the FW wake-up event / listen interval.
 * NOTE(review): extraction dropped the error-check and out-label lines;
 * visible lines are kept byte-identical.
 */
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
/* nothing to configure for an unassociated STA */
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1678 ret = wl1271_configure_wowlan(wl, wow);
/* skip reprogramming when suspend conditions equal runtime ones */
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Suspend-time configuration for an AP vif: enable beacon filtering and
 * program WoWLAN filters.  A non-started AP needs no configuration.
 * NOTE(review): extraction dropped the error-check/return lines;
 * visible lines are kept byte-identical.
 */
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 ret = wl1271_configure_wowlan(wl, wow);
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration on resume: clear WoWLAN filters,
 * restore runtime wake-up conditions (STA) and disable beacon filtering
 * (AP).  Only associated STAs / started APs need any work.
 * NOTE(review): extraction dropped the error-check and is_ap/is_sta
 * branch lines; visible lines are kept byte-identical.
 */
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1735 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1738 if ((!is_ap) && (!is_sta))
1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow clears all RX filters and restores FILTER_SIGNAL */
1745 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore if suspend conditions equal runtime ones */
1748 if ((wl->conf.conn.suspend_wake_up_event ==
1749 wl->conf.conn.wake_up_event) &&
1750 (wl->conf.conn.suspend_listen_interval ==
1751 wl->conf.conn.listen_interval))
1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 wl->conf.conn.wake_up_event,
1756 wl->conf.conn.listen_interval);
1759 wl1271_error("resume: wake up conditions failed: %d",
/* AP path: re-enable beacon delivery to the host */
1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback: flush TX, wake the chip, configure WoWLAN
 * per vif, quiesce FW notifications, then drain/disable all deferred
 * work so nothing touches the bus while suspended.
 * NOTE(review): extraction dropped error-handling/goto lines and some
 * locals; visible lines are kept byte-identical.
 */
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 struct cfg80211_wowlan *wow)
1770 struct wl1271 *wl = hw->priv;
1771 struct wl12xx_vif *wlvif;
1774 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 /* we want to perform the recovery before suspending */
1778 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 wl1271_warning("postponing suspend to perform recovery");
1783 wl1271_tx_flush(wl);
1785 mutex_lock(&wl->mutex);
1787 ret = wl1271_ps_elp_wakeup(wl);
1791 wl->wow_enabled = true;
1792 wl12xx_for_each_wlvif(wl, wlvif) {
1793 ret = wl1271_configure_suspend(wl, wlvif, wow);
1795 mutex_unlock(&wl->mutex);
1796 wl1271_warning("couldn't prepare device to suspend");
1801 /* disable fast link flow control notifications from FW */
1802 ret = wlcore_hw_interrupt_notify(wl, false);
1806 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1807 ret = wlcore_hw_rx_ba_filter(wl,
1808 !!wl->conf.conn.suspend_rx_ba_activity);
1813 wl1271_ps_elp_sleep(wl);
1814 mutex_unlock(&wl->mutex);
1817 wl1271_warning("couldn't prepare device to suspend");
1821 /* flush any remaining work */
1822 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1825 * disable and re-enable interrupts in order to flush
1828 wlcore_disable_interrupts(wl);
1831 * set suspended flag to avoid triggering a new threaded_irq
1832 * work. no need for spinlock as interrupts are disabled.
1834 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1836 wlcore_enable_interrupts(wl);
1837 flush_work(&wl->tx_work);
1838 flush_delayed_work(&wl->elp_work);
1841 * Cancel the watchdog even if above tx_flush failed. We will detect
1842 * it on resume anyway.
1844 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume callback: re-enable irq_work queuing, run any
 * postponed IRQ work (or queue a forgotten recovery), then restore the
 * per-vif runtime configuration and FW notifications.
 * NOTE(review): extraction dropped error-handling/goto lines and some
 * locals; visible lines are kept byte-identical.
 */
1849 static int wl1271_op_resume(struct ieee80211_hw *hw)
1851 struct wl1271 *wl = hw->priv;
1852 struct wl12xx_vif *wlvif;
1853 unsigned long flags;
1854 bool run_irq_work = false, pending_recovery;
1857 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1859 WARN_ON(!wl->wow_enabled);
1862 * re-enable irq_work enqueuing, and call irq_work directly if
1863 * there is a pending work.
1865 spin_lock_irqsave(&wl->wl_lock, flags);
1866 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1867 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1868 run_irq_work = true;
1869 spin_unlock_irqrestore(&wl->wl_lock, flags);
1871 mutex_lock(&wl->mutex);
1873 /* test the recovery flag before calling any SDIO functions */
1874 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1878 wl1271_debug(DEBUG_MAC80211,
1879 "run postponed irq_work directly");
1881 /* don't talk to the HW if recovery is pending */
1882 if (!pending_recovery) {
1883 ret = wlcore_irq_locked(wl);
/* a failing IRQ pass during resume escalates to full recovery */
1885 wl12xx_queue_recovery_work(wl);
1888 wlcore_enable_interrupts(wl);
1891 if (pending_recovery) {
1892 wl1271_warning("queuing forgotten recovery on resume");
1893 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1897 ret = wl1271_ps_elp_wakeup(wl);
1901 wl12xx_for_each_wlvif(wl, wlvif) {
1902 wl1271_configure_resume(wl, wlvif);
/* re-enable fast link flow control notifications from FW */
1905 ret = wlcore_hw_interrupt_notify(wl, true);
1909 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1910 ret = wlcore_hw_rx_ba_filter(wl, false);
1915 wl1271_ps_elp_sleep(wl);
1918 wl->wow_enabled = false;
1921 * Set a flag to re-init the watchdog on the first Tx after resume.
1922 * That way we avoid possible conditions where Tx-complete interrupts
1923 * fail to arrive and we perform a spurious recovery.
1925 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1926 mutex_unlock(&wl->mutex);
1932 static int wl1271_op_start(struct ieee80211_hw *hw)
1934 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1937 * We have to delay the booting of the hardware because
1938 * we need to know the local MAC address before downloading and
1939 * initializing the firmware. The MAC address cannot be changed
1940 * after boot, and without the proper MAC address, the firmware
1941 * will not function properly.
1943 * The MAC address is first known when the corresponding interface
1944 * is added. That is where we will initialize the hardware.
/*
 * Core teardown, called with wl->mutex held: mark the device OFF,
 * disable IRQs, drain all work items (briefly dropping the mutex to
 * avoid deadlocking against them), power off, and reset every piece of
 * driver state to its post-probe defaults.
 * NOTE(review): extraction dropped braces, labels and a few statements;
 * visible lines are kept byte-identical.
 */
1950 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance the interrupt-disable from recovery */
1954 if (wl->state == WLCORE_STATE_OFF) {
1955 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1957 wlcore_enable_interrupts(wl);
1963 * this must be before the cancel_work calls below, so that the work
1964 * functions don't perform further work.
1966 wl->state = WLCORE_STATE_OFF;
1969 * Use the nosync variant to disable interrupts, so the mutex could be
1970 * held while doing so without deadlocking.
1972 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while flushing work that may itself take it */
1974 mutex_unlock(&wl->mutex);
1976 wlcore_synchronize_interrupts(wl);
1977 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1978 cancel_work_sync(&wl->recovery_work);
1979 wl1271_flush_deferred_work(wl);
1980 cancel_delayed_work_sync(&wl->scan_complete_work);
1981 cancel_work_sync(&wl->netstack_work);
1982 cancel_work_sync(&wl->tx_work);
1983 cancel_delayed_work_sync(&wl->elp_work);
1984 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1986 /* let's notify MAC80211 about the remaining pending TX frames */
1987 mutex_lock(&wl->mutex);
1988 wl12xx_tx_reset(wl);
1990 wl1271_power_off(wl);
1992 * In case a recovery was scheduled, interrupts were disabled to avoid
1993 * an interrupt storm. Now that the power is down, it is safe to
1994 * re-enable interrupts to balance the disable depth
1996 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1997 wlcore_enable_interrupts(wl);
/* reset global state to post-probe defaults */
1999 wl->band = IEEE80211_BAND_2GHZ;
2002 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2003 wl->channel_type = NL80211_CHAN_NO_HT;
2004 wl->tx_blocks_available = 0;
2005 wl->tx_allocated_blocks = 0;
2006 wl->tx_results_count = 0;
2007 wl->tx_packets_count = 0;
2008 wl->time_offset = 0;
2009 wl->ap_fw_ps_map = 0;
2011 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2012 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2013 memset(wl->links_map, 0, sizeof(wl->links_map));
2014 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2015 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2016 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2017 wl->active_sta_count = 0;
2018 wl->active_link_count = 0;
2020 /* The system link is always allocated */
2021 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2022 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2023 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2026 * this is performed after the cancel_work calls and the associated
2027 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2028 * get executed before all these vars have been reset.
2032 wl->tx_blocks_freed = 0;
2034 for (i = 0; i < NUM_TX_QUEUES; i++) {
2035 wl->tx_pkts_freed[i] = 0;
2036 wl->tx_allocated_pkts[i] = 0;
2039 wl1271_debugfs_reset(wl);
/* free per-boot buffers; reallocated on next firmware init */
2041 kfree(wl->raw_fw_status);
2042 wl->raw_fw_status = NULL;
2043 kfree(wl->fw_status);
2044 wl->fw_status = NULL;
2045 kfree(wl->tx_res_if);
2046 wl->tx_res_if = NULL;
2047 kfree(wl->target_mem_map);
2048 wl->target_mem_map = NULL;
2051 * FW channels must be re-calibrated after recovery,
2052 * save current Reg-Domain channel configuration and clear it.
2054 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2055 sizeof(wl->reg_ch_conf_pending));
2056 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2059 static void wlcore_op_stop(struct ieee80211_hw *hw)
2061 struct wl1271 *wl = hw->priv;
2063 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2065 mutex_lock(&wl->mutex);
2067 wlcore_op_stop_locked(wl);
2069 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler that fires when a channel switch did not
 * complete in time: report failure to mac80211 and tell the FW to stop
 * the switch.
 * NOTE(review): extraction dropped the wl assignment, error checks and
 * the out labels; visible lines are kept byte-identical.
 */
2072 static void wlcore_channel_switch_work(struct work_struct *work)
2074 struct delayed_work *dwork;
2076 struct ieee80211_vif *vif;
2077 struct wl12xx_vif *wlvif;
2080 dwork = container_of(work, struct delayed_work, work);
2081 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2084 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2086 mutex_lock(&wl->mutex);
2088 if (unlikely(wl->state != WLCORE_STATE_ON))
2091 /* check the channel switch is still ongoing */
2092 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2095 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = channel switch failed */
2096 ieee80211_chswitch_done(vif, false);
2098 ret = wl1271_ps_elp_wakeup(wl);
2102 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2104 wl1271_ps_elp_sleep(wl);
2106 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler for a FW-reported connection loss: if the vif is
 * still associated when the timer expires, report the loss to mac80211.
 * NOTE(review): extraction dropped the wl assignment and out labels;
 * visible lines are kept byte-identical.
 */
2109 static void wlcore_connection_loss_work(struct work_struct *work)
2111 struct delayed_work *dwork;
2113 struct ieee80211_vif *vif;
2114 struct wl12xx_vif *wlvif;
2116 dwork = container_of(work, struct delayed_work, work);
2117 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2120 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2122 mutex_lock(&wl->mutex);
2124 if (unlikely(wl->state != WLCORE_STATE_ON))
2127 /* Call mac80211 connection loss */
2128 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2131 vif = wl12xx_wlvif_to_vif(wlvif);
2132 ieee80211_connection_loss(vif);
2134 mutex_unlock(&wl->mutex);
/*
 * Delayed-work handler that cancels the pending-auth ROC if no auth
 * reply arrived within WLCORE_PEND_AUTH_ROC_TIMEOUT.
 * NOTE(review): extraction dropped the wl assignment, error checks and
 * out labels; visible lines are kept byte-identical.
 */
2137 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2139 struct delayed_work *dwork;
2141 struct wl12xx_vif *wlvif;
2142 unsigned long time_spare;
2145 dwork = container_of(work, struct delayed_work, work);
2146 wlvif = container_of(dwork, struct wl12xx_vif,
2147 pending_auth_complete_work);
2150 mutex_lock(&wl->mutex);
2152 if (unlikely(wl->state != WLCORE_STATE_ON))
2156 * Make sure a second really passed since the last auth reply. Maybe
2157 * a second auth reply arrived while we were stuck on the mutex.
2158 * Check for a little less than the timeout to protect from scheduler
2161 time_spare = jiffies +
2162 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
/* a recent reply re-arms the work; bail out instead of cancelling */
2163 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2166 ret = wl1271_ps_elp_wakeup(wl);
2170 /* cancel the ROC if active */
2171 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2173 wl1271_ps_elp_sleep(wl);
2175 mutex_unlock(&wl->mutex);
2178 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2180 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2181 WL12XX_MAX_RATE_POLICIES);
2182 if (policy >= WL12XX_MAX_RATE_POLICIES)
2185 __set_bit(policy, wl->rate_policies_map);
2190 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2192 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2195 __clear_bit(*idx, wl->rate_policies_map);
2196 *idx = WL12XX_MAX_RATE_POLICIES;
2199 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2201 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2202 WLCORE_MAX_KLV_TEMPLATES);
2203 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2206 __set_bit(policy, wl->klv_templates_map);
2211 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2213 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2216 __clear_bit(*idx, wl->klv_templates_map);
2217 *idx = WLCORE_MAX_KLV_TEMPLATES;
2220 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2222 switch (wlvif->bss_type) {
2223 case BSS_TYPE_AP_BSS:
2225 return WL1271_ROLE_P2P_GO;
2227 return WL1271_ROLE_AP;
2229 case BSS_TYPE_STA_BSS:
2231 return WL1271_ROLE_P2P_CL;
2233 return WL1271_ROLE_STA;
2236 return WL1271_ROLE_IBSS;
2239 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2241 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the driver-private per-vif state: derive the bss type from
 * the mac80211 interface type, allocate rate policies and KLV template,
 * seed rates/band/channel from global state, and set up the per-vif
 * work items and RX-streaming timer.
 * NOTE(review): extraction dropped break statements, braces and a few
 * lines; visible lines are kept byte-identical.
 */
2244 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2246 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2249 /* clear everything but the persistent data */
2250 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2252 switch (ieee80211_vif_type_p2p(vif)) {
2253 case NL80211_IFTYPE_P2P_CLIENT:
2256 case NL80211_IFTYPE_STATION:
2257 wlvif->bss_type = BSS_TYPE_STA_BSS;
2259 case NL80211_IFTYPE_ADHOC:
2260 wlvif->bss_type = BSS_TYPE_IBSS;
2262 case NL80211_IFTYPE_P2P_GO:
2265 case NL80211_IFTYPE_AP:
2266 wlvif->bss_type = BSS_TYPE_AP_BSS;
2269 wlvif->bss_type = MAX_BSS_TYPE;
/* all roles/links start out unassigned */
2273 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2274 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2275 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2277 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2278 wlvif->bss_type == BSS_TYPE_IBSS) {
2279 /* init sta/ibss data */
2280 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2281 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2282 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2283 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2284 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2285 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2286 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2287 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links plus per-AC unicast policies */
2290 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2291 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2292 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2293 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2294 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2295 wl12xx_allocate_rate_policy(wl,
2296 &wlvif->ap.ucast_rate_idx[i]);
2297 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2299 * TODO: check if basic_rate shouldn't be
2300 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2301 * instead (the same thing for STA above).
2303 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2304 /* TODO: this seems to be used only for STA, check it */
2305 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2308 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2309 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2310 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2313 * mac80211 configures some values globally, while we treat them
2314 * per-interface. thus, on init, we have to copy them from wl
2316 wlvif->band = wl->band;
2317 wlvif->channel = wl->channel;
2318 wlvif->power_level = wl->power_level;
2319 wlvif->channel_type = wl->channel_type;
2321 INIT_WORK(&wlvif->rx_streaming_enable_work,
2322 wl1271_rx_streaming_enable_work);
2323 INIT_WORK(&wlvif->rx_streaming_disable_work,
2324 wl1271_rx_streaming_disable_work);
2325 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2326 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2327 wlcore_channel_switch_work);
2328 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2329 wlcore_connection_loss_work);
2330 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2331 wlcore_pending_auth_complete_work);
2332 INIT_LIST_HEAD(&wlvif->list);
2334 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2335 (unsigned long) wlvif);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES times.
 * On success, publishes hw/fw version info in wiphy, disables 5 GHz
 * channels when 11a is unsupported, and moves the device to
 * WLCORE_STATE_ON.
 * NOTE(review): extraction dropped the retry-loop framing, error
 * checks/gotos and the return; visible lines are kept byte-identical.
 */
2339 static int wl12xx_init_fw(struct wl1271 *wl)
2341 int retries = WL1271_BOOT_RETRIES;
2342 bool booted = false;
2343 struct wiphy *wiphy = wl->hw->wiphy;
2348 ret = wl12xx_chip_wakeup(wl, false);
2352 ret = wl->ops->boot(wl);
2356 ret = wl1271_hw_init(wl);
/* failure path: tear down before the next retry */
2364 mutex_unlock(&wl->mutex);
2365 /* Unlocking the mutex in the middle of handling is
2366 inherently unsafe. In this case we deem it safe to do,
2367 because we need to let any possibly pending IRQ out of
2368 the system (and while we are WLCORE_STATE_OFF the IRQ
2369 work function will not do anything.) Also, any other
2370 possible concurrent operations will fail due to the
2371 current state, hence the wl1271 struct should be safe. */
2372 wlcore_disable_interrupts(wl);
2373 wl1271_flush_deferred_work(wl);
2374 cancel_work_sync(&wl->netstack_work);
2375 mutex_lock(&wl->mutex);
2377 wl1271_power_off(wl);
2381 wl1271_error("firmware boot failed despite %d retries",
2382 WL1271_BOOT_RETRIES);
2386 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2388 /* update hw/fw version info in wiphy struct */
2389 wiphy->hw_version = wl->chip.id;
2390 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2391 sizeof(wiphy->fw_version));
2394 * Now we know if 11a is supported (info from the NVS), so disable
2395 * 11a channels if not supported
2397 if (!wl->enable_11a)
2398 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2400 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2401 wl->enable_11a ? "" : "not ");
2403 wl->state = WLCORE_STATE_ON;
2408 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2410 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2414 * Check whether a fw switch (i.e. moving from one loaded
2415 * fw to another) is needed. This function is also responsible
2416 * for updating wl->last_vif_count, so it must be called before
2417 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2420 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2421 struct vif_counter_data vif_counter_data,
2424 enum wl12xx_fw_type current_fw = wl->fw_type;
2425 u8 vif_count = vif_counter_data.counter;
2427 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2430 /* increase the vif count if this is a new vif */
2431 if (add && !vif_counter_data.cur_vif_running)
2434 wl->last_vif_count = vif_count;
2436 /* no need for fw change if the device is OFF */
2437 if (wl->state == WLCORE_STATE_OFF)
2440 /* no need for fw change if a single fw is used */
2441 if (!wl->mr_fw_name)
2444 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2446 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2453 * Enter "forced psm". Make sure the sta is in psm against the ap,
2454 * to make the fw switch a bit more disconnection-persistent.
2456 static void wl12xx_force_active_psm(struct wl1271 *wl)
2458 struct wl12xx_vif *wlvif;
2460 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2461 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Iterator state for wlcore_hw_queue_iter(): bitmap of hw-queue bases
 * already claimed by active interfaces, plus the vif being (re-)added.
 * NOTE(review): extraction dropped the cur_running member declaration
 * and the closing brace; visible lines are kept byte-identical.
 */
2465 struct wlcore_hw_queue_iter_data {
2466 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
/* the vif a queue base is being allocated for */
2468 struct ieee80211_vif *vif;
2469 /* is the current vif among those iterated */
2473 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2474 struct ieee80211_vif *vif)
2476 struct wlcore_hw_queue_iter_data *iter_data = data;
2478 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2481 if (iter_data->cur_running || vif == iter_data->vif) {
2482 iter_data->cur_running = true;
2486 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Pick a free block of NUM_TX_QUEUES mac80211 hw queues for this vif
 * (reusing the existing base on resume/recovery) and register the
 * queues — plus the per-interface CAB queue for APs — in the vif.
 * NOTE(review): extraction dropped some locals, the -EBUSY path and the
 * return; visible lines are kept byte-identical.
 */
2489 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2490 struct wl12xx_vif *wlvif)
2492 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2493 struct wlcore_hw_queue_iter_data iter_data = {};
2496 iter_data.vif = vif;
2498 /* mark all bits taken by active interfaces */
2499 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2500 IEEE80211_IFACE_ITER_RESUME_ALL,
2501 wlcore_hw_queue_iter, &iter_data);
2503 /* the current vif is already running in mac80211 (resume/recovery) */
2504 if (iter_data.cur_running) {
2505 wlvif->hw_queue_base = vif->hw_queue[0];
2506 wl1271_debug(DEBUG_MAC80211,
2507 "using pre-allocated hw queue base %d",
2508 wlvif->hw_queue_base);
2510 /* interface type might have changed type */
2511 goto adjust_cab_queue;
2514 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2515 WLCORE_NUM_MAC_ADDRESSES);
2516 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2519 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2520 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2521 wlvif->hw_queue_base);
2523 for (i = 0; i < NUM_TX_QUEUES; i++) {
2524 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2525 /* register hw queues in mac80211 */
2526 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2530 /* the last places are reserved for cab queues per interface */
2531 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2532 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2533 wlvif->hw_queue_base / NUM_TX_QUEUES;
2535 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: set vif capability flags, initialize
 * per-vif state, allocate hw queues, switch firmware (via an intended
 * recovery) when going single<->multi role, boot the FW on first use,
 * then enable the FW role and apply the vif-specific init.
 * NOTE(review): extraction dropped error checks, gotos and labels;
 * visible lines are kept byte-identical.
 */
2540 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2541 struct ieee80211_vif *vif)
2543 struct wl1271 *wl = hw->priv;
2544 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2545 struct vif_counter_data vif_count;
2550 wl1271_error("Adding Interface not allowed while in PLT mode");
2554 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2555 IEEE80211_VIF_SUPPORTS_UAPSD |
2556 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2558 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2559 ieee80211_vif_type_p2p(vif), vif->addr);
2561 wl12xx_get_vif_count(hw, vif, &vif_count);
2563 mutex_lock(&wl->mutex);
2564 ret = wl1271_ps_elp_wakeup(wl);
2569 * in some very corner case HW recovery scenarios its possible to
2570 * get here before __wl1271_op_remove_interface is complete, so
2571 * opt out if that is the case.
2573 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2574 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2580 ret = wl12xx_init_vif_data(wl, vif);
2585 role_type = wl12xx_get_role_type(wl, wlvif);
2586 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2591 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* single<->multi role transition: restart with the other firmware */
2595 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2596 wl12xx_force_active_psm(wl);
2597 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2598 mutex_unlock(&wl->mutex);
2599 wl1271_recovery_work(&wl->recovery_work);
2604 * TODO: after the nvs issue will be solved, move this block
2605 * to start(), and make sure here the driver is ON.
2607 if (wl->state == WLCORE_STATE_OFF) {
2609 * we still need this in order to configure the fw
2610 * while uploading the nvs
2612 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2614 ret = wl12xx_init_fw(wl);
2619 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2620 role_type, &wlvif->role_id);
2624 ret = wl1271_init_vif_specific(wl, vif);
2628 list_add(&wlvif->list, &wl->wlvif_list);
2629 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2631 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2636 wl1271_ps_elp_sleep(wl);
2638 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif (mutex held on entry and exit): abort any scan it
 * owns, disable its FW roles, reset its TX state, free its rate
 * policies/KLV template/AP keys, drop it from the vif list, adjust
 * sleep-auth when the last AP goes away, and (with the mutex briefly
 * dropped) cancel all of its work items and timers.
 * NOTE(review): extraction dropped braces, labels and several
 * statements; visible lines are kept byte-identical.
 */
2643 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2644 struct ieee80211_vif *vif,
2645 bool reset_tx_queues)
2647 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2649 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2651 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2653 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2656 /* because of hardware recovery, we may get here twice */
2657 if (wl->state == WLCORE_STATE_OFF)
2660 wl1271_info("down");
/* this vif owns the ongoing scan: abort and report completion */
2662 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2663 wl->scan_wlvif == wlvif) {
2665 * Rearm the tx watchdog just before idling scan. This
2666 * prevents just-finished scans from triggering the watchdog
2668 wl12xx_rearm_tx_watchdog_locked(wl);
2670 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2671 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2672 wl->scan_wlvif = NULL;
2673 wl->scan.req = NULL;
2674 ieee80211_scan_completed(wl->hw, true);
2677 if (wl->sched_vif == wlvif)
2678 wl->sched_vif = NULL;
2680 if (wl->roc_vif == vif) {
2682 ieee80211_remain_on_channel_expired(wl->hw);
/* only talk to the FW when no recovery is in flight */
2685 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2686 /* disable active roles */
2687 ret = wl1271_ps_elp_wakeup(wl);
2691 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2692 wlvif->bss_type == BSS_TYPE_IBSS) {
2693 if (wl12xx_dev_role_started(wlvif))
2694 wl12xx_stop_dev(wl, wlvif);
2697 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2701 wl1271_ps_elp_sleep(wl);
2704 wl12xx_tx_reset_wlvif(wl, wlvif);
2706 /* clear all hlids (except system_hlid) */
2707 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2709 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2710 wlvif->bss_type == BSS_TYPE_IBSS) {
2711 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2712 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2713 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2714 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2715 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2717 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2718 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2719 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2720 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2721 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2722 wl12xx_free_rate_policy(wl,
2723 &wlvif->ap.ucast_rate_idx[i]);
2724 wl1271_free_ap_keys(wl, wlvif);
2727 dev_kfree_skb(wlvif->probereq);
2728 wlvif->probereq = NULL;
2729 if (wl->last_wlvif == wlvif)
2730 wl->last_wlvif = NULL;
2731 list_del(&wlvif->list);
2732 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2733 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2734 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2742 * Last AP, have more stations. Configure sleep auth according to STA.
2743 * Don't do thin on unintended recovery.
2745 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2746 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2749 if (wl->ap_count == 0 && is_ap) {
2750 /* mask ap events */
2751 wl->event_mask &= ~wl->ap_event_mask;
2752 wl1271_event_unmask(wl);
2755 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2756 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2757 /* Configure for power according to debugfs */
2758 if (sta_auth != WL1271_PSM_ILLEGAL)
2759 wl1271_acx_sleep_auth(wl, sta_auth);
2760 /* Configure for ELP power saving */
2762 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex while cancelling work that may itself take it */
2766 mutex_unlock(&wl->mutex);
2768 del_timer_sync(&wlvif->rx_streaming_timer);
2769 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2770 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2771 cancel_work_sync(&wlvif->rc_update_work);
2772 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2773 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2774 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2776 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback: tear down one virtual interface.
 * Looks up our per-vif state, bails out if the device is already off or
 * the vif was never initialized, and otherwise delegates the actual
 * teardown to __wl1271_op_remove_interface() under wl->mutex.
 * NOTE(review): this listing is lossy — error paths / closing braces
 * between the numbered lines are elided from view.
 */
2779 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2780 struct ieee80211_vif *vif)
2782 struct wl1271 *wl = hw->priv;
2783 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2784 struct wl12xx_vif *iter;
2785 struct vif_counter_data vif_count;
/* snapshot the vif count before taking the mutex */
2787 wl12xx_get_vif_count(hw, vif, &vif_count);
2788 mutex_lock(&wl->mutex);
/* nothing to do if the hw is off or this vif never came up */
2790 if (wl->state == WLCORE_STATE_OFF ||
2791 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2795 * wl->vif can be null here if someone shuts down the interface
2796 * just when hardware recovery has been started.
2798 wl12xx_for_each_wlvif(wl, iter) {
2802 __wl1271_op_remove_interface(wl, vif, true);
/* the iterator should have stopped on exactly our wlvif */
2805 WARN_ON(iter != wlvif);
/* fewer vifs may allow a different (smaller) firmware — trigger an
 * intended recovery to switch it */
2806 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2807 wl12xx_force_active_psm(wl);
2808 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2809 wl12xx_queue_recovery_work(wl);
2812 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface callback: switch a vif to a new iftype by
 * removing and re-adding it. The VIF_CHANGE_IN_PROGRESS flag brackets
 * the remove/add pair so other paths can tell this is not a real
 * teardown. vif->type is updated in between, as mac80211 expects.
 */
2815 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2816 struct ieee80211_vif *vif,
2817 enum nl80211_iftype new_type, bool p2p)
2819 struct wl1271 *wl = hw->priv;
2822 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2823 wl1271_op_remove_interface(hw, vif);
2825 vif->type = new_type;
2827 ret = wl1271_op_add_interface(hw, vif);
2829 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN (role start) for a STA or IBSS vif.
 * Side effect documented below: JOIN clears the chipset's WPA/WPA2
 * keys, so encryption_type is reset to KEY_NONE here and callers must
 * re-install keys afterwards (roaming delivers fresh ones).
 */
2833 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2836 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2839 * One of the side effects of the JOIN command is that is clears
2840 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2841 * to a WPA/WPA2 access point will therefore kill the data-path.
2842 * Currently the only valid scenario for JOIN during association
2843 * is on roaming, in which case we will also be given new keys.
2844 * Keep the below message for now, unless it starts bothering
2845 * users who really like to roam a lot :)
2847 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2848 wl1271_info("JOIN while associated.")
2850 /* clear encryption type */
2851 wlvif->encryption_type = KEY_NONE;
/* IBSS and STA use different role-start commands */
2854 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2856 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2858 * TODO: this is an ugly workaround for wl12xx fw
2859 * bug - we are not able to tx/rx after the first
2860 * start_sta, so make dummy start+stop calls,
2861 * and then call start_sta again.
2862 * this should be fixed in the fw.
2864 wl12xx_cmd_role_start_sta(wl, wlvif);
2865 wl12xx_cmd_role_stop_sta(wl, wlvif);
2868 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame skb (IEs start at
 * @offset) and cache it in wlvif->ssid / wlvif->ssid_len.
 * Fails if no SSID IE is present or the SSID exceeds
 * IEEE80211_MAX_SSID_LEN. ptr+2 skips the IE id/length header.
 */
2874 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2878 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2882 wl1271_error("No SSID in IEs!");
2887 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2888 wl1271_error("SSID is too long!");
2892 wlvif->ssid_len = ssid_len;
2893 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate wlvif->ssid from the AP probe request template that
 * mac80211 holds for this STA vif. Only BSS_TYPE_STA_BSS is
 * supported; the SSID IE is parsed starting at the probe-request
 * variable-IE offset.
 */
2897 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2899 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2900 struct sk_buff *skb;
2903 /* we currently only support setting the ssid from the ap probe req */
2904 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2907 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2911 ieoffset = offsetof(struct ieee80211_mgmt,
2912 u.probe_req.variable);
2913 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Bring a STA vif into the "associated" state: cache association
 * parameters from bss_conf, build the ps-poll and probe-request
 * templates the firmware uses for connection maintenance, enable
 * connection monitoring and keep-alive, sync the PSM mode with
 * mac80211's default (ACTIVE), and program the rate policies.
 * The comment at 2960 explains why the command ordering matters.
 */
2919 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2920 struct ieee80211_bss_conf *bss_conf,
2926 wlvif->aid = bss_conf->aid;
2927 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2928 wlvif->beacon_int = bss_conf->beacon_int;
2929 wlvif->wmm_enabled = bss_conf->qos;
2931 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2934 * with wl1271, we don't need to update the
2935 * beacon_int and dtim_period, because the firmware
2936 * updates it by itself when the first beacon is
2937 * received after a join.
2939 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2944 * Get a template for hardware connection maintenance
2946 dev_kfree_skb(wlvif->probereq);
2947 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2950 ieoffset = offsetof(struct ieee80211_mgmt,
2951 u.probe_req.variable);
2952 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2954 /* enable the connection monitoring feature */
2955 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2960 * The join command disable the keep-alive mode, shut down its process,
2961 * and also clear the template config, so we need to reset it all after
2962 * the join. The acx_aid starts the keep-alive process, and the order
2963 * of the commands below is relevant.
2965 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2969 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2973 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2977 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2978 wlvif->sta.klv_template_id,
2979 ACX_KEEP_ALIVE_TPL_VALID);
2984 * The default fw psm configuration is AUTO, while mac80211 default
2985 * setting is off (ACTIVE), so sync the fw with the correct value.
2987 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2993 wl1271_tx_enabled_rates_get(wl,
2996 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): runs on disassociation (STA) or IBSS
 * leave. Clears the association/joined flag, frees the cached
 * probe-request template, disables connection monitoring, keep-alive
 * and beacon filtering, aborts any channel switch in progress, and
 * invalidates the keep-alive template.
 */
3004 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3007 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3009 /* make sure we are connected (sta) joined */
3011 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3014 /* make sure we are joined (ibss) */
3016 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3020 /* use defaults when not associated */
3023 /* free probe-request template */
3024 dev_kfree_skb(wlvif->probereq);
3025 wlvif->probereq = NULL;
3027 /* disable connection monitor features */
3028 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3032 /* Disable the keep-alive feature */
3033 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3037 /* disable beacon filtering */
3038 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* a channel switch was pending: tell fw to stop it and report
 * failure back to mac80211 */
3043 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3044 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3046 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3047 ieee80211_chswitch_done(vif, false);
3048 cancel_delayed_work(&wlvif->channel_switch_work);
3051 /* invalidate keep-alive template */
3052 wl1271_acx_keep_alive_config(wl, wlvif,
3053 wlvif->sta.klv_template_id,
3054 ACX_KEEP_ALIVE_TPL_INVALID);
/*
 * Reset the vif's basic and full rate sets to the preconfigured
 * bitrate mask for the vif's current band.
 */
3059 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3061 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3062 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track mac80211's idle state for a STA vif via WLVIF_FLAG_ACTIVE
 * (inverted sense: ACTIVE set == not idle). No-op if the state is
 * unchanged. Going active stops a running sched_scan on this vif,
 * since (per the comment) the firmware only supports sched_scan
 * while idle.
 */
3065 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3068 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3070 if (idle == cur_idle)
3074 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3076 /* The current firmware only supports sched_scan in idle */
3077 if (wl->sched_vif == wlvif)
3078 wl->ops->sched_scan_stop(wl, wlvif);
3080 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply a hw-config change to one vif. Currently only handles a TX
 * power-level change: push it to firmware via ACX and cache it in
 * wlvif->power_level on success.
 */
3084 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3085 struct ieee80211_conf *conf, u32 changed)
3089 if (conf->power_level != wlvif->power_level) {
3090 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3094 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback: record the global power level, then (if
 * the device is on) wake the chip from ELP and apply the change to
 * every vif via wl12xx_config_vif(). Wake/sleep and the vif walk
 * run under wl->mutex.
 */
3100 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3102 struct wl1271 *wl = hw->priv;
3103 struct wl12xx_vif *wlvif;
3104 struct ieee80211_conf *conf = &hw->conf;
3107 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3109 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3111 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3114 mutex_lock(&wl->mutex);
3116 if (changed & IEEE80211_CONF_CHANGE_POWER)
3117 wl->power_level = conf->power_level;
3119 if (unlikely(wl->state != WLCORE_STATE_ON))
3122 ret = wl1271_ps_elp_wakeup(wl);
3126 /* configure each interface */
3127 wl12xx_for_each_wlvif(wl, wlvif) {
3128 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3134 wl1271_ps_elp_sleep(wl);
3137 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast and consumed
 * in configure_filter. mc_list holds up to ACX_MC_ADDRESS_GROUP_MAX
 * MAC addresses. (Other members — enabled, mc_list_length — are
 * elided from this listing but referenced by the code below.)
 */
3142 struct wl1271_filter_params {
3145 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback: copy the hw multicast list
 * into a freshly-allocated wl1271_filter_params (GFP_ATOMIC — may be
 * called in atomic context). If the list exceeds the ACX group-table
 * capacity, address filtering is disabled instead. The struct pointer
 * is smuggled back to configure_filter() through the u64 return, per
 * the mac80211 API contract; configure_filter owns/frees it.
 */
3148 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3149 struct netdev_hw_addr_list *mc_list)
3151 struct wl1271_filter_params *fp;
3152 struct netdev_hw_addr *ha;
3154 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3156 wl1271_error("Out of memory setting filters.");
3160 /* update multicast filtering parameters */
3161 fp->mc_list_length = 0;
3162 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3163 fp->enabled = false;
3166 netdev_hw_addr_list_for_each(ha, mc_list) {
3167 memcpy(fp->mc_list[fp->mc_list_length],
3168 ha->addr, ETH_ALEN);
3169 fp->mc_list_length++;
3173 return (u64)(unsigned long)fp;
3176 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3179 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback: mask the requested filters down
 * to WL1271_SUPPORTED_FILTERS, then program the multicast group
 * address table for every non-AP vif — either "accept all" when
 * FIF_ALLMULTI is set, or the explicit list from prepare_multicast
 * (passed in via the @multicast cookie). The fw has no per-filter
 * API beyond this; see comment at 3225.
 */
3183 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3184 unsigned int changed,
3185 unsigned int *total, u64 multicast)
3187 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3188 struct wl1271 *wl = hw->priv;
3189 struct wl12xx_vif *wlvif;
3193 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3194 " total %x", changed, *total);
3196 mutex_lock(&wl->mutex);
3198 *total &= WL1271_SUPPORTED_FILTERS;
3199 changed &= WL1271_SUPPORTED_FILTERS;
3201 if (unlikely(wl->state != WLCORE_STATE_ON))
3204 ret = wl1271_ps_elp_wakeup(wl);
3208 wl12xx_for_each_wlvif(wl, wlvif) {
3209 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3210 if (*total & FIF_ALLMULTI)
3211 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3215 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3218 fp->mc_list_length);
3225 * the fw doesn't provide an api to configure the filters. instead,
3226 * the filters configuration is based on the active roles / ROC
3231 wl1271_ps_elp_sleep(wl);
3234 mutex_unlock(&wl->mutex);
/*
 * Queue an AP key for later installation (used before the AP role is
 * started; wl1271_ap_init_hwenc() replays the recorded keys). Finds a
 * free slot in wlvif->ap.recorded_keys, rejects oversized keys and
 * duplicate ids, and stores a kzalloc'd copy of the key material plus
 * its TX sequence counter halves.
 */
3238 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3239 u8 id, u8 key_type, u8 key_size,
3240 const u8 *key, u8 hlid, u32 tx_seq_32,
3243 struct wl1271_ap_key *ap_key;
3246 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3248 if (key_size > MAX_KEY_SIZE)
3252 * Find next free entry in ap_keys. Also check we are not replacing
3255 for (i = 0; i < MAX_NUM_KEYS; i++) {
3256 if (wlvif->ap.recorded_keys[i] == NULL)
3259 if (wlvif->ap.recorded_keys[i]->id == id) {
3260 wl1271_warning("trying to record key replacement");
3265 if (i == MAX_NUM_KEYS)
3268 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3273 ap_key->key_type = key_type;
3274 ap_key->key_size = key_size;
3275 memcpy(ap_key->key, key, key_size);
3276 ap_key->hlid = hlid;
3277 ap_key->tx_seq_32 = tx_seq_32;
3278 ap_key->tx_seq_16 = tx_seq_16;
3280 wlvif->ap.recorded_keys[i] = ap_key;
/*
 * Free all recorded (not-yet-installed) AP keys and NULL the slots
 * so the array can be reused.
 */
3284 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3288 for (i = 0; i < MAX_NUM_KEYS; i++) {
3289 kfree(wlvif->ap.recorded_keys[i]);
3290 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay the keys recorded before AP start into the firmware.
 * Keys without a valid hlid go to the broadcast link. If any WEP key
 * was installed, also program the default WEP key for the broadcast
 * hlid. The recorded list is freed afterwards regardless
 * (wl1271_free_ap_keys at 3330).
 */
3294 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3297 struct wl1271_ap_key *key;
3298 bool wep_key_added = false;
3300 for (i = 0; i < MAX_NUM_KEYS; i++) {
3302 if (wlvif->ap.recorded_keys[i] == NULL)
3305 key = wlvif->ap.recorded_keys[i];
3307 if (hlid == WL12XX_INVALID_LINK_ID)
3308 hlid = wlvif->ap.bcast_hlid;
3310 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3311 key->id, key->key_type,
3312 key->key_size, key->key,
3313 hlid, key->tx_seq_32,
3318 if (key->key_type == KEY_WEP)
3319 wep_key_added = true;
3322 if (wep_key_added) {
3323 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3324 wlvif->ap.bcast_hlid);
3330 wl1271_free_ap_keys(wl, wlvif);
/*
 * Install or remove one key in firmware, dispatching on vif type.
 * AP: pick the peer hlid (or broadcast hlid for group keys); if the
 * AP role has not started yet, record the key for later replay
 * instead of programming it (removals in that window are silently
 * "succeeded" to keep mac80211 happy).
 * STA: unicast key removals are ignored (cleared by CMD_JOIN anyway),
 * as is any removal after the sta hlid is gone.
 */
3334 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3335 u16 action, u8 id, u8 key_type,
3336 u8 key_size, const u8 *key, u32 tx_seq_32,
3337 u16 tx_seq_16, struct ieee80211_sta *sta)
3340 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3343 struct wl1271_station *wl_sta;
3347 wl_sta = (struct wl1271_station *)sta->drv_priv;
3348 hlid = wl_sta->hlid;
3350 hlid = wlvif->ap.bcast_hlid;
3353 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3355 * We do not support removing keys after AP shutdown.
3356 * Pretend we do to make mac80211 happy.
3358 if (action != KEY_ADD_OR_REPLACE)
3361 ret = wl1271_record_ap_key(wl, wlvif, id,
3363 key, hlid, tx_seq_32,
3366 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3367 id, key_type, key_size,
3368 key, hlid, tx_seq_32,
3376 static const u8 bcast_addr[ETH_ALEN] = {
3377 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3380 addr = sta ? sta->addr : bcast_addr;
3382 if (is_zero_ether_addr(addr)) {
3383 /* We dont support TX only encryption */
3387 /* The wl1271 does not allow to remove unicast keys - they
3388 will be cleared automatically on next CMD_JOIN. Ignore the
3389 request silently, as we dont want the mac80211 to emit
3390 an error message. */
3391 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3394 /* don't remove key if hlid was already deleted */
3395 if (action == KEY_REMOVE &&
3396 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3399 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3400 id, key_type, key_size,
3401 key, addr, tx_seq_32,
/*
 * mac80211 set_key callback (thin wrapper around the hw-specific
 * wlcore_hw_set_key). GEM and TKIP keys change the fw "spare block"
 * accounting, so for those ciphers TX queues are stopped and flushed
 * first, and restarted once the key op completes, keeping queued
 * packets in sync with the new accounting.
 */
3411 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3412 struct ieee80211_vif *vif,
3413 struct ieee80211_sta *sta,
3414 struct ieee80211_key_conf *key_conf)
3416 struct wl1271 *wl = hw->priv;
3418 bool might_change_spare =
3419 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3420 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3422 if (might_change_spare) {
3424 * stop the queues and flush to ensure the next packets are
3425 * in sync with FW spare block accounting
3427 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3428 wl1271_tx_flush(wl);
3431 mutex_lock(&wl->mutex);
3433 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3435 goto out_wake_queues;
3438 ret = wl1271_ps_elp_wakeup(wl);
3440 goto out_wake_queues;
3442 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3444 wl1271_ps_elp_sleep(wl);
3447 if (might_change_spare)
3448 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3450 mutex_unlock(&wl->mutex);
/*
 * Generic set_key implementation shared by the chip-specific modules
 * (exported below). Resolves the target hlid (per-sta, AP broadcast,
 * or the STA link), seeds the key's TX sequence counter from the
 * link's total_freed_pkts, maps the mac80211 cipher suite to a fw key
 * type, then performs the SET/DISABLE action via wl1271_set_key().
 * After adding a unicast (or WEP) key on a STA vif with a new
 * encryption type, the ARP response template is rebuilt so its
 * encryption padding matches.
 */
3455 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3456 struct ieee80211_vif *vif,
3457 struct ieee80211_sta *sta,
3458 struct ieee80211_key_conf *key_conf)
3460 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3467 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3469 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3470 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3471 key_conf->cipher, key_conf->keyidx,
3472 key_conf->keylen, key_conf->flags);
3473 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3475 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3477 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3478 hlid = wl_sta->hlid;
3480 hlid = wlvif->ap.bcast_hlid;
3483 hlid = wlvif->sta.hlid;
/* resume the key's PN/IV from where the link's TX counter left off */
3485 if (hlid != WL12XX_INVALID_LINK_ID) {
3486 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3487 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3488 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3491 switch (key_conf->cipher) {
3492 case WLAN_CIPHER_SUITE_WEP40:
3493 case WLAN_CIPHER_SUITE_WEP104:
3496 key_conf->hw_key_idx = key_conf->keyidx;
3498 case WLAN_CIPHER_SUITE_TKIP:
3499 key_type = KEY_TKIP;
3500 key_conf->hw_key_idx = key_conf->keyidx;
3502 case WLAN_CIPHER_SUITE_CCMP:
3504 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3506 case WL1271_CIPHER_SUITE_GEM:
3510 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3517 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3518 key_conf->keyidx, key_type,
3519 key_conf->keylen, key_conf->key,
3520 tx_seq_32, tx_seq_16, sta);
3522 wl1271_error("Could not add or replace key");
3527 * reconfiguring arp response if the unicast (or common)
3528 * encryption key type was changed
3530 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3531 (sta || key_type == KEY_WEP) &&
3532 wlvif->encryption_type != key_type) {
3533 wlvif->encryption_type = key_type;
3534 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3536 wl1271_warning("build arp rsp failed: %d", ret);
3543 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3544 key_conf->keyidx, key_type,
3545 key_conf->keylen, key_conf->key,
3548 wl1271_error("Could not remove key");
3554 wl1271_error("Unsupported key cmd 0x%x", cmd);
3560 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key callback: remember the default
 * key index and, when WEP is in use, push it to firmware with
 * wl12xx_cmd_set_default_wep_key. Unsetting the default key is not
 * handled (see comment at 3573).
 */
3562 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3563 struct ieee80211_vif *vif,
3566 struct wl1271 *wl = hw->priv;
3567 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3570 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3573 /* we don't handle unsetting of default key */
3577 mutex_lock(&wl->mutex);
3579 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3584 ret = wl1271_ps_elp_wakeup(wl);
3588 wlvif->default_key = key_idx;
3590 /* the default WEP key needs to be configured at least once */
3591 if (wlvif->encryption_type == KEY_WEP) {
3592 ret = wl12xx_cmd_set_default_wep_key(wl,
3600 wl1271_ps_elp_sleep(wl);
3603 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to firmware, for chips with the
 * REGDOMAIN_CONF quirk. Wakes the chip, calls the locked regdomain
 * command helper, and queues a recovery if the command fails (the fw
 * is assumed unhealthy at that point).
 */
3606 void wlcore_regdomain_config(struct wl1271 *wl)
3610 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3613 mutex_lock(&wl->mutex);
3615 if (unlikely(wl->state != WLCORE_STATE_ON))
3618 ret = wl1271_ps_elp_wakeup(wl);
3622 ret = wlcore_cmd_regdomain_config_locked(wl);
3624 wl12xx_queue_recovery_work(wl);
3628 wl1271_ps_elp_sleep(wl);
3630 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan callback: start a one-shot scan for the first
 * requested SSID. Refused while any role is in ROC (remain-on-
 * channel), since fw can't scan then. Note the comment at 3654:
 * when the device is not on we must not return -EBUSY, because
 * cfg80211 would then wait forever for ieee80211_scan_completed.
 */
3633 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3634 struct ieee80211_vif *vif,
3635 struct ieee80211_scan_request *hw_req)
3637 struct cfg80211_scan_request *req = &hw_req->req;
3638 struct wl1271 *wl = hw->priv;
3643 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3646 ssid = req->ssids[0].ssid;
3647 len = req->ssids[0].ssid_len;
3650 mutex_lock(&wl->mutex);
3652 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3654 * We cannot return -EBUSY here because cfg80211 will expect
3655 * a call to ieee80211_scan_completed if we do - in this case
3656 * there won't be any call.
3662 ret = wl1271_ps_elp_wakeup(wl);
3666 /* fail if there is any role in ROC */
3667 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3668 /* don't allow scanning right now */
3673 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3675 wl1271_ps_elp_sleep(wl);
3677 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan callback: abort a running scan. Stops the
 * fw scan if it has not already finished, rearms the TX watchdog
 * (see comment at 3710), resets all scan state, and reports the scan
 * as aborted via ieee80211_scan_completed(..., true). The pending
 * scan_complete_work is cancelled outside the mutex.
 */
3682 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3683 struct ieee80211_vif *vif)
3685 struct wl1271 *wl = hw->priv;
3686 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3689 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3691 mutex_lock(&wl->mutex);
3693 if (unlikely(wl->state != WLCORE_STATE_ON))
3696 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3699 ret = wl1271_ps_elp_wakeup(wl);
3703 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3704 ret = wl->ops->scan_stop(wl, wlvif);
3710 * Rearm the tx watchdog just before idling scan. This
3711 * prevents just-finished scans from triggering the watchdog
3713 wl12xx_rearm_tx_watchdog_locked(wl);
3715 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3716 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3717 wl->scan_wlvif = NULL;
3718 wl->scan.req = NULL;
3719 ieee80211_scan_completed(wl->hw, true);
3722 wl1271_ps_elp_sleep(wl);
3724 mutex_unlock(&wl->mutex);
3726 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start callback: delegate to the chip-specific
 * sched_scan_start op and, on success, remember which vif owns the
 * scheduled scan in wl->sched_vif (only one at a time).
 */
3729 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3730 struct ieee80211_vif *vif,
3731 struct cfg80211_sched_scan_request *req,
3732 struct ieee80211_scan_ies *ies)
3734 struct wl1271 *wl = hw->priv;
3735 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3738 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3740 mutex_lock(&wl->mutex);
3742 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3747 ret = wl1271_ps_elp_wakeup(wl);
3751 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3755 wl->sched_vif = wlvif;
3758 wl1271_ps_elp_sleep(wl);
3760 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop callback: wake the chip and stop the
 * scheduled scan via the chip-specific op, under wl->mutex.
 */
3764 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3765 struct ieee80211_vif *vif)
3767 struct wl1271 *wl = hw->priv;
3768 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3771 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3773 mutex_lock(&wl->mutex);
3775 if (unlikely(wl->state != WLCORE_STATE_ON))
3778 ret = wl1271_ps_elp_wakeup(wl);
3782 wl->ops->sched_scan_stop(wl, wlvif);
3784 wl1271_ps_elp_sleep(wl);
3786 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold callback: program the global
 * fragmentation threshold via ACX; failures are only logged.
 */
3791 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3793 struct wl1271 *wl = hw->priv;
3796 mutex_lock(&wl->mutex);
3798 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3803 ret = wl1271_ps_elp_wakeup(wl);
3807 ret = wl1271_acx_frag_threshold(wl, value);
3809 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3811 wl1271_ps_elp_sleep(wl);
3814 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold callback: the RTS threshold is per-role
 * in firmware, so apply it to every vif; failures are only logged.
 */
3819 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3821 struct wl1271 *wl = hw->priv;
3822 struct wl12xx_vif *wlvif;
3825 mutex_lock(&wl->mutex);
3827 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3832 ret = wl1271_ps_elp_wakeup(wl);
3836 wl12xx_for_each_wlvif(wl, wlvif) {
3837 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3839 wl1271_warning("set rts threshold failed: %d", ret);
3841 wl1271_ps_elp_sleep(wl);
3844 mutex_unlock(&wl->mutex);
/*
 * Strip one information element (@eid) from a management frame skb
 * in place: locate it from @ieoffset, memmove the remainder over it,
 * and trim the skb by the element's length.
 */
3849 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3852 const u8 *next, *end = skb->data + skb->len;
3853 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3854 skb->len - ieoffset);
3859 memmove(ie, next, end - next);
3860 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but for a vendor-specific IE identified
 * by OUI and OUI type (e.g. the WFA P2P IE).
 */
3863 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3864 unsigned int oui, u8 oui_type,
3868 const u8 *next, *end = skb->data + skb->len;
3869 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3870 skb->data + ieoffset,
3871 skb->len - ieoffset);
3876 memmove(ie, next, end - next);
3877 skb_trim(skb, skb->len - len);
/*
 * Upload the AP probe-response template supplied by mac80211
 * (ieee80211_proberesp_get) to firmware, then mark the vif so the
 * beacon-derived probe response is no longer used
 * (WLVIF_FLAG_AP_PROBE_RESP_SET).
 */
3880 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3881 struct ieee80211_vif *vif)
3883 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3884 struct sk_buff *skb;
3887 skb = ieee80211_proberesp_get(wl->hw, vif);
3891 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3892 CMD_TEMPL_AP_PROBE_RESPONSE,
3901 wl1271_debug(DEBUG_AP, "probe response updated");
3902 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy path for uploading a probe-response template built from raw
 * frame data. If the vif already has a cached SSID, upload the data
 * as-is; otherwise splice the SSID from bss_conf into the frame:
 * copy everything up to the original SSID IE, insert the bss_conf
 * SSID, then append the IEs that followed. Rejects templates that
 * would exceed WL1271_CMD_TEMPL_MAX_SIZE.
 */
3908 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3909 struct ieee80211_vif *vif,
3911 size_t probe_rsp_len,
3914 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3915 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3916 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3917 int ssid_ie_offset, ie_offset, templ_len;
3920 /* no need to change probe response if the SSID is set correctly */
3921 if (wlvif->ssid_len > 0)
3922 return wl1271_cmd_template_set(wl, wlvif->role_id,
3923 CMD_TEMPL_AP_PROBE_RESPONSE,
3928 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3929 wl1271_error("probe_rsp template too big");
3933 /* start searching from IE offset */
3934 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3936 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3937 probe_rsp_len - ie_offset);
3939 wl1271_error("No SSID in beacon!");
3943 ssid_ie_offset = ptr - probe_rsp_data;
3944 ptr += (ptr[1] + 2);
3946 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3948 /* insert SSID from bss_conf */
3949 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3950 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3951 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3952 bss_conf->ssid, bss_conf->ssid_len);
3953 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3955 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3956 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3957 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3959 return wl1271_cmd_template_set(wl, wlvif->role_id,
3960 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related BSS changes: slot time (short/long), preamble
 * (short/long) and CTS protection, each gated on its BSS_CHANGED_*
 * bit. ACX failures are logged as warnings; preamble errors are not
 * checked at all in this listing.
 */
3966 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3967 struct ieee80211_vif *vif,
3968 struct ieee80211_bss_conf *bss_conf,
3971 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3974 if (changed & BSS_CHANGED_ERP_SLOT) {
3975 if (bss_conf->use_short_slot)
3976 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3978 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3980 wl1271_warning("Set slot time failed %d", ret);
3985 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3986 if (bss_conf->use_short_preamble)
3987 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3989 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3992 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3993 if (bss_conf->use_cts_prot)
3994 ret = wl1271_acx_cts_protect(wl, wlvif,
3997 ret = wl1271_acx_cts_protect(wl, wlvif,
3998 CTSPROTECT_DISABLE);
4000 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Upload the current beacon to firmware and, unless usermode supplied
 * an explicit probe response, derive a probe-response template from
 * it: cache the SSID, detect WMM from the Microsoft vendor IE, strip
 * the TIM and P2P IEs (see rationale at 4062), rewrite the frame
 * control to PROBE_RESP, and upload. Owns and frees the beacon skb
 * on every path shown.
 */
4009 static int wlcore_set_beacon_template(struct wl1271 *wl,
4010 struct ieee80211_vif *vif,
4013 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4014 struct ieee80211_hdr *hdr;
4017 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4018 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4026 wl1271_debug(DEBUG_MASTER, "beacon updated");
4028 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4030 dev_kfree_skb(beacon);
4033 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4034 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4036 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4041 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the MS WMM IE */
4045 wlvif->wmm_enabled =
4046 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4047 WLAN_OUI_TYPE_MICROSOFT_WMM,
4048 beacon->data + ieoffset,
4049 beacon->len - ieoffset);
4052 * In case we already have a probe-resp beacon set explicitly
4053 * by usermode, don't use the beacon data.
4055 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4058 /* remove TIM ie from probe response */
4059 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4062 * remove p2p ie from probe response.
4063 * the fw reponds to probe requests that don't include
4064 * the p2p ie. probe requests with p2p ie will be passed,
4065 * and will be responded by the supplicant (the spec
4066 * forbids including the p2p ie when responding to probe
4067 * requests that didn't include it).
4069 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4070 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4072 hdr = (struct ieee80211_hdr *) beacon->data;
4073 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4074 IEEE80211_STYPE_PROBE_RESP);
4076 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4081 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4082 CMD_TEMPL_PROBE_RESPONSE,
4087 dev_kfree_skb(beacon);
/*
 * Handle beacon-related BSS changes: cache a new beacon interval,
 * re-upload the AP probe-response template when mac80211 provides
 * one, and re-upload the beacon template itself; a beacon update
 * after DFS may also restart the DFS master (4122-4124). Errors are
 * reported via the message at 4131.
 */
4095 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4096 struct ieee80211_vif *vif,
4097 struct ieee80211_bss_conf *bss_conf,
4100 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4101 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4104 if (changed & BSS_CHANGED_BEACON_INT) {
4105 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4106 bss_conf->beacon_int);
4108 wlvif->beacon_int = bss_conf->beacon_int;
4111 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4112 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4114 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4117 if (changed & BSS_CHANGED_BEACON) {
4118 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4122 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4124 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4131 wl1271_error("beacon info change failed: %d", ret);
4135 /* AP mode changes */
/*
 * AP-mode bss_info_changed handler. On basic-rate changes it rebuilds
 * the rate policies and all AP templates; then delegates beacon
 * changes, handles beacon enable/disable by starting/stopping the AP
 * role (installing recorded keys on start, cancelling a leftover ROC
 * on stop), applies ERP changes, and pushes HT operation-mode updates
 * for non-20MHz-noHT channels.
 */
4136 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4137 struct ieee80211_vif *vif,
4138 struct ieee80211_bss_conf *bss_conf,
4141 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4144 if (changed & BSS_CHANGED_BASIC_RATES) {
4145 u32 rates = bss_conf->basic_rates;
4147 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4149 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4150 wlvif->basic_rate_set);
4152 ret = wl1271_init_ap_rates(wl, wlvif);
4154 wl1271_error("AP rate policy change failed %d", ret);
/* rate change invalidates the templates — rebuild them all */
4158 ret = wl1271_ap_init_templates(wl, vif);
4162 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4166 ret = wlcore_set_beacon_template(wl, vif, true);
4171 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4175 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4176 if (bss_conf->enable_beacon) {
4177 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4178 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* replay keys that were recorded before the role started */
4182 ret = wl1271_ap_init_hwenc(wl, wlvif);
4186 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4187 wl1271_debug(DEBUG_AP, "started AP");
4190 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4192 * AP might be in ROC in case we have just
4193 * sent auth reply. handle it.
4195 if (test_bit(wlvif->role_id, wl->roc_map))
4196 wl12xx_croc(wl, wlvif->role_id);
4198 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4202 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4203 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4205 wl1271_debug(DEBUG_AP, "stopped AP");
4210 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4214 /* Handle HT information change */
4215 if ((changed & BSS_CHANGED_HT) &&
4216 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4217 ret = wl1271_acx_set_ht_information(wl, wlvif,
4218 bss_conf->ht_operation_mode);
4220 wl1271_warning("Set ht information failed %d", ret);
/*
 * Handle a BSSID change on a STA vif: cache beacon interval and
 * derive the basic/full rate sets from bss_conf and the peer's
 * sta_rate_set, stop any scheduled scan (unsupported while
 * connected, per 4257), program the rate policies, rebuild the
 * null-data and QoS-null templates, cache the SSID, and mark the vif
 * in use.
 */
4229 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4230 struct ieee80211_bss_conf *bss_conf,
4236 wl1271_debug(DEBUG_MAC80211,
4237 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4238 bss_conf->bssid, bss_conf->aid,
4239 bss_conf->beacon_int,
4240 bss_conf->basic_rates, sta_rate_set);
4242 wlvif->beacon_int = bss_conf->beacon_int;
4243 rates = bss_conf->basic_rates;
4244 wlvif->basic_rate_set =
4245 wl1271_tx_enabled_rates_get(wl, rates,
4248 wl1271_tx_min_rate_get(wl,
4249 wlvif->basic_rate_set);
4253 wl1271_tx_enabled_rates_get(wl,
4257 /* we only support sched_scan while not connected */
4258 if (wl->sched_vif == wlvif)
4259 wl->ops->sched_scan_stop(wl, wlvif);
4261 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4265 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4269 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4273 wlcore_set_ssid(wl, wlvif);
4275 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4280 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4284 /* revert back to minimum rates for the current band */
4285 wl1271_set_band_rate(wl, wlvif);
4286 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4288 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4292 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4293 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4294 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4299 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4302 /* STA/IBSS mode changes */
4303 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4304 struct ieee80211_vif *vif,
4305 struct ieee80211_bss_conf *bss_conf,
4308 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4309 bool do_join = false;
4310 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4311 bool ibss_joined = false;
4312 u32 sta_rate_set = 0;
4314 struct ieee80211_sta *sta;
4315 bool sta_exists = false;
4316 struct ieee80211_sta_ht_cap sta_ht_cap;
4319 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4325 if (changed & BSS_CHANGED_IBSS) {
4326 if (bss_conf->ibss_joined) {
4327 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4330 wlcore_unset_assoc(wl, wlvif);
4331 wl12xx_cmd_role_stop_sta(wl, wlvif);
4335 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4338 /* Need to update the SSID (for filtering etc) */
4339 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4342 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4343 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4344 bss_conf->enable_beacon ? "enabled" : "disabled");
4349 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4350 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4352 if (changed & BSS_CHANGED_CQM) {
4353 bool enable = false;
4354 if (bss_conf->cqm_rssi_thold)
4356 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4357 bss_conf->cqm_rssi_thold,
4358 bss_conf->cqm_rssi_hyst);
4361 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4364 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4365 BSS_CHANGED_ASSOC)) {
4367 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4369 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4371 /* save the supp_rates of the ap */
4372 sta_rate_set = sta->supp_rates[wlvif->band];
4373 if (sta->ht_cap.ht_supported)
4375 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4376 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4377 sta_ht_cap = sta->ht_cap;
4384 if (changed & BSS_CHANGED_BSSID) {
4385 if (!is_zero_ether_addr(bss_conf->bssid)) {
4386 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4391 /* Need to update the BSSID (for filtering etc) */
4394 ret = wlcore_clear_bssid(wl, wlvif);
4400 if (changed & BSS_CHANGED_IBSS) {
4401 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4402 bss_conf->ibss_joined);
4404 if (bss_conf->ibss_joined) {
4405 u32 rates = bss_conf->basic_rates;
4406 wlvif->basic_rate_set =
4407 wl1271_tx_enabled_rates_get(wl, rates,
4410 wl1271_tx_min_rate_get(wl,
4411 wlvif->basic_rate_set);
4413 /* by default, use 11b + OFDM rates */
4414 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4415 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4421 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4422 /* enable beacon filtering */
4423 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4428 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4433 ret = wlcore_join(wl, wlvif);
4435 wl1271_warning("cmd join failed %d", ret);
4440 if (changed & BSS_CHANGED_ASSOC) {
4441 if (bss_conf->assoc) {
4442 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4447 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4448 wl12xx_set_authorized(wl, wlvif);
4450 wlcore_unset_assoc(wl, wlvif);
4454 if (changed & BSS_CHANGED_PS) {
4455 if ((bss_conf->ps) &&
4456 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4457 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4461 if (wl->conf.conn.forced_ps) {
4462 ps_mode = STATION_POWER_SAVE_MODE;
4463 ps_mode_str = "forced";
4465 ps_mode = STATION_AUTO_PS_MODE;
4466 ps_mode_str = "auto";
4469 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4471 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4473 wl1271_warning("enter %s ps failed %d",
4475 } else if (!bss_conf->ps &&
4476 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4477 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4479 ret = wl1271_ps_set_mode(wl, wlvif,
4480 STATION_ACTIVE_MODE);
4482 wl1271_warning("exit auto ps failed %d", ret);
4486 /* Handle new association with HT. Do this after join. */
4489 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4491 ret = wlcore_hw_set_peer_cap(wl,
4497 wl1271_warning("Set ht cap failed %d", ret);
4503 ret = wl1271_acx_set_ht_information(wl, wlvif,
4504 bss_conf->ht_operation_mode);
4506 wl1271_warning("Set ht information failed %d",
4513 /* Handle arp filtering. Done after join. */
4514 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4515 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4516 __be32 addr = bss_conf->arp_addr_list[0];
4517 wlvif->sta.qos = bss_conf->qos;
4518 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4520 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4521 wlvif->ip_addr = addr;
4523 * The template should have been configured only upon
4524 * association. however, it seems that the correct ip
4525 * isn't being set (when sending), so we have to
4526 * reconfigure the template upon every ip change.
4528 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4530 wl1271_warning("build arp rsp failed: %d", ret);
4534 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4535 (ACX_ARP_FILTER_ARP_FILTERING |
4536 ACX_ARP_FILTER_AUTO_ARP),
4540 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4551 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4552 struct ieee80211_vif *vif,
4553 struct ieee80211_bss_conf *bss_conf,
4556 struct wl1271 *wl = hw->priv;
4557 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4558 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4561 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4562 wlvif->role_id, (int)changed);
4565 * make sure to cancel pending disconnections if our association
4568 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4569 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4571 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4572 !bss_conf->enable_beacon)
4573 wl1271_tx_flush(wl);
4575 mutex_lock(&wl->mutex);
4577 if (unlikely(wl->state != WLCORE_STATE_ON))
4580 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4583 ret = wl1271_ps_elp_wakeup(wl);
4587 if ((changed & BSS_CHANGED_TXPOWER) &&
4588 bss_conf->txpower != wlvif->power_level) {
4590 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4594 wlvif->power_level = bss_conf->txpower;
4598 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4600 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4602 wl1271_ps_elp_sleep(wl);
4605 mutex_unlock(&wl->mutex);
4608 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4609 struct ieee80211_chanctx_conf *ctx)
4611 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4612 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4613 cfg80211_get_chandef_type(&ctx->def));
4617 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4618 struct ieee80211_chanctx_conf *ctx)
4620 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4621 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4622 cfg80211_get_chandef_type(&ctx->def));
4625 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4626 struct ieee80211_chanctx_conf *ctx,
4629 struct wl1271 *wl = hw->priv;
4630 struct wl12xx_vif *wlvif;
4632 int channel = ieee80211_frequency_to_channel(
4633 ctx->def.chan->center_freq);
4635 wl1271_debug(DEBUG_MAC80211,
4636 "mac80211 change chanctx %d (type %d) changed 0x%x",
4637 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4639 mutex_lock(&wl->mutex);
4641 ret = wl1271_ps_elp_wakeup(wl);
4645 wl12xx_for_each_wlvif(wl, wlvif) {
4646 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4649 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4655 /* start radar if needed */
4656 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4657 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4658 ctx->radar_enabled && !wlvif->radar_enabled &&
4659 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4660 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4661 wlcore_hw_set_cac(wl, wlvif, true);
4662 wlvif->radar_enabled = true;
4666 wl1271_ps_elp_sleep(wl);
4668 mutex_unlock(&wl->mutex);
4671 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4672 struct ieee80211_vif *vif,
4673 struct ieee80211_chanctx_conf *ctx)
4675 struct wl1271 *wl = hw->priv;
4676 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4677 int channel = ieee80211_frequency_to_channel(
4678 ctx->def.chan->center_freq);
4681 wl1271_debug(DEBUG_MAC80211,
4682 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4683 wlvif->role_id, channel,
4684 cfg80211_get_chandef_type(&ctx->def),
4685 ctx->radar_enabled, ctx->def.chan->dfs_state);
4687 mutex_lock(&wl->mutex);
4689 if (unlikely(wl->state != WLCORE_STATE_ON))
4692 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4695 ret = wl1271_ps_elp_wakeup(wl);
4699 wlvif->band = ctx->def.chan->band;
4700 wlvif->channel = channel;
4701 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4703 /* update default rates according to the band */
4704 wl1271_set_band_rate(wl, wlvif);
4706 if (ctx->radar_enabled &&
4707 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4708 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4709 wlcore_hw_set_cac(wl, wlvif, true);
4710 wlvif->radar_enabled = true;
4713 wl1271_ps_elp_sleep(wl);
4715 mutex_unlock(&wl->mutex);
4720 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4721 struct ieee80211_vif *vif,
4722 struct ieee80211_chanctx_conf *ctx)
4724 struct wl1271 *wl = hw->priv;
4725 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4728 wl1271_debug(DEBUG_MAC80211,
4729 "mac80211 unassign chanctx (role %d) %d (type %d)",
4731 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4732 cfg80211_get_chandef_type(&ctx->def));
4734 wl1271_tx_flush(wl);
4736 mutex_lock(&wl->mutex);
4738 if (unlikely(wl->state != WLCORE_STATE_ON))
4741 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4744 ret = wl1271_ps_elp_wakeup(wl);
4748 if (wlvif->radar_enabled) {
4749 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4750 wlcore_hw_set_cac(wl, wlvif, false);
4751 wlvif->radar_enabled = false;
4754 wl1271_ps_elp_sleep(wl);
4756 mutex_unlock(&wl->mutex);
4759 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4760 struct wl12xx_vif *wlvif,
4761 struct ieee80211_chanctx_conf *new_ctx)
4763 int channel = ieee80211_frequency_to_channel(
4764 new_ctx->def.chan->center_freq);
4766 wl1271_debug(DEBUG_MAC80211,
4767 "switch vif (role %d) %d -> %d chan_type: %d",
4768 wlvif->role_id, wlvif->channel, channel,
4769 cfg80211_get_chandef_type(&new_ctx->def));
4771 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4774 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4776 if (wlvif->radar_enabled) {
4777 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4778 wlcore_hw_set_cac(wl, wlvif, false);
4779 wlvif->radar_enabled = false;
4782 wlvif->band = new_ctx->def.chan->band;
4783 wlvif->channel = channel;
4784 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4786 /* start radar if needed */
4787 if (new_ctx->radar_enabled) {
4788 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4789 wlcore_hw_set_cac(wl, wlvif, true);
4790 wlvif->radar_enabled = true;
4797 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4798 struct ieee80211_vif_chanctx_switch *vifs,
4800 enum ieee80211_chanctx_switch_mode mode)
4802 struct wl1271 *wl = hw->priv;
4805 wl1271_debug(DEBUG_MAC80211,
4806 "mac80211 switch chanctx n_vifs %d mode %d",
4809 mutex_lock(&wl->mutex);
4811 ret = wl1271_ps_elp_wakeup(wl);
4815 for (i = 0; i < n_vifs; i++) {
4816 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4818 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4823 wl1271_ps_elp_sleep(wl);
4825 mutex_unlock(&wl->mutex);
4830 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4831 struct ieee80211_vif *vif, u16 queue,
4832 const struct ieee80211_tx_queue_params *params)
4834 struct wl1271 *wl = hw->priv;
4835 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4839 mutex_lock(&wl->mutex);
4841 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4844 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4846 ps_scheme = CONF_PS_SCHEME_LEGACY;
4848 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4851 ret = wl1271_ps_elp_wakeup(wl);
4856 * the txop is confed in units of 32us by the mac80211,
4859 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4860 params->cw_min, params->cw_max,
4861 params->aifs, params->txop << 5);
4865 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4866 CONF_CHANNEL_TYPE_EDCF,
4867 wl1271_tx_get_queue(queue),
4868 ps_scheme, CONF_ACK_POLICY_LEGACY,
4872 wl1271_ps_elp_sleep(wl);
4875 mutex_unlock(&wl->mutex);
4880 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4881 struct ieee80211_vif *vif)
4884 struct wl1271 *wl = hw->priv;
4885 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4886 u64 mactime = ULLONG_MAX;
4889 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4891 mutex_lock(&wl->mutex);
4893 if (unlikely(wl->state != WLCORE_STATE_ON))
4896 ret = wl1271_ps_elp_wakeup(wl);
4900 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4905 wl1271_ps_elp_sleep(wl);
4908 mutex_unlock(&wl->mutex);
4912 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4913 struct survey_info *survey)
4915 struct ieee80211_conf *conf = &hw->conf;
4920 survey->channel = conf->chandef.chan;
4925 static int wl1271_allocate_sta(struct wl1271 *wl,
4926 struct wl12xx_vif *wlvif,
4927 struct ieee80211_sta *sta)
4929 struct wl1271_station *wl_sta;
4933 if (wl->active_sta_count >= wl->max_ap_stations) {
4934 wl1271_warning("could not allocate HLID - too much stations");
4938 wl_sta = (struct wl1271_station *)sta->drv_priv;
4939 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4941 wl1271_warning("could not allocate HLID - too many links");
4945 /* use the previous security seq, if this is a recovery/resume */
4946 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4948 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4949 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4950 wl->active_sta_count++;
4954 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4956 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4959 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4960 __clear_bit(hlid, &wl->ap_ps_map);
4961 __clear_bit(hlid, &wl->ap_fw_ps_map);
4964 * save the last used PN in the private part of iee80211_sta,
4965 * in case of recovery/suspend
4967 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4969 wl12xx_free_link(wl, wlvif, &hlid);
4970 wl->active_sta_count--;
4973 * rearm the tx watchdog when the last STA is freed - give the FW a
4974 * chance to return STA-buffered packets before complaining.
4976 if (wl->active_sta_count == 0)
4977 wl12xx_rearm_tx_watchdog_locked(wl);
4980 static int wl12xx_sta_add(struct wl1271 *wl,
4981 struct wl12xx_vif *wlvif,
4982 struct ieee80211_sta *sta)
4984 struct wl1271_station *wl_sta;
4988 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4990 ret = wl1271_allocate_sta(wl, wlvif, sta);
4994 wl_sta = (struct wl1271_station *)sta->drv_priv;
4995 hlid = wl_sta->hlid;
4997 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4999 wl1271_free_sta(wl, wlvif, hlid);
5004 static int wl12xx_sta_remove(struct wl1271 *wl,
5005 struct wl12xx_vif *wlvif,
5006 struct ieee80211_sta *sta)
5008 struct wl1271_station *wl_sta;
5011 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5013 wl_sta = (struct wl1271_station *)sta->drv_priv;
5015 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5018 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5022 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5026 static void wlcore_roc_if_possible(struct wl1271 *wl,
5027 struct wl12xx_vif *wlvif)
5029 if (find_first_bit(wl->roc_map,
5030 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5033 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5036 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5040 * when wl_sta is NULL, we treat this call as if coming from a
5041 * pending auth reply.
5042 * wl->mutex must be taken and the FW must be awake when the call
5045 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5046 struct wl1271_station *wl_sta, bool in_conn)
5049 if (WARN_ON(wl_sta && wl_sta->in_connection))
5052 if (!wlvif->ap_pending_auth_reply &&
5053 !wlvif->inconn_count)
5054 wlcore_roc_if_possible(wl, wlvif);
5057 wl_sta->in_connection = true;
5058 wlvif->inconn_count++;
5060 wlvif->ap_pending_auth_reply = true;
5063 if (wl_sta && !wl_sta->in_connection)
5066 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5069 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5073 wl_sta->in_connection = false;
5074 wlvif->inconn_count--;
5076 wlvif->ap_pending_auth_reply = false;
5079 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5080 test_bit(wlvif->role_id, wl->roc_map))
5081 wl12xx_croc(wl, wlvif->role_id);
5085 static int wl12xx_update_sta_state(struct wl1271 *wl,
5086 struct wl12xx_vif *wlvif,
5087 struct ieee80211_sta *sta,
5088 enum ieee80211_sta_state old_state,
5089 enum ieee80211_sta_state new_state)
5091 struct wl1271_station *wl_sta;
5092 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5093 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5096 wl_sta = (struct wl1271_station *)sta->drv_priv;
5098 /* Add station (AP mode) */
5100 old_state == IEEE80211_STA_NOTEXIST &&
5101 new_state == IEEE80211_STA_NONE) {
5102 ret = wl12xx_sta_add(wl, wlvif, sta);
5106 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5109 /* Remove station (AP mode) */
5111 old_state == IEEE80211_STA_NONE &&
5112 new_state == IEEE80211_STA_NOTEXIST) {
5114 wl12xx_sta_remove(wl, wlvif, sta);
5116 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5119 /* Authorize station (AP mode) */
5121 new_state == IEEE80211_STA_AUTHORIZED) {
5122 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5126 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5131 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5134 /* Authorize station */
5136 new_state == IEEE80211_STA_AUTHORIZED) {
5137 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5138 ret = wl12xx_set_authorized(wl, wlvif);
5144 old_state == IEEE80211_STA_AUTHORIZED &&
5145 new_state == IEEE80211_STA_ASSOC) {
5146 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5147 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5150 /* save seq number on disassoc (suspend) */
5152 old_state == IEEE80211_STA_ASSOC &&
5153 new_state == IEEE80211_STA_AUTH) {
5154 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5155 wlvif->total_freed_pkts = 0;
5158 /* restore seq number on assoc (resume) */
5160 old_state == IEEE80211_STA_AUTH &&
5161 new_state == IEEE80211_STA_ASSOC) {
5162 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5165 /* clear ROCs on failure or authorization */
5167 (new_state == IEEE80211_STA_AUTHORIZED ||
5168 new_state == IEEE80211_STA_NOTEXIST)) {
5169 if (test_bit(wlvif->role_id, wl->roc_map))
5170 wl12xx_croc(wl, wlvif->role_id);
5174 old_state == IEEE80211_STA_NOTEXIST &&
5175 new_state == IEEE80211_STA_NONE) {
5176 if (find_first_bit(wl->roc_map,
5177 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5178 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5179 wl12xx_roc(wl, wlvif, wlvif->role_id,
5180 wlvif->band, wlvif->channel);
5186 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5187 struct ieee80211_vif *vif,
5188 struct ieee80211_sta *sta,
5189 enum ieee80211_sta_state old_state,
5190 enum ieee80211_sta_state new_state)
5192 struct wl1271 *wl = hw->priv;
5193 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5196 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5197 sta->aid, old_state, new_state);
5199 mutex_lock(&wl->mutex);
5201 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5206 ret = wl1271_ps_elp_wakeup(wl);
5210 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5212 wl1271_ps_elp_sleep(wl);
5214 mutex_unlock(&wl->mutex);
5215 if (new_state < old_state)
5220 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5221 struct ieee80211_vif *vif,
5222 enum ieee80211_ampdu_mlme_action action,
5223 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5226 struct wl1271 *wl = hw->priv;
5227 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5229 u8 hlid, *ba_bitmap;
5231 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5234 /* sanity check - the fields in FW are only 8bits wide */
5235 if (WARN_ON(tid > 0xFF))
5238 mutex_lock(&wl->mutex);
5240 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5245 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5246 hlid = wlvif->sta.hlid;
5247 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5248 struct wl1271_station *wl_sta;
5250 wl_sta = (struct wl1271_station *)sta->drv_priv;
5251 hlid = wl_sta->hlid;
5257 ba_bitmap = &wl->links[hlid].ba_bitmap;
5259 ret = wl1271_ps_elp_wakeup(wl);
5263 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5267 case IEEE80211_AMPDU_RX_START:
5268 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5273 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5275 wl1271_error("exceeded max RX BA sessions");
5279 if (*ba_bitmap & BIT(tid)) {
5281 wl1271_error("cannot enable RX BA session on active "
5286 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5289 *ba_bitmap |= BIT(tid);
5290 wl->ba_rx_session_count++;
5294 case IEEE80211_AMPDU_RX_STOP:
5295 if (!(*ba_bitmap & BIT(tid))) {
5297 * this happens on reconfig - so only output a debug
5298 * message for now, and don't fail the function.
5300 wl1271_debug(DEBUG_MAC80211,
5301 "no active RX BA session on tid: %d",
5307 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5310 *ba_bitmap &= ~BIT(tid);
5311 wl->ba_rx_session_count--;
5316 * The BA initiator session management in FW independently.
5317 * Falling break here on purpose for all TX APDU commands.
5319 case IEEE80211_AMPDU_TX_START:
5320 case IEEE80211_AMPDU_TX_STOP_CONT:
5321 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5322 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5323 case IEEE80211_AMPDU_TX_OPERATIONAL:
5328 wl1271_error("Incorrect ampdu action id=%x\n", action);
5332 wl1271_ps_elp_sleep(wl);
5335 mutex_unlock(&wl->mutex);
5340 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5341 struct ieee80211_vif *vif,
5342 const struct cfg80211_bitrate_mask *mask)
5344 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5345 struct wl1271 *wl = hw->priv;
5348 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5349 mask->control[NL80211_BAND_2GHZ].legacy,
5350 mask->control[NL80211_BAND_5GHZ].legacy);
5352 mutex_lock(&wl->mutex);
5354 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5355 wlvif->bitrate_masks[i] =
5356 wl1271_tx_enabled_rates_get(wl,
5357 mask->control[i].legacy,
5360 if (unlikely(wl->state != WLCORE_STATE_ON))
5363 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5364 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5366 ret = wl1271_ps_elp_wakeup(wl);
5370 wl1271_set_band_rate(wl, wlvif);
5372 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5373 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5375 wl1271_ps_elp_sleep(wl);
5378 mutex_unlock(&wl->mutex);
5383 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5384 struct ieee80211_vif *vif,
5385 struct ieee80211_channel_switch *ch_switch)
5387 struct wl1271 *wl = hw->priv;
5388 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5391 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5393 wl1271_tx_flush(wl);
5395 mutex_lock(&wl->mutex);
5397 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5398 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5399 ieee80211_chswitch_done(vif, false);
5401 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5405 ret = wl1271_ps_elp_wakeup(wl);
5409 /* TODO: change mac80211 to pass vif as param */
5411 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5412 unsigned long delay_usec;
5414 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5418 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5420 /* indicate failure 5 seconds after channel switch time */
5421 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5423 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5424 usecs_to_jiffies(delay_usec) +
5425 msecs_to_jiffies(5000));
5429 wl1271_ps_elp_sleep(wl);
5432 mutex_unlock(&wl->mutex);
5435 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5436 struct wl12xx_vif *wlvif,
5439 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5440 struct sk_buff *beacon =
5441 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5446 return cfg80211_find_ie(eid,
5447 beacon->data + ieoffset,
5448 beacon->len - ieoffset);
5451 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5455 const struct ieee80211_channel_sw_ie *ie_csa;
5457 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5461 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5462 *csa_count = ie_csa->count;
5467 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5468 struct ieee80211_vif *vif,
5469 struct cfg80211_chan_def *chandef)
5471 struct wl1271 *wl = hw->priv;
5472 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5473 struct ieee80211_channel_switch ch_switch = {
5475 .chandef = *chandef,
5479 wl1271_debug(DEBUG_MAC80211,
5480 "mac80211 channel switch beacon (role %d)",
5483 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5485 wl1271_error("error getting beacon (for CSA counter)");
5489 mutex_lock(&wl->mutex);
5491 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5496 ret = wl1271_ps_elp_wakeup(wl);
5500 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5504 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5507 wl1271_ps_elp_sleep(wl);
5509 mutex_unlock(&wl->mutex);
5512 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5513 u32 queues, bool drop)
5515 struct wl1271 *wl = hw->priv;
5517 wl1271_tx_flush(wl);
5520 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5521 struct ieee80211_vif *vif,
5522 struct ieee80211_channel *chan,
5524 enum ieee80211_roc_type type)
5526 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5527 struct wl1271 *wl = hw->priv;
5528 int channel, ret = 0;
5530 channel = ieee80211_frequency_to_channel(chan->center_freq);
5532 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5533 channel, wlvif->role_id);
5535 mutex_lock(&wl->mutex);
5537 if (unlikely(wl->state != WLCORE_STATE_ON))
5540 /* return EBUSY if we can't ROC right now */
5541 if (WARN_ON(wl->roc_vif ||
5542 find_first_bit(wl->roc_map,
5543 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5548 ret = wl1271_ps_elp_wakeup(wl);
5552 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5557 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5558 msecs_to_jiffies(duration));
5560 wl1271_ps_elp_sleep(wl);
5562 mutex_unlock(&wl->mutex);
5566 static int __wlcore_roc_completed(struct wl1271 *wl)
5568 struct wl12xx_vif *wlvif;
5571 /* already completed */
5572 if (unlikely(!wl->roc_vif))
5575 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5577 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5580 ret = wl12xx_stop_dev(wl, wlvif);
5589 static int wlcore_roc_completed(struct wl1271 *wl)
5593 wl1271_debug(DEBUG_MAC80211, "roc complete");
5595 mutex_lock(&wl->mutex);
5597 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5602 ret = wl1271_ps_elp_wakeup(wl);
5606 ret = __wlcore_roc_completed(wl);
5608 wl1271_ps_elp_sleep(wl);
5610 mutex_unlock(&wl->mutex);
5615 static void wlcore_roc_complete_work(struct work_struct *work)
5617 struct delayed_work *dwork;
5621 dwork = container_of(work, struct delayed_work, work);
5622 wl = container_of(dwork, struct wl1271, roc_complete_work);
5624 ret = wlcore_roc_completed(wl);
5626 ieee80211_remain_on_channel_expired(wl->hw);
5629 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5631 struct wl1271 *wl = hw->priv;
5633 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5636 wl1271_tx_flush(wl);
5639 * we can't just flush_work here, because it might deadlock
5640 * (as we might get called from the same workqueue)
5642 cancel_delayed_work_sync(&wl->roc_complete_work);
5643 wlcore_roc_completed(wl);
5648 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5649 struct ieee80211_vif *vif,
5650 struct ieee80211_sta *sta,
5653 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5655 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5657 if (!(changed & IEEE80211_RC_BW_CHANGED))
5660 /* this callback is atomic, so schedule a new work */
5661 wlvif->rc_update_bw = sta->bandwidth;
5662 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5665 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5666 struct ieee80211_vif *vif,
5667 struct ieee80211_sta *sta,
5668 struct station_info *sinfo)
5670 struct wl1271 *wl = hw->priv;
5671 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5675 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5677 mutex_lock(&wl->mutex);
5679 if (unlikely(wl->state != WLCORE_STATE_ON))
5682 ret = wl1271_ps_elp_wakeup(wl);
5686 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5690 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5691 sinfo->signal = rssi_dbm;
5694 wl1271_ps_elp_sleep(wl);
5697 mutex_unlock(&wl->mutex);
5700 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5702 struct wl1271 *wl = hw->priv;
5705 mutex_lock(&wl->mutex);
5707 if (unlikely(wl->state != WLCORE_STATE_ON))
5710 /* packets are considered pending if in the TX queue or the FW */
5711 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5713 mutex_unlock(&wl->mutex);
5718 /* can't be const, mac80211 writes to this */
5719 static struct ieee80211_rate wl1271_rates[] = {
5721 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5722 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5724 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5725 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5726 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5728 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5729 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5730 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5732 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5733 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5734 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5736 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5737 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5739 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5740 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5742 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5743 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5745 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5746 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5748 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5749 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5751 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5752 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5754 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5755 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5757 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5758 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5761 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz channel list: channels 1-14 (2412-2484 MHz), all starting at
 * the driver default max TX power; per-channel flags/power are reset in
 * wl1271_init_ieee80211() before each registration.
 */
5762 static struct ieee80211_channel wl1271_channels[] = {
5763 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5764 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5765 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5766 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5767 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5768 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5769 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5770 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5771 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5772 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5773 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5774 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5775 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5776 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5779 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz band descriptor template; per-device copies (including HT
 * caps) are made into wl->bands[] in wl1271_init_ieee80211().
 */
5780 static struct ieee80211_supported_band wl1271_band_2ghz = {
5781 .channels = wl1271_channels,
5782 .n_channels = ARRAY_SIZE(wl1271_channels),
5783 .bitrates = wl1271_rates,
5784 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5787 /* 5 GHz data rates for WL1273 */
/*
 * OFDM-only rate table for the 5 GHz band (6..54 Mbps); same
 * CONF_HW_BIT_RATE_* hw_value encoding as the 2.4 GHz table.
 * NOTE(review): the .bitrate members appear elided in this extract.
 */
5788 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5790 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5791 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5793 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5794 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5796 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5797 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5799 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5800 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5802 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5803 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5805 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5806 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5808 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5809 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5811 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5812 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5815 /* 5 GHz band channels for WL1273 */
/*
 * 5 GHz channel list (5040-5825 MHz), all starting at the driver
 * default max TX power; flags/power are reset per registration in
 * wl1271_init_ieee80211().
 */
5816 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5817 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5818 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5819 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5820 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5821 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5822 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5828 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5829 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5830 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/*
 * 5 GHz band descriptor template; per-device copies (including HT caps)
 * are made into wl->bands[] in wl1271_init_ieee80211().
 */
5850 static struct ieee80211_supported_band wl1271_band_5ghz = {
5851 .channels = wl1271_channels_5ghz,
5852 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5853 .bitrates = wl1271_rates_5ghz,
5854 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table shared by all wlcore-based chips.  Ops are a
 * mix of wl1271_/wl12xx_ (legacy-named) and wlcore_ entry points; the
 * testmode command hook is compiled in only when CFG80211 testmode is
 * enabled (CFG80211_TESTMODE_CMD macro).
 */
5857 static const struct ieee80211_ops wl1271_ops = {
5858 .start = wl1271_op_start,
5859 .stop = wlcore_op_stop,
5860 .add_interface = wl1271_op_add_interface,
5861 .remove_interface = wl1271_op_remove_interface,
5862 .change_interface = wl12xx_op_change_interface,
5864 .suspend = wl1271_op_suspend,
5865 .resume = wl1271_op_resume,
5867 .config = wl1271_op_config,
5868 .prepare_multicast = wl1271_op_prepare_multicast,
5869 .configure_filter = wl1271_op_configure_filter,
5871 .set_key = wlcore_op_set_key,
5872 .hw_scan = wl1271_op_hw_scan,
5873 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5874 .sched_scan_start = wl1271_op_sched_scan_start,
5875 .sched_scan_stop = wl1271_op_sched_scan_stop,
5876 .bss_info_changed = wl1271_op_bss_info_changed,
5877 .set_frag_threshold = wl1271_op_set_frag_threshold,
5878 .set_rts_threshold = wl1271_op_set_rts_threshold,
5879 .conf_tx = wl1271_op_conf_tx,
5880 .get_tsf = wl1271_op_get_tsf,
5881 .get_survey = wl1271_op_get_survey,
5882 .sta_state = wl12xx_op_sta_state,
5883 .ampdu_action = wl1271_op_ampdu_action,
5884 .tx_frames_pending = wl1271_tx_frames_pending,
5885 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5886 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5887 .channel_switch = wl12xx_op_channel_switch,
5888 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5889 .flush = wlcore_op_flush,
5890 .remain_on_channel = wlcore_op_remain_on_channel,
5891 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5892 .add_chanctx = wlcore_op_add_chanctx,
5893 .remove_chanctx = wlcore_op_remove_chanctx,
5894 .change_chanctx = wlcore_op_change_chanctx,
5895 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5896 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5897 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5898 .sta_rc_update = wlcore_op_sta_rc_update,
5899 .sta_statistics = wlcore_op_sta_statistics,
5900 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a raw HW/FW RX rate value into the mac80211 rate index for
 * @band via wl->band_rate_to_idx[].  Out-of-table or band-unsupported
 * rates are logged as errors; the error-return lines are elided in this
 * extract (NOTE(review): presumably an out-of-band sentinel — confirm
 * against full source).
 */
5904 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5910 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5911 wl1271_error("Illegal RX rate from HW: %d", rate);
5915 idx = wl->band_rate_to_idx[band][rate];
5916 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5917 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the wiphy MAC address list from a base OUI (upper 24 bits) and
 * NIC (lower 24 bits) value: wl->num_mac_addr sequential addresses are
 * generated (NOTE(review): the per-iteration nic increment line is
 * elided in this extract).  If the chip supplies one address fewer than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is cloned from the first with
 * the locally-administered bit set so mac80211 still sees a full set.
 */
5924 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5928 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* warn when the generated range would overflow the 24-bit NIC part */
5931 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5932 wl1271_warning("NIC part of the MAC address wraps around!");
5934 for (i = 0; i < wl->num_mac_addr; i++) {
5935 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5936 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5937 wl->addresses[i].addr[2] = (u8) oui;
5938 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5939 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5940 wl->addresses[i].addr[5] = (u8) nic;
5944 /* we may be one address short at the most */
5945 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5948 * turn on the LAA bit in the first address and use it as
5951 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5952 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5953 memcpy(&wl->addresses[idx], &wl->addresses[0],
5954 sizeof(wl->addresses[0]));
/* BIT(1) in addr[0] marks a locally administered address */
5956 wl->addresses[idx].addr[0] |= BIT(1);
5959 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5960 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on just long enough to read its identity: chip ID
 * register, PG (production/package) version, and — when the chip ops
 * provide get_mac — the fuse-programmed MAC address.  The fuse address
 * fields are zeroed first so a missing get_mac op leaves them 0 (the
 * "derive from NVS" fallback in wl1271_register_hw).  Powers the chip
 * back off before returning; error-path lines are elided in this
 * extract.
 */
5963 static int wl12xx_get_hw_info(struct wl1271 *wl)
5967 ret = wl12xx_set_power_on(wl);
5971 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5975 wl->fuse_oui_addr = 0;
5976 wl->fuse_nic_addr = 0;
5978 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5982 if (wl->ops->get_mac)
5983 ret = wl->ops->get_mac(wl);
5986 wl1271_power_off(wl);
/*
 * Final registration with mac80211: pick the base MAC address (from the
 * NVS image when present and non-zero, otherwise from the fuse values
 * read in wl12xx_get_hw_info), derive the full address list, register
 * the hw, and bring up debugfs.  Idempotent: returns early if already
 * registered (early-return line elided in this extract).
 */
5990 static int wl1271_register_hw(struct wl1271 *wl)
5993 u32 oui_addr = 0, nic_addr = 0;
5995 if (wl->mac80211_registered)
5998 if (wl->nvs_len >= 12) {
5999 /* NOTE: The wl->nvs->nvs element must be first, in
6000 * order to simplify the casting, we assume it is at
6001 * the beginning of the wl->nvs structure.
6003 u8 *nvs_ptr = (u8 *)wl->nvs;
/* MAC bytes sit at fixed, non-contiguous offsets in the NVS image */
6006 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6008 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6011 /* if the MAC address is zeroed in the NVS derive from fuse */
6012 if (oui_addr == 0 && nic_addr == 0) {
6013 oui_addr = wl->fuse_oui_addr;
6014 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6015 nic_addr = wl->fuse_nic_addr + 1;
6018 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6020 ret = ieee80211_register_hw(wl->hw);
6022 wl1271_error("unable to register mac80211 hw: %d", ret);
6026 wl->mac80211_registered = true;
6028 wl1271_debugfs_init(wl);
6030 wl1271_notice("loaded");
/*
 * Tear down the mac80211 registration; stops PLT mode first (the guard
 * condition line for wl1271_plt_stop() appears elided in this extract).
 */
6036 static void wl1271_unregister_hw(struct wl1271 *wl)
6039 wl1271_plt_stop(wl);
6041 ieee80211_unregister_hw(wl->hw);
6042 wl->mac80211_registered = false;
/*
 * Populate the ieee80211_hw / wiphy structures before registration:
 * cipher suites, hw flags, interface modes, scan limits, band tables
 * (per-device copies of the 2.4/5 GHz templates with this chip's HT
 * caps), queue layout and probe-response offload capabilities.
 */
6046 static int wl1271_init_ieee80211(struct wl1271 *wl)
6049 static const u32 cipher_suites[] = {
6050 WLAN_CIPHER_SUITE_WEP40,
6051 WLAN_CIPHER_SUITE_WEP104,
6052 WLAN_CIPHER_SUITE_TKIP,
6053 WLAN_CIPHER_SUITE_CCMP,
/* TI-proprietary GEM cipher, advertised alongside the standard ones */
6054 WL1271_CIPHER_SUITE_GEM,
6057 /* The tx descriptor buffer */
6058 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra headroom for TKIP encapsulation */
6060 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6061 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6064 /* FIXME: find a proper value */
6065 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6067 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
6068 IEEE80211_HW_SUPPORTS_PS |
6069 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
6070 IEEE80211_HW_HAS_RATE_CONTROL |
6071 IEEE80211_HW_CONNECTION_MONITOR |
6072 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
6073 IEEE80211_HW_SPECTRUM_MGMT |
6074 IEEE80211_HW_AP_LINK_PS |
6075 IEEE80211_HW_AMPDU_AGGREGATION |
6076 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
6077 IEEE80211_HW_QUEUE_CONTROL |
6078 IEEE80211_HW_CHANCTX_STA_CSA;
6080 wl->hw->wiphy->cipher_suites = cipher_suites;
6081 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6083 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6084 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
6085 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
6086 wl->hw->wiphy->max_scan_ssids = 1;
6087 wl->hw->wiphy->max_sched_scan_ssids = 16;
6088 wl->hw->wiphy->max_match_sets = 16;
6090 * Maximum length of elements in scanning probe request templates
6091 * should be the maximum length possible for a template, without
6092 * the IEEE80211 header of the template
6094 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6095 sizeof(struct ieee80211_header);
6097 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6098 sizeof(struct ieee80211_header);
6100 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6102 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6103 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6104 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6105 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6107 /* make sure all our channels fit in the scanned_ch bitmask */
6108 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6109 ARRAY_SIZE(wl1271_channels_5ghz) >
6110 WL1271_MAX_CHANNELS);
6112 * clear channel flags from the previous usage
6113 * and restore max_power & max_antenna_gain values.
6115 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6116 wl1271_band_2ghz.channels[i].flags = 0;
6117 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6118 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6121 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6122 wl1271_band_5ghz.channels[i].flags = 0;
6123 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6124 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6128 * We keep local copies of the band structs because we need to
6129 * modify them on a per-device basis.
6131 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6132 sizeof(wl1271_band_2ghz));
/* overlay this chip's HT capabilities onto the band copies */
6133 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6134 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6135 sizeof(*wl->ht_cap));
6136 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6137 sizeof(wl1271_band_5ghz));
6138 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6139 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6140 sizeof(*wl->ht_cap));
6142 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6143 &wl->bands[IEEE80211_BAND_2GHZ];
6144 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6145 &wl->bands[IEEE80211_BAND_5GHZ];
6148 * allow 4 queues per mac address we support +
6149 * 1 cab queue per mac + one global offchannel Tx queue
6151 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6153 /* the last queue is the offchannel queue */
6154 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6155 wl->hw->max_rates = 1;
6157 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6159 /* the FW answers probe-requests in AP-mode */
6160 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6161 wl->hw->wiphy->probe_resp_offload =
6162 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6163 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6164 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6166 /* allowed interface combinations */
6167 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6168 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6170 /* register vendor commands */
6171 wlcore_set_vendor_commands(wl->hw->wiphy);
6173 SET_IEEE80211_DEV(wl->hw, wl->dev);
6175 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6176 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6178 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and minimally initialize a wl1271 device instance together
 * with its ieee80211_hw: chip-private area, per-link TX queues, work
 * items, locks, aggregation buffer, dummy packet, FW-log page, mailbox
 * and scratch buffer.  On any failure the goto-cleanup chain unwinds
 * everything allocated so far and ERR_PTR(ret) is returned
 * (NOTE(review): the error labels themselves are elided in this
 * extract; code kept byte-identical).
 */
6183 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6186 struct ieee80211_hw *hw;
6191 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6193 wl1271_error("could not alloc ieee80211_hw");
6199 memset(wl, 0, sizeof(*wl));
/* chip-family (wl12xx/wl18xx) private data follows the core struct */
6201 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6203 wl1271_error("could not alloc wl priv");
6205 goto err_priv_alloc;
6208 INIT_LIST_HEAD(&wl->wlvif_list);
6213 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6214 * we don't allocate any additional resource here, so that's fine.
6216 for (i = 0; i < NUM_TX_QUEUES; i++)
6217 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6218 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6220 skb_queue_head_init(&wl->deferred_rx_queue);
6221 skb_queue_head_init(&wl->deferred_tx_queue);
6223 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6224 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6225 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6226 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6227 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6228 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6229 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is quiesced across system suspend */
6231 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6232 if (!wl->freezable_wq) {
6239 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6240 wl->band = IEEE80211_BAND_2GHZ;
6241 wl->channel_type = NL80211_CHAN_NO_HT;
6243 wl->sg_enabled = true;
6244 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6245 wl->recovery_count = 0;
6248 wl->ap_fw_ps_map = 0;
6250 wl->platform_quirks = 0;
6251 wl->system_hlid = WL12XX_SYSTEM_HLID;
6252 wl->active_sta_count = 0;
6253 wl->active_link_count = 0;
6255 init_waitqueue_head(&wl->fwlog_waitq);
6257 /* The system link is always allocated */
6258 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6260 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6261 for (i = 0; i < wl->num_tx_desc; i++)
6262 wl->tx_frames[i] = NULL;
6264 spin_lock_init(&wl->wl_lock);
6266 wl->state = WLCORE_STATE_OFF;
6267 wl->fw_type = WL12XX_FW_TYPE_NONE;
6268 mutex_init(&wl->mutex);
6269 mutex_init(&wl->flush_mutex);
6270 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be page-backed for the bus transfers */
6272 order = get_order(aggr_buf_size);
6273 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6274 if (!wl->aggr_buf) {
6278 wl->aggr_buf_size = aggr_buf_size;
6280 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6281 if (!wl->dummy_packet) {
6286 /* Allocate one page for the FW log */
6287 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6290 goto err_dummy_packet;
6293 wl->mbox_size = mbox_size;
/* GFP_DMA: the event mailbox may be DMA'd by the bus driver */
6294 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6300 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6301 if (!wl->buffer_32) {
/* error unwind: release resources in reverse allocation order */
6312 free_page((unsigned long)wl->fwlog);
6315 dev_kfree_skb(wl->dummy_packet);
6318 free_pages((unsigned long)wl->aggr_buf, order);
6321 destroy_workqueue(wl->freezable_wq);
6324 wl1271_debugfs_exit(wl);
6328 ieee80211_free_hw(hw);
6332 return ERR_PTR(ret);
/*
 * Release everything wlcore_alloc_hw() set up, in reverse order.  The
 * fwlog_size = -1 + wake_up sequence unblocks any reader sleeping on
 * fwlog_waitq before the log page is freed.
 */
6336 int wlcore_free_hw(struct wl1271 *wl)
6338 /* Unblock any fwlog readers */
6339 mutex_lock(&wl->mutex);
6340 wl->fwlog_size = -1;
6341 wake_up_interruptible_all(&wl->fwlog_waitq);
6342 mutex_unlock(&wl->mutex);
6344 wlcore_sysfs_free(wl);
6346 kfree(wl->buffer_32);
6348 free_page((unsigned long)wl->fwlog);
6349 dev_kfree_skb(wl->dummy_packet);
6350 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6352 wl1271_debugfs_exit(wl);
6356 wl->fw_type = WL12XX_FW_TYPE_NONE;
6360 kfree(wl->raw_fw_status);
6361 kfree(wl->fw_status);
6362 kfree(wl->tx_res_if);
6363 destroy_workqueue(wl->freezable_wq);
6366 ieee80211_free_hw(wl->hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps power in suspend (see wlcore_nvs_cb): wake on any trigger plus
 * pattern-match filters bounded by the RX filter hardware limits.
 */
6373 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6374 .flags = WIPHY_WOWLAN_ANY,
6375 .n_patterns = WL1271_MAX_RX_FILTERS,
6376 .pattern_min_len = 1,
6377 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard-IRQ half used on edge-triggered interrupt platforms
 * (WL12XX_PLATFORM_QUIRK_EDGE_IRQ): do nothing in hard-IRQ context,
 * just kick the threaded handler (wlcore_irq).
 */
6381 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6383 return IRQ_WAKE_THREAD;
/*
 * Completion callback for the asynchronous NVS firmware request issued
 * in wlcore_probe(); runs the rest of device bring-up: cache the NVS
 * blob (missing NVS is only a debug message, not fatal), chip-specific
 * setup, IRQ request (edge-triggered platforms get a hardirq that only
 * wakes the thread), wake-IRQ / WoWLAN configuration, hw identification,
 * mac80211 init + registration and sysfs.  Always releases the firmware
 * and completes nvs_loading_complete so wlcore_remove() can proceed.
 * NOTE(review): several error labels/branches are elided in this
 * extract; code kept byte-identical.
 */
6386 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6388 struct wl1271 *wl = context;
6389 struct platform_device *pdev = wl->pdev;
6390 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6391 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6392 unsigned long irqflags;
6394 irq_handler_t hardirq_fn = NULL;
6397 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6399 wl1271_error("Could not allocate nvs data");
6402 wl->nvs_len = fw->size;
6404 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6410 ret = wl->ops->setup(wl);
6414 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6416 /* adjust some runtime configuration parameters */
6417 wlcore_adjust_conf(wl);
6419 wl->irq = platform_get_irq(pdev, 0);
6420 wl->platform_quirks = pdata->platform_quirks;
6421 wl->if_ops = pdev_data->if_ops;
6423 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6424 irqflags = IRQF_TRIGGER_RISING;
6425 hardirq_fn = wlcore_hardirq;
6427 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6430 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6431 irqflags, pdev->name, wl);
6433 wl1271_error("request_irq() failed: %d", ret);
/* advertise WoWLAN only if the IRQ can wake and power stays on */
6438 ret = enable_irq_wake(wl->irq);
6440 wl->irq_wake_enabled = true;
6441 device_init_wakeup(wl->dev, 1);
6442 if (pdata->pwr_in_suspend)
6443 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* keep the IRQ masked until the interface is started */
6446 disable_irq(wl->irq);
6448 ret = wl12xx_get_hw_info(wl);
6450 wl1271_error("couldn't get hw info");
6454 ret = wl->ops->identify_chip(wl);
6458 ret = wl1271_init_ieee80211(wl);
6462 ret = wl1271_register_hw(wl);
6466 ret = wlcore_sysfs_init(wl);
6470 wl->initialized = true;
6474 wl1271_unregister_hw(wl);
6477 free_irq(wl->irq, wl);
6483 release_firmware(fw);
6484 complete_all(&wl->nvs_loading_complete);
/*
 * Platform-device probe: validate that the chip driver filled in its
 * ops/partition table, then kick off an asynchronous NVS firmware
 * request — the rest of initialization continues in wlcore_nvs_cb().
 * On request failure the nvs_loading_complete completion is signalled
 * so wlcore_remove() never blocks on it.
 */
6487 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6491 if (!wl->ops || !wl->ptable)
6494 wl->dev = &pdev->dev;
6496 platform_set_drvdata(pdev, wl);
6498 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6499 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6502 wl1271_error("request_firmware_nowait failed: %d", ret);
6503 complete_all(&wl->nvs_loading_complete);
/*
 * Platform-device remove: wait for the async NVS callback to finish
 * first (it may still be mid-initialization), bail out if init never
 * completed, then undo wake-IRQ setup, unregister from mac80211 and
 * release the interrupt.
 */
6510 int wlcore_remove(struct platform_device *pdev)
6512 struct wl1271 *wl = platform_get_drvdata(pdev);
6514 wait_for_completion(&wl->nvs_loading_complete);
6515 if (!wl->initialized)
6518 if (wl->irq_wake_enabled) {
6519 device_init_wakeup(wl->dev, 0);
6520 disable_irq_wake(wl->irq);
6522 wl1271_unregister_hw(wl);
6523 free_irq(wl->irq, wl);
/*
 * Module parameters and metadata.  debug_level is exported so the
 * chip-family modules (wl12xx/wl18xx) share one debug bitmask; the
 * int parameters default to -1 meaning "not set, use per-chip/Kconfig
 * default" (NOTE(review): consumers of these defaults are outside this
 * extract — confirm against full source).
 */
6530 u32 wl12xx_debug_level = DEBUG_NONE;
6531 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6532 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6533 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6535 module_param_named(fwlog, fwlog_param, charp, 0);
6536 MODULE_PARM_DESC(fwlog,
6537 "FW logger options: continuous, ondemand, dbgpins or disable");
6539 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6540 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6542 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6543 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6545 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6546 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6548 MODULE_LICENSE("GPL");
6549 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6550 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6551 MODULE_FIRMWARE(WL12XX_NVS_NAME);