3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
45 #define WL1271_BOOT_RETRIES 3
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has beed changed or rx_streaming should be disabled
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts);
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
526 #define WL1271_IRQ_MAX_LOOPS 256
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
709 bool cur_vif_running;
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = request_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
1093 wl1271_power_off(wl);
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl);
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
1138 static const char* const PLT_MODE[] = {
1147 mutex_lock(&wl->mutex);
1149 wl1271_notice("power up");
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1158 /* Indicate to lower levels that we are now in PLT mode */
1160 wl->plt_mode = plt_mode;
1164 ret = wl12xx_chip_wakeup(wl, true);
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1177 wl->chip.fw_ver_str);
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
1187 wl1271_power_off(wl);
1191 wl->plt_mode = PLT_OFF;
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1196 mutex_unlock(&wl->mutex);
1201 int wl1271_plt_stop(struct wl1271 *wl)
1205 wl1271_notice("power down");
1208 * Interrupts must be disabled before setting the state to OFF.
1209 * Otherwise, the interrupt handler might be called and exit without
1210 * reading the interrupt status.
1212 wlcore_disable_interrupts(wl);
1213 mutex_lock(&wl->mutex);
1215 mutex_unlock(&wl->mutex);
1218 * This will not necessarily enable interrupts as interrupts
1219 * may have been disabled when op_stop was called. It will,
1220 * however, balance the above call to disable_interrupts().
1222 wlcore_enable_interrupts(wl);
1224 wl1271_error("cannot power down because not in PLT "
1225 "state: %d", wl->state);
1230 mutex_unlock(&wl->mutex);
1232 wl1271_flush_deferred_work(wl);
1233 cancel_work_sync(&wl->netstack_work);
1234 cancel_work_sync(&wl->recovery_work);
1235 cancel_delayed_work_sync(&wl->elp_work);
1236 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1238 mutex_lock(&wl->mutex);
1239 wl1271_power_off(wl);
1241 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 wl->state = WLCORE_STATE_OFF;
1244 wl->plt_mode = PLT_OFF;
1246 mutex_unlock(&wl->mutex);
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 struct ieee80211_tx_control *control,
1254 struct sk_buff *skb)
1256 struct wl1271 *wl = hw->priv;
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ieee80211_vif *vif = info->control.vif;
1259 struct wl12xx_vif *wlvif = NULL;
1260 unsigned long flags;
1265 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 ieee80211_free_txskb(hw, skb);
1270 wlvif = wl12xx_vif_to_data(vif);
1271 mapping = skb_get_queue_mapping(skb);
1272 q = wl1271_tx_get_queue(mapping);
1274 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1276 spin_lock_irqsave(&wl->wl_lock, flags);
1279 * drop the packet if the link is invalid or the queue is stopped
1280 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 * allow these packets through.
1283 if (hlid == WL12XX_INVALID_LINK_ID ||
1284 (!test_bit(hlid, wlvif->links_map)) ||
1285 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 ieee80211_free_txskb(hw, skb);
1293 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1295 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1297 wl->tx_queue_count[q]++;
1298 wlvif->tx_queue_count[q]++;
1301 * The workqueue is slow to process the tx_queue and we need stop
1302 * the queue here, otherwise the queue will get too long.
1304 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 wlcore_stop_queue_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 * The chip specific setup must run before the first TX packet -
1314 * before that, the tx_work will not be initialized!
1317 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 ieee80211_queue_work(wl->hw, &wl->tx_work);
1322 spin_unlock_irqrestore(&wl->wl_lock, flags);
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1327 unsigned long flags;
1330 /* no need to queue a new dummy packet if one is already pending */
1331 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1334 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1336 spin_lock_irqsave(&wl->wl_lock, flags);
1337 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 wl->tx_queue_count[q]++;
1339 spin_unlock_irqrestore(&wl->wl_lock, flags);
1341 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 return wlcore_tx_work_locked(wl);
1346 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 * interrupt handler function
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1370 wl1271_warning("Failed to allocate a dummy packet skb");
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1399 int num_fields = 0, in_field = 0, fields_size = 0;
1400 int i, pattern_len = 0;
1403 wl1271_warning("No mask in WoWLAN pattern");
1408 * The pattern is broken up into segments of bytes at different offsets
1409 * that need to be checked by the FW filter. Each segment is called
1410 * a field in the FW API. We verify that the total number of fields
1411 * required for this pattern won't exceed FW limits (8)
1412 * as well as the total fields buffer won't exceed the FW limit.
1413 * Note that if there's a pattern which crosses Ethernet/IP header
1414 * boundary a new field is required.
1416 for (i = 0; i < p->pattern_len; i++) {
1417 if (test_bit(i, (unsigned long *)p->mask)) {
1422 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1424 fields_size += pattern_len +
1425 RX_FILTER_FIELD_OVERHEAD;
1433 fields_size += pattern_len +
1434 RX_FILTER_FIELD_OVERHEAD;
1441 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 wl1271_warning("RX Filter too complex. Too many segments");
1450 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 wl1271_warning("RX filter pattern is too big");
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 u16 offset, u8 flags,
1478 const u8 *pattern, u8 len)
1480 struct wl12xx_rx_filter_field *field;
1482 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 wl1271_warning("Max fields per RX filter. can't alloc another");
1487 field = &filter->fields[filter->num_fields];
1489 field->pattern = kzalloc(len, GFP_KERNEL);
1490 if (!field->pattern) {
1491 wl1271_warning("Failed to allocate RX filter pattern");
1495 filter->num_fields++;
1497 field->offset = cpu_to_le16(offset);
1498 field->flags = flags;
1500 memcpy(field->pattern, pattern, len);
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1507 int i, fields_size = 0;
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 struct wl12xx_rx_filter_field *field;
1523 for (i = 0; i < filter->num_fields; i++) {
1524 field = (struct wl12xx_rx_filter_field *)buf;
1526 field->offset = filter->fields[i].offset;
1527 field->flags = filter->fields[i].flags;
1528 field->len = filter->fields[i].len;
1530 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 buf += sizeof(struct wl12xx_rx_filter_field) -
1532 sizeof(u8 *) + field->len;
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 struct wl12xx_rx_filter **f)
1545 struct wl12xx_rx_filter *filter;
1549 filter = wl1271_rx_filter_alloc();
1551 wl1271_warning("Failed to alloc rx filter");
1557 while (i < p->pattern_len) {
1558 if (!test_bit(i, (unsigned long *)p->mask)) {
1563 for (j = i; j < p->pattern_len; j++) {
1564 if (!test_bit(j, (unsigned long *)p->mask))
1567 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1574 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1576 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1582 ret = wl1271_rx_filter_alloc_field(filter,
1585 &p->pattern[i], len);
1592 filter->action = FILTER_SIGNAL;
1598 wl1271_rx_filter_free(filter);
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 struct cfg80211_wowlan *wow)
1609 if (!wow || wow->any || !wow->n_patterns) {
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1615 ret = wl1271_rx_filter_clear_all(wl);
1622 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1625 /* Validate all incoming patterns before clearing current FW state */
1626 for (i = 0; i < wow->n_patterns; i++) {
1627 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1629 wl1271_warning("Bad wowlan pattern %d", i);
1634 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 ret = wl1271_rx_filter_clear_all(wl);
1642 /* Translate WoWLAN patterns into filters */
1643 for (i = 0; i < wow->n_patterns; i++) {
1644 struct cfg80211_pkt_pattern *p;
1645 struct wl12xx_rx_filter *filter = NULL;
1647 p = &wow->patterns[i];
1649 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1651 wl1271_warning("Failed to create an RX filter from "
1652 "wowlan pattern %d", i);
1656 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1658 wl1271_rx_filter_free(filter);
1663 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1678 ret = wl1271_configure_wowlan(wl, wow);
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 ret = wl1271_configure_wowlan(wl, wow);
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1735 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1738 if ((!is_ap) && (!is_sta))
1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1745 wl1271_configure_wowlan(wl, NULL);
1748 if ((wl->conf.conn.suspend_wake_up_event ==
1749 wl->conf.conn.wake_up_event) &&
1750 (wl->conf.conn.suspend_listen_interval ==
1751 wl->conf.conn.listen_interval))
1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 wl->conf.conn.wake_up_event,
1756 wl->conf.conn.listen_interval);
1759 wl1271_error("resume: wake up conditions failed: %d",
1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 struct cfg80211_wowlan *wow)
1770 struct wl1271 *wl = hw->priv;
1771 struct wl12xx_vif *wlvif;
1774 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 /* we want to perform the recovery before suspending */
1778 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 wl1271_warning("postponing suspend to perform recovery");
1783 wl1271_tx_flush(wl);
1785 mutex_lock(&wl->mutex);
1787 ret = wl1271_ps_elp_wakeup(wl);
1789 mutex_unlock(&wl->mutex);
1793 wl->wow_enabled = true;
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 ret = wl1271_configure_suspend(wl, wlvif, wow);
1797 mutex_unlock(&wl->mutex);
1798 wl1271_warning("couldn't prepare device to suspend");
1803 /* disable fast link flow control notifications from FW */
1804 ret = wlcore_hw_interrupt_notify(wl, false);
1808 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1809 ret = wlcore_hw_rx_ba_filter(wl,
1810 !!wl->conf.conn.suspend_rx_ba_activity);
1815 wl1271_ps_elp_sleep(wl);
1816 mutex_unlock(&wl->mutex);
1819 wl1271_warning("couldn't prepare device to suspend");
1823 /* flush any remaining work */
1824 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1827 * disable and re-enable interrupts in order to flush
1830 wlcore_disable_interrupts(wl);
1833 * set suspended flag to avoid triggering a new threaded_irq
1834 * work. no need for spinlock as interrupts are disabled.
1836 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1838 wlcore_enable_interrupts(wl);
1839 flush_work(&wl->tx_work);
1840 flush_delayed_work(&wl->elp_work);
1843 * Cancel the watchdog even if above tx_flush failed. We will detect
1844 * it on resume anyway.
1846 cancel_delayed_work(&wl->tx_watchdog_work);
1851 static int wl1271_op_resume(struct ieee80211_hw *hw)
1853 struct wl1271 *wl = hw->priv;
1854 struct wl12xx_vif *wlvif;
1855 unsigned long flags;
1856 bool run_irq_work = false, pending_recovery;
1859 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1861 WARN_ON(!wl->wow_enabled);
1864 * re-enable irq_work enqueuing, and call irq_work directly if
1865 * there is a pending work.
1867 spin_lock_irqsave(&wl->wl_lock, flags);
1868 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1869 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1870 run_irq_work = true;
1871 spin_unlock_irqrestore(&wl->wl_lock, flags);
1873 mutex_lock(&wl->mutex);
1875 /* test the recovery flag before calling any SDIO functions */
1876 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1880 wl1271_debug(DEBUG_MAC80211,
1881 "run postponed irq_work directly");
1883 /* don't talk to the HW if recovery is pending */
1884 if (!pending_recovery) {
1885 ret = wlcore_irq_locked(wl);
1887 wl12xx_queue_recovery_work(wl);
1890 wlcore_enable_interrupts(wl);
1893 if (pending_recovery) {
1894 wl1271_warning("queuing forgotten recovery on resume");
1895 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1899 ret = wl1271_ps_elp_wakeup(wl);
1903 wl12xx_for_each_wlvif(wl, wlvif) {
1904 wl1271_configure_resume(wl, wlvif);
1907 ret = wlcore_hw_interrupt_notify(wl, true);
1911 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1912 ret = wlcore_hw_rx_ba_filter(wl, false);
1917 wl1271_ps_elp_sleep(wl);
1920 wl->wow_enabled = false;
1923 * Set a flag to re-init the watchdog on the first Tx after resume.
1924 * That way we avoid possible conditions where Tx-complete interrupts
1925 * fail to arrive and we perform a spurious recovery.
1927 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1928 mutex_unlock(&wl->mutex);
1934 static int wl1271_op_start(struct ieee80211_hw *hw)
1936 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1939 * We have to delay the booting of the hardware because
1940 * we need to know the local MAC address before downloading and
1941 * initializing the firmware. The MAC address cannot be changed
1942 * after boot, and without the proper MAC address, the firmware
1943 * will not function properly.
1945 * The MAC address is first known when the corresponding interface
1946 * is added. That is where we will initialize the hardware.
1952 static void wlcore_op_stop_locked(struct wl1271 *wl)
1956 if (wl->state == WLCORE_STATE_OFF) {
1957 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1959 wlcore_enable_interrupts(wl);
1965 * this must be before the cancel_work calls below, so that the work
1966 * functions don't perform further work.
1968 wl->state = WLCORE_STATE_OFF;
1971 * Use the nosync variant to disable interrupts, so the mutex could be
1972 * held while doing so without deadlocking.
1974 wlcore_disable_interrupts_nosync(wl);
1976 mutex_unlock(&wl->mutex);
1978 wlcore_synchronize_interrupts(wl);
1979 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1980 cancel_work_sync(&wl->recovery_work);
1981 wl1271_flush_deferred_work(wl);
1982 cancel_delayed_work_sync(&wl->scan_complete_work);
1983 cancel_work_sync(&wl->netstack_work);
1984 cancel_work_sync(&wl->tx_work);
1985 cancel_delayed_work_sync(&wl->elp_work);
1986 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1988 /* let's notify MAC80211 about the remaining pending TX frames */
1989 mutex_lock(&wl->mutex);
1990 wl12xx_tx_reset(wl);
1992 wl1271_power_off(wl);
1994 * In case a recovery was scheduled, interrupts were disabled to avoid
1995 * an interrupt storm. Now that the power is down, it is safe to
1996 * re-enable interrupts to balance the disable depth
1998 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1999 wlcore_enable_interrupts(wl);
2001 wl->band = IEEE80211_BAND_2GHZ;
2004 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2005 wl->channel_type = NL80211_CHAN_NO_HT;
2006 wl->tx_blocks_available = 0;
2007 wl->tx_allocated_blocks = 0;
2008 wl->tx_results_count = 0;
2009 wl->tx_packets_count = 0;
2010 wl->time_offset = 0;
2011 wl->ap_fw_ps_map = 0;
2013 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2014 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2015 memset(wl->links_map, 0, sizeof(wl->links_map));
2016 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2017 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2018 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2019 wl->active_sta_count = 0;
2020 wl->active_link_count = 0;
2022 /* The system link is always allocated */
2023 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2024 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2025 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2028 * this is performed after the cancel_work calls and the associated
2029 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2030 * get executed before all these vars have been reset.
2034 wl->tx_blocks_freed = 0;
2036 for (i = 0; i < NUM_TX_QUEUES; i++) {
2037 wl->tx_pkts_freed[i] = 0;
2038 wl->tx_allocated_pkts[i] = 0;
2041 wl1271_debugfs_reset(wl);
2043 kfree(wl->raw_fw_status);
2044 wl->raw_fw_status = NULL;
2045 kfree(wl->fw_status);
2046 wl->fw_status = NULL;
2047 kfree(wl->tx_res_if);
2048 wl->tx_res_if = NULL;
2049 kfree(wl->target_mem_map);
2050 wl->target_mem_map = NULL;
2053 * FW channels must be re-calibrated after recovery,
2054 * save current Reg-Domain channel configuration and clear it.
2056 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2057 sizeof(wl->reg_ch_conf_pending));
2058 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2061 static void wlcore_op_stop(struct ieee80211_hw *hw)
2063 struct wl1271 *wl = hw->priv;
2065 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2067 mutex_lock(&wl->mutex);
2069 wlcore_op_stop_locked(wl);
2071 mutex_unlock(&wl->mutex);
2074 static void wlcore_channel_switch_work(struct work_struct *work)
2076 struct delayed_work *dwork;
2078 struct ieee80211_vif *vif;
2079 struct wl12xx_vif *wlvif;
2082 dwork = container_of(work, struct delayed_work, work);
2083 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2086 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2088 mutex_lock(&wl->mutex);
2090 if (unlikely(wl->state != WLCORE_STATE_ON))
2093 /* check the channel switch is still ongoing */
2094 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2097 vif = wl12xx_wlvif_to_vif(wlvif);
2098 ieee80211_chswitch_done(vif, false);
2100 ret = wl1271_ps_elp_wakeup(wl);
2104 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2106 wl1271_ps_elp_sleep(wl);
2108 mutex_unlock(&wl->mutex);
2111 static void wlcore_connection_loss_work(struct work_struct *work)
2113 struct delayed_work *dwork;
2115 struct ieee80211_vif *vif;
2116 struct wl12xx_vif *wlvif;
2118 dwork = container_of(work, struct delayed_work, work);
2119 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2122 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2124 mutex_lock(&wl->mutex);
2126 if (unlikely(wl->state != WLCORE_STATE_ON))
2129 /* Call mac80211 connection loss */
2130 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2133 vif = wl12xx_wlvif_to_vif(wlvif);
2134 ieee80211_connection_loss(vif);
2136 mutex_unlock(&wl->mutex);
2139 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2141 struct delayed_work *dwork;
2143 struct wl12xx_vif *wlvif;
2144 unsigned long time_spare;
2147 dwork = container_of(work, struct delayed_work, work);
2148 wlvif = container_of(dwork, struct wl12xx_vif,
2149 pending_auth_complete_work);
2152 mutex_lock(&wl->mutex);
2154 if (unlikely(wl->state != WLCORE_STATE_ON))
2158 * Make sure a second really passed since the last auth reply. Maybe
2159 * a second auth reply arrived while we were stuck on the mutex.
2160 * Check for a little less than the timeout to protect from scheduler
2163 time_spare = jiffies +
2164 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2165 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2168 ret = wl1271_ps_elp_wakeup(wl);
2172 /* cancel the ROC if active */
2173 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2175 wl1271_ps_elp_sleep(wl);
2177 mutex_unlock(&wl->mutex);
2180 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2182 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2183 WL12XX_MAX_RATE_POLICIES);
2184 if (policy >= WL12XX_MAX_RATE_POLICIES)
2187 __set_bit(policy, wl->rate_policies_map);
2192 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2194 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2197 __clear_bit(*idx, wl->rate_policies_map);
2198 *idx = WL12XX_MAX_RATE_POLICIES;
2201 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2203 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2204 WLCORE_MAX_KLV_TEMPLATES);
2205 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2208 __set_bit(policy, wl->klv_templates_map);
2213 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2215 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2218 __clear_bit(*idx, wl->klv_templates_map);
2219 *idx = WLCORE_MAX_KLV_TEMPLATES;
2222 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2224 switch (wlvif->bss_type) {
2225 case BSS_TYPE_AP_BSS:
2227 return WL1271_ROLE_P2P_GO;
2229 return WL1271_ROLE_AP;
2231 case BSS_TYPE_STA_BSS:
2233 return WL1271_ROLE_P2P_CL;
2235 return WL1271_ROLE_STA;
2238 return WL1271_ROLE_IBSS;
2241 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2243 return WL12XX_INVALID_ROLE_TYPE;
2246 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2248 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2251 /* clear everything but the persistent data */
2252 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2254 switch (ieee80211_vif_type_p2p(vif)) {
2255 case NL80211_IFTYPE_P2P_CLIENT:
2258 case NL80211_IFTYPE_STATION:
2259 wlvif->bss_type = BSS_TYPE_STA_BSS;
2261 case NL80211_IFTYPE_ADHOC:
2262 wlvif->bss_type = BSS_TYPE_IBSS;
2264 case NL80211_IFTYPE_P2P_GO:
2267 case NL80211_IFTYPE_AP:
2268 wlvif->bss_type = BSS_TYPE_AP_BSS;
2271 wlvif->bss_type = MAX_BSS_TYPE;
2275 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2276 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2277 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2279 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2280 wlvif->bss_type == BSS_TYPE_IBSS) {
2281 /* init sta/ibss data */
2282 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2283 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2284 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2285 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2286 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2287 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2288 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2289 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2292 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2293 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2294 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2295 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2296 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2297 wl12xx_allocate_rate_policy(wl,
2298 &wlvif->ap.ucast_rate_idx[i]);
2299 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2301 * TODO: check if basic_rate shouldn't be
2302 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2303 * instead (the same thing for STA above).
2305 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2306 /* TODO: this seems to be used only for STA, check it */
2307 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2310 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2311 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2312 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2315 * mac80211 configures some values globally, while we treat them
2316 * per-interface. thus, on init, we have to copy them from wl
2318 wlvif->band = wl->band;
2319 wlvif->channel = wl->channel;
2320 wlvif->power_level = wl->power_level;
2321 wlvif->channel_type = wl->channel_type;
2323 INIT_WORK(&wlvif->rx_streaming_enable_work,
2324 wl1271_rx_streaming_enable_work);
2325 INIT_WORK(&wlvif->rx_streaming_disable_work,
2326 wl1271_rx_streaming_disable_work);
2327 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2328 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2329 wlcore_channel_switch_work);
2330 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2331 wlcore_connection_loss_work);
2332 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2333 wlcore_pending_auth_complete_work);
2334 INIT_LIST_HEAD(&wlvif->list);
2336 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2337 (unsigned long) wlvif);
2341 static int wl12xx_init_fw(struct wl1271 *wl)
2343 int retries = WL1271_BOOT_RETRIES;
2344 bool booted = false;
2345 struct wiphy *wiphy = wl->hw->wiphy;
2350 ret = wl12xx_chip_wakeup(wl, false);
2354 ret = wl->ops->boot(wl);
2358 ret = wl1271_hw_init(wl);
2366 mutex_unlock(&wl->mutex);
2367 /* Unlocking the mutex in the middle of handling is
2368 inherently unsafe. In this case we deem it safe to do,
2369 because we need to let any possibly pending IRQ out of
2370 the system (and while we are WLCORE_STATE_OFF the IRQ
2371 work function will not do anything.) Also, any other
2372 possible concurrent operations will fail due to the
2373 current state, hence the wl1271 struct should be safe. */
2374 wlcore_disable_interrupts(wl);
2375 wl1271_flush_deferred_work(wl);
2376 cancel_work_sync(&wl->netstack_work);
2377 mutex_lock(&wl->mutex);
2379 wl1271_power_off(wl);
2383 wl1271_error("firmware boot failed despite %d retries",
2384 WL1271_BOOT_RETRIES);
2388 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2390 /* update hw/fw version info in wiphy struct */
2391 wiphy->hw_version = wl->chip.id;
2392 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2393 sizeof(wiphy->fw_version));
2396 * Now we know if 11a is supported (info from the NVS), so disable
2397 * 11a channels if not supported
2399 if (!wl->enable_11a)
2400 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2402 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2403 wl->enable_11a ? "" : "not ");
2405 wl->state = WLCORE_STATE_ON;
2410 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2412 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2416 * Check whether a fw switch (i.e. moving from one loaded
2417 * fw to another) is needed. This function is also responsible
2418 * for updating wl->last_vif_count, so it must be called before
2419 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2422 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2423 struct vif_counter_data vif_counter_data,
2426 enum wl12xx_fw_type current_fw = wl->fw_type;
2427 u8 vif_count = vif_counter_data.counter;
2429 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2432 /* increase the vif count if this is a new vif */
2433 if (add && !vif_counter_data.cur_vif_running)
2436 wl->last_vif_count = vif_count;
2438 /* no need for fw change if the device is OFF */
2439 if (wl->state == WLCORE_STATE_OFF)
2442 /* no need for fw change if a single fw is used */
2443 if (!wl->mr_fw_name)
2446 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2448 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2455 * Enter "forced psm". Make sure the sta is in psm against the ap,
2456 * to make the fw switch a bit more disconnection-persistent.
2458 static void wl12xx_force_active_psm(struct wl1271 *wl)
2460 struct wl12xx_vif *wlvif;
2462 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2463 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2467 struct wlcore_hw_queue_iter_data {
2468 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2470 struct ieee80211_vif *vif;
2471 /* is the current vif among those iterated */
2475 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2476 struct ieee80211_vif *vif)
2478 struct wlcore_hw_queue_iter_data *iter_data = data;
2480 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2483 if (iter_data->cur_running || vif == iter_data->vif) {
2484 iter_data->cur_running = true;
2488 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2491 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2492 struct wl12xx_vif *wlvif)
2494 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2495 struct wlcore_hw_queue_iter_data iter_data = {};
2498 iter_data.vif = vif;
2500 /* mark all bits taken by active interfaces */
2501 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2502 IEEE80211_IFACE_ITER_RESUME_ALL,
2503 wlcore_hw_queue_iter, &iter_data);
2505 /* the current vif is already running in mac80211 (resume/recovery) */
2506 if (iter_data.cur_running) {
2507 wlvif->hw_queue_base = vif->hw_queue[0];
2508 wl1271_debug(DEBUG_MAC80211,
2509 "using pre-allocated hw queue base %d",
2510 wlvif->hw_queue_base);
2512 /* interface type might have changed type */
2513 goto adjust_cab_queue;
2516 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2517 WLCORE_NUM_MAC_ADDRESSES);
2518 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2521 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2522 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2523 wlvif->hw_queue_base);
2525 for (i = 0; i < NUM_TX_QUEUES; i++) {
2526 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2527 /* register hw queues in mac80211 */
2528 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2532 /* the last places are reserved for cab queues per interface */
2533 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2534 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2535 wlvif->hw_queue_base / NUM_TX_QUEUES;
2537 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2542 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2543 struct ieee80211_vif *vif)
2545 struct wl1271 *wl = hw->priv;
2546 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2547 struct vif_counter_data vif_count;
2552 wl1271_error("Adding Interface not allowed while in PLT mode");
2556 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2557 IEEE80211_VIF_SUPPORTS_UAPSD |
2558 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2560 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2561 ieee80211_vif_type_p2p(vif), vif->addr);
2563 wl12xx_get_vif_count(hw, vif, &vif_count);
2565 mutex_lock(&wl->mutex);
2566 ret = wl1271_ps_elp_wakeup(wl);
2571 * in some very corner case HW recovery scenarios its possible to
2572 * get here before __wl1271_op_remove_interface is complete, so
2573 * opt out if that is the case.
2575 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2576 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2582 ret = wl12xx_init_vif_data(wl, vif);
2587 role_type = wl12xx_get_role_type(wl, wlvif);
2588 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2593 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2597 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2598 wl12xx_force_active_psm(wl);
2599 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2600 mutex_unlock(&wl->mutex);
2601 wl1271_recovery_work(&wl->recovery_work);
2606 * TODO: after the nvs issue will be solved, move this block
2607 * to start(), and make sure here the driver is ON.
2609 if (wl->state == WLCORE_STATE_OFF) {
2611 * we still need this in order to configure the fw
2612 * while uploading the nvs
2614 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2616 ret = wl12xx_init_fw(wl);
2621 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2622 role_type, &wlvif->role_id);
2626 ret = wl1271_init_vif_specific(wl, vif);
2630 list_add(&wlvif->list, &wl->wlvif_list);
2631 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2633 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2638 wl1271_ps_elp_sleep(wl);
2640 mutex_unlock(&wl->mutex);
2645 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2646 struct ieee80211_vif *vif,
2647 bool reset_tx_queues)
2649 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2651 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2653 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2655 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2658 /* because of hardware recovery, we may get here twice */
2659 if (wl->state == WLCORE_STATE_OFF)
2662 wl1271_info("down");
2664 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2665 wl->scan_wlvif == wlvif) {
2667 * Rearm the tx watchdog just before idling scan. This
2668 * prevents just-finished scans from triggering the watchdog
2670 wl12xx_rearm_tx_watchdog_locked(wl);
2672 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2673 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2674 wl->scan_wlvif = NULL;
2675 wl->scan.req = NULL;
2676 ieee80211_scan_completed(wl->hw, true);
2679 if (wl->sched_vif == wlvif)
2680 wl->sched_vif = NULL;
2682 if (wl->roc_vif == vif) {
2684 ieee80211_remain_on_channel_expired(wl->hw);
2687 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2688 /* disable active roles */
2689 ret = wl1271_ps_elp_wakeup(wl);
2693 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2694 wlvif->bss_type == BSS_TYPE_IBSS) {
2695 if (wl12xx_dev_role_started(wlvif))
2696 wl12xx_stop_dev(wl, wlvif);
2699 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2703 wl1271_ps_elp_sleep(wl);
2706 wl12xx_tx_reset_wlvif(wl, wlvif);
2708 /* clear all hlids (except system_hlid) */
2709 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2711 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2712 wlvif->bss_type == BSS_TYPE_IBSS) {
2713 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2714 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2715 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2716 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2717 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2719 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2720 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2721 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2722 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2723 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2724 wl12xx_free_rate_policy(wl,
2725 &wlvif->ap.ucast_rate_idx[i]);
2726 wl1271_free_ap_keys(wl, wlvif);
2729 dev_kfree_skb(wlvif->probereq);
2730 wlvif->probereq = NULL;
2731 if (wl->last_wlvif == wlvif)
2732 wl->last_wlvif = NULL;
2733 list_del(&wlvif->list);
2734 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2735 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2736 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2744 * Last AP, have more stations. Configure sleep auth according to STA.
2745 * Don't do thin on unintended recovery.
2747 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2748 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2751 if (wl->ap_count == 0 && is_ap) {
2752 /* mask ap events */
2753 wl->event_mask &= ~wl->ap_event_mask;
2754 wl1271_event_unmask(wl);
2757 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2758 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2759 /* Configure for power according to debugfs */
2760 if (sta_auth != WL1271_PSM_ILLEGAL)
2761 wl1271_acx_sleep_auth(wl, sta_auth);
2762 /* Configure for ELP power saving */
2764 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2768 mutex_unlock(&wl->mutex);
2770 del_timer_sync(&wlvif->rx_streaming_timer);
2771 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2772 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2773 cancel_work_sync(&wlvif->rc_update_work);
2774 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2775 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2776 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2778 mutex_lock(&wl->mutex);
2781 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2782 struct ieee80211_vif *vif)
2784 struct wl1271 *wl = hw->priv;
2785 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2786 struct wl12xx_vif *iter;
2787 struct vif_counter_data vif_count;
2789 wl12xx_get_vif_count(hw, vif, &vif_count);
2790 mutex_lock(&wl->mutex);
2792 if (wl->state == WLCORE_STATE_OFF ||
2793 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2797 * wl->vif can be null here if someone shuts down the interface
2798 * just when hardware recovery has been started.
2800 wl12xx_for_each_wlvif(wl, iter) {
2804 __wl1271_op_remove_interface(wl, vif, true);
2807 WARN_ON(iter != wlvif);
2808 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2809 wl12xx_force_active_psm(wl);
2810 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2811 wl12xx_queue_recovery_work(wl);
2814 mutex_unlock(&wl->mutex);
2817 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2818 struct ieee80211_vif *vif,
2819 enum nl80211_iftype new_type, bool p2p)
2821 struct wl1271 *wl = hw->priv;
2824 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2825 wl1271_op_remove_interface(hw, vif);
2827 vif->type = new_type;
2829 ret = wl1271_op_add_interface(hw, vif);
2831 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2835 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2838 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2841 * One of the side effects of the JOIN command is that is clears
2842 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2843 * to a WPA/WPA2 access point will therefore kill the data-path.
2844 * Currently the only valid scenario for JOIN during association
2845 * is on roaming, in which case we will also be given new keys.
2846 * Keep the below message for now, unless it starts bothering
2847 * users who really like to roam a lot :)
2849 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2850 wl1271_info("JOIN while associated.");
2852 /* clear encryption type */
2853 wlvif->encryption_type = KEY_NONE;
2856 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2858 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2860 * TODO: this is an ugly workaround for wl12xx fw
2861 * bug - we are not able to tx/rx after the first
2862 * start_sta, so make dummy start+stop calls,
2863 * and then call start_sta again.
2864 * this should be fixed in the fw.
2866 wl12xx_cmd_role_start_sta(wl, wlvif);
2867 wl12xx_cmd_role_stop_sta(wl, wlvif);
2870 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2876 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2880 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2884 wl1271_error("No SSID in IEs!");
2889 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2890 wl1271_error("SSID is too long!");
2894 wlvif->ssid_len = ssid_len;
2895 memcpy(wlvif->ssid, ptr+2, ssid_len);
2899 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2901 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2902 struct sk_buff *skb;
2905 /* we currently only support setting the ssid from the ap probe req */
2906 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2909 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2913 ieoffset = offsetof(struct ieee80211_mgmt,
2914 u.probe_req.variable);
2915 wl1271_ssid_set(wlvif, skb, ieoffset);
2921 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2922 struct ieee80211_bss_conf *bss_conf,
2928 wlvif->aid = bss_conf->aid;
2929 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2930 wlvif->beacon_int = bss_conf->beacon_int;
2931 wlvif->wmm_enabled = bss_conf->qos;
2933 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2936 * with wl1271, we don't need to update the
2937 * beacon_int and dtim_period, because the firmware
2938 * updates it by itself when the first beacon is
2939 * received after a join.
2941 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2946 * Get a template for hardware connection maintenance
2948 dev_kfree_skb(wlvif->probereq);
2949 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2952 ieoffset = offsetof(struct ieee80211_mgmt,
2953 u.probe_req.variable);
2954 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2956 /* enable the connection monitoring feature */
2957 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2962 * The join command disable the keep-alive mode, shut down its process,
2963 * and also clear the template config, so we need to reset it all after
2964 * the join. The acx_aid starts the keep-alive process, and the order
2965 * of the commands below is relevant.
2967 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2971 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2975 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2979 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2980 wlvif->sta.klv_template_id,
2981 ACX_KEEP_ALIVE_TPL_VALID);
2986 * The default fw psm configuration is AUTO, while mac80211 default
2987 * setting is off (ACTIVE), so sync the fw with the correct value.
2989 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2995 wl1271_tx_enabled_rates_get(wl,
2998 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3006 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3009 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3011 /* make sure we are connected (sta) joined */
3013 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3016 /* make sure we are joined (ibss) */
3018 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3022 /* use defaults when not associated */
3025 /* free probe-request template */
3026 dev_kfree_skb(wlvif->probereq);
3027 wlvif->probereq = NULL;
3029 /* disable connection monitor features */
3030 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3034 /* Disable the keep-alive feature */
3035 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3039 /* disable beacon filtering */
3040 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3045 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3046 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3048 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3049 ieee80211_chswitch_done(vif, false);
3050 cancel_delayed_work(&wlvif->channel_switch_work);
3053 /* invalidate keep-alive template */
3054 wl1271_acx_keep_alive_config(wl, wlvif,
3055 wlvif->sta.klv_template_id,
3056 ACX_KEEP_ALIVE_TPL_INVALID);
3061 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3063 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3064 wlvif->rate_set = wlvif->basic_rate_set;
3067 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3070 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3072 if (idle == cur_idle)
3076 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3078 /* The current firmware only supports sched_scan in idle */
3079 if (wl->sched_vif == wlvif)
3080 wl->ops->sched_scan_stop(wl, wlvif);
3082 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3086 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087 struct ieee80211_conf *conf, u32 changed)
3091 if (conf->power_level != wlvif->power_level) {
3092 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3096 wlvif->power_level = conf->power_level;
3102 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3104 struct wl1271 *wl = hw->priv;
3105 struct wl12xx_vif *wlvif;
3106 struct ieee80211_conf *conf = &hw->conf;
3109 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3111 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3113 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3116 mutex_lock(&wl->mutex);
3118 if (changed & IEEE80211_CONF_CHANGE_POWER)
3119 wl->power_level = conf->power_level;
3121 if (unlikely(wl->state != WLCORE_STATE_ON))
3124 ret = wl1271_ps_elp_wakeup(wl);
3128 /* configure each interface */
3129 wl12xx_for_each_wlvif(wl, wlvif) {
3130 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3136 wl1271_ps_elp_sleep(wl);
3139 mutex_unlock(&wl->mutex);
3144 struct wl1271_filter_params {
3147 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3150 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3151 struct netdev_hw_addr_list *mc_list)
3153 struct wl1271_filter_params *fp;
3154 struct netdev_hw_addr *ha;
3156 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3158 wl1271_error("Out of memory setting filters.");
3162 /* update multicast filtering parameters */
3163 fp->mc_list_length = 0;
3164 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3165 fp->enabled = false;
3168 netdev_hw_addr_list_for_each(ha, mc_list) {
3169 memcpy(fp->mc_list[fp->mc_list_length],
3170 ha->addr, ETH_ALEN);
3171 fp->mc_list_length++;
3175 return (u64)(unsigned long)fp;
3178 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3180 FIF_BCN_PRBRESP_PROMISC | \
3184 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3185 unsigned int changed,
3186 unsigned int *total, u64 multicast)
3188 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3189 struct wl1271 *wl = hw->priv;
3190 struct wl12xx_vif *wlvif;
3194 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3195 " total %x", changed, *total);
3197 mutex_lock(&wl->mutex);
3199 *total &= WL1271_SUPPORTED_FILTERS;
3200 changed &= WL1271_SUPPORTED_FILTERS;
3202 if (unlikely(wl->state != WLCORE_STATE_ON))
3205 ret = wl1271_ps_elp_wakeup(wl);
3209 wl12xx_for_each_wlvif(wl, wlvif) {
3210 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3211 if (*total & FIF_ALLMULTI)
3212 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3216 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3219 fp->mc_list_length);
3226 * the fw doesn't provide an api to configure the filters. instead,
3227 * the filters configuration is based on the active roles / ROC
3232 wl1271_ps_elp_sleep(wl);
3235 mutex_unlock(&wl->mutex);
3239 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3240 u8 id, u8 key_type, u8 key_size,
3241 const u8 *key, u8 hlid, u32 tx_seq_32,
3244 struct wl1271_ap_key *ap_key;
3247 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3249 if (key_size > MAX_KEY_SIZE)
3253 * Find next free entry in ap_keys. Also check we are not replacing
3256 for (i = 0; i < MAX_NUM_KEYS; i++) {
3257 if (wlvif->ap.recorded_keys[i] == NULL)
3260 if (wlvif->ap.recorded_keys[i]->id == id) {
3261 wl1271_warning("trying to record key replacement");
3266 if (i == MAX_NUM_KEYS)
3269 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3274 ap_key->key_type = key_type;
3275 ap_key->key_size = key_size;
3276 memcpy(ap_key->key, key, key_size);
3277 ap_key->hlid = hlid;
3278 ap_key->tx_seq_32 = tx_seq_32;
3279 ap_key->tx_seq_16 = tx_seq_16;
3281 wlvif->ap.recorded_keys[i] = ap_key;
3285 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3289 for (i = 0; i < MAX_NUM_KEYS; i++) {
3290 kfree(wlvif->ap.recorded_keys[i]);
3291 wlvif->ap.recorded_keys[i] = NULL;
3295 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3298 struct wl1271_ap_key *key;
3299 bool wep_key_added = false;
3301 for (i = 0; i < MAX_NUM_KEYS; i++) {
3303 if (wlvif->ap.recorded_keys[i] == NULL)
3306 key = wlvif->ap.recorded_keys[i];
3308 if (hlid == WL12XX_INVALID_LINK_ID)
3309 hlid = wlvif->ap.bcast_hlid;
3311 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3312 key->id, key->key_type,
3313 key->key_size, key->key,
3314 hlid, key->tx_seq_32,
3319 if (key->key_type == KEY_WEP)
3320 wep_key_added = true;
3323 if (wep_key_added) {
3324 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3325 wlvif->ap.bcast_hlid);
3331 wl1271_free_ap_keys(wl, wlvif);
3335 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3336 u16 action, u8 id, u8 key_type,
3337 u8 key_size, const u8 *key, u32 tx_seq_32,
3338 u16 tx_seq_16, struct ieee80211_sta *sta)
3341 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3344 struct wl1271_station *wl_sta;
3348 wl_sta = (struct wl1271_station *)sta->drv_priv;
3349 hlid = wl_sta->hlid;
3351 hlid = wlvif->ap.bcast_hlid;
3354 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3356 * We do not support removing keys after AP shutdown.
3357 * Pretend we do to make mac80211 happy.
3359 if (action != KEY_ADD_OR_REPLACE)
3362 ret = wl1271_record_ap_key(wl, wlvif, id,
3364 key, hlid, tx_seq_32,
3367 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3368 id, key_type, key_size,
3369 key, hlid, tx_seq_32,
3377 static const u8 bcast_addr[ETH_ALEN] = {
3378 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3381 addr = sta ? sta->addr : bcast_addr;
3383 if (is_zero_ether_addr(addr)) {
3384 /* We dont support TX only encryption */
3388 /* The wl1271 does not allow to remove unicast keys - they
3389 will be cleared automatically on next CMD_JOIN. Ignore the
3390 request silently, as we dont want the mac80211 to emit
3391 an error message. */
3392 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3395 /* don't remove key if hlid was already deleted */
3396 if (action == KEY_REMOVE &&
3397 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3400 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3401 id, key_type, key_size,
3402 key, addr, tx_seq_32,
3412 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3413 struct ieee80211_vif *vif,
3414 struct ieee80211_sta *sta,
3415 struct ieee80211_key_conf *key_conf)
3417 struct wl1271 *wl = hw->priv;
3419 bool might_change_spare =
3420 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3421 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3423 if (might_change_spare) {
3425 * stop the queues and flush to ensure the next packets are
3426 * in sync with FW spare block accounting
3428 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3429 wl1271_tx_flush(wl);
3432 mutex_lock(&wl->mutex);
3434 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3436 goto out_wake_queues;
3439 ret = wl1271_ps_elp_wakeup(wl);
3441 goto out_wake_queues;
3443 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3445 wl1271_ps_elp_sleep(wl);
3448 if (might_change_spare)
3449 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3451 mutex_unlock(&wl->mutex);
3456 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3457 struct ieee80211_vif *vif,
3458 struct ieee80211_sta *sta,
3459 struct ieee80211_key_conf *key_conf)
3461 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3468 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3470 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3471 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3472 key_conf->cipher, key_conf->keyidx,
3473 key_conf->keylen, key_conf->flags);
3474 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3476 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3478 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3479 hlid = wl_sta->hlid;
3481 hlid = wlvif->ap.bcast_hlid;
3484 hlid = wlvif->sta.hlid;
3486 if (hlid != WL12XX_INVALID_LINK_ID) {
3487 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3488 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3489 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3492 switch (key_conf->cipher) {
3493 case WLAN_CIPHER_SUITE_WEP40:
3494 case WLAN_CIPHER_SUITE_WEP104:
3497 key_conf->hw_key_idx = key_conf->keyidx;
3499 case WLAN_CIPHER_SUITE_TKIP:
3500 key_type = KEY_TKIP;
3501 key_conf->hw_key_idx = key_conf->keyidx;
3503 case WLAN_CIPHER_SUITE_CCMP:
3505 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3507 case WL1271_CIPHER_SUITE_GEM:
3511 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3518 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3519 key_conf->keyidx, key_type,
3520 key_conf->keylen, key_conf->key,
3521 tx_seq_32, tx_seq_16, sta);
3523 wl1271_error("Could not add or replace key");
3528 * reconfiguring arp response if the unicast (or common)
3529 * encryption key type was changed
3531 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3532 (sta || key_type == KEY_WEP) &&
3533 wlvif->encryption_type != key_type) {
3534 wlvif->encryption_type = key_type;
3535 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3537 wl1271_warning("build arp rsp failed: %d", ret);
3544 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3545 key_conf->keyidx, key_type,
3546 key_conf->keylen, key_conf->key,
3549 wl1271_error("Could not remove key");
3555 wl1271_error("Unsupported key cmd 0x%x", cmd);
3561 EXPORT_SYMBOL_GPL(wlcore_set_key);
3563 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3564 struct ieee80211_vif *vif,
3567 struct wl1271 *wl = hw->priv;
3568 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3571 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3574 /* we don't handle unsetting of default key */
3578 mutex_lock(&wl->mutex);
3580 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3585 ret = wl1271_ps_elp_wakeup(wl);
3589 wlvif->default_key = key_idx;
3591 /* the default WEP key needs to be configured at least once */
3592 if (wlvif->encryption_type == KEY_WEP) {
3593 ret = wl12xx_cmd_set_default_wep_key(wl,
3601 wl1271_ps_elp_sleep(wl);
3604 mutex_unlock(&wl->mutex);
3607 void wlcore_regdomain_config(struct wl1271 *wl)
3611 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3614 mutex_lock(&wl->mutex);
3616 if (unlikely(wl->state != WLCORE_STATE_ON))
3619 ret = wl1271_ps_elp_wakeup(wl);
3623 ret = wlcore_cmd_regdomain_config_locked(wl);
3625 wl12xx_queue_recovery_work(wl);
3629 wl1271_ps_elp_sleep(wl);
3631 mutex_unlock(&wl->mutex);
3634 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3635 struct ieee80211_vif *vif,
3636 struct ieee80211_scan_request *hw_req)
3638 struct cfg80211_scan_request *req = &hw_req->req;
3639 struct wl1271 *wl = hw->priv;
3644 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3647 ssid = req->ssids[0].ssid;
3648 len = req->ssids[0].ssid_len;
3651 mutex_lock(&wl->mutex);
3653 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3655 * We cannot return -EBUSY here because cfg80211 will expect
3656 * a call to ieee80211_scan_completed if we do - in this case
3657 * there won't be any call.
3663 ret = wl1271_ps_elp_wakeup(wl);
3667 /* fail if there is any role in ROC */
3668 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3669 /* don't allow scanning right now */
3674 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3676 wl1271_ps_elp_sleep(wl);
3678 mutex_unlock(&wl->mutex);
3683 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3684 struct ieee80211_vif *vif)
3686 struct wl1271 *wl = hw->priv;
3687 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3690 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3692 mutex_lock(&wl->mutex);
3694 if (unlikely(wl->state != WLCORE_STATE_ON))
3697 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3700 ret = wl1271_ps_elp_wakeup(wl);
3704 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3705 ret = wl->ops->scan_stop(wl, wlvif);
3711 * Rearm the tx watchdog just before idling scan. This
3712 * prevents just-finished scans from triggering the watchdog
3714 wl12xx_rearm_tx_watchdog_locked(wl);
3716 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3717 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3718 wl->scan_wlvif = NULL;
3719 wl->scan.req = NULL;
3720 ieee80211_scan_completed(wl->hw, true);
3723 wl1271_ps_elp_sleep(wl);
3725 mutex_unlock(&wl->mutex);
3727 cancel_delayed_work_sync(&wl->scan_complete_work);
3730 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3731 struct ieee80211_vif *vif,
3732 struct cfg80211_sched_scan_request *req,
3733 struct ieee80211_scan_ies *ies)
3735 struct wl1271 *wl = hw->priv;
3736 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3739 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3741 mutex_lock(&wl->mutex);
3743 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3748 ret = wl1271_ps_elp_wakeup(wl);
3752 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3756 wl->sched_vif = wlvif;
3759 wl1271_ps_elp_sleep(wl);
3761 mutex_unlock(&wl->mutex);
3765 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3766 struct ieee80211_vif *vif)
3768 struct wl1271 *wl = hw->priv;
3769 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3772 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3774 mutex_lock(&wl->mutex);
3776 if (unlikely(wl->state != WLCORE_STATE_ON))
3779 ret = wl1271_ps_elp_wakeup(wl);
3783 wl->ops->sched_scan_stop(wl, wlvif);
3785 wl1271_ps_elp_sleep(wl);
3787 mutex_unlock(&wl->mutex);
3792 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3794 struct wl1271 *wl = hw->priv;
3797 mutex_lock(&wl->mutex);
3799 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3804 ret = wl1271_ps_elp_wakeup(wl);
3808 ret = wl1271_acx_frag_threshold(wl, value);
3810 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3812 wl1271_ps_elp_sleep(wl);
3815 mutex_unlock(&wl->mutex);
3820 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3822 struct wl1271 *wl = hw->priv;
3823 struct wl12xx_vif *wlvif;
3826 mutex_lock(&wl->mutex);
3828 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3833 ret = wl1271_ps_elp_wakeup(wl);
3837 wl12xx_for_each_wlvif(wl, wlvif) {
3838 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3840 wl1271_warning("set rts threshold failed: %d", ret);
3842 wl1271_ps_elp_sleep(wl);
3845 mutex_unlock(&wl->mutex);
3850 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3853 const u8 *next, *end = skb->data + skb->len;
3854 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3855 skb->len - ieoffset);
3860 memmove(ie, next, end - next);
3861 skb_trim(skb, skb->len - len);
3864 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3865 unsigned int oui, u8 oui_type,
3869 const u8 *next, *end = skb->data + skb->len;
3870 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3871 skb->data + ieoffset,
3872 skb->len - ieoffset);
3877 memmove(ie, next, end - next);
3878 skb_trim(skb, skb->len - len);
3881 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3882 struct ieee80211_vif *vif)
3884 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3885 struct sk_buff *skb;
3888 skb = ieee80211_proberesp_get(wl->hw, vif);
3892 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3893 CMD_TEMPL_AP_PROBE_RESPONSE,
3902 wl1271_debug(DEBUG_AP, "probe response updated");
3903 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3909 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3910 struct ieee80211_vif *vif,
3912 size_t probe_rsp_len,
3915 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3916 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3917 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3918 int ssid_ie_offset, ie_offset, templ_len;
3921 /* no need to change probe response if the SSID is set correctly */
3922 if (wlvif->ssid_len > 0)
3923 return wl1271_cmd_template_set(wl, wlvif->role_id,
3924 CMD_TEMPL_AP_PROBE_RESPONSE,
3929 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3930 wl1271_error("probe_rsp template too big");
3934 /* start searching from IE offset */
3935 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3937 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3938 probe_rsp_len - ie_offset);
3940 wl1271_error("No SSID in beacon!");
3944 ssid_ie_offset = ptr - probe_rsp_data;
3945 ptr += (ptr[1] + 2);
3947 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3949 /* insert SSID from bss_conf */
3950 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3951 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3952 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3953 bss_conf->ssid, bss_conf->ssid_len);
3954 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3956 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3957 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3958 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3960 return wl1271_cmd_template_set(wl, wlvif->role_id,
3961 CMD_TEMPL_AP_PROBE_RESPONSE,
3967 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3968 struct ieee80211_vif *vif,
3969 struct ieee80211_bss_conf *bss_conf,
3972 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3975 if (changed & BSS_CHANGED_ERP_SLOT) {
3976 if (bss_conf->use_short_slot)
3977 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3979 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3981 wl1271_warning("Set slot time failed %d", ret);
3986 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3987 if (bss_conf->use_short_preamble)
3988 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3990 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3993 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3994 if (bss_conf->use_cts_prot)
3995 ret = wl1271_acx_cts_protect(wl, wlvif,
3998 ret = wl1271_acx_cts_protect(wl, wlvif,
3999 CTSPROTECT_DISABLE);
4001 wl1271_warning("Set ctsprotect failed %d", ret);
4010 static int wlcore_set_beacon_template(struct wl1271 *wl,
4011 struct ieee80211_vif *vif,
4014 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4015 struct ieee80211_hdr *hdr;
4018 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4019 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4027 wl1271_debug(DEBUG_MASTER, "beacon updated");
4029 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4031 dev_kfree_skb(beacon);
4034 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4035 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4037 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4042 dev_kfree_skb(beacon);
4046 wlvif->wmm_enabled =
4047 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4048 WLAN_OUI_TYPE_MICROSOFT_WMM,
4049 beacon->data + ieoffset,
4050 beacon->len - ieoffset);
4053 * In case we already have a probe-resp beacon set explicitly
4054 * by usermode, don't use the beacon data.
4056 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4059 /* remove TIM ie from probe response */
4060 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4063 * remove p2p ie from probe response.
4064 * the fw reponds to probe requests that don't include
4065 * the p2p ie. probe requests with p2p ie will be passed,
4066 * and will be responded by the supplicant (the spec
4067 * forbids including the p2p ie when responding to probe
4068 * requests that didn't include it).
4070 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4071 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4073 hdr = (struct ieee80211_hdr *) beacon->data;
4074 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4075 IEEE80211_STYPE_PROBE_RESP);
4077 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4082 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4083 CMD_TEMPL_PROBE_RESPONSE,
4088 dev_kfree_skb(beacon);
4096 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4097 struct ieee80211_vif *vif,
4098 struct ieee80211_bss_conf *bss_conf,
4101 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4102 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4105 if (changed & BSS_CHANGED_BEACON_INT) {
4106 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4107 bss_conf->beacon_int);
4109 wlvif->beacon_int = bss_conf->beacon_int;
4112 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4113 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4115 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4118 if (changed & BSS_CHANGED_BEACON) {
4119 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4123 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4125 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4132 wl1271_error("beacon info change failed: %d", ret);
4136 /* AP mode changes */
4137 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4138 struct ieee80211_vif *vif,
4139 struct ieee80211_bss_conf *bss_conf,
4142 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4145 if (changed & BSS_CHANGED_BASIC_RATES) {
4146 u32 rates = bss_conf->basic_rates;
4148 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4150 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4151 wlvif->basic_rate_set);
4153 ret = wl1271_init_ap_rates(wl, wlvif);
4155 wl1271_error("AP rate policy change failed %d", ret);
4159 ret = wl1271_ap_init_templates(wl, vif);
4163 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4167 ret = wlcore_set_beacon_template(wl, vif, true);
4172 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4176 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4177 if (bss_conf->enable_beacon) {
4178 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4179 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4183 ret = wl1271_ap_init_hwenc(wl, wlvif);
4187 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4188 wl1271_debug(DEBUG_AP, "started AP");
4191 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4193 * AP might be in ROC in case we have just
4194 * sent auth reply. handle it.
4196 if (test_bit(wlvif->role_id, wl->roc_map))
4197 wl12xx_croc(wl, wlvif->role_id);
4199 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4203 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4204 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4206 wl1271_debug(DEBUG_AP, "stopped AP");
4211 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4215 /* Handle HT information change */
4216 if ((changed & BSS_CHANGED_HT) &&
4217 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4218 ret = wl1271_acx_set_ht_information(wl, wlvif,
4219 bss_conf->ht_operation_mode);
4221 wl1271_warning("Set ht information failed %d", ret);
4230 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4231 struct ieee80211_bss_conf *bss_conf,
4237 wl1271_debug(DEBUG_MAC80211,
4238 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4239 bss_conf->bssid, bss_conf->aid,
4240 bss_conf->beacon_int,
4241 bss_conf->basic_rates, sta_rate_set);
4243 wlvif->beacon_int = bss_conf->beacon_int;
4244 rates = bss_conf->basic_rates;
4245 wlvif->basic_rate_set =
4246 wl1271_tx_enabled_rates_get(wl, rates,
4249 wl1271_tx_min_rate_get(wl,
4250 wlvif->basic_rate_set);
4254 wl1271_tx_enabled_rates_get(wl,
4258 /* we only support sched_scan while not connected */
4259 if (wl->sched_vif == wlvif)
4260 wl->ops->sched_scan_stop(wl, wlvif);
4262 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4266 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4270 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4274 wlcore_set_ssid(wl, wlvif);
4276 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4281 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4285 /* revert back to minimum rates for the current band */
4286 wl1271_set_band_rate(wl, wlvif);
4287 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4289 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4293 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4294 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4295 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4300 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4303 /* STA/IBSS mode changes */
4304 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4305 struct ieee80211_vif *vif,
4306 struct ieee80211_bss_conf *bss_conf,
4309 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4310 bool do_join = false;
4311 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4312 bool ibss_joined = false;
4313 u32 sta_rate_set = 0;
4315 struct ieee80211_sta *sta;
4316 bool sta_exists = false;
4317 struct ieee80211_sta_ht_cap sta_ht_cap;
4320 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4326 if (changed & BSS_CHANGED_IBSS) {
4327 if (bss_conf->ibss_joined) {
4328 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4331 wlcore_unset_assoc(wl, wlvif);
4332 wl12xx_cmd_role_stop_sta(wl, wlvif);
4336 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4339 /* Need to update the SSID (for filtering etc) */
4340 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4343 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4344 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4345 bss_conf->enable_beacon ? "enabled" : "disabled");
4350 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4351 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4353 if (changed & BSS_CHANGED_CQM) {
4354 bool enable = false;
4355 if (bss_conf->cqm_rssi_thold)
4357 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4358 bss_conf->cqm_rssi_thold,
4359 bss_conf->cqm_rssi_hyst);
4362 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4365 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4366 BSS_CHANGED_ASSOC)) {
4368 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4370 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4372 /* save the supp_rates of the ap */
4373 sta_rate_set = sta->supp_rates[wlvif->band];
4374 if (sta->ht_cap.ht_supported)
4376 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4377 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4378 sta_ht_cap = sta->ht_cap;
4385 if (changed & BSS_CHANGED_BSSID) {
4386 if (!is_zero_ether_addr(bss_conf->bssid)) {
4387 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4392 /* Need to update the BSSID (for filtering etc) */
4395 ret = wlcore_clear_bssid(wl, wlvif);
4401 if (changed & BSS_CHANGED_IBSS) {
4402 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4403 bss_conf->ibss_joined);
4405 if (bss_conf->ibss_joined) {
4406 u32 rates = bss_conf->basic_rates;
4407 wlvif->basic_rate_set =
4408 wl1271_tx_enabled_rates_get(wl, rates,
4411 wl1271_tx_min_rate_get(wl,
4412 wlvif->basic_rate_set);
4414 /* by default, use 11b + OFDM rates */
4415 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4416 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4422 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4423 /* enable beacon filtering */
4424 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4429 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4434 ret = wlcore_join(wl, wlvif);
4436 wl1271_warning("cmd join failed %d", ret);
4441 if (changed & BSS_CHANGED_ASSOC) {
4442 if (bss_conf->assoc) {
4443 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4448 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4449 wl12xx_set_authorized(wl, wlvif);
4451 wlcore_unset_assoc(wl, wlvif);
4455 if (changed & BSS_CHANGED_PS) {
4456 if ((bss_conf->ps) &&
4457 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4458 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4462 if (wl->conf.conn.forced_ps) {
4463 ps_mode = STATION_POWER_SAVE_MODE;
4464 ps_mode_str = "forced";
4466 ps_mode = STATION_AUTO_PS_MODE;
4467 ps_mode_str = "auto";
4470 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4472 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4474 wl1271_warning("enter %s ps failed %d",
4476 } else if (!bss_conf->ps &&
4477 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4478 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4480 ret = wl1271_ps_set_mode(wl, wlvif,
4481 STATION_ACTIVE_MODE);
4483 wl1271_warning("exit auto ps failed %d", ret);
4487 /* Handle new association with HT. Do this after join. */
4490 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4492 ret = wlcore_hw_set_peer_cap(wl,
4498 wl1271_warning("Set ht cap failed %d", ret);
4504 ret = wl1271_acx_set_ht_information(wl, wlvif,
4505 bss_conf->ht_operation_mode);
4507 wl1271_warning("Set ht information failed %d",
4514 /* Handle arp filtering. Done after join. */
4515 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4516 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4517 __be32 addr = bss_conf->arp_addr_list[0];
4518 wlvif->sta.qos = bss_conf->qos;
4519 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4521 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4522 wlvif->ip_addr = addr;
4524 * The template should have been configured only upon
4525 * association. however, it seems that the correct ip
4526 * isn't being set (when sending), so we have to
4527 * reconfigure the template upon every ip change.
4529 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4531 wl1271_warning("build arp rsp failed: %d", ret);
4535 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4536 (ACX_ARP_FILTER_ARP_FILTERING |
4537 ACX_ARP_FILTER_AUTO_ARP),
4541 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4552 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4553 struct ieee80211_vif *vif,
4554 struct ieee80211_bss_conf *bss_conf,
4557 struct wl1271 *wl = hw->priv;
4558 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4559 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4562 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4563 wlvif->role_id, (int)changed);
4566 * make sure to cancel pending disconnections if our association
4569 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4570 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4572 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4573 !bss_conf->enable_beacon)
4574 wl1271_tx_flush(wl);
4576 mutex_lock(&wl->mutex);
4578 if (unlikely(wl->state != WLCORE_STATE_ON))
4581 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4584 ret = wl1271_ps_elp_wakeup(wl);
4588 if ((changed & BSS_CHANGED_TXPOWER) &&
4589 bss_conf->txpower != wlvif->power_level) {
4591 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4595 wlvif->power_level = bss_conf->txpower;
4599 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4601 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4603 wl1271_ps_elp_sleep(wl);
4606 mutex_unlock(&wl->mutex);
4609 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4610 struct ieee80211_chanctx_conf *ctx)
4612 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4613 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4614 cfg80211_get_chandef_type(&ctx->def));
4618 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4619 struct ieee80211_chanctx_conf *ctx)
4621 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4622 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4623 cfg80211_get_chandef_type(&ctx->def));
4626 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4627 struct ieee80211_chanctx_conf *ctx,
4630 struct wl1271 *wl = hw->priv;
4631 struct wl12xx_vif *wlvif;
4633 int channel = ieee80211_frequency_to_channel(
4634 ctx->def.chan->center_freq);
4636 wl1271_debug(DEBUG_MAC80211,
4637 "mac80211 change chanctx %d (type %d) changed 0x%x",
4638 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4640 mutex_lock(&wl->mutex);
4642 ret = wl1271_ps_elp_wakeup(wl);
4646 wl12xx_for_each_wlvif(wl, wlvif) {
4647 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4650 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4656 /* start radar if needed */
4657 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4658 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4659 ctx->radar_enabled && !wlvif->radar_enabled &&
4660 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4661 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4662 wlcore_hw_set_cac(wl, wlvif, true);
4663 wlvif->radar_enabled = true;
4667 wl1271_ps_elp_sleep(wl);
4669 mutex_unlock(&wl->mutex);
4672 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4673 struct ieee80211_vif *vif,
4674 struct ieee80211_chanctx_conf *ctx)
4676 struct wl1271 *wl = hw->priv;
4677 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4678 int channel = ieee80211_frequency_to_channel(
4679 ctx->def.chan->center_freq);
4682 wl1271_debug(DEBUG_MAC80211,
4683 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4684 wlvif->role_id, channel,
4685 cfg80211_get_chandef_type(&ctx->def),
4686 ctx->radar_enabled, ctx->def.chan->dfs_state);
4688 mutex_lock(&wl->mutex);
4690 if (unlikely(wl->state != WLCORE_STATE_ON))
4693 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4696 ret = wl1271_ps_elp_wakeup(wl);
4700 wlvif->band = ctx->def.chan->band;
4701 wlvif->channel = channel;
4702 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4704 /* update default rates according to the band */
4705 wl1271_set_band_rate(wl, wlvif);
4707 if (ctx->radar_enabled &&
4708 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4709 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4710 wlcore_hw_set_cac(wl, wlvif, true);
4711 wlvif->radar_enabled = true;
4714 wl1271_ps_elp_sleep(wl);
4716 mutex_unlock(&wl->mutex);
4721 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4722 struct ieee80211_vif *vif,
4723 struct ieee80211_chanctx_conf *ctx)
4725 struct wl1271 *wl = hw->priv;
4726 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4729 wl1271_debug(DEBUG_MAC80211,
4730 "mac80211 unassign chanctx (role %d) %d (type %d)",
4732 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4733 cfg80211_get_chandef_type(&ctx->def));
4735 wl1271_tx_flush(wl);
4737 mutex_lock(&wl->mutex);
4739 if (unlikely(wl->state != WLCORE_STATE_ON))
4742 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4745 ret = wl1271_ps_elp_wakeup(wl);
4749 if (wlvif->radar_enabled) {
4750 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4751 wlcore_hw_set_cac(wl, wlvif, false);
4752 wlvif->radar_enabled = false;
4755 wl1271_ps_elp_sleep(wl);
4757 mutex_unlock(&wl->mutex);
4760 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4761 struct wl12xx_vif *wlvif,
4762 struct ieee80211_chanctx_conf *new_ctx)
4764 int channel = ieee80211_frequency_to_channel(
4765 new_ctx->def.chan->center_freq);
4767 wl1271_debug(DEBUG_MAC80211,
4768 "switch vif (role %d) %d -> %d chan_type: %d",
4769 wlvif->role_id, wlvif->channel, channel,
4770 cfg80211_get_chandef_type(&new_ctx->def));
4772 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4775 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4777 if (wlvif->radar_enabled) {
4778 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4779 wlcore_hw_set_cac(wl, wlvif, false);
4780 wlvif->radar_enabled = false;
4783 wlvif->band = new_ctx->def.chan->band;
4784 wlvif->channel = channel;
4785 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4787 /* start radar if needed */
4788 if (new_ctx->radar_enabled) {
4789 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4790 wlcore_hw_set_cac(wl, wlvif, true);
4791 wlvif->radar_enabled = true;
4798 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4799 struct ieee80211_vif_chanctx_switch *vifs,
4801 enum ieee80211_chanctx_switch_mode mode)
4803 struct wl1271 *wl = hw->priv;
4806 wl1271_debug(DEBUG_MAC80211,
4807 "mac80211 switch chanctx n_vifs %d mode %d",
4810 mutex_lock(&wl->mutex);
4812 ret = wl1271_ps_elp_wakeup(wl);
4816 for (i = 0; i < n_vifs; i++) {
4817 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4819 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4824 wl1271_ps_elp_sleep(wl);
4826 mutex_unlock(&wl->mutex);
4831 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4832 struct ieee80211_vif *vif, u16 queue,
4833 const struct ieee80211_tx_queue_params *params)
4835 struct wl1271 *wl = hw->priv;
4836 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4840 mutex_lock(&wl->mutex);
4842 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4845 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4847 ps_scheme = CONF_PS_SCHEME_LEGACY;
4849 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4852 ret = wl1271_ps_elp_wakeup(wl);
4857 * the txop is confed in units of 32us by the mac80211,
4860 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4861 params->cw_min, params->cw_max,
4862 params->aifs, params->txop << 5);
4866 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4867 CONF_CHANNEL_TYPE_EDCF,
4868 wl1271_tx_get_queue(queue),
4869 ps_scheme, CONF_ACK_POLICY_LEGACY,
4873 wl1271_ps_elp_sleep(wl);
4876 mutex_unlock(&wl->mutex);
4881 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4882 struct ieee80211_vif *vif)
4885 struct wl1271 *wl = hw->priv;
4886 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4887 u64 mactime = ULLONG_MAX;
4890 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4892 mutex_lock(&wl->mutex);
4894 if (unlikely(wl->state != WLCORE_STATE_ON))
4897 ret = wl1271_ps_elp_wakeup(wl);
4901 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4906 wl1271_ps_elp_sleep(wl);
4909 mutex_unlock(&wl->mutex);
4913 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4914 struct survey_info *survey)
4916 struct ieee80211_conf *conf = &hw->conf;
4921 survey->channel = conf->chandef.chan;
4926 static int wl1271_allocate_sta(struct wl1271 *wl,
4927 struct wl12xx_vif *wlvif,
4928 struct ieee80211_sta *sta)
4930 struct wl1271_station *wl_sta;
4934 if (wl->active_sta_count >= wl->max_ap_stations) {
4935 wl1271_warning("could not allocate HLID - too much stations");
4939 wl_sta = (struct wl1271_station *)sta->drv_priv;
4940 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4942 wl1271_warning("could not allocate HLID - too many links");
4946 /* use the previous security seq, if this is a recovery/resume */
4947 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4949 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4950 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4951 wl->active_sta_count++;
4955 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4957 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4960 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4961 __clear_bit(hlid, &wl->ap_ps_map);
4962 __clear_bit(hlid, &wl->ap_fw_ps_map);
4965 * save the last used PN in the private part of iee80211_sta,
4966 * in case of recovery/suspend
4968 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4970 wl12xx_free_link(wl, wlvif, &hlid);
4971 wl->active_sta_count--;
4974 * rearm the tx watchdog when the last STA is freed - give the FW a
4975 * chance to return STA-buffered packets before complaining.
4977 if (wl->active_sta_count == 0)
4978 wl12xx_rearm_tx_watchdog_locked(wl);
4981 static int wl12xx_sta_add(struct wl1271 *wl,
4982 struct wl12xx_vif *wlvif,
4983 struct ieee80211_sta *sta)
4985 struct wl1271_station *wl_sta;
4989 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4991 ret = wl1271_allocate_sta(wl, wlvif, sta);
4995 wl_sta = (struct wl1271_station *)sta->drv_priv;
4996 hlid = wl_sta->hlid;
4998 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5000 wl1271_free_sta(wl, wlvif, hlid);
5005 static int wl12xx_sta_remove(struct wl1271 *wl,
5006 struct wl12xx_vif *wlvif,
5007 struct ieee80211_sta *sta)
5009 struct wl1271_station *wl_sta;
5012 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5014 wl_sta = (struct wl1271_station *)sta->drv_priv;
5016 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5019 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5023 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5027 static void wlcore_roc_if_possible(struct wl1271 *wl,
5028 struct wl12xx_vif *wlvif)
5030 if (find_first_bit(wl->roc_map,
5031 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5034 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5037 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5041 * when wl_sta is NULL, we treat this call as if coming from a
5042 * pending auth reply.
5043 * wl->mutex must be taken and the FW must be awake when the call
5046 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5047 struct wl1271_station *wl_sta, bool in_conn)
5050 if (WARN_ON(wl_sta && wl_sta->in_connection))
5053 if (!wlvif->ap_pending_auth_reply &&
5054 !wlvif->inconn_count)
5055 wlcore_roc_if_possible(wl, wlvif);
5058 wl_sta->in_connection = true;
5059 wlvif->inconn_count++;
5061 wlvif->ap_pending_auth_reply = true;
5064 if (wl_sta && !wl_sta->in_connection)
5067 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5070 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5074 wl_sta->in_connection = false;
5075 wlvif->inconn_count--;
5077 wlvif->ap_pending_auth_reply = false;
5080 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5081 test_bit(wlvif->role_id, wl->roc_map))
5082 wl12xx_croc(wl, wlvif->role_id);
5086 static int wl12xx_update_sta_state(struct wl1271 *wl,
5087 struct wl12xx_vif *wlvif,
5088 struct ieee80211_sta *sta,
5089 enum ieee80211_sta_state old_state,
5090 enum ieee80211_sta_state new_state)
5092 struct wl1271_station *wl_sta;
5093 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5094 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5097 wl_sta = (struct wl1271_station *)sta->drv_priv;
5099 /* Add station (AP mode) */
5101 old_state == IEEE80211_STA_NOTEXIST &&
5102 new_state == IEEE80211_STA_NONE) {
5103 ret = wl12xx_sta_add(wl, wlvif, sta);
5107 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5110 /* Remove station (AP mode) */
5112 old_state == IEEE80211_STA_NONE &&
5113 new_state == IEEE80211_STA_NOTEXIST) {
5115 wl12xx_sta_remove(wl, wlvif, sta);
5117 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5120 /* Authorize station (AP mode) */
5122 new_state == IEEE80211_STA_AUTHORIZED) {
5123 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5127 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5132 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5135 /* Authorize station */
5137 new_state == IEEE80211_STA_AUTHORIZED) {
5138 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5139 ret = wl12xx_set_authorized(wl, wlvif);
5145 old_state == IEEE80211_STA_AUTHORIZED &&
5146 new_state == IEEE80211_STA_ASSOC) {
5147 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5148 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5151 /* save seq number on disassoc (suspend) */
5153 old_state == IEEE80211_STA_ASSOC &&
5154 new_state == IEEE80211_STA_AUTH) {
5155 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5156 wlvif->total_freed_pkts = 0;
5159 /* restore seq number on assoc (resume) */
5161 old_state == IEEE80211_STA_AUTH &&
5162 new_state == IEEE80211_STA_ASSOC) {
5163 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5166 /* clear ROCs on failure or authorization */
5168 (new_state == IEEE80211_STA_AUTHORIZED ||
5169 new_state == IEEE80211_STA_NOTEXIST)) {
5170 if (test_bit(wlvif->role_id, wl->roc_map))
5171 wl12xx_croc(wl, wlvif->role_id);
5175 old_state == IEEE80211_STA_NOTEXIST &&
5176 new_state == IEEE80211_STA_NONE) {
5177 if (find_first_bit(wl->roc_map,
5178 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5179 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5180 wl12xx_roc(wl, wlvif, wlvif->role_id,
5181 wlvif->band, wlvif->channel);
5187 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5188 struct ieee80211_vif *vif,
5189 struct ieee80211_sta *sta,
5190 enum ieee80211_sta_state old_state,
5191 enum ieee80211_sta_state new_state)
5193 struct wl1271 *wl = hw->priv;
5194 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5197 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5198 sta->aid, old_state, new_state);
5200 mutex_lock(&wl->mutex);
5202 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5207 ret = wl1271_ps_elp_wakeup(wl);
5211 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5213 wl1271_ps_elp_sleep(wl);
5215 mutex_unlock(&wl->mutex);
5216 if (new_state < old_state)
5221 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5222 struct ieee80211_vif *vif,
5223 enum ieee80211_ampdu_mlme_action action,
5224 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5227 struct wl1271 *wl = hw->priv;
5228 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5230 u8 hlid, *ba_bitmap;
5232 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5235 /* sanity check - the fields in FW are only 8bits wide */
5236 if (WARN_ON(tid > 0xFF))
5239 mutex_lock(&wl->mutex);
5241 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5246 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5247 hlid = wlvif->sta.hlid;
5248 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5249 struct wl1271_station *wl_sta;
5251 wl_sta = (struct wl1271_station *)sta->drv_priv;
5252 hlid = wl_sta->hlid;
5258 ba_bitmap = &wl->links[hlid].ba_bitmap;
5260 ret = wl1271_ps_elp_wakeup(wl);
5264 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5268 case IEEE80211_AMPDU_RX_START:
5269 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5274 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5276 wl1271_error("exceeded max RX BA sessions");
5280 if (*ba_bitmap & BIT(tid)) {
5282 wl1271_error("cannot enable RX BA session on active "
5287 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5290 *ba_bitmap |= BIT(tid);
5291 wl->ba_rx_session_count++;
5295 case IEEE80211_AMPDU_RX_STOP:
5296 if (!(*ba_bitmap & BIT(tid))) {
5298 * this happens on reconfig - so only output a debug
5299 * message for now, and don't fail the function.
5301 wl1271_debug(DEBUG_MAC80211,
5302 "no active RX BA session on tid: %d",
5308 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5311 *ba_bitmap &= ~BIT(tid);
5312 wl->ba_rx_session_count--;
5317 * The BA initiator session management in FW independently.
5318 * Falling break here on purpose for all TX APDU commands.
5320 case IEEE80211_AMPDU_TX_START:
5321 case IEEE80211_AMPDU_TX_STOP_CONT:
5322 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5323 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5324 case IEEE80211_AMPDU_TX_OPERATIONAL:
5329 wl1271_error("Incorrect ampdu action id=%x\n", action);
5333 wl1271_ps_elp_sleep(wl);
5336 mutex_unlock(&wl->mutex);
5341 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5342 struct ieee80211_vif *vif,
5343 const struct cfg80211_bitrate_mask *mask)
5345 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5346 struct wl1271 *wl = hw->priv;
5349 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5350 mask->control[NL80211_BAND_2GHZ].legacy,
5351 mask->control[NL80211_BAND_5GHZ].legacy);
5353 mutex_lock(&wl->mutex);
5355 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5356 wlvif->bitrate_masks[i] =
5357 wl1271_tx_enabled_rates_get(wl,
5358 mask->control[i].legacy,
5361 if (unlikely(wl->state != WLCORE_STATE_ON))
5364 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5365 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5367 ret = wl1271_ps_elp_wakeup(wl);
5371 wl1271_set_band_rate(wl, wlvif);
5373 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5374 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5376 wl1271_ps_elp_sleep(wl);
5379 mutex_unlock(&wl->mutex);
5384 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5385 struct ieee80211_vif *vif,
5386 struct ieee80211_channel_switch *ch_switch)
5388 struct wl1271 *wl = hw->priv;
5389 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5392 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5394 wl1271_tx_flush(wl);
5396 mutex_lock(&wl->mutex);
5398 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5399 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5400 ieee80211_chswitch_done(vif, false);
5402 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5406 ret = wl1271_ps_elp_wakeup(wl);
5410 /* TODO: change mac80211 to pass vif as param */
5412 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5413 unsigned long delay_usec;
5415 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5419 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5421 /* indicate failure 5 seconds after channel switch time */
5422 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5424 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5425 usecs_to_jiffies(delay_usec) +
5426 msecs_to_jiffies(5000));
5430 wl1271_ps_elp_sleep(wl);
5433 mutex_unlock(&wl->mutex);
5436 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5437 struct wl12xx_vif *wlvif,
5440 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5441 struct sk_buff *beacon =
5442 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5447 return cfg80211_find_ie(eid,
5448 beacon->data + ieoffset,
5449 beacon->len - ieoffset);
5452 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5456 const struct ieee80211_channel_sw_ie *ie_csa;
5458 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5462 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5463 *csa_count = ie_csa->count;
5468 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5469 struct ieee80211_vif *vif,
5470 struct cfg80211_chan_def *chandef)
5472 struct wl1271 *wl = hw->priv;
5473 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5474 struct ieee80211_channel_switch ch_switch = {
5476 .chandef = *chandef,
5480 wl1271_debug(DEBUG_MAC80211,
5481 "mac80211 channel switch beacon (role %d)",
5484 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5486 wl1271_error("error getting beacon (for CSA counter)");
5490 mutex_lock(&wl->mutex);
5492 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5497 ret = wl1271_ps_elp_wakeup(wl);
5501 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5505 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5508 wl1271_ps_elp_sleep(wl);
5510 mutex_unlock(&wl->mutex);
5513 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5514 u32 queues, bool drop)
5516 struct wl1271 *wl = hw->priv;
5518 wl1271_tx_flush(wl);
5521 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5522 struct ieee80211_vif *vif,
5523 struct ieee80211_channel *chan,
5525 enum ieee80211_roc_type type)
5527 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5528 struct wl1271 *wl = hw->priv;
5529 int channel, ret = 0;
5531 channel = ieee80211_frequency_to_channel(chan->center_freq);
5533 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5534 channel, wlvif->role_id);
5536 mutex_lock(&wl->mutex);
5538 if (unlikely(wl->state != WLCORE_STATE_ON))
5541 /* return EBUSY if we can't ROC right now */
5542 if (WARN_ON(wl->roc_vif ||
5543 find_first_bit(wl->roc_map,
5544 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5549 ret = wl1271_ps_elp_wakeup(wl);
5553 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5558 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5559 msecs_to_jiffies(duration));
5561 wl1271_ps_elp_sleep(wl);
5563 mutex_unlock(&wl->mutex);
5567 static int __wlcore_roc_completed(struct wl1271 *wl)
5569 struct wl12xx_vif *wlvif;
5572 /* already completed */
5573 if (unlikely(!wl->roc_vif))
5576 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5578 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5581 ret = wl12xx_stop_dev(wl, wlvif);
5590 static int wlcore_roc_completed(struct wl1271 *wl)
5594 wl1271_debug(DEBUG_MAC80211, "roc complete");
5596 mutex_lock(&wl->mutex);
5598 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5603 ret = wl1271_ps_elp_wakeup(wl);
5607 ret = __wlcore_roc_completed(wl);
5609 wl1271_ps_elp_sleep(wl);
5611 mutex_unlock(&wl->mutex);
5616 static void wlcore_roc_complete_work(struct work_struct *work)
5618 struct delayed_work *dwork;
5622 dwork = container_of(work, struct delayed_work, work);
5623 wl = container_of(dwork, struct wl1271, roc_complete_work);
5625 ret = wlcore_roc_completed(wl);
5627 ieee80211_remain_on_channel_expired(wl->hw);
5630 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5632 struct wl1271 *wl = hw->priv;
5634 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5637 wl1271_tx_flush(wl);
5640 * we can't just flush_work here, because it might deadlock
5641 * (as we might get called from the same workqueue)
5643 cancel_delayed_work_sync(&wl->roc_complete_work);
5644 wlcore_roc_completed(wl);
5649 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5650 struct ieee80211_vif *vif,
5651 struct ieee80211_sta *sta,
5654 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5656 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5658 if (!(changed & IEEE80211_RC_BW_CHANGED))
5661 /* this callback is atomic, so schedule a new work */
5662 wlvif->rc_update_bw = sta->bandwidth;
5663 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5666 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5667 struct ieee80211_vif *vif,
5668 struct ieee80211_sta *sta,
5669 struct station_info *sinfo)
5671 struct wl1271 *wl = hw->priv;
5672 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5676 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5678 mutex_lock(&wl->mutex);
5680 if (unlikely(wl->state != WLCORE_STATE_ON))
5683 ret = wl1271_ps_elp_wakeup(wl);
5687 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5691 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5692 sinfo->signal = rssi_dbm;
5695 wl1271_ps_elp_sleep(wl);
5698 mutex_unlock(&wl->mutex);
5701 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5703 struct wl1271 *wl = hw->priv;
5706 mutex_lock(&wl->mutex);
5708 if (unlikely(wl->state != WLCORE_STATE_ON))
5711 /* packets are considered pending if in the TX queue or the FW */
5712 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5714 mutex_unlock(&wl->mutex);
5719 /* can't be const, mac80211 writes to this */
5720 static struct ieee80211_rate wl1271_rates[] = {
5722 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5723 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5725 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5726 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5727 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5729 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5730 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5731 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5733 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5734 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5735 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5737 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5738 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5740 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5741 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5743 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5744 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5746 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5747 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5749 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5750 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5752 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5753 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5755 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5756 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5758 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5759 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5762 /* can't be const, mac80211 writes to this */
5763 static struct ieee80211_channel wl1271_channels[] = {
5764 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5765 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5766 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5767 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5768 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5769 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5770 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5771 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5772 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5773 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5774 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5775 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5776 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5777 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5780 /* can't be const, mac80211 writes to this */
5781 static struct ieee80211_supported_band wl1271_band_2ghz = {
5782 .channels = wl1271_channels,
5783 .n_channels = ARRAY_SIZE(wl1271_channels),
5784 .bitrates = wl1271_rates,
5785 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5788 /* 5 GHz data rates for WL1273 */
5789 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5791 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5792 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5794 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5795 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5797 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5798 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5800 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5801 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5803 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5804 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5806 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5807 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5809 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5810 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5812 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5813 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5816 /* 5 GHz band channels for WL1273 */
5817 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5818 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5819 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5820 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5821 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5822 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5823 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5824 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5825 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5826 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5827 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5828 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5829 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5830 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5831 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5832 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5833 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5834 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5835 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5836 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5837 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5838 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5839 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5840 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5841 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5842 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5843 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5844 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5845 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5846 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5847 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5848 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5851 static struct ieee80211_supported_band wl1271_band_5ghz = {
5852 .channels = wl1271_channels_5ghz,
5853 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5854 .bitrates = wl1271_rates_5ghz,
5855 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5858 static const struct ieee80211_ops wl1271_ops = {
5859 .start = wl1271_op_start,
5860 .stop = wlcore_op_stop,
5861 .add_interface = wl1271_op_add_interface,
5862 .remove_interface = wl1271_op_remove_interface,
5863 .change_interface = wl12xx_op_change_interface,
5865 .suspend = wl1271_op_suspend,
5866 .resume = wl1271_op_resume,
5868 .config = wl1271_op_config,
5869 .prepare_multicast = wl1271_op_prepare_multicast,
5870 .configure_filter = wl1271_op_configure_filter,
5872 .set_key = wlcore_op_set_key,
5873 .hw_scan = wl1271_op_hw_scan,
5874 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5875 .sched_scan_start = wl1271_op_sched_scan_start,
5876 .sched_scan_stop = wl1271_op_sched_scan_stop,
5877 .bss_info_changed = wl1271_op_bss_info_changed,
5878 .set_frag_threshold = wl1271_op_set_frag_threshold,
5879 .set_rts_threshold = wl1271_op_set_rts_threshold,
5880 .conf_tx = wl1271_op_conf_tx,
5881 .get_tsf = wl1271_op_get_tsf,
5882 .get_survey = wl1271_op_get_survey,
5883 .sta_state = wl12xx_op_sta_state,
5884 .ampdu_action = wl1271_op_ampdu_action,
5885 .tx_frames_pending = wl1271_tx_frames_pending,
5886 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5887 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5888 .channel_switch = wl12xx_op_channel_switch,
5889 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5890 .flush = wlcore_op_flush,
5891 .remain_on_channel = wlcore_op_remain_on_channel,
5892 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5893 .add_chanctx = wlcore_op_add_chanctx,
5894 .remove_chanctx = wlcore_op_remove_chanctx,
5895 .change_chanctx = wlcore_op_change_chanctx,
5896 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5897 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5898 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5899 .sta_rc_update = wlcore_op_sta_rc_update,
5900 .sta_statistics = wlcore_op_sta_statistics,
5901 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5905 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5911 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5912 wl1271_error("Illegal RX rate from HW: %d", rate);
5916 idx = wl->band_rate_to_idx[band][rate];
5917 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5918 wl1271_error("Unsupported RX rate from HW: %d", rate);
5925 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5929 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5932 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5933 wl1271_warning("NIC part of the MAC address wraps around!");
5935 for (i = 0; i < wl->num_mac_addr; i++) {
5936 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5937 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5938 wl->addresses[i].addr[2] = (u8) oui;
5939 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5940 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5941 wl->addresses[i].addr[5] = (u8) nic;
5945 /* we may be one address short at the most */
5946 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5949 * turn on the LAA bit in the first address and use it as
5952 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5953 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5954 memcpy(&wl->addresses[idx], &wl->addresses[0],
5955 sizeof(wl->addresses[0]));
5957 wl->addresses[idx].addr[0] |= BIT(1);
5960 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5961 wl->hw->wiphy->addresses = wl->addresses;
5964 static int wl12xx_get_hw_info(struct wl1271 *wl)
5968 ret = wl12xx_set_power_on(wl);
5972 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5976 wl->fuse_oui_addr = 0;
5977 wl->fuse_nic_addr = 0;
5979 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5983 if (wl->ops->get_mac)
5984 ret = wl->ops->get_mac(wl);
5987 wl1271_power_off(wl);
5991 static int wl1271_register_hw(struct wl1271 *wl)
5994 u32 oui_addr = 0, nic_addr = 0;
5996 if (wl->mac80211_registered)
5999 if (wl->nvs_len >= 12) {
6000 /* NOTE: The wl->nvs->nvs element must be first, in
6001 * order to simplify the casting, we assume it is at
6002 * the beginning of the wl->nvs structure.
6004 u8 *nvs_ptr = (u8 *)wl->nvs;
6007 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6009 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6012 /* if the MAC address is zeroed in the NVS derive from fuse */
6013 if (oui_addr == 0 && nic_addr == 0) {
6014 oui_addr = wl->fuse_oui_addr;
6015 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6016 nic_addr = wl->fuse_nic_addr + 1;
6019 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6021 ret = ieee80211_register_hw(wl->hw);
6023 wl1271_error("unable to register mac80211 hw: %d", ret);
6027 wl->mac80211_registered = true;
6029 wl1271_debugfs_init(wl);
6031 wl1271_notice("loaded");
6037 static void wl1271_unregister_hw(struct wl1271 *wl)
6040 wl1271_plt_stop(wl);
6042 ieee80211_unregister_hw(wl->hw);
6043 wl->mac80211_registered = false;
6047 static int wl1271_init_ieee80211(struct wl1271 *wl)
6050 static const u32 cipher_suites[] = {
6051 WLAN_CIPHER_SUITE_WEP40,
6052 WLAN_CIPHER_SUITE_WEP104,
6053 WLAN_CIPHER_SUITE_TKIP,
6054 WLAN_CIPHER_SUITE_CCMP,
6055 WL1271_CIPHER_SUITE_GEM,
6058 /* The tx descriptor buffer */
6059 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6061 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6062 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6065 /* FIXME: find a proper value */
6066 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6068 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
6069 IEEE80211_HW_SUPPORTS_PS |
6070 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
6071 IEEE80211_HW_HAS_RATE_CONTROL |
6072 IEEE80211_HW_CONNECTION_MONITOR |
6073 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
6074 IEEE80211_HW_SPECTRUM_MGMT |
6075 IEEE80211_HW_AP_LINK_PS |
6076 IEEE80211_HW_AMPDU_AGGREGATION |
6077 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
6078 IEEE80211_HW_QUEUE_CONTROL |
6079 IEEE80211_HW_CHANCTX_STA_CSA |
6080 IEEE80211_HW_SUPPORT_FAST_XMIT;
6082 wl->hw->wiphy->cipher_suites = cipher_suites;
6083 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6085 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6086 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
6087 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
6088 wl->hw->wiphy->max_scan_ssids = 1;
6089 wl->hw->wiphy->max_sched_scan_ssids = 16;
6090 wl->hw->wiphy->max_match_sets = 16;
6092 * Maximum length of elements in scanning probe request templates
6093 * should be the maximum length possible for a template, without
6094 * the IEEE80211 header of the template
6096 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6097 sizeof(struct ieee80211_header);
6099 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6100 sizeof(struct ieee80211_header);
6102 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6104 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6105 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6106 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6107 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6109 /* make sure all our channels fit in the scanned_ch bitmask */
6110 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6111 ARRAY_SIZE(wl1271_channels_5ghz) >
6112 WL1271_MAX_CHANNELS);
6114 * clear channel flags from the previous usage
6115 * and restore max_power & max_antenna_gain values.
6117 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6118 wl1271_band_2ghz.channels[i].flags = 0;
6119 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6120 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6123 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6124 wl1271_band_5ghz.channels[i].flags = 0;
6125 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6126 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6130 * We keep local copies of the band structs because we need to
6131 * modify them on a per-device basis.
6133 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6134 sizeof(wl1271_band_2ghz));
6135 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6136 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6137 sizeof(*wl->ht_cap));
6138 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6139 sizeof(wl1271_band_5ghz));
6140 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6141 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6142 sizeof(*wl->ht_cap));
6144 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6145 &wl->bands[IEEE80211_BAND_2GHZ];
6146 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6147 &wl->bands[IEEE80211_BAND_5GHZ];
6150 * allow 4 queues per mac address we support +
6151 * 1 cab queue per mac + one global offchannel Tx queue
6153 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6155 /* the last queue is the offchannel queue */
6156 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6157 wl->hw->max_rates = 1;
6159 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6161 /* the FW answers probe-requests in AP-mode */
6162 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6163 wl->hw->wiphy->probe_resp_offload =
6164 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6165 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6166 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6168 /* allowed interface combinations */
6169 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6170 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6172 /* register vendor commands */
6173 wlcore_set_vendor_commands(wl->hw->wiphy);
6175 SET_IEEE80211_DEV(wl->hw, wl->dev);
6177 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6178 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6180 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6185 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6188 struct ieee80211_hw *hw;
6193 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6195 wl1271_error("could not alloc ieee80211_hw");
6201 memset(wl, 0, sizeof(*wl));
6203 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6205 wl1271_error("could not alloc wl priv");
6207 goto err_priv_alloc;
6210 INIT_LIST_HEAD(&wl->wlvif_list);
6215 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6216 * we don't allocate any additional resource here, so that's fine.
6218 for (i = 0; i < NUM_TX_QUEUES; i++)
6219 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6220 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6222 skb_queue_head_init(&wl->deferred_rx_queue);
6223 skb_queue_head_init(&wl->deferred_tx_queue);
6225 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6226 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6227 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6228 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6229 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6230 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6231 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6233 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6234 if (!wl->freezable_wq) {
6241 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6242 wl->band = IEEE80211_BAND_2GHZ;
6243 wl->channel_type = NL80211_CHAN_NO_HT;
6245 wl->sg_enabled = true;
6246 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6247 wl->recovery_count = 0;
6250 wl->ap_fw_ps_map = 0;
6252 wl->system_hlid = WL12XX_SYSTEM_HLID;
6253 wl->active_sta_count = 0;
6254 wl->active_link_count = 0;
6256 init_waitqueue_head(&wl->fwlog_waitq);
6258 /* The system link is always allocated */
6259 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6261 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6262 for (i = 0; i < wl->num_tx_desc; i++)
6263 wl->tx_frames[i] = NULL;
6265 spin_lock_init(&wl->wl_lock);
6267 wl->state = WLCORE_STATE_OFF;
6268 wl->fw_type = WL12XX_FW_TYPE_NONE;
6269 mutex_init(&wl->mutex);
6270 mutex_init(&wl->flush_mutex);
6271 init_completion(&wl->nvs_loading_complete);
6273 order = get_order(aggr_buf_size);
6274 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6275 if (!wl->aggr_buf) {
6279 wl->aggr_buf_size = aggr_buf_size;
6281 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6282 if (!wl->dummy_packet) {
6287 /* Allocate one page for the FW log */
6288 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6291 goto err_dummy_packet;
6294 wl->mbox_size = mbox_size;
6295 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6301 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6302 if (!wl->buffer_32) {
6313 free_page((unsigned long)wl->fwlog);
6316 dev_kfree_skb(wl->dummy_packet);
6319 free_pages((unsigned long)wl->aggr_buf, order);
6322 destroy_workqueue(wl->freezable_wq);
6325 wl1271_debugfs_exit(wl);
6329 ieee80211_free_hw(hw);
6333 return ERR_PTR(ret);
6335 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6337 int wlcore_free_hw(struct wl1271 *wl)
6339 /* Unblock any fwlog readers */
6340 mutex_lock(&wl->mutex);
6341 wl->fwlog_size = -1;
6342 wake_up_interruptible_all(&wl->fwlog_waitq);
6343 mutex_unlock(&wl->mutex);
6345 wlcore_sysfs_free(wl);
6347 kfree(wl->buffer_32);
6349 free_page((unsigned long)wl->fwlog);
6350 dev_kfree_skb(wl->dummy_packet);
6351 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6353 wl1271_debugfs_exit(wl);
6357 wl->fw_type = WL12XX_FW_TYPE_NONE;
6361 kfree(wl->raw_fw_status);
6362 kfree(wl->fw_status);
6363 kfree(wl->tx_res_if);
6364 destroy_workqueue(wl->freezable_wq);
6367 ieee80211_free_hw(wl->hw);
6371 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6374 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6375 .flags = WIPHY_WOWLAN_ANY,
6376 .n_patterns = WL1271_MAX_RX_FILTERS,
6377 .pattern_min_len = 1,
6378 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6382 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6384 return IRQ_WAKE_THREAD;
6387 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6389 struct wl1271 *wl = context;
6390 struct platform_device *pdev = wl->pdev;
6391 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6392 struct resource *res;
6395 irq_handler_t hardirq_fn = NULL;
6398 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6400 wl1271_error("Could not allocate nvs data");
6403 wl->nvs_len = fw->size;
6405 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6411 ret = wl->ops->setup(wl);
6415 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6417 /* adjust some runtime configuration parameters */
6418 wlcore_adjust_conf(wl);
6420 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6422 wl1271_error("Could not get IRQ resource");
6426 wl->irq = res->start;
6427 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6428 wl->if_ops = pdev_data->if_ops;
6430 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6431 hardirq_fn = wlcore_hardirq;
6433 wl->irq_flags |= IRQF_ONESHOT;
6435 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6436 wl->irq_flags, pdev->name, wl);
6438 wl1271_error("request_irq() failed: %d", ret);
6443 ret = enable_irq_wake(wl->irq);
6445 wl->irq_wake_enabled = true;
6446 device_init_wakeup(wl->dev, 1);
6447 if (pdev_data->pwr_in_suspend)
6448 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6451 disable_irq(wl->irq);
6453 ret = wl12xx_get_hw_info(wl);
6455 wl1271_error("couldn't get hw info");
6459 ret = wl->ops->identify_chip(wl);
6463 ret = wl1271_init_ieee80211(wl);
6467 ret = wl1271_register_hw(wl);
6471 ret = wlcore_sysfs_init(wl);
6475 wl->initialized = true;
6479 wl1271_unregister_hw(wl);
6482 free_irq(wl->irq, wl);
6488 release_firmware(fw);
6489 complete_all(&wl->nvs_loading_complete);
6492 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6496 if (!wl->ops || !wl->ptable)
6499 wl->dev = &pdev->dev;
6501 platform_set_drvdata(pdev, wl);
6503 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6504 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6507 wl1271_error("request_firmware_nowait failed: %d", ret);
6508 complete_all(&wl->nvs_loading_complete);
6513 EXPORT_SYMBOL_GPL(wlcore_probe);
6515 int wlcore_remove(struct platform_device *pdev)
6517 struct wl1271 *wl = platform_get_drvdata(pdev);
6519 wait_for_completion(&wl->nvs_loading_complete);
6520 if (!wl->initialized)
6523 if (wl->irq_wake_enabled) {
6524 device_init_wakeup(wl->dev, 0);
6525 disable_irq_wake(wl->irq);
6527 wl1271_unregister_hw(wl);
6528 free_irq(wl->irq, wl);
6533 EXPORT_SYMBOL_GPL(wlcore_remove);
6535 u32 wl12xx_debug_level = DEBUG_NONE;
6536 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6537 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6538 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6540 module_param_named(fwlog, fwlog_param, charp, 0);
6541 MODULE_PARM_DESC(fwlog,
6542 "FW logger options: continuous, ondemand, dbgpins or disable");
6544 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6545 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6547 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6548 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6550 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6551 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6553 MODULE_LICENSE("GPL");
6554 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6555 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6556 MODULE_FIRMWARE(WL12XX_NVS_NAME);