3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
/* Number of attempts to boot the firmware before giving up. */
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. A value of -1 (or NULL for the string)
 * means "not set on the command line, keep the wl->conf default";
 * they are applied in wlcore_adjust_conf().
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that the STA link has become authorized by sending a
 * peer-state command. Only meaningful for an associated STA-type vif;
 * the test_and_set_bit() on STA_STATE_SENT makes the call idempotent
 * so the command is issued at most once per association.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the DFS region from the request
 * and push the updated regulatory domain configuration to the chip.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
/*
 * Enable or disable PS RX streaming for a vif via an ACX command and
 * mirror the outcome in the WLVIF_FLAG_RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex (see comment below).
 */
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has beed changed or rx_streaming should be disabled
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 /* reconfigure/disable according to new streaming_period */
/* keep streaming only while associated and (always-on or soft-gemini) */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: enable RX streaming if the vif is still eligible
 * (associated, streaming not already started, always-on or soft-gemini
 * active) and arm the inactivity timer that will disable it again.
 */
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
/*
 * Deferred work: disable RX streaming (counterpart of the enable work),
 * waking the chip first and letting it sleep again afterwards.
 */
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: runs in timer (atomic) context, so defer
 * the actual disable to the rx_streaming_disable_work work item.
 */
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
/*
 * Restart the Tx watchdog countdown. A non-zero tx_allocated_blocks
 * means Tx is in flight and the watchdog should keep running.
 */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred work: forward a station rate-control update to the
 * chip-specific handler. Skipped if the core is not in the ON state.
 */
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no Tx completion has been seen for the
 * configured timeout. Benign causes (ROC in progress, active scan, AP
 * buffering frames for sleeping stations) just re-arm the watchdog;
 * otherwise Tx is considered stuck in the FW and recovery is queued.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Override the default wl->conf settings with any module parameters
 * the user supplied on load (fwlog, bug_on_recovery, no_recovery).
 * Out-of-range or unknown values are reported and the defaults kept.
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* map the fwlog_param string onto a logger mode/output combination */
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate host-side (high-level) PS for a single AP link based on the
 * FW's PS bitmap and the number of packets queued in FW for that link:
 * end high-level PS when the STA is awake or nearly drained, start it
 * when the STA sleeps with many packets pending (FW memory pressure).
 */
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached AP link-PS bitmap from the FW status and run PS
 * regulation over every station link of this AP vif.
 */
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts)
/*
 * Read the raw FW status block from the chip, convert it to host
 * layout and update all host-side accounting derived from it:
 * per-queue and per-link freed-packet counters, total freed Tx
 * blocks, the Tx watchdog, available Tx blocks, AP link PS state and
 * the host-chipset time offset. Counter deltas are masked to 8/32
 * bits to survive wrap-around of the FW counters.
 */
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred RX and TX-status queues into the network stack
 * (non-IRQ variants, so this must run in process context).
 */
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that repeatedly flushes the deferred queues until the
 * RX queue stays empty (new frames may arrive while flushing).
 */
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on interrupt-servicing iterations per invocation. */
526 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held: read the
 * FW status, then dispatch on the interrupt bits — watchdog (HW/SW)
 * triggers recovery; DATA drives RX, TX (direct call to avoid
 * starvation) and delayed completions; EVENT_A/B dispatch mailbox
 * events. Loops until no interrupt is pending or the loop budget is
 * spent; limited to a single pass for edge-triggered IRQs.
 */
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: completes any pending ELP wakeup, defers the
 * work while suspended (marking it pending and disabling the IRQ),
 * otherwise runs wlcore_irq_locked() under wl->mutex and queues
 * recovery on failure. Queues tx_work afterwards if TX was not
 * handled inline here.
 */
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
/*
 * Accumulator for the active-interface iteration below: tracks the
 * vif being looked for and whether it is currently running.
 */
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
709 bool cur_vif_running;
/*
 * Per-interface iterator callback: flags when the interface we are
 * interested in (counter->cur_vif) is among the active interfaces.
 */
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Collect interface counts by iterating mac80211's active interfaces
 * into a freshly zeroed vif_counter_data.
 */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
/*
 * Choose and load the right firmware image (PLT, multi-role or
 * single-role based on the cached vif count), skip the load if that
 * type is already resident, and copy it into a vmalloc'd buffer at
 * wl->fw. wl->fw_type is set to NONE while the copy is in flight so a
 * failure never leaves a stale type describing freed data.
 */
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = request_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
/*
 * Kick off FW recovery: only from the ON state (preventing recursive
 * recoveries), move to RESTARTING, wake the chip, mask further
 * interrupts and queue the recovery work item.
 */
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to wl->fwlog, clamped so
 * the buffer never exceeds PAGE_SIZE. Returns the number of bytes
 * actually copied (0 once the buffer is full).
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
/*
 * Dump the FW panic log after a crash: walk the FW-memory linked list
 * of log blocks (first 4 LE bytes of each block point to the next),
 * remapping the chip partition window for each block and copying the
 * payload into wl->fwlog. Readers waiting on fwlog_waitq are woken at
 * the end, and the previous partition mapping is restored.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
/* nothing to do if the FW logger is unsupported or disabled */
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
/* continuous mode also skips the RX descriptor preceding the data */
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
/*
 * Preserve the link's freed-packets counter in the station's private
 * data so sequence numbers survive a FW restart; during recovery, pad
 * it to cover frames whose completion was never reported (larger pad
 * for GEM encryption).
 */
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look up the
 * station by MAC on this vif and save its counters. Warns on an
 * invalid hlid or a zero MAC address.
 */
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, the FW program
 * counter and interrupt status (read via a temporary switch to the
 * BOOT partition), and the running recovery count.
 */
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work item: optionally dump the FW panic log and recovery
 * diagnostics, honor the no_recovery/bug_on_recovery policies, stop
 * the Tx queues, save per-STA sequence counters, tear down every vif,
 * stop the core and ask mac80211 to restart the hardware.
 */
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
/* Wake the FW out of ELP by writing WAKE_UP to the ELP control register. */
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status/result buffers (raw_fw_status,
 * fw_status, tx_res_if); on failure all successfully allocated
 * buffers are freed again.
 */
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* error path: release what was already allocated */
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
/*
 * Power the chip on: settle delay, power on, post-power-on delay,
 * I/O reset, map the BOOT partition and wake the ELP module. Powers
 * the chip back off on failure.
 */
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1093 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to boot: power on, configure the bus
 * block size (dropping the Tx blocksize-align quirk if the bus can't
 * do it), allocate status buffers and fetch the right firmware image.
 */
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl);
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line test) mode: only allowed from the OFF
 * state. Boots the chip (retrying up to WL1271_BOOT_RETRIES times),
 * runs the chip-specific PLT init unless the mode is CHIP_AWAKE only,
 * and publishes the hw/fw versions through wiphy.
 */
1133 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1135 int retries = WL1271_BOOT_RETRIES;
1136 struct wiphy *wiphy = wl->hw->wiphy;
1138 static const char* const PLT_MODE[] = {
1147 mutex_lock(&wl->mutex);
1149 wl1271_notice("power up");
1151 if (wl->state != WLCORE_STATE_OFF) {
1152 wl1271_error("cannot go into PLT state because not "
1153 "in off state: %d", wl->state);
1158 /* Indicate to lower levels that we are now in PLT mode */
1160 wl->plt_mode = plt_mode;
1164 ret = wl12xx_chip_wakeup(wl, true);
1168 if (plt_mode != PLT_CHIP_AWAKE) {
1169 ret = wl->ops->plt_init(wl);
1174 wl->state = WLCORE_STATE_ON;
1175 wl1271_notice("firmware booted in PLT mode %s (%s)",
1177 wl->chip.fw_ver_str);
1179 /* update hw/fw version info in wiphy struct */
1180 wiphy->hw_version = wl->chip.id;
1181 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1182 sizeof(wiphy->fw_version));
/* retry/failure path: power off and eventually clear PLT mode */
1187 wl1271_power_off(wl);
1191 wl->plt_mode = PLT_OFF;
1193 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1194 WL1271_BOOT_RETRIES);
1196 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the
 * IRQ handler cannot race the shutdown), flush and cancel all pending
 * work, power the chip off and return to the OFF state.
 */
1201 int wl1271_plt_stop(struct wl1271 *wl)
1205 wl1271_notice("power down");
1208 * Interrupts must be disabled before setting the state to OFF.
1209 * Otherwise, the interrupt handler might be called and exit without
1210 * reading the interrupt status.
1212 wlcore_disable_interrupts(wl);
1213 mutex_lock(&wl->mutex);
1215 mutex_unlock(&wl->mutex);
1218 * This will not necessarily enable interrupts as interrupts
1219 * may have been disabled when op_stop was called. It will,
1220 * however, balance the above call to disable_interrupts().
1222 wlcore_enable_interrupts(wl);
1224 wl1271_error("cannot power down because not in PLT "
1225 "state: %d", wl->state);
1230 mutex_unlock(&wl->mutex);
1232 wl1271_flush_deferred_work(wl);
1233 cancel_work_sync(&wl->netstack_work);
1234 cancel_work_sync(&wl->recovery_work);
1235 cancel_delayed_work_sync(&wl->elp_work);
1236 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1238 mutex_lock(&wl->mutex);
1239 wl1271_power_off(wl);
1241 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1242 wl->state = WLCORE_STATE_OFF;
1244 wl->plt_mode = PLT_OFF;
1246 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback: map the skb to a hw queue and link (hlid),
 * drop it if the vif is missing, the link is invalid or the queue is
 * hard-stopped (watermark stops are soft and still accepted), queue
 * it on the link's Tx queue, apply the high-watermark flow control,
 * and schedule tx_work unless the FW is busy or TX is already pending.
 */
1252 static void wl1271_op_tx(struct ieee80211_hw *hw,
1253 struct ieee80211_tx_control *control,
1254 struct sk_buff *skb)
1256 struct wl1271 *wl = hw->priv;
1257 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1258 struct ieee80211_vif *vif = info->control.vif;
1259 struct wl12xx_vif *wlvif = NULL;
1260 unsigned long flags;
1265 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1266 ieee80211_free_txskb(hw, skb);
1270 wlvif = wl12xx_vif_to_data(vif);
1271 mapping = skb_get_queue_mapping(skb);
1272 q = wl1271_tx_get_queue(mapping);
1274 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1276 spin_lock_irqsave(&wl->wl_lock, flags);
1279 * drop the packet if the link is invalid or the queue is stopped
1280 * for any reason but watermark. Watermark is a "soft"-stop so we
1281 * allow these packets through.
1283 if (hlid == WL12XX_INVALID_LINK_ID ||
1284 (!test_bit(hlid, wlvif->links_map)) ||
1285 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1286 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1287 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1288 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1289 ieee80211_free_txskb(hw, skb);
1293 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1295 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1297 wl->tx_queue_count[q]++;
1298 wlvif->tx_queue_count[q]++;
1301 * The workqueue is slow to process the tx_queue and we need stop
1302 * the queue here, otherwise the queue will get too long.
1304 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1305 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1306 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1307 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1308 wlcore_stop_queue_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 * The chip specific setup must run before the first TX packet -
1314 * before that, the tx_work will not be initialized!
1317 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1318 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1319 ieee80211_queue_work(wl->hw, &wl->tx_work);
1322 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet requested by the FW (used to
 * free RX memory blocks). A no-op if one is already pending; sends it
 * immediately via wlcore_tx_work_locked() when the FW Tx path is not
 * busy, otherwise the threaded IRQ handler will pick it up.
 */
1325 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1327 unsigned long flags;
1330 /* no need to queue a new dummy packet if one is already pending */
1331 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1334 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1336 spin_lock_irqsave(&wl->wl_lock, flags);
1337 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1338 wl->tx_queue_count[q]++;
1339 spin_unlock_irqrestore(&wl->wl_lock, flags);
1341 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1342 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1343 return wlcore_tx_work_locked(wl);
1346 * If the FW TX is busy, TX work will be scheduled by the threaded
1347 * interrupt handler function
1353 * The size of the dummy packet should be at least 1400 bytes. However, in
1354 * order to minimize the number of bus transactions, aligning it to 512 bytes
1355 * boundaries could be beneficial, performance wise
1357 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy packet: a zeroed QoS-less NULLFUNC data
 * frame (ToDS) padded to TOTAL_TX_DUMMY_PACKET_SIZE, with room
 * reserved up front for the wl1271 Tx HW descriptor, management TID,
 * queue mapping 0 and a cleared tx_info. Returns NULL on allocation
 * failure.
 */
1359 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1361 struct sk_buff *skb;
1362 struct ieee80211_hdr_3addr *hdr;
1363 unsigned int dummy_packet_size;
1365 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1366 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1368 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1370 wl1271_warning("Failed to allocate a dummy packet skb");
1374 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1376 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1377 memset(hdr, 0, sizeof(*hdr));
1378 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1379 IEEE80211_STYPE_NULLFUNC |
1380 IEEE80211_FCTL_TODS);
1382 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1384 /* Dummy packets require the TID to be management */
1385 skb->priority = WL1271_TID_MGMT;
1387 /* Initialize all fields that might be used */
1388 skb_set_queue_mapping(skb, 0);
1389 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a cfg80211 WoWLAN pattern against FW RX-filter limits: count the
 * contiguous masked-in segments ("fields"), splitting a segment that crosses
 * the Ethernet/IP header boundary, and reject patterns that exceed
 * WL1271_RX_FILTER_MAX_FIELDS or the total fields-buffer size.
 */
1397 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1399 int num_fields = 0, in_field = 0, fields_size = 0;
1400 int i, pattern_len = 0;
1403 wl1271_warning("No mask in WoWLAN pattern");
1408 * The pattern is broken up into segments of bytes at different offsets
1409 * that need to be checked by the FW filter. Each segment is called
1410 * a field in the FW API. We verify that the total number of fields
1411 * required for this pattern won't exceed FW limits (8)
1412 * as well as the total fields buffer won't exceed the FW limit.
1413 * Note that if there's a pattern which crosses Ethernet/IP header
1414 * boundary a new field is required.
1416 for (i = 0; i < p->pattern_len; i++) {
1417 if (test_bit(i, (unsigned long *)p->mask)) {
/* a field in progress is closed when it hits the eth/IP boundary */
1422 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1424 fields_size += pattern_len +
1425 RX_FILTER_FIELD_OVERHEAD;
1433 fields_size += pattern_len +
1434 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1441 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1446 wl1271_warning("RX Filter too complex. Too many segments");
1450 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1451 wl1271_warning("RX filter pattern is too big");
/*
 * Allocate a zero-initialized RX filter (num_fields starts at 0).
 * Free with wl1271_rx_filter_free(). Returns NULL on allocation failure.
 */
1458 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1460 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/*
 * Free an RX filter, including each field's separately-allocated pattern
 * buffer (allocated in wl1271_rx_filter_alloc_field()).
 */
1463 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1470 for (i = 0; i < filter->num_fields; i++)
1471 kfree(filter->fields[i].pattern);
/*
 * Append a field (offset/flags/pattern) to an RX filter. The pattern bytes
 * are copied into a freshly kzalloc'd buffer owned by the filter; fails when
 * WL1271_RX_FILTER_MAX_FIELDS fields are already in use or on OOM.
 */
1476 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1477 u16 offset, u8 flags,
1478 const u8 *pattern, u8 len)
1480 struct wl12xx_rx_filter_field *field;
1482 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1483 wl1271_warning("Max fields per RX filter. can't alloc another");
1487 field = &filter->fields[filter->num_fields];
1489 field->pattern = kzalloc(len, GFP_KERNEL);
1490 if (!field->pattern) {
1491 wl1271_warning("Failed to allocate RX filter pattern");
1495 filter->num_fields++;
/* offset is stored little-endian, as consumed by the FW */
1497 field->offset = cpu_to_le16(offset);
1498 field->flags = flags;
1500 memcpy(field->pattern, pattern, len);
/*
 * Total number of bytes the filter's fields occupy once flattened for the
 * FW: per-field struct size (minus the host-side pattern pointer) plus the
 * pattern length. Must match wl1271_rx_filter_flatten_fields().
 */
1505 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1507 int i, fields_size = 0;
1509 for (i = 0; i < filter->num_fields; i++)
1510 fields_size += filter->fields[i].len +
1511 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a contiguous buffer in FW wire format:
 * each fixed field header is followed inline by its variable-length pattern.
 * buf must be at least wl1271_rx_filter_get_fields_size() bytes.
 */
1517 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 struct wl12xx_rx_filter_field *field;
1523 for (i = 0; i < filter->num_fields; i++) {
1524 field = (struct wl12xx_rx_filter_field *)buf;
1526 field->offset = filter->fields[i].offset;
1527 field->flags = filter->fields[i].flags;
1528 field->len = filter->fields[i].len;
/* pattern bytes are written in place of the host-side pointer member */
1530 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1531 buf += sizeof(struct wl12xx_rx_filter_field) -
1532 sizeof(u8 *) + field->len;
/*
 * Walk the pattern's mask bit-by-bit, turning each contiguous masked run
 * into a filter field. Runs are split at the Ethernet/IP header boundary
 * and IP-header offsets are made relative to the start of the IP header.
 */
1537 * Allocates an RX filter returned through f
1538 * which needs to be freed using rx_filter_free()
1541 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1542 struct wl12xx_rx_filter **f)
1545 struct wl12xx_rx_filter *filter;
1549 filter = wl1271_rx_filter_alloc();
1551 wl1271_warning("Failed to alloc rx filter");
1557 while (i < p->pattern_len) {
1558 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of the current masked-in run */
1563 for (j = i; j < p->pattern_len; j++) {
1564 if (!test_bit(j, (unsigned long *)p->mask))
1567 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1568 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1574 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1576 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1577 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1582 ret = wl1271_rx_filter_alloc_field(filter,
1585 &p->pattern[i], len);
1592 filter->action = FILTER_SIGNAL;
/* error path: release the partially-built filter */
1598 wl1271_rx_filter_free(filter);
/*
 * Program FW RX filters from the WoWLAN configuration. With no patterns (or
 * wow->any) the default filter action is restored and all filters cleared;
 * otherwise every pattern is validated up front, each is converted to an RX
 * filter and enabled, and the default action is set to drop everything else.
 */
1604 static int wl1271_configure_wowlan(struct wl1271 *wl,
1605 struct cfg80211_wowlan *wow)
1609 if (!wow || wow->any || !wow->n_patterns) {
1610 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1615 ret = wl1271_rx_filter_clear_all(wl);
1622 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1625 /* Validate all incoming patterns before clearing current FW state */
1626 for (i = 0; i < wow->n_patterns; i++) {
1627 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1629 wl1271_warning("Bad wowlan pattern %d", i);
1634 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 ret = wl1271_rx_filter_clear_all(wl);
1642 /* Translate WoWLAN patterns into filters */
1643 for (i = 0; i < wow->n_patterns; i++) {
1644 struct cfg80211_pkt_pattern *p;
1645 struct wl12xx_rx_filter *filter = NULL;
1647 p = &wow->patterns[i];
1649 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1651 wl1271_warning("Failed to create an RX filter from "
1652 "wowlan pattern %d", i);
1656 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy, so the host-side filter is freed here */
1658 wl1271_rx_filter_free(filter);
1663 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend path for a STA vif: only acts if associated. Applies the WoWLAN
 * filters, then pushes the suspend-time wake-up conditions to the FW unless
 * they are identical to the active (non-suspend) configuration.
 */
1669 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
1675 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1678 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend config equals the regular config */
1682 if ((wl->conf.conn.suspend_wake_up_event ==
1683 wl->conf.conn.wake_up_event) &&
1684 (wl->conf.conn.suspend_listen_interval ==
1685 wl->conf.conn.listen_interval))
1688 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1689 wl->conf.conn.suspend_wake_up_event,
1690 wl->conf.conn.suspend_listen_interval);
1693 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Suspend path for an AP vif: only acts if the AP is started. Enables
 * beacon filtering and applies the WoWLAN configuration.
 */
1699 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1700 struct wl12xx_vif *wlvif,
1701 struct cfg80211_wowlan *wow)
1705 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1708 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 ret = wl1271_configure_wowlan(wl, wow);
/* Dispatch per-vif suspend configuration by BSS type (STA vs AP). */
1721 static int wl1271_configure_suspend(struct wl1271 *wl,
1722 struct wl12xx_vif *wlvif,
1723 struct cfg80211_wowlan *wow)
1725 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1726 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1727 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1728 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo suspend-time configuration on resume: clear WoWLAN filtering, then
 * restore the regular wake-up conditions (associated STA) or disable beacon
 * filtering (started AP). Other vif types are ignored.
 */
1732 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1735 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1736 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1738 if ((!is_ap) && (!is_sta))
1741 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1742 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow config disables the RX filters set up for suspend */
1745 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore if suspend config equalled the regular config */
1748 if ((wl->conf.conn.suspend_wake_up_event ==
1749 wl->conf.conn.wake_up_event) &&
1750 (wl->conf.conn.suspend_listen_interval ==
1751 wl->conf.conn.listen_interval))
1754 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1755 wl->conf.conn.wake_up_event,
1756 wl->conf.conn.listen_interval);
1759 wl1271_error("resume: wake up conditions failed: %d",
1763 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend handler. Postpones suspend while a recovery is in
 * progress, flushes TX, configures each (non-p2p-mgmt) vif for WoWLAN,
 * disables fast link flow-control notifications and optionally RX BA
 * filtering, then quiesces interrupts/works and cancels the TX watchdog.
 */
1767 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1768 struct cfg80211_wowlan *wow)
1770 struct wl1271 *wl = hw->priv;
1771 struct wl12xx_vif *wlvif;
1774 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1777 /* we want to perform the recovery before suspending */
1778 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1779 wl1271_warning("postponing suspend to perform recovery");
1783 wl1271_tx_flush(wl);
1785 mutex_lock(&wl->mutex);
1787 ret = wl1271_ps_elp_wakeup(wl);
1789 mutex_unlock(&wl->mutex);
1793 wl->wow_enabled = true;
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 if (wlcore_is_p2p_mgmt(wlvif))
1798 ret = wl1271_configure_suspend(wl, wlvif, wow);
1800 mutex_unlock(&wl->mutex);
1801 wl1271_warning("couldn't prepare device to suspend");
1806 /* disable fast link flow control notifications from FW */
1807 ret = wlcore_hw_interrupt_notify(wl, false);
1811 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1812 ret = wlcore_hw_rx_ba_filter(wl,
1813 !!wl->conf.conn.suspend_rx_ba_activity);
1818 wl1271_ps_elp_sleep(wl);
1819 mutex_unlock(&wl->mutex);
1822 wl1271_warning("couldn't prepare device to suspend");
1826 /* flush any remaining work */
1827 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1830 * disable and re-enable interrupts in order to flush
1833 wlcore_disable_interrupts(wl);
1836 * set suspended flag to avoid triggering a new threaded_irq
1837 * work. no need for spinlock as interrupts are disabled.
1839 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1841 wlcore_enable_interrupts(wl);
1842 flush_work(&wl->tx_work);
1843 flush_delayed_work(&wl->elp_work);
1846 * Cancel the watchdog even if above tx_flush failed. We will detect
1847 * it on resume anyway.
1849 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume handler. Re-enables irq_work enqueuing, runs any IRQ work
 * postponed during suspend (unless a recovery is pending, in which case the
 * forgotten recovery is queued instead), restores each vif's pre-suspend
 * configuration and FW notifications, and arms a TX-watchdog re-init flag.
 */
1854 static int wl1271_op_resume(struct ieee80211_hw *hw)
1856 struct wl1271 *wl = hw->priv;
1857 struct wl12xx_vif *wlvif;
1858 unsigned long flags;
1859 bool run_irq_work = false, pending_recovery;
1862 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1864 WARN_ON(!wl->wow_enabled);
1867 * re-enable irq_work enqueuing, and call irq_work directly if
1868 * there is a pending work.
1870 spin_lock_irqsave(&wl->wl_lock, flags);
1871 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1872 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1873 run_irq_work = true;
1874 spin_unlock_irqrestore(&wl->wl_lock, flags);
1876 mutex_lock(&wl->mutex);
1878 /* test the recovery flag before calling any SDIO functions */
1879 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1883 wl1271_debug(DEBUG_MAC80211,
1884 "run postponed irq_work directly");
1886 /* don't talk to the HW if recovery is pending */
1887 if (!pending_recovery) {
1888 ret = wlcore_irq_locked(wl);
1890 wl12xx_queue_recovery_work(wl);
1893 wlcore_enable_interrupts(wl);
1896 if (pending_recovery) {
1897 wl1271_warning("queuing forgotten recovery on resume");
1898 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1902 ret = wl1271_ps_elp_wakeup(wl);
1906 wl12xx_for_each_wlvif(wl, wlvif) {
1907 if (wlcore_is_p2p_mgmt(wlvif))
1910 wl1271_configure_resume(wl, wlvif);
/* re-enable the fast link flow control notifications */
1913 ret = wlcore_hw_interrupt_notify(wl, true);
1917 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1918 ret = wlcore_hw_rx_ba_filter(wl, false);
1923 wl1271_ps_elp_sleep(wl);
1926 wl->wow_enabled = false;
1929 * Set a flag to re-init the watchdog on the first Tx after resume.
1930 * That way we avoid possible conditions where Tx-complete interrupts
1931 * fail to arrive and we perform a spurious recovery.
1933 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1934 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback — intentionally does nothing beyond logging; the
 * actual HW boot is deferred (see the comment below for why).
 */
1940 static int wl1271_op_start(struct ieee80211_hw *hw)
1942 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1945 * We have to delay the booting of the hardware because
1946 * we need to know the local MAC address before downloading and
1947 * initializing the firmware. The MAC address cannot be changed
1948 * after boot, and without the proper MAC address, the firmware
1949 * will not function properly.
1951 * The MAC address is first known when the corresponding interface
1952 * is added. That is where we will initialize the hardware.
/*
 * Tear down the device with wl->mutex held: move state to OFF, disable
 * interrupts (nosync, since the mutex is held), then drop the mutex to
 * synchronize interrupts and flush/cancel pending works, re-acquire it,
 * reset TX state, power off, and reset all per-device bookkeeping to its
 * post-boot defaults. Also balances the interrupt-disable depth when a
 * recovery had been scheduled.
 */
1958 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just balance a recovery's interrupt disable and bail */
1962 if (wl->state == WLCORE_STATE_OFF) {
1963 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1965 wlcore_enable_interrupts(wl);
1971 * this must be before the cancel_work calls below, so that the work
1972 * functions don't perform further work.
1974 wl->state = WLCORE_STATE_OFF;
1977 * Use the nosync variant to disable interrupts, so the mutex could be
1978 * held while doing so without deadlocking.
1980 wlcore_disable_interrupts_nosync(wl);
1982 mutex_unlock(&wl->mutex);
1984 wlcore_synchronize_interrupts(wl);
1985 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1986 cancel_work_sync(&wl->recovery_work);
1987 wl1271_flush_deferred_work(wl);
1988 cancel_delayed_work_sync(&wl->scan_complete_work);
1989 cancel_work_sync(&wl->netstack_work);
1990 cancel_work_sync(&wl->tx_work);
1991 cancel_delayed_work_sync(&wl->elp_work);
1992 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1994 /* let's notify MAC80211 about the remaining pending TX frames */
1995 mutex_lock(&wl->mutex);
1996 wl12xx_tx_reset(wl);
1998 wl1271_power_off(wl);
2000 * In case a recovery was scheduled, interrupts were disabled to avoid
2001 * an interrupt storm. Now that the power is down, it is safe to
2002 * re-enable interrupts to balance the disable depth
2004 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2005 wlcore_enable_interrupts(wl);
/* restore the power-on defaults for all per-device state */
2007 wl->band = IEEE80211_BAND_2GHZ;
2010 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2011 wl->channel_type = NL80211_CHAN_NO_HT;
2012 wl->tx_blocks_available = 0;
2013 wl->tx_allocated_blocks = 0;
2014 wl->tx_results_count = 0;
2015 wl->tx_packets_count = 0;
2016 wl->time_offset = 0;
2017 wl->ap_fw_ps_map = 0;
2019 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2020 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2021 memset(wl->links_map, 0, sizeof(wl->links_map));
2022 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2023 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2024 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2025 wl->active_sta_count = 0;
2026 wl->active_link_count = 0;
2028 /* The system link is always allocated */
2029 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2030 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2031 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2034 * this is performed after the cancel_work calls and the associated
2035 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2036 * get executed before all these vars have been reset.
2040 wl->tx_blocks_freed = 0;
2042 for (i = 0; i < NUM_TX_QUEUES; i++) {
2043 wl->tx_pkts_freed[i] = 0;
2044 wl->tx_allocated_pkts[i] = 0;
2047 wl1271_debugfs_reset(wl);
/* release the FW-status / TX-result buffers; re-allocated on next boot */
2049 kfree(wl->raw_fw_status);
2050 wl->raw_fw_status = NULL;
2051 kfree(wl->fw_status);
2052 wl->fw_status = NULL;
2053 kfree(wl->tx_res_if);
2054 wl->tx_res_if = NULL;
2055 kfree(wl->target_mem_map);
2056 wl->target_mem_map = NULL;
2059 * FW channels must be re-calibrated after recovery,
2060 * save current Reg-Domain channel configuration and clear it.
2062 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2063 sizeof(wl->reg_ch_conf_pending));
2064 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take wl->mutex and delegate to the locked variant */
2067 static void wlcore_op_stop(struct ieee80211_hw *hw)
2069 struct wl1271 *wl = hw->priv;
2071 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2073 mutex_lock(&wl->mutex);
2075 wlcore_op_stop_locked(wl);
2077 mutex_unlock(&wl->mutex);
/*
 * Delayed work run when a started channel switch times out: report failure
 * to mac80211 via ieee80211_chswitch_done(vif, false) and tell the FW to
 * stop the channel switch. Bails out if the device is no longer ON or the
 * switch already completed (CS_PROGRESS flag cleared).
 */
2080 static void wlcore_channel_switch_work(struct work_struct *work)
2082 struct delayed_work *dwork;
2084 struct ieee80211_vif *vif;
2085 struct wl12xx_vif *wlvif;
2088 dwork = container_of(work, struct delayed_work, work);
2089 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2092 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2094 mutex_lock(&wl->mutex);
2096 if (unlikely(wl->state != WLCORE_STATE_ON))
2099 /* check the channel switch is still ongoing */
2100 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2103 vif = wl12xx_wlvif_to_vif(wlvif);
2104 ieee80211_chswitch_done(vif, false);
2106 ret = wl1271_ps_elp_wakeup(wl);
2110 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2112 wl1271_ps_elp_sleep(wl);
2114 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a connection loss to mac80211 via
 * ieee80211_connection_loss(). Skipped if the device is not ON or the
 * station is no longer marked associated.
 */
2117 static void wlcore_connection_loss_work(struct work_struct *work)
2119 struct delayed_work *dwork;
2121 struct ieee80211_vif *vif;
2122 struct wl12xx_vif *wlvif;
2124 dwork = container_of(work, struct delayed_work, work);
2125 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2128 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2130 mutex_lock(&wl->mutex);
2132 if (unlikely(wl->state != WLCORE_STATE_ON))
2135 /* Call mac80211 connection loss */
2136 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2139 vif = wl12xx_wlvif_to_vif(wlvif);
2140 ieee80211_connection_loss(vif);
2142 mutex_unlock(&wl->mutex);
/*
 * Delayed work that finalizes a pending station authentication: if no newer
 * auth reply arrived within (roughly) WLCORE_PEND_AUTH_ROC_TIMEOUT, the ROC
 * held for the connecting station is cancelled via
 * wlcore_update_inconn_sta(..., NULL, false).
 */
2145 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2147 struct delayed_work *dwork;
2149 struct wl12xx_vif *wlvif;
2150 unsigned long time_spare;
2153 dwork = container_of(work, struct delayed_work, work);
2154 wlvif = container_of(dwork, struct wl12xx_vif,
2155 pending_auth_complete_work);
2158 mutex_lock(&wl->mutex);
2160 if (unlikely(wl->state != WLCORE_STATE_ON))
2164 * Make sure a second really passed since the last auth reply. Maybe
2165 * a second auth reply arrived while we were stuck on the mutex.
2166 * Check for a little less than the timeout to protect from scheduler
2169 time_spare = jiffies +
2170 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2171 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2174 ret = wl1271_ps_elp_wakeup(wl);
2178 /* cancel the ROC if active */
2179 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2181 wl1271_ps_elp_sleep(wl);
2183 mutex_unlock(&wl->mutex);
/*
 * Reserve the first free slot in the rate-policies bitmap; the claimed
 * slot index is handed back through *idx (assignment not visible in this
 * extract — verify against the full source). Fails when all
 * WL12XX_MAX_RATE_POLICIES slots are taken.
 */
2186 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2188 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2189 WL12XX_MAX_RATE_POLICIES);
2190 if (policy >= WL12XX_MAX_RATE_POLICIES)
2193 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot back to the bitmap and invalidate *idx by
 * setting it to the out-of-range sentinel WL12XX_MAX_RATE_POLICIES.
 */
2198 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2200 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2203 __clear_bit(*idx, wl->rate_policies_map);
2204 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Reserve the first free keep-alive (KLV) template slot; mirrors
 * wl12xx_allocate_rate_policy() but on the klv_templates_map bitmap.
 */
2207 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2209 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2210 WLCORE_MAX_KLV_TEMPLATES);
2211 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2214 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive (KLV) template slot and invalidate *idx with the
 * out-of-range sentinel WLCORE_MAX_KLV_TEMPLATES.
 */
2219 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2221 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2224 __clear_bit(*idx, wl->klv_templates_map);
2225 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type to a FW role type. Each BSS type yields two
 * candidates (e.g. P2P_GO vs AP, P2P_CL vs STA) — the selecting condition
 * (presumably wlvif->p2p) is not visible in this extract; verify against
 * the full source. Unknown BSS types return WL12XX_INVALID_ROLE_TYPE.
 */
2228 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2230 switch (wlvif->bss_type) {
2231 case BSS_TYPE_AP_BSS:
2233 return WL1271_ROLE_P2P_GO;
2235 return WL1271_ROLE_AP;
2237 case BSS_TYPE_STA_BSS:
2239 return WL1271_ROLE_P2P_CL;
2241 return WL1271_ROLE_STA;
2244 return WL1271_ROLE_IBSS;
2247 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2249 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state for a newly added interface: derive the
 * BSS type from the mac80211 vif type, allocate rate policies (plus a
 * keep-alive template for STA/IBSS), seed rates/band/channel/power from the
 * device-global values, and set up the vif's works and timers.
 */
2252 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2254 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2257 /* clear everything but the persistent data */
2258 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent))
2260 switch (ieee80211_vif_type_p2p(vif)) {
2261 case NL80211_IFTYPE_P2P_CLIENT:
2264 case NL80211_IFTYPE_STATION:
2265 case NL80211_IFTYPE_P2P_DEVICE:
2266 wlvif->bss_type = BSS_TYPE_STA_BSS;
2268 case NL80211_IFTYPE_ADHOC:
2269 wlvif->bss_type = BSS_TYPE_IBSS;
2271 case NL80211_IFTYPE_P2P_GO:
2274 case NL80211_IFTYPE_AP:
2275 wlvif->bss_type = BSS_TYPE_AP_BSS;
2278 wlvif->bss_type = MAX_BSS_TYPE;
/* no role/link assigned yet — use the invalid-id sentinels */
2282 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2283 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2284 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2286 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2287 wlvif->bss_type == BSS_TYPE_IBSS) {
2288 /* init sta/ibss data */
2289 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2290 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2291 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2292 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2293 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2294 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2295 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2296 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2299 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2300 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2301 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2302 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2303 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2304 wl12xx_allocate_rate_policy(wl,
2305 &wlvif->ap.ucast_rate_idx[i]);
2306 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2308 * TODO: check if basic_rate shouldn't be
2309 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2310 * instead (the same thing for STA above).
2312 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2313 /* TODO: this seems to be used only for STA, check it */
2314 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2317 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2318 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2319 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2322 * mac80211 configures some values globally, while we treat them
2323 * per-interface. thus, on init, we have to copy them from wl
2325 wlvif->band = wl->band;
2326 wlvif->channel = wl->channel;
2327 wlvif->power_level = wl->power_level;
2328 wlvif->channel_type = wl->channel_type;
2330 INIT_WORK(&wlvif->rx_streaming_enable_work,
2331 wl1271_rx_streaming_enable_work);
2332 INIT_WORK(&wlvif->rx_streaming_disable_work,
2333 wl1271_rx_streaming_disable_work);
2334 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2335 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2336 wlcore_channel_switch_work);
2337 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2338 wlcore_connection_loss_work);
2339 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2340 wlcore_pending_auth_complete_work);
2341 INIT_LIST_HEAD(&wlvif->list);
2343 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2344 (unsigned long) wlvif);
/*
 * Boot the firmware, retrying up to WL1271_BOOT_RETRIES times: wake the
 * chip, run the chip-specific boot and HW init. A failed attempt drops the
 * mutex (deliberately — see inline comment) to flush pending IRQ work
 * before powering off. On success, publish hw/fw version info to wiphy,
 * disable the 5GHz band when 11a is unsupported, and mark the state ON.
 */
2348 static int wl12xx_init_fw(struct wl1271 *wl)
2350 int retries = WL1271_BOOT_RETRIES;
2351 bool booted = false;
2352 struct wiphy *wiphy = wl->hw->wiphy;
2357 ret = wl12xx_chip_wakeup(wl, false);
2361 ret = wl->ops->boot(wl);
2365 ret = wl1271_hw_init(wl);
2373 mutex_unlock(&wl->mutex);
2374 /* Unlocking the mutex in the middle of handling is
2375 inherently unsafe. In this case we deem it safe to do,
2376 because we need to let any possibly pending IRQ out of
2377 the system (and while we are WLCORE_STATE_OFF the IRQ
2378 work function will not do anything.) Also, any other
2379 possible concurrent operations will fail due to the
2380 current state, hence the wl1271 struct should be safe. */
2381 wlcore_disable_interrupts(wl);
2382 wl1271_flush_deferred_work(wl);
2383 cancel_work_sync(&wl->netstack_work);
2384 mutex_lock(&wl->mutex);
2386 wl1271_power_off(wl);
2390 wl1271_error("firmware boot failed despite %d retries",
2391 WL1271_BOOT_RETRIES);
2395 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2397 /* update hw/fw version info in wiphy struct */
2398 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy does not guarantee NUL termination when
 * fw_ver_str fills the destination — verify against the full source */
2399 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2400 sizeof(wiphy->fw_version));
2403 * Now we know if 11a is supported (info from the NVS), so disable
2404 * 11a channels if not supported
2406 if (!wl->enable_11a)
2407 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2409 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2410 wl->enable_11a ? "" : "not ");
2412 wl->state = WLCORE_STATE_ON;
/* A device role counts as started once the vif holds a valid device hlid */
2417 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2419 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2423 * Check whether a fw switch (i.e. moving from one loaded
2424 * fw to another) is needed. This function is also responsible
2425 * for updating wl->last_vif_count, so it must be called before
2426 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2429 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2430 struct vif_counter_data vif_counter_data,
2433 enum wl12xx_fw_type current_fw = wl->fw_type;
2434 u8 vif_count = vif_counter_data.counter;
/* never switch fw while a vif change is already in flight */
2436 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2439 /* increase the vif count if this is a new vif */
2440 if (add && !vif_counter_data.cur_vif_running)
2443 wl->last_vif_count = vif_count;
2445 /* no need for fw change if the device is OFF */
2446 if (wl->state == WLCORE_STATE_OFF)
2449 /* no need for fw change if a single fw is used */
2450 if (!wl->mr_fw_name)
/* switch when the loaded fw's role capacity no longer matches vif_count */
2453 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2455 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2462 * Enter "forced psm". Make sure the sta is in psm against the ap,
2463 * to make the fw switch a bit more disconnection-persistent.
2465 static void wl12xx_force_active_psm(struct wl1271 *wl)
2467 struct wl12xx_vif *wlvif;
/* applies to every STA vif on the device */
2469 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2470 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Context for wlcore_hw_queue_iter(): records which hw-queue bases are
 * already taken by active interfaces */
2474 struct wlcore_hw_queue_iter_data {
2475 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
/* the vif a queue base is being allocated for */
2477 struct ieee80211_vif *vif;
2478 /* is the current vif among those iterated */
/*
 * Active-interface iterator: marks each vif's hw-queue base as taken in
 * iter_data->hw_queue_map (skipping P2P-device vifs, which own no queues)
 * and records whether the vif being allocated for is itself already running.
 */
2482 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2483 struct ieee80211_vif *vif)
2485 struct wlcore_hw_queue_iter_data *iter_data = data;
2487 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2488 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2491 if (iter_data->cur_running || vif == iter_data->vif) {
2492 iter_data->cur_running = true;
2496 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Pick a hw-queue base for a new vif. P2P-device vifs get no queues. If the
 * vif is already running in mac80211 (resume/recovery) its pre-allocated
 * base is reused; otherwise the first free base is claimed, its stop
 * reasons cleared and the queues registered in vif->hw_queue[]. AP vifs
 * additionally get a CAB (content-after-beacon) queue from the reserved
 * tail range.
 */
2499 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2500 struct wl12xx_vif *wlvif)
2502 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2503 struct wlcore_hw_queue_iter_data iter_data = {};
2506 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2507 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2511 iter_data.vif = vif;
2513 /* mark all bits taken by active interfaces */
2514 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2515 IEEE80211_IFACE_ITER_RESUME_ALL,
2516 wlcore_hw_queue_iter, &iter_data);
2518 /* the current vif is already running in mac80211 (resume/recovery) */
2519 if (iter_data.cur_running) {
2520 wlvif->hw_queue_base = vif->hw_queue[0];
2521 wl1271_debug(DEBUG_MAC80211,
2522 "using pre-allocated hw queue base %d",
2523 wlvif->hw_queue_base);
2525 /* interface type might have changed type */
2526 goto adjust_cab_queue;
2529 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2530 WLCORE_NUM_MAC_ADDRESSES);
2531 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2534 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2535 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2536 wlvif->hw_queue_base);
2538 for (i = 0; i < NUM_TX_QUEUES; i++) {
2539 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2540 /* register hw queues in mac80211 */
2541 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2545 /* the last places are reserved for cab queues per interface */
2546 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2547 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2548 wlvif->hw_queue_base / NUM_TX_QUEUES;
2550 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface handler: rejects new vifs in PLT mode, sets the
 * driver flags, initializes per-vif data, allocates a FW role type and a
 * hw-queue base, triggers an intended FW switch/recovery when moving
 * between single- and multi-role firmwares, boots the FW on first use (the
 * MAC address is only known here — see TODO), and enables the FW role(s).
 */
2555 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2556 struct ieee80211_vif *vif)
2558 struct wl1271 *wl = hw->priv;
2559 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2560 struct vif_counter_data vif_count;
2565 wl1271_error("Adding Interface not allowed while in PLT mode");
2569 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2570 IEEE80211_VIF_SUPPORTS_UAPSD |
2571 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2573 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2574 ieee80211_vif_type_p2p(vif), vif->addr);
2576 wl12xx_get_vif_count(hw, vif, &vif_count);
2578 mutex_lock(&wl->mutex);
2579 ret = wl1271_ps_elp_wakeup(wl);
2584 * in some very corner case HW recovery scenarios its possible to
2585 * get here before __wl1271_op_remove_interface is complete, so
2586 * opt out if that is the case.
2588 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2589 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2595 ret = wl12xx_init_vif_data(wl, vif);
2600 role_type = wl12xx_get_role_type(wl, wlvif);
2601 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2606 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* single-role <-> multi-role fw switch via an intended recovery */
2610 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2611 wl12xx_force_active_psm(wl);
2612 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2613 mutex_unlock(&wl->mutex);
2614 wl1271_recovery_work(&wl->recovery_work);
2619 * TODO: after the nvs issue will be solved, move this block
2620 * to start(), and make sure here the driver is ON.
2622 if (wl->state == WLCORE_STATE_OFF) {
2624 * we still need this in order to configure the fw
2625 * while uploading the nvs
2627 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2629 ret = wl12xx_init_fw(wl);
2634 if (!wlcore_is_p2p_mgmt(wlvif)) {
2635 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2636 role_type, &wlvif->role_id);
2640 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt vifs get a DEVICE role instead of the regular role */
2645 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2646 &wlvif->dev_role_id);
2650 /* needed mainly for configuring rate policies */
2651 ret = wl1271_sta_hw_init(wl, wlvif);
2656 list_add(&wlvif->list, &wl->wlvif_list);
2657 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2659 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2664 wl1271_ps_elp_sleep(wl);
2666 mutex_unlock(&wl->mutex);
/*
 * Tear down a single virtual interface. Called with wl->mutex held (the
 * function drops and re-takes it around the work-cancellation at the end).
 * Idles any scan owned by this vif, disables its firmware roles (skipped
 * while a hardware recovery is in progress), resets its tx state, frees
 * its rate policies and recorded AP keys, unlinks it from wl's vif list,
 * and finally cancels all of the vif's pending timers/work items.
 * NOTE(review): this chunk is elided (interior lines missing); several
 * error checks and braces are not visible here.
 */
2671 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2672 struct ieee80211_vif *vif,
2673 bool reset_tx_queues)
2675 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2677 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2679 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2681 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2684 /* because of hardware recovery, we may get here twice */
2685 if (wl->state == WLCORE_STATE_OFF)
2688 wl1271_info("down");
/* If this vif owns an in-progress scan, idle it and report completion. */
2690 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2691 wl->scan_wlvif == wlvif) {
2693 * Rearm the tx watchdog just before idling scan. This
2694 * prevents just-finished scans from triggering the watchdog
2696 wl12xx_rearm_tx_watchdog_locked(wl);
2698 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2699 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2700 wl->scan_wlvif = NULL;
2701 wl->scan.req = NULL;
2702 ieee80211_scan_completed(wl->hw, true);
2705 if (wl->sched_vif == wlvif)
2706 wl->sched_vif = NULL;
2708 if (wl->roc_vif == vif) {
2710 ieee80211_remain_on_channel_expired(wl->hw);
/* Only talk to the firmware when it is not being recovered. */
2713 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2714 /* disable active roles */
2715 ret = wl1271_ps_elp_wakeup(wl);
2719 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2720 wlvif->bss_type == BSS_TYPE_IBSS) {
2721 if (wl12xx_dev_role_started(wlvif))
2722 wl12xx_stop_dev(wl, wlvif);
2725 if (!wlcore_is_p2p_mgmt(wlvif)) {
2726 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2730 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2735 wl1271_ps_elp_sleep(wl);
2738 wl12xx_tx_reset_wlvif(wl, wlvif);
2740 /* clear all hlids (except system_hlid) */
2741 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2743 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2744 wlvif->bss_type == BSS_TYPE_IBSS) {
2745 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2746 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2747 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2748 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2749 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2751 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2752 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2753 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2754 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2755 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2756 wl12xx_free_rate_policy(wl,
2757 &wlvif->ap.ucast_rate_idx[i]);
2758 wl1271_free_ap_keys(wl, wlvif);
2761 dev_kfree_skb(wlvif->probereq);
2762 wlvif->probereq = NULL;
2763 if (wl->last_wlvif == wlvif)
2764 wl->last_wlvif = NULL;
2765 list_del(&wlvif->list);
2766 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2767 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2768 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2776 * Last AP, have more stations. Configure sleep auth according to STA.
2777 * Don't do this on unintended recovery.
2779 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2780 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2783 if (wl->ap_count == 0 && is_ap) {
2784 /* mask ap events */
2785 wl->event_mask &= ~wl->ap_event_mask;
2786 wl1271_event_unmask(wl);
2789 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2790 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2791 /* Configure for power according to debugfs */
2792 if (sta_auth != WL1271_PSM_ILLEGAL)
2793 wl1271_acx_sleep_auth(wl, sta_auth);
2794 /* Configure for ELP power saving */
2796 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/*
 * Drop the mutex while syncing against the vif's work items; they may
 * themselves take wl->mutex.
 */
2800 mutex_unlock(&wl->mutex);
2802 del_timer_sync(&wlvif->rx_streaming_timer);
2803 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2804 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2805 cancel_work_sync(&wlvif->rc_update_work);
2806 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2807 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2808 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2810 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback: validate the vif is still known to
 * the driver, remove it, and queue an (intended) firmware recovery when
 * the remaining vif mix requires a different firmware image.
 */
2813 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2814 struct ieee80211_vif *vif)
2816 struct wl1271 *wl = hw->priv;
2817 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2818 struct wl12xx_vif *iter;
2819 struct vif_counter_data vif_count;
2821 wl12xx_get_vif_count(hw, vif, &vif_count);
2822 mutex_lock(&wl->mutex);
/* skip if the hw is already down or the vif was never initialized */
2824 if (wl->state == WLCORE_STATE_OFF ||
2825 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2829 * wl->vif can be null here if someone shuts down the interface
2830 * just when hardware recovery has been started.
2832 wl12xx_for_each_wlvif(wl, iter) {
2836 __wl1271_op_remove_interface(wl, vif, true);
2839 WARN_ON(iter != wlvif);
/* different vif mix may need a different fw: force a clean restart */
2840 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2841 wl12xx_force_active_psm(wl);
2842 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2843 wl12xx_queue_recovery_work(wl);
2846 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface callback: implemented as remove + re-add of
 * the vif with the new type/p2p setting. The VIF_CHANGE_IN_PROGRESS flag
 * brackets the sequence so the rest of the driver can tell this apart
 * from a genuine teardown.
 */
2849 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2850 struct ieee80211_vif *vif,
2851 enum nl80211_iftype new_type, bool p2p)
2853 struct wl1271 *wl = hw->priv;
2856 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2857 wl1271_op_remove_interface(hw, vif);
2859 vif->type = new_type;
2861 ret = wl1271_op_add_interface(hw, vif);
2863 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN for this vif: role_start_ibss for IBSS,
 * role_start_sta for STA (with a start/stop/start dance to work around
 * a wl12xx firmware bug, gated by WLCORE_QUIRK_START_STA_FAILS).
 * Side effect: JOIN clears the chipset's WPA/WPA2 keys, hence the
 * warning when already associated (roaming supplies new keys).
 */
2867 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2870 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2873 * One of the side effects of the JOIN command is that is clears
2874 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2875 * to a WPA/WPA2 access point will therefore kill the data-path.
2876 * Currently the only valid scenario for JOIN during association
2877 * is on roaming, in which case we will also be given new keys.
2878 * Keep the below message for now, unless it starts bothering
2879 * users who really like to roam a lot :)
2881 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2882 wl1271_info("JOIN while associated.");
2884 /* clear encryption type */
2885 wlvif->encryption_type = KEY_NONE;
2888 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2890 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2892 * TODO: this is an ugly workaround for wl12xx fw
2893 * bug - we are not able to tx/rx after the first
2894 * start_sta, so make dummy start+stop calls,
2895 * and then call start_sta again.
2896 * this should be fixed in the fw.
2898 wl12xx_cmd_role_start_sta(wl, wlvif);
2899 wl12xx_cmd_role_stop_sta(wl, wlvif);
2902 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from @skb (management frame, IEs starting at
 * @offset) and cache it in wlvif->ssid / wlvif->ssid_len.
 * Errors out when no SSID IE is found or it exceeds
 * IEEE80211_MAX_SSID_LEN.
 */
2908 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2912 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2916 wl1271_error("No SSID in IEs!");
2921 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2922 wl1271_error("SSID is too long!");
2926 wlvif->ssid_len = ssid_len;
/* ptr[0]=EID, ptr[1]=len, SSID bytes start at ptr+2 */
2927 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Populate wlvif's SSID from the AP probe request mac80211 built for
 * this vif (STA mode only — other bss types are rejected up front).
 */
2931 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2933 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2934 struct sk_buff *skb;
2937 /* we currently only support setting the ssid from the ap probe req */
2938 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2941 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2945 ieoffset = offsetof(struct ieee80211_mgmt,
2946 u.probe_req.variable);
2947 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply association state to the firmware: cache aid/beacon interval/
 * channel type/QoS from bss_conf, build the PS-poll and probe-request
 * templates, enable connection monitoring and keep-alive (order of the
 * keep-alive commands matters — see comment below), sync PS mode to
 * mac80211's default (ACTIVE), and push the STA rate policies.
 * NOTE(review): chunk is elided; error-check lines between commands are
 * not all visible.
 */
2953 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2954 struct ieee80211_bss_conf *bss_conf,
2960 wlvif->aid = bss_conf->aid;
2961 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2962 wlvif->beacon_int = bss_conf->beacon_int;
2963 wlvif->wmm_enabled = bss_conf->qos;
2965 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2968 * with wl1271, we don't need to update the
2969 * beacon_int and dtim_period, because the firmware
2970 * updates it by itself when the first beacon is
2971 * received after a join.
2973 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2978 * Get a template for hardware connection maintenance
2980 dev_kfree_skb(wlvif->probereq);
2981 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2984 ieoffset = offsetof(struct ieee80211_mgmt,
2985 u.probe_req.variable);
2986 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2988 /* enable the connection monitoring feature */
2989 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2994 * The join command disable the keep-alive mode, shut down its process,
2995 * and also clear the template config, so we need to reset it all after
2996 * the join. The acx_aid starts the keep-alive process, and the order
2997 * of the commands below is relevant.
2999 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
3003 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
3007 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3011 ret = wl1271_acx_keep_alive_config(wl, wlvif,
3012 wlvif->sta.klv_template_id,
3013 ACX_KEEP_ALIVE_TPL_VALID);
3018 * The default fw psm configuration is AUTO, while mac80211 default
3019 * setting is off (ACTIVE), so sync the fw with the correct value.
3021 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3027 wl1271_tx_enabled_rates_get(wl,
3030 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc: clear the associated/joined flags, free the
 * cached probe-request template, disable connection monitoring,
 * keep-alive and beacon filtering, abort any in-progress channel
 * switch, and invalidate the keep-alive template.
 */
3038 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3041 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3043 /* make sure we are connected (sta) joined */
3045 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3048 /* make sure we are joined (ibss) */
3050 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3054 /* use defaults when not associated */
3057 /* free probe-request template */
3058 dev_kfree_skb(wlvif->probereq);
3059 wlvif->probereq = NULL;
3061 /* disable connection monitor features */
3062 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3066 /* Disable the keep-alive feature */
3067 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3071 /* disable beacon filtering */
3072 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* abort a pending channel switch and tell mac80211 it failed */
3077 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3078 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3080 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3081 ieee80211_chswitch_done(vif, false);
3082 cancel_delayed_work(&wlvif->channel_switch_work);
3085 /* invalidate keep-alive template */
3086 wl1271_acx_keep_alive_config(wl, wlvif,
3087 wlvif->sta.klv_template_id,
3088 ACX_KEEP_ALIVE_TPL_INVALID);
/*
 * Reset the vif's basic and current rate sets to the configured
 * bitrate mask for its current band.
 */
3093 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3095 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3096 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the vif's idle state via WLVIF_FLAG_ACTIVE. Entering idle also
 * stops a sched scan owned by this vif (the firmware only supports
 * sched_scan while idle — see comment below). No-op when the state is
 * unchanged.
 */
3099 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3102 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3104 if (idle == cur_idle)
3108 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3110 /* The current firmware only supports sched_scan in idle */
3111 if (wl->sched_vif == wlvif)
3112 wl->ops->sched_scan_stop(wl, wlvif);
3114 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif half of the config callback: currently only pushes a changed
 * tx power level to the firmware. P2P management vifs are skipped.
 */
3118 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3119 struct ieee80211_conf *conf, u32 changed)
3123 if (wlcore_is_p2p_mgmt(wlvif))
3126 if (conf->power_level != wlvif->power_level) {
3127 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* only cache the new level once the firmware accepted it */
3131 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback: record the global power level, wake the
 * chip, and apply the changed settings to every vif via
 * wl12xx_config_vif().
 */
3137 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3139 struct wl1271 *wl = hw->priv;
3140 struct wl12xx_vif *wlvif;
3141 struct ieee80211_conf *conf = &hw->conf;
3144 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3146 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3148 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3151 mutex_lock(&wl->mutex);
3153 if (changed & IEEE80211_CONF_CHANGE_POWER)
3154 wl->power_level = conf->power_level;
3156 if (unlikely(wl->state != WLCORE_STATE_ON))
3159 ret = wl1271_ps_elp_wakeup(wl);
3163 /* configure each interface */
3164 wl12xx_for_each_wlvif(wl, wlvif) {
3165 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3171 wl1271_ps_elp_sleep(wl);
3174 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast and consumed in
 * configure_filter (passed through mac80211 as an opaque u64).
 */
3179 struct wl1271_filter_params {
3182 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; /* MAC addresses */
/*
 * mac80211 prepare_multicast callback: snapshot the multicast address
 * list into a freshly allocated wl1271_filter_params (atomic context,
 * hence GFP_ATOMIC) and return it cast to u64 for configure_filter.
 * Filtering is disabled when the list exceeds
 * ACX_MC_ADDRESS_GROUP_MAX entries.
 */
3185 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3186 struct netdev_hw_addr_list *mc_list)
3188 struct wl1271_filter_params *fp;
3189 struct netdev_hw_addr *ha;
3191 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3193 wl1271_error("Out of memory setting filters.");
3197 /* update multicast filtering parameters */
3198 fp->mc_list_length = 0;
3199 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3200 fp->enabled = false;
3203 netdev_hw_addr_list_for_each(ha, mc_list) {
3204 memcpy(fp->mc_list[fp->mc_list_length],
3205 ha->addr, ETH_ALEN);
3206 fp->mc_list_length++;
3210 return (u64)(unsigned long)fp;
/* Subset of FIF_* filter flags this driver can honor (continuation elided). */
3213 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3215 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback: mask the requested flags down to
 * WL1271_SUPPORTED_FILTERS and program the firmware multicast group
 * address table for each non-AP vif using the snapshot built by
 * prepare_multicast (@multicast is the pointer returned there).
 * Other FIF_* filters are not programmable — see comment near the end.
 */
3219 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3220 unsigned int changed,
3221 unsigned int *total, u64 multicast)
3223 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3224 struct wl1271 *wl = hw->priv;
3225 struct wl12xx_vif *wlvif;
3229 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3230 " total %x", changed, *total);
3232 mutex_lock(&wl->mutex);
3234 *total &= WL1271_SUPPORTED_FILTERS;
3235 changed &= WL1271_SUPPORTED_FILTERS;
3237 if (unlikely(wl->state != WLCORE_STATE_ON))
3240 ret = wl1271_ps_elp_wakeup(wl);
3244 wl12xx_for_each_wlvif(wl, wlvif) {
3245 if (wlcore_is_p2p_mgmt(wlvif))
3248 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3249 if (*total & FIF_ALLMULTI)
3250 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3254 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3257 fp->mc_list_length);
3264 * the fw doesn't provide an api to configure the filters. instead,
3265 * the filters configuration is based on the active roles / ROC
3270 wl1271_ps_elp_sleep(wl);
3273 mutex_unlock(&wl->mutex);
/*
 * Queue an AP key for later installation (used before the AP role is
 * started — see wl1271_ap_init_hwenc). Stores a copy of the key in the
 * first free slot of wlvif->ap.recorded_keys; refuses oversized keys
 * and duplicate key ids.
 */
3277 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3278 u8 id, u8 key_type, u8 key_size,
3279 const u8 *key, u8 hlid, u32 tx_seq_32,
3282 struct wl1271_ap_key *ap_key;
3285 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3287 if (key_size > MAX_KEY_SIZE)
3291 * Find next free entry in ap_keys. Also check we are not replacing
3294 for (i = 0; i < MAX_NUM_KEYS; i++) {
3295 if (wlvif->ap.recorded_keys[i] == NULL)
3298 if (wlvif->ap.recorded_keys[i]->id == id) {
3299 wl1271_warning("trying to record key replacement");
3304 if (i == MAX_NUM_KEYS)
3307 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3312 ap_key->key_type = key_type;
3313 ap_key->key_size = key_size;
3314 memcpy(ap_key->key, key, key_size);
3315 ap_key->hlid = hlid;
3316 ap_key->tx_seq_32 = tx_seq_32;
3317 ap_key->tx_seq_16 = tx_seq_16;
3319 wlvif->ap.recorded_keys[i] = ap_key;
/* Free every recorded (not yet installed) AP key and clear the slots. */
3323 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3327 for (i = 0; i < MAX_NUM_KEYS; i++) {
3328 kfree(wlvif->ap.recorded_keys[i]);
3329 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Install every key recorded via wl1271_record_ap_key into the now
 * started AP role (keys without an hlid go to the broadcast link). If
 * any WEP key was installed, also program the default WEP key index.
 * The recorded list is freed on the way out.
 */
3333 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3336 struct wl1271_ap_key *key;
3337 bool wep_key_added = false;
3339 for (i = 0; i < MAX_NUM_KEYS; i++) {
3341 if (wlvif->ap.recorded_keys[i] == NULL)
3344 key = wlvif->ap.recorded_keys[i];
3346 if (hlid == WL12XX_INVALID_LINK_ID)
3347 hlid = wlvif->ap.bcast_hlid;
3349 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3350 key->id, key->key_type,
3351 key->key_size, key->key,
3352 hlid, key->tx_seq_32,
3357 if (key->key_type == KEY_WEP)
3358 wep_key_added = true;
3361 if (wep_key_added) {
3362 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3363 wlvif->ap.bcast_hlid);
3369 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key plumbing shared by AP and STA paths.
 * AP: before the AP role is started, keys are only recorded (removal is
 * silently accepted to keep mac80211 happy); afterwards they go straight
 * to the firmware via cmd_set_ap_key.
 * STA: unicast key removal is ignored (JOIN clears them anyway), as is
 * removal after the sta hlid is gone; otherwise cmd_set_sta_key is used.
 * NOTE(review): chunk is elided; some branch bodies are not visible.
 */
3373 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3374 u16 action, u8 id, u8 key_type,
3375 u8 key_size, const u8 *key, u32 tx_seq_32,
3376 u16 tx_seq_16, struct ieee80211_sta *sta)
3379 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3382 struct wl1271_station *wl_sta;
3386 wl_sta = (struct wl1271_station *)sta->drv_priv;
3387 hlid = wl_sta->hlid;
3389 hlid = wlvif->ap.bcast_hlid;
3392 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3394 * We do not support removing keys after AP shutdown.
3395 * Pretend we do to make mac80211 happy.
3397 if (action != KEY_ADD_OR_REPLACE)
3400 ret = wl1271_record_ap_key(wl, wlvif, id,
3402 key, hlid, tx_seq_32,
3405 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3406 id, key_type, key_size,
3407 key, hlid, tx_seq_32,
3415 static const u8 bcast_addr[ETH_ALEN] = {
3416 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3419 addr = sta ? sta->addr : bcast_addr;
3421 if (is_zero_ether_addr(addr)) {
3422 /* We dont support TX only encryption */
3426 /* The wl1271 does not allow to remove unicast keys - they
3427 will be cleared automatically on next CMD_JOIN. Ignore the
3428 request silently, as we dont want the mac80211 to emit
3429 an error message. */
3430 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3433 /* don't remove key if hlid was already deleted */
3434 if (action == KEY_REMOVE &&
3435 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3438 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3439 id, key_type, key_size,
3440 key, addr, tx_seq_32,
/*
 * mac80211 set_key callback: thin wrapper around the hw-specific
 * set_key op. GEM and TKIP ciphers change the firmware's spare-block
 * accounting, so for those the tx queues are stopped and flushed first
 * and restarted afterwards.
 */
3450 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3451 struct ieee80211_vif *vif,
3452 struct ieee80211_sta *sta,
3453 struct ieee80211_key_conf *key_conf)
3455 struct wl1271 *wl = hw->priv;
3457 bool might_change_spare =
3458 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3459 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3461 if (might_change_spare) {
3463 * stop the queues and flush to ensure the next packets are
3464 * in sync with FW spare block accounting
3466 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3467 wl1271_tx_flush(wl);
3470 mutex_lock(&wl->mutex);
3472 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3474 goto out_wake_queues;
3477 ret = wl1271_ps_elp_wakeup(wl);
3479 goto out_wake_queues;
3481 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3483 wl1271_ps_elp_sleep(wl);
3486 if (might_change_spare)
3487 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3489 mutex_unlock(&wl->mutex);
/*
 * Generic set_key implementation exported for the hw-specific modules:
 * resolves the target hlid (per-sta, AP broadcast, or STA link), seeds
 * the tx sequence counters from the link's freed-packet count, maps the
 * mac80211 cipher to a firmware key type, and dispatches add/remove via
 * wl1271_set_key. Adding a unicast (or WEP) key on a STA vif also
 * rebuilds the ARP response template when the encryption type changed.
 * NOTE(review): chunk is elided; the SET/DISABLE cmd cases and some
 * braces are not fully visible.
 */
3494 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3495 struct ieee80211_vif *vif,
3496 struct ieee80211_sta *sta,
3497 struct ieee80211_key_conf *key_conf)
3499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3506 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3508 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 key_conf->cipher, key_conf->keyidx,
3511 key_conf->keylen, key_conf->flags);
3512 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3514 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3516 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 hlid = wl_sta->hlid;
3519 hlid = wlvif->ap.bcast_hlid;
3522 hlid = wlvif->sta.hlid;
/* continue the tx sequence from where this link left off */
3524 if (hlid != WL12XX_INVALID_LINK_ID) {
3525 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3530 switch (key_conf->cipher) {
3531 case WLAN_CIPHER_SUITE_WEP40:
3532 case WLAN_CIPHER_SUITE_WEP104:
3535 key_conf->hw_key_idx = key_conf->keyidx;
3537 case WLAN_CIPHER_SUITE_TKIP:
3538 key_type = KEY_TKIP;
3539 key_conf->hw_key_idx = key_conf->keyidx;
3541 case WLAN_CIPHER_SUITE_CCMP:
3543 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3545 case WL1271_CIPHER_SUITE_GEM:
3549 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3556 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3557 key_conf->keyidx, key_type,
3558 key_conf->keylen, key_conf->key,
3559 tx_seq_32, tx_seq_16, sta);
3561 wl1271_error("Could not add or replace key");
3566 * reconfiguring arp response if the unicast (or common)
3567 * encryption key type was changed
3569 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3570 (sta || key_type == KEY_WEP) &&
3571 wlvif->encryption_type != key_type) {
3572 wlvif->encryption_type = key_type;
3573 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3575 wl1271_warning("build arp rsp failed: %d", ret);
3582 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3583 key_conf->keyidx, key_type,
3584 key_conf->keylen, key_conf->key,
3587 wl1271_error("Could not remove key");
3593 wl1271_error("Unsupported key cmd 0x%x", cmd);
3599 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key callback: remember the default key
 * index and, when WEP is in use, program it into the firmware.
 */
3601 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3602 struct ieee80211_vif *vif,
3605 struct wl1271 *wl = hw->priv;
3606 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3612 /* we don't handle unsetting of default key */
3616 mutex_lock(&wl->mutex);
3618 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3623 ret = wl1271_ps_elp_wakeup(wl);
3627 wlvif->default_key = key_idx;
3629 /* the default WEP key needs to be configured at least once */
3630 if (wlvif->encryption_type == KEY_WEP) {
3631 ret = wl12xx_cmd_set_default_wep_key(wl,
3639 wl1271_ps_elp_sleep(wl);
3642 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to the firmware (only on hardware
 * with the REGDOMAIN_CONF quirk). A failed config triggers a firmware
 * recovery.
 */
3645 void wlcore_regdomain_config(struct wl1271 *wl)
3649 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3652 mutex_lock(&wl->mutex);
3654 if (unlikely(wl->state != WLCORE_STATE_ON))
3657 ret = wl1271_ps_elp_wakeup(wl);
3661 ret = wlcore_cmd_regdomain_config_locked(wl);
3663 wl12xx_queue_recovery_work(wl);
3667 wl1271_ps_elp_sleep(wl);
3669 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan callback: start a one-shot scan with the first
 * requested SSID. Refused while any role is in ROC (remain-on-channel).
 * Must not return -EBUSY when the hw is off — see comment below.
 */
3672 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3673 struct ieee80211_vif *vif,
3674 struct ieee80211_scan_request *hw_req)
3676 struct cfg80211_scan_request *req = &hw_req->req;
3677 struct wl1271 *wl = hw->priv;
3682 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3685 ssid = req->ssids[0].ssid;
3686 len = req->ssids[0].ssid_len;
3689 mutex_lock(&wl->mutex);
3691 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3693 * We cannot return -EBUSY here because cfg80211 will expect
3694 * a call to ieee80211_scan_completed if we do - in this case
3695 * there won't be any call.
3701 ret = wl1271_ps_elp_wakeup(wl);
3705 /* fail if there is any role in ROC */
3706 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3707 /* don't allow scanning right now */
3712 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3714 wl1271_ps_elp_sleep(wl);
3716 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan callback: stop an in-flight scan in the
 * firmware, reset the driver's scan state, report the scan as aborted
 * to mac80211, and (outside the mutex) sync against the scan-complete
 * work.
 */
3721 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3722 struct ieee80211_vif *vif)
3724 struct wl1271 *wl = hw->priv;
3725 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3728 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3730 mutex_lock(&wl->mutex);
3732 if (unlikely(wl->state != WLCORE_STATE_ON))
3735 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3738 ret = wl1271_ps_elp_wakeup(wl);
3742 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3743 ret = wl->ops->scan_stop(wl, wlvif);
3749 * Rearm the tx watchdog just before idling scan. This
3750 * prevents just-finished scans from triggering the watchdog
3752 wl12xx_rearm_tx_watchdog_locked(wl);
3754 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3755 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3756 wl->scan_wlvif = NULL;
3757 wl->scan.req = NULL;
3758 ieee80211_scan_completed(wl->hw, true);
3761 wl1271_ps_elp_sleep(wl);
3763 mutex_unlock(&wl->mutex);
3765 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start callback: delegate to the hw-specific
 * sched_scan_start op and record this vif as the sched-scan owner on
 * success.
 */
3768 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3769 struct ieee80211_vif *vif,
3770 struct cfg80211_sched_scan_request *req,
3771 struct ieee80211_scan_ies *ies)
3773 struct wl1271 *wl = hw->priv;
3774 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3777 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3779 mutex_lock(&wl->mutex);
3781 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3786 ret = wl1271_ps_elp_wakeup(wl);
3790 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3794 wl->sched_vif = wlvif;
3797 wl1271_ps_elp_sleep(wl);
3799 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop callback: wake the chip and delegate to the
 * hw-specific sched_scan_stop op.
 */
3803 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3804 struct ieee80211_vif *vif)
3806 struct wl1271 *wl = hw->priv;
3807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3810 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3812 mutex_lock(&wl->mutex);
3814 if (unlikely(wl->state != WLCORE_STATE_ON))
3817 ret = wl1271_ps_elp_wakeup(wl);
3821 wl->ops->sched_scan_stop(wl, wlvif);
3823 wl1271_ps_elp_sleep(wl);
3825 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold callback: program the fragmentation
 * threshold in the firmware (global, not per-vif).
 */
3830 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3832 struct wl1271 *wl = hw->priv;
3835 mutex_lock(&wl->mutex);
3837 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3842 ret = wl1271_ps_elp_wakeup(wl);
3846 ret = wl1271_acx_frag_threshold(wl, value);
3848 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3850 wl1271_ps_elp_sleep(wl);
3853 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold callback: program the RTS threshold for
 * every vif (per-vif firmware setting, unlike frag threshold).
 */
3858 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3860 struct wl1271 *wl = hw->priv;
3861 struct wl12xx_vif *wlvif;
3864 mutex_lock(&wl->mutex);
3866 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3871 ret = wl1271_ps_elp_wakeup(wl);
3875 wl12xx_for_each_wlvif(wl, wlvif) {
3876 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3878 wl1271_warning("set rts threshold failed: %d", ret);
3880 wl1271_ps_elp_sleep(wl);
3883 mutex_unlock(&wl->mutex);
/*
 * Remove the first IE with element id @eid from a management frame skb
 * (IEs start at @ieoffset) by shifting the remaining bytes down and
 * trimming the skb.
 */
3888 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3891 const u8 *next, *end = skb->data + skb->len;
3892 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3893 skb->len - ieoffset);
3898 memmove(ie, next, end - next);
3899 skb_trim(skb, skb->len - len);
/*
 * Like wl12xx_remove_ie, but removes the first vendor-specific IE
 * matching @oui/@oui_type.
 */
3902 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3903 unsigned int oui, u8 oui_type,
3907 const u8 *next, *end = skb->data + skb->len;
3908 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3909 skb->data + ieoffset,
3910 skb->len - ieoffset);
3915 memmove(ie, next, end - next);
3916 skb_trim(skb, skb->len - len);
/*
 * Upload the probe response mac80211 built for this AP vif as the
 * firmware's AP probe-response template, and mark it as explicitly set
 * (so the beacon-derived legacy path is skipped later).
 */
3919 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3920 struct ieee80211_vif *vif)
3922 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3923 struct sk_buff *skb;
3926 skb = ieee80211_proberesp_get(wl->hw, vif);
3930 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3931 CMD_TEMPL_AP_PROBE_RESPONSE,
3940 wl1271_debug(DEBUG_AP, "probe response updated");
3941 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template path: derive the template from beacon
 * data. If the vif already has a non-hidden SSID, the data is uploaded
 * as-is; otherwise the SSID IE inside @probe_rsp_data is replaced with
 * the SSID from bss_conf by splicing head + new SSID IE + tail into a
 * stack buffer before uploading.
 */
3947 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3948 struct ieee80211_vif *vif,
3950 size_t probe_rsp_len,
3953 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3954 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3955 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3956 int ssid_ie_offset, ie_offset, templ_len;
3959 /* no need to change probe response if the SSID is set correctly */
3960 if (wlvif->ssid_len > 0)
3961 return wl1271_cmd_template_set(wl, wlvif->role_id,
3962 CMD_TEMPL_AP_PROBE_RESPONSE,
/* worst-case size check before building the spliced template */
3967 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3968 wl1271_error("probe_rsp template too big");
3972 /* start searching from IE offset */
3973 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3975 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3976 probe_rsp_len - ie_offset);
3978 wl1271_error("No SSID in beacon!");
3982 ssid_ie_offset = ptr - probe_rsp_data;
3983 ptr += (ptr[1] + 2); /* skip past the original SSID IE */
3985 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3987 /* insert SSID from bss_conf */
3988 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3989 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3990 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3991 bss_conf->ssid, bss_conf->ssid_len);
3992 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3994 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3995 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3996 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3998 return wl1271_cmd_template_set(wl, wlvif->role_id,
3999 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes: slot time, preamble length, and
 * CTS protection. Failures are logged as warnings; slot/CTS failures
 * appear to bail out (elided goto targets not visible here).
 */
4005 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4006 struct ieee80211_vif *vif,
4007 struct ieee80211_bss_conf *bss_conf,
4010 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4013 if (changed & BSS_CHANGED_ERP_SLOT) {
4014 if (bss_conf->use_short_slot)
4015 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4017 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4019 wl1271_warning("Set slot time failed %d", ret);
4024 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4025 if (bss_conf->use_short_preamble)
4026 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4028 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4031 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4032 if (bss_conf->use_cts_prot)
4033 ret = wl1271_acx_cts_protect(wl, wlvif,
4036 ret = wl1271_acx_cts_protect(wl, wlvif,
4037 CTSPROTECT_DISABLE);
4039 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Upload the current beacon as the firmware beacon template, update
 * wmm_enabled from the beacon's WMM IE, and — unless userspace already
 * set an explicit probe response — derive a probe-response template
 * from the beacon (stripping the TIM and P2P IEs and rewriting the
 * frame control). The beacon skb is freed on every exit path shown.
 * NOTE(review): chunk is elided; some error paths are not visible.
 */
4048 static int wlcore_set_beacon_template(struct wl1271 *wl,
4049 struct ieee80211_vif *vif,
4052 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4053 struct ieee80211_hdr *hdr;
4056 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4057 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4065 wl1271_debug(DEBUG_MASTER, "beacon updated");
4067 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4069 dev_kfree_skb(beacon);
4072 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4073 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4075 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4080 dev_kfree_skb(beacon);
/* presence of a WMM vendor IE in the beacon enables QoS handling */
4084 wlvif->wmm_enabled =
4085 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4086 WLAN_OUI_TYPE_MICROSOFT_WMM,
4087 beacon->data + ieoffset,
4088 beacon->len - ieoffset);
4091 * In case we already have a probe-resp beacon set explicitly
4092 * by usermode, don't use the beacon data.
4094 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4097 /* remove TIM ie from probe response */
4098 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4101 * remove p2p ie from probe response.
4102 * the fw responds to probe requests that don't include
4103 * the p2p ie. probe requests with p2p ie will be passed,
4104 * and will be responded by the supplicant (the spec
4105 * forbids including the p2p ie when responding to probe
4106 * requests that didn't include it).
4108 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4109 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon into a probe response frame */
4111 hdr = (struct ieee80211_hdr *) beacon->data;
4112 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4113 IEEE80211_STYPE_PROBE_RESP);
4115 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4120 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4121 CMD_TEMPL_PROBE_RESPONSE,
4126 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the AP probe-response template, and re-upload the beacon
 * template (restarting the DFS master if beaconing had been disabled).
 */
4134 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4135 struct ieee80211_vif *vif,
4136 struct ieee80211_bss_conf *bss_conf,
4139 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4140 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4143 if (changed & BSS_CHANGED_BEACON_INT) {
4144 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4145 bss_conf->beacon_int);
4147 wlvif->beacon_int = bss_conf->beacon_int;
4150 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4151 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4153 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4156 if (changed & BSS_CHANGED_BEACON) {
4157 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4161 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4163 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4170 wl1271_error("beacon info change failed: %d", ret);
/*
 * AP-mode bss_info_changed handling: refresh basic rates (rebuilding
 * rate policies and templates), forward beacon changes, start/stop the
 * AP role on BSS_CHANGED_BEACON_ENABLED (installing recorded keys on
 * start, cancelling a stray ROC on stop), apply ERP changes, and push
 * updated HT operation mode.
 * NOTE(review): chunk is elided; error-handling lines are not all
 * visible.
 */
4174 /* AP mode changes */
4175 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4176 struct ieee80211_vif *vif,
4177 struct ieee80211_bss_conf *bss_conf,
4180 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4183 if (changed & BSS_CHANGED_BASIC_RATES) {
4184 u32 rates = bss_conf->basic_rates;
4186 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4188 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4189 wlvif->basic_rate_set);
4191 ret = wl1271_init_ap_rates(wl, wlvif);
4193 wl1271_error("AP rate policy change failed %d", ret);
4197 ret = wl1271_ap_init_templates(wl, vif);
4201 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4205 ret = wlcore_set_beacon_template(wl, vif, true);
4210 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4214 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4215 if (bss_conf->enable_beacon) {
4216 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4217 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* install keys recorded before the role was started */
4221 ret = wl1271_ap_init_hwenc(wl, wlvif);
4225 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4226 wl1271_debug(DEBUG_AP, "started AP");
4229 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4231 * AP might be in ROC in case we have just
4232 * sent auth reply. handle it.
4234 if (test_bit(wlvif->role_id, wl->roc_map))
4235 wl12xx_croc(wl, wlvif->role_id);
4237 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4241 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4242 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4244 wl1271_debug(DEBUG_AP, "stopped AP");
4249 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4253 /* Handle HT information change */
4254 if ((changed & BSS_CHANGED_HT) &&
4255 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4256 ret = wl1271_acx_set_ht_information(wl, wlvif,
4257 bss_conf->ht_operation_mode);
4259 wl1271_warning("Set ht information failed %d", ret);
/*
 * Apply a new BSSID on a STA vif: cache beacon interval and basic/sta
 * rate sets, stop any sched scan owned by this vif (unsupported while
 * connected), push rate policies and null-data templates, set the SSID,
 * and mark the vif in use.
 */
4268 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4269 struct ieee80211_bss_conf *bss_conf,
4275 wl1271_debug(DEBUG_MAC80211,
4276 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4277 bss_conf->bssid, bss_conf->aid,
4278 bss_conf->beacon_int,
4279 bss_conf->basic_rates, sta_rate_set);
4281 wlvif->beacon_int = bss_conf->beacon_int;
4282 rates = bss_conf->basic_rates;
4283 wlvif->basic_rate_set =
4284 wl1271_tx_enabled_rates_get(wl, rates,
4287 wl1271_tx_min_rate_get(wl,
4288 wlvif->basic_rate_set);
4292 wl1271_tx_enabled_rates_get(wl,
4296 /* we only support sched_scan while not connected */
4297 if (wl->sched_vif == wlvif)
4298 wl->ops->sched_scan_stop(wl, wlvif);
4300 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4304 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4308 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4312 wlcore_set_ssid(wl, wlvif);
4314 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, push the
 * reverted rate policy to the FW, stop the STA role if it was running and
 * clear the in-use flag. Returns 0 or a negative FW-command error.
 */
4319 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4323 /* revert back to minimum rates for the current band */
4324 wl1271_set_band_rate(wl, wlvif);
4325 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4327 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* only a started STA role needs an explicit role_stop */
4331 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4332 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4333 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4338 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4341 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS info changes for STA and IBSS roles: IBSS join/leave,
 * idle transitions, CQM (RSSI trigger) setup, BSSID set/clear, beacon
 * filtering, ERP settings, join, association, power-save mode, HT peer
 * capabilities and ARP filtering. Called under wl->mutex with the chip
 * awake (from wl1271_op_bss_info_changed()).
 */
4342 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4343 struct ieee80211_vif *vif,
4344 struct ieee80211_bss_conf *bss_conf,
4347 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4348 bool do_join = false;
4349 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4350 bool ibss_joined = false;
4351 u32 sta_rate_set = 0;
4353 struct ieee80211_sta *sta;
4354 bool sta_exists = false;
4355 struct ieee80211_sta_ht_cap sta_ht_cap;
4358 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4364 if (changed & BSS_CHANGED_IBSS) {
4365 if (bss_conf->ibss_joined) {
4366 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)
4369 wlcore_unset_assoc(wl, wlvif);
4370 wl12xx_cmd_role_stop_sta(wl, wlvif);
4374 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4377 /* Need to update the SSID (for filtering etc) */
4378 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4381 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4382 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4383 bss_conf->enable_beacon ? "enabled" : "disabled");
4388 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4389 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
/* connection-quality monitoring: program the FW RSSI/SNR trigger */
4391 if (changed & BSS_CHANGED_CQM) {
4392 bool enable = false;
4393 if (bss_conf->cqm_rssi_thold)
4395 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4396 bss_conf->cqm_rssi_thold,
4397 bss_conf->cqm_rssi_hyst);
4400 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's supported rates and HT caps under RCU */
4403 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4404 BSS_CHANGED_ASSOC)) {
4406 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4408 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4410 /* save the supp_rates of the ap */
4411 sta_rate_set = sta->supp_rates[wlvif->band];
4412 if (sta->ht_cap.ht_supported)
4414 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4415 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4416 sta_ht_cap = sta->ht_cap;
4423 if (changed & BSS_CHANGED_BSSID) {
4424 if (!is_zero_ether_addr(bss_conf->bssid)) {
4425 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4430 /* Need to update the BSSID (for filtering etc) */
4433 ret = wlcore_clear_bssid(wl, wlvif);
4439 if (changed & BSS_CHANGED_IBSS) {
4440 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4441 bss_conf->ibss_joined);
4443 if (bss_conf->ibss_joined) {
4444 u32 rates = bss_conf->basic_rates;
4445 wlvif->basic_rate_set =
4446 wl1271_tx_enabled_rates_get(wl, rates,
4449 wl1271_tx_min_rate_get(wl,
4450 wlvif->basic_rate_set);
4452 /* by default, use 11b + OFDM rates */
4453 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4454 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4460 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4461 /* enable beacon filtering */
4462 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4467 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4472 ret = wlcore_join(wl, wlvif);
4474 wl1271_warning("cmd join failed %d", ret);
4479 if (changed & BSS_CHANGED_ASSOC) {
4480 if (bss_conf->assoc) {
4481 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* if the key was set before assoc, authorize the peer now */
4486 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4487 wl12xx_set_authorized(wl, wlvif);
4489 wlcore_unset_assoc(wl, wlvif);
4493 if (changed & BSS_CHANGED_PS) {
4494 if ((bss_conf->ps) &&
4495 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4496 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* conf.conn.forced_ps selects static vs. FW-automatic PS */
4500 if (wl->conf.conn.forced_ps) {
4501 ps_mode = STATION_POWER_SAVE_MODE;
4502 ps_mode_str = "forced";
4504 ps_mode = STATION_AUTO_PS_MODE;
4505 ps_mode_str = "auto";
4508 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4510 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4512 wl1271_warning("enter %s ps failed %d",
4514 } else if (!bss_conf->ps &&
4515 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4516 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4518 ret = wl1271_ps_set_mode(wl, wlvif,
4519 STATION_ACTIVE_MODE);
4521 wl1271_warning("exit auto ps failed %d", ret);
4525 /* Handle new association with HT. Do this after join. */
4528 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4530 ret = wlcore_hw_set_peer_cap(wl,
4536 wl1271_warning("Set ht cap failed %d", ret);
4542 ret = wl1271_acx_set_ht_information(wl, wlvif,
4543 bss_conf->ht_operation_mode);
4545 wl1271_warning("Set ht information failed %d",
4552 /* Handle arp filtering. Done after join. */
4553 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4554 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4555 __be32 addr = bss_conf->arp_addr_list[0];
4556 wlvif->sta.qos = bss_conf->qos;
4557 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
/* a single configured IPv4 address enables FW ARP offload */
4559 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4560 wlvif->ip_addr = addr;
4562 * The template should have been configured only upon
4563 * association. however, it seems that the correct ip
4564 * isn't being set (when sending), so we have to
4565 * reconfigure the template upon every ip change.
4567 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4569 wl1271_warning("build arp rsp failed: %d", ret);
4573 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4574 (ACX_ARP_FILTER_ARP_FILTERING |
4575 ACX_ARP_FILTER_AUTO_ARP),
4579 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 .bss_info_changed op: common entry point that handles TX-power
 * changes itself, then dispatches to the AP or STA/IBSS specific handler.
 * Wakes the chip (ELP wakeup) and holds wl->mutex around all FW access.
 */
4590 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4591 struct ieee80211_vif *vif,
4592 struct ieee80211_bss_conf *bss_conf,
4595 struct wl1271 *wl = hw->priv;
4596 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4597 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4600 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4601 wlvif->role_id, (int)changed);
4604 * make sure to cancel pending disconnections if our association
/* must be done before taking wl->mutex: cancel_..._sync may block */
4607 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4608 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* flush queued frames before beaconing is disabled on an AP */
4610 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4611 !bss_conf->enable_beacon)
4612 wl1271_tx_flush(wl);
4614 mutex_lock(&wl->mutex);
4616 if (unlikely(wl->state != WLCORE_STATE_ON))
4619 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4622 ret = wl1271_ps_elp_wakeup(wl);
4626 if ((changed & BSS_CHANGED_TXPOWER) &&
4627 bss_conf->txpower != wlvif->power_level) {
4629 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4633 wlvif->power_level = bss_conf->txpower;
4637 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4639 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4641 wl1271_ps_elp_sleep(wl);
4644 mutex_unlock(&wl->mutex);
/*
 * mac80211 .add_chanctx op: the driver keeps no per-context state, so this
 * only logs the new channel context.
 */
4647 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4648 struct ieee80211_chanctx_conf *ctx)
4650 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4651 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4652 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .remove_chanctx op: nothing to tear down in the driver, only
 * logs the removed channel context.
 */
4656 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4657 struct ieee80211_chanctx_conf *ctx)
4659 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4660 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4661 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 .change_chanctx op: walk all vifs bound to this context and, if
 * radar detection was just enabled for an AP role on a usable DFS channel,
 * start CAC (channel availability check) in the hardware.
 */
4664 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4665 struct ieee80211_chanctx_conf *ctx,
4668 struct wl1271 *wl = hw->priv;
4669 struct wl12xx_vif *wlvif;
4671 int channel = ieee80211_frequency_to_channel(
4672 ctx->def.chan->center_freq);
4674 wl1271_debug(DEBUG_MAC80211,
4675 "mac80211 change chanctx %d (type %d) changed 0x%x",
4676 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4678 mutex_lock(&wl->mutex);
4680 ret = wl1271_ps_elp_wakeup(wl);
4684 wl12xx_for_each_wlvif(wl, wlvif) {
4685 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs that are bound to a different channel context */
4688 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4694 /* start radar if needed */
4695 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4696 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4697 ctx->radar_enabled && !wlvif->radar_enabled &&
4698 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4699 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4700 wlcore_hw_set_cac(wl, wlvif, true);
4701 wlvif->radar_enabled = true;
4705 wl1271_ps_elp_sleep(wl);
4707 mutex_unlock(&wl->mutex);
/*
 * mac80211 .assign_vif_chanctx op: bind a vif to a channel context. Records
 * band/channel/channel-type on the vif, resets the default rates for the
 * new band, and starts radar CAC if the context requires it on a usable
 * DFS channel.
 */
4710 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4711 struct ieee80211_vif *vif,
4712 struct ieee80211_chanctx_conf *ctx)
4714 struct wl1271 *wl = hw->priv;
4715 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4716 int channel = ieee80211_frequency_to_channel(
4717 ctx->def.chan->center_freq);
4720 wl1271_debug(DEBUG_MAC80211,
4721 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4722 wlvif->role_id, channel,
4723 cfg80211_get_chandef_type(&ctx->def),
4724 ctx->radar_enabled, ctx->def.chan->dfs_state);
4726 mutex_lock(&wl->mutex);
4728 if (unlikely(wl->state != WLCORE_STATE_ON))
4731 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4734 ret = wl1271_ps_elp_wakeup(wl);
4738 wlvif->band = ctx->def.chan->band;
4739 wlvif->channel = channel;
4740 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4742 /* update default rates according to the band */
4743 wl1271_set_band_rate(wl, wlvif);
4745 if (ctx->radar_enabled &&
4746 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4747 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4748 wlcore_hw_set_cac(wl, wlvif, true);
4749 wlvif->radar_enabled = true;
4752 wl1271_ps_elp_sleep(wl);
4754 mutex_unlock(&wl->mutex);
/*
 * mac80211 .unassign_vif_chanctx op: unbind a vif from its channel context.
 * Flushes pending TX before locking and stops radar CAC if it was running.
 */
4759 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4760 struct ieee80211_vif *vif,
4761 struct ieee80211_chanctx_conf *ctx)
4763 struct wl1271 *wl = hw->priv;
4764 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4767 wl1271_debug(DEBUG_MAC80211,
4768 "mac80211 unassign chanctx (role %d) %d (type %d)",
4770 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4771 cfg80211_get_chandef_type(&ctx->def));
/* flush outside wl->mutex, before the context goes away */
4773 wl1271_tx_flush(wl);
4775 mutex_lock(&wl->mutex);
4777 if (unlikely(wl->state != WLCORE_STATE_ON))
4780 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4783 ret = wl1271_ps_elp_wakeup(wl);
4787 if (wlvif->radar_enabled) {
4788 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4789 wlcore_hw_set_cac(wl, wlvif, false);
4790 wlvif->radar_enabled = false;
4793 wl1271_ps_elp_sleep(wl);
4795 mutex_unlock(&wl->mutex);
/*
 * Move a single (AP-only) vif to a new channel context: stop any running
 * radar CAC, update band/channel/type from the new context, then restart
 * CAC if the new context has radar enabled. Caller holds wl->mutex with
 * the chip awake.
 */
4798 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4799 struct wl12xx_vif *wlvif,
4800 struct ieee80211_chanctx_conf *new_ctx)
4802 int channel = ieee80211_frequency_to_channel(
4803 new_ctx->def.chan->center_freq);
4805 wl1271_debug(DEBUG_MAC80211,
4806 "switch vif (role %d) %d -> %d chan_type: %d",
4807 wlvif->role_id, wlvif->channel, channel,
4808 cfg80211_get_chandef_type(&new_ctx->def));
/* only AP roles support in-place context switching here */
4810 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4813 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4815 if (wlvif->radar_enabled) {
4816 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4817 wlcore_hw_set_cac(wl, wlvif, false);
4818 wlvif->radar_enabled = false;
4821 wlvif->band = new_ctx->def.chan->band;
4822 wlvif->channel = channel;
4823 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4825 /* start radar if needed */
4826 if (new_ctx->radar_enabled) {
4827 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4828 wlcore_hw_set_cac(wl, wlvif, true);
4829 wlvif->radar_enabled = true;
/*
 * mac80211 .switch_vif_chanctx op: apply __wlcore_switch_vif_chan() to each
 * vif in the switch request under wl->mutex with the chip awake.
 */
4836 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4837 struct ieee80211_vif_chanctx_switch *vifs,
4839 enum ieee80211_chanctx_switch_mode mode)
4841 struct wl1271 *wl = hw->priv;
4844 wl1271_debug(DEBUG_MAC80211,
4845 "mac80211 switch chanctx n_vifs %d mode %d",
4848 mutex_lock(&wl->mutex);
4850 ret = wl1271_ps_elp_wakeup(wl);
4854 for (i = 0; i < n_vifs; i++) {
4855 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4857 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4862 wl1271_ps_elp_sleep(wl);
4864 mutex_unlock(&wl->mutex);
/*
 * mac80211 .conf_tx op: program one EDCA queue's AC parameters (cw_min,
 * cw_max, aifs, txop) and TID configuration into the FW. P2P management
 * interfaces are skipped.
 */
4869 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4870 struct ieee80211_vif *vif, u16 queue,
4871 const struct ieee80211_tx_queue_params *params)
4873 struct wl1271 *wl = hw->priv;
4874 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4878 if (wlcore_is_p2p_mgmt(wlvif))
4881 mutex_lock(&wl->mutex);
4883 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
/* U-APSD trigger-enabled queues use a different PS scheme */
4886 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4888 ps_scheme = CONF_PS_SCHEME_LEGACY;
4890 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4893 ret = wl1271_ps_elp_wakeup(wl);
4898 * the txop is confed in units of 32us by the mac80211,
/* hence the << 5 conversion below */
4901 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4902 params->cw_min, params->cw_max,
4903 params->aifs, params->txop << 5);
4907 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4908 CONF_CHANNEL_TYPE_EDCF,
4909 wl1271_tx_get_queue(queue),
4910 ps_scheme, CONF_ACK_POLICY_LEGACY,
4914 wl1271_ps_elp_sleep(wl);
4917 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_tsf op: read the current TSF from the FW. Returns
 * ULLONG_MAX if the device is down or the read fails.
 */
4922 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4923 struct ieee80211_vif *vif)
4926 struct wl1271 *wl = hw->priv;
4927 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4928 u64 mactime = ULLONG_MAX;
4931 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4933 mutex_lock(&wl->mutex);
4935 if (unlikely(wl->state != WLCORE_STATE_ON))
4938 ret = wl1271_ps_elp_wakeup(wl);
4942 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4947 wl1271_ps_elp_sleep(wl);
4950 mutex_unlock(&wl->mutex);
/*
 * mac80211 .get_survey op: report only the current operating channel
 * (no per-channel statistics are collected by this driver).
 */
4954 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4955 struct survey_info *survey)
4957 struct ieee80211_conf *conf = &hw->conf;
4962 survey->channel = conf->chandef.chan;
/*
 * Allocate an HLID (host link ID) for a new AP-mode station, restore its
 * saved security sequence counter, and account it in the active-STA map.
 * Fails when the per-AP station limit or the link table is exhausted.
 */
4967 static int wl1271_allocate_sta(struct wl1271 *wl,
4968 struct wl12xx_vif *wlvif,
4969 struct ieee80211_sta *sta)
4971 struct wl1271_station *wl_sta;
4975 if (wl->active_sta_count >= wl->max_ap_stations) {
4976 wl1271_warning("could not allocate HLID - too much stations");
4980 wl_sta = (struct wl1271_station *)sta->drv_priv;
4981 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4983 wl1271_warning("could not allocate HLID - too many links");
4987 /* use the previous security seq, if this is a recovery/resume */
4988 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4990 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4991 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4992 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the hlid/PS maps, save
 * its last packet counter for a possible recovery/resume, free the link and
 * rearm the TX watchdog once no stations remain.
 */
4996 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4998 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5001 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5002 __clear_bit(hlid, &wl->ap_ps_map);
5003 __clear_bit(hlid, &wl->ap_fw_ps_map);
5006 * save the last used PN in the private part of iee80211_sta,
5007 * in case of recovery/suspend
5009 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5011 wl12xx_free_link(wl, wlvif, &hlid);
5012 wl->active_sta_count--;
5015 * rearm the tx watchdog when the last STA is freed - give the FW a
5016 * chance to return STA-buffered packets before complaining.
5018 if (wl->active_sta_count == 0)
5019 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add a station in AP mode: allocate an HLID, then register the peer with
 * the FW. The HLID is freed again if the add_peer command fails.
 */
5022 static int wl12xx_sta_add(struct wl1271 *wl,
5023 struct wl12xx_vif *wlvif,
5024 struct ieee80211_sta *sta)
5026 struct wl1271_station *wl_sta;
5030 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5032 ret = wl1271_allocate_sta(wl, wlvif, sta);
5036 wl_sta = (struct wl1271_station *)sta->drv_priv;
5037 hlid = wl_sta->hlid;
5039 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* roll back the HLID allocation on FW failure */
5041 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode station: tell the FW to drop the peer and then release
 * its HLID. WARNs if the station's HLID is not actually allocated.
 */
5046 static int wl12xx_sta_remove(struct wl1271 *wl,
5047 struct wl12xx_vif *wlvif,
5048 struct ieee80211_sta *sta)
5050 struct wl1271_station *wl_sta;
5053 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5055 wl_sta = (struct wl1271_station *)sta->drv_priv;
5057 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5060 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5064 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel on this vif's role, but only if no other role
 * is currently ROCing (the HW supports a single ROC at a time).
 */
5068 static void wlcore_roc_if_possible(struct wl1271 *wl,
5069 struct wl12xx_vif *wlvif)
5071 if (find_first_bit(wl->roc_map,
5072 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5075 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5078 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5082 * when wl_sta is NULL, we treat this call as if coming from a
5083 * pending auth reply.
5084 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection (between auth and authorization)
 * on an AP vif. Entering the in-connection state starts a ROC (so the AP
 * stays on-channel to complete the handshake); leaving it releases the ROC
 * once no station or pending auth reply remains.
 */
5087 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5088 struct wl1271_station *wl_sta, bool in_conn)
5091 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-connection entity on this vif: grab the channel */
5094 if (!wlvif->ap_pending_auth_reply &&
5095 !wlvif->inconn_count)
5096 wlcore_roc_if_possible(wl, wlvif);
5099 wl_sta->in_connection = true;
5100 wlvif->inconn_count++;
5102 wlvif->ap_pending_auth_reply = true;
5105 if (wl_sta && !wl_sta->in_connection)
5108 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5111 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5115 wl_sta->in_connection = false;
5116 wlvif->inconn_count--;
5118 wlvif->ap_pending_auth_reply = false;
/* nothing in-connection any more: release the ROC if we hold one */
5121 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5122 test_bit(wlvif->role_id, wl->roc_map))
5123 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the mac80211 sta_state machine: translate station state
 * transitions into FW operations for both AP mode (add/remove/authorize
 * peer, in-connection ROC tracking) and STA mode (authorization, packet
 * counter save/restore across suspend, ROC handling around auth).
 * Called under wl->mutex with the chip awake.
 */
5127 static int wl12xx_update_sta_state(struct wl1271 *wl,
5128 struct wl12xx_vif *wlvif,
5129 struct ieee80211_sta *sta,
5130 enum ieee80211_sta_state old_state,
5131 enum ieee80211_sta_state new_state)
5133 struct wl1271_station *wl_sta;
5134 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5135 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5138 wl_sta = (struct wl1271_station *)sta->drv_priv;
5140 /* Add station (AP mode) */
5142 old_state == IEEE80211_STA_NOTEXIST &&
5143 new_state == IEEE80211_STA_NONE) {
5144 ret = wl12xx_sta_add(wl, wlvif, sta);
5148 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5151 /* Remove station (AP mode) */
5153 old_state == IEEE80211_STA_NONE &&
5154 new_state == IEEE80211_STA_NOTEXIST) {
5156 wl12xx_sta_remove(wl, wlvif, sta);
5158 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5161 /* Authorize station (AP mode) */
5163 new_state == IEEE80211_STA_AUTHORIZED) {
5164 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5168 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
/* connection handshake complete: drop in-connection tracking */
5173 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5176 /* Authorize station */
5178 new_state == IEEE80211_STA_AUTHORIZED) {
5179 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5180 ret = wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA mode): forget the sent-state as well */
5186 old_state == IEEE80211_STA_AUTHORIZED &&
5187 new_state == IEEE80211_STA_ASSOC) {
5188 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5189 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5192 /* save seq number on disassoc (suspend) */
5194 old_state == IEEE80211_STA_ASSOC &&
5195 new_state == IEEE80211_STA_AUTH) {
5196 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5197 wlvif->total_freed_pkts = 0;
5200 /* restore seq number on assoc (resume) */
5202 old_state == IEEE80211_STA_AUTH &&
5203 new_state == IEEE80211_STA_ASSOC) {
5204 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5207 /* clear ROCs on failure or authorization */
5209 (new_state == IEEE80211_STA_AUTHORIZED ||
5210 new_state == IEEE80211_STA_NOTEXIST)) {
5211 if (test_bit(wlvif->role_id, wl->roc_map))
5212 wl12xx_croc(wl, wlvif->role_id);
/* STA mode: ROC on the target channel while authenticating */
5216 old_state == IEEE80211_STA_NOTEXIST &&
5217 new_state == IEEE80211_STA_NONE) {
5218 if (find_first_bit(wl->roc_map,
5219 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5220 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5221 wl12xx_roc(wl, wlvif, wlvif->role_id,
5222 wlvif->band, wlvif->channel);
/*
 * mac80211 .sta_state op: lock, wake the chip, and delegate the state
 * transition to wl12xx_update_sta_state(). Errors on downward transitions
 * are not reported back (mac80211 cannot handle failures there).
 */
5228 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5229 struct ieee80211_vif *vif,
5230 struct ieee80211_sta *sta,
5231 enum ieee80211_sta_state old_state,
5232 enum ieee80211_sta_state new_state)
5234 struct wl1271 *wl = hw->priv;
5235 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5238 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5239 sta->aid, old_state, new_state);
5241 mutex_lock(&wl->mutex);
5243 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5248 ret = wl1271_ps_elp_wakeup(wl);
5252 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5254 wl1271_ps_elp_sleep(wl);
5256 mutex_unlock(&wl->mutex);
/* swallow errors when moving to a lower state */
5257 if (new_state < old_state)
/*
 * mac80211 .ampdu_action op: manage RX block-ack sessions in the FW.
 * RX_START/RX_STOP program/tear down an RX BA session (bounded by
 * ba_rx_session_count_max and a per-link TID bitmap); all TX AMPDU actions
 * are no-ops because the FW manages the TX (initiator) side on its own.
 */
5262 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5263 struct ieee80211_vif *vif,
5264 enum ieee80211_ampdu_mlme_action action,
5265 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5268 struct wl1271 *wl = hw->priv;
5269 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5271 u8 hlid, *ba_bitmap;
5273 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5276 /* sanity check - the fields in FW are only 8bits wide */
5277 if (WARN_ON(tid > 0xFF))
5280 mutex_lock(&wl->mutex);
5282 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the FW link (hlid) for this peer */
5287 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5288 hlid = wlvif->sta.hlid;
5289 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5290 struct wl1271_station *wl_sta;
5292 wl_sta = (struct wl1271_station *)sta->drv_priv;
5293 hlid = wl_sta->hlid;
5299 ba_bitmap = &wl->links[hlid].ba_bitmap;
5301 ret = wl1271_ps_elp_wakeup(wl);
5305 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5309 case IEEE80211_AMPDU_RX_START:
5310 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5315 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5317 wl1271_error("exceeded max RX BA sessions");
5321 if (*ba_bitmap & BIT(tid)) {
5323 wl1271_error("cannot enable RX BA session on active "
5328 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5331 *ba_bitmap |= BIT(tid);
5332 wl->ba_rx_session_count++;
5336 case IEEE80211_AMPDU_RX_STOP:
5337 if (!(*ba_bitmap & BIT(tid))) {
5339 * this happens on reconfig - so only output a debug
5340 * message for now, and don't fail the function.
5342 wl1271_debug(DEBUG_MAC80211,
5343 "no active RX BA session on tid: %d",
5349 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5352 *ba_bitmap &= ~BIT(tid);
5353 wl->ba_rx_session_count--;
5358 * BA initiator (TX) session management is handled by the FW
5359 * independently; fall through on purpose for all TX AMPDU actions.
5361 case IEEE80211_AMPDU_TX_START:
5362 case IEEE80211_AMPDU_TX_STOP_CONT:
5363 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5364 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5365 case IEEE80211_AMPDU_TX_OPERATIONAL:
5370 wl1271_error("Incorrect ampdu action id=%x\n", action);
5374 wl1271_ps_elp_sleep(wl);
5377 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_bitrate_mask op: store the per-band legacy rate masks on
 * the vif and, for a not-yet-associated STA, immediately reprogram the FW
 * rate policy so the new mask takes effect for the connection attempt.
 */
5382 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5383 struct ieee80211_vif *vif,
5384 const struct cfg80211_bitrate_mask *mask)
5386 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5387 struct wl1271 *wl = hw->priv;
5390 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5391 mask->control[NL80211_BAND_2GHZ].legacy,
5392 mask->control[NL80211_BAND_5GHZ].legacy);
5394 mutex_lock(&wl->mutex);
/* remember the masks even while the chip is off */
5396 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5397 wlvif->bitrate_masks[i] =
5398 wl1271_tx_enabled_rates_get(wl,
5399 mask->control[i].legacy,
5402 if (unlikely(wl->state != WLCORE_STATE_ON))
5405 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5406 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5408 ret = wl1271_ps_elp_wakeup(wl);
5412 wl1271_set_band_rate(wl, wlvif);
5414 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5415 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5417 wl1271_ps_elp_sleep(wl);
5420 mutex_unlock(&wl->mutex);
/*
 * mac80211 .channel_switch op (STA side): hand the CSA request to the
 * chip-specific channel_switch op and arm a watchdog work that reports
 * failure if the switch has not completed ~5s after the switch time.
 * If the device is already off, the switch is completed as failed.
 */
5425 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5426 struct ieee80211_vif *vif,
5427 struct ieee80211_channel_switch *ch_switch)
5429 struct wl1271 *wl = hw->priv;
5430 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5433 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5435 wl1271_tx_flush(wl);
5437 mutex_lock(&wl->mutex);
5439 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5440 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5441 ieee80211_chswitch_done(vif, false);
5443 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5447 ret = wl1271_ps_elp_wakeup(wl);
5451 /* TODO: change mac80211 to pass vif as param */
5453 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5454 unsigned long delay_usec;
5456 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5460 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5462 /* indicate failure 5 seconds after channel switch time */
5463 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5465 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5466 usecs_to_jiffies(delay_usec) +
5467 msecs_to_jiffies(5000));
5471 wl1271_ps_elp_sleep(wl);
5474 mutex_unlock(&wl->mutex);
/*
 * Fetch the vif's current beacon from mac80211 and locate an IE (by EID)
 * in its variable part. Returns a pointer into the beacon skb's data, or
 * NULL if no beacon (or no such IE) exists.
 * NOTE(review): the visible code never frees the skb returned by
 * ieee80211_beacon_get(), and the returned pointer aliases its data —
 * verify ownership/lifetime against the full source.
 */
5477 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5478 struct wl12xx_vif *wlvif,
5481 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5482 struct sk_buff *beacon =
5483 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5488 return cfg80211_find_ie(eid,
5489 beacon->data + ieoffset,
5490 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the Channel Switch Announcement IE of
 * the vif's own beacon. The IE payload starts 2 bytes in (past EID+len).
 */
5493 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5497 const struct ieee80211_channel_sw_ie *ie_csa;
5499 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5503 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5504 *csa_count = ie_csa->count;
/*
 * mac80211 .channel_switch_beacon op (AP side): build an
 * ieee80211_channel_switch request from the target chandef and the CSA
 * counter found in our own beacon, then ask the chip-specific op to
 * perform the switch.
 */
5509 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5510 struct ieee80211_vif *vif,
5511 struct cfg80211_chan_def *chandef)
5513 struct wl1271 *wl = hw->priv;
5514 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5515 struct ieee80211_channel_switch ch_switch = {
5517 .chandef = *chandef,
5521 wl1271_debug(DEBUG_MAC80211,
5522 "mac80211 channel switch beacon (role %d)",
5525 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5527 wl1271_error("error getting beacon (for CSA counter)");
5531 mutex_lock(&wl->mutex);
5533 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5538 ret = wl1271_ps_elp_wakeup(wl);
5542 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5546 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5549 wl1271_ps_elp_sleep(wl);
5551 mutex_unlock(&wl->mutex);
/*
 * mac80211 .flush op: flush all pending TX; the queues/drop arguments are
 * not honored here — everything is flushed unconditionally.
 */
5554 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5555 u32 queues, bool drop)
5557 struct wl1271 *wl = hw->priv;
5559 wl1271_tx_flush(wl);
/*
 * mac80211 .remain_on_channel op: start the device role on the requested
 * channel and schedule roc_complete_work after the requested duration.
 * Only a single ROC is supported at a time (-EBUSY otherwise).
 */
5562 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5563 struct ieee80211_vif *vif,
5564 struct ieee80211_channel *chan,
5566 enum ieee80211_roc_type type)
5568 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5569 struct wl1271 *wl = hw->priv;
5570 int channel, ret = 0;
5572 channel = ieee80211_frequency_to_channel(chan->center_freq);
5574 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5575 channel, wlvif->role_id);
5577 mutex_lock(&wl->mutex);
5579 if (unlikely(wl->state != WLCORE_STATE_ON))
5582 /* return EBUSY if we can't ROC right now */
5583 if (WARN_ON(wl->roc_vif ||
5584 find_first_bit(wl->roc_map,
5585 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5590 ret = wl1271_ps_elp_wakeup(wl);
5594 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* expire the ROC from a delayed work after 'duration' ms */
5599 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5600 msecs_to_jiffies(duration));
5602 wl1271_ps_elp_sleep(wl);
5604 mutex_unlock(&wl->mutex);
/*
 * Finish an active ROC: stop the device role of the ROCing vif. Must be
 * called with wl->mutex held and the chip awake. Safe to call when no ROC
 * is pending (returns early).
 */
5608 static int __wlcore_roc_completed(struct wl1271 *wl)
5610 struct wl12xx_vif *wlvif;
5613 /* already completed */
5614 if (unlikely(!wl->roc_vif))
5617 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5619 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5622 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): takes wl->mutex, checks
 * the device is on, wakes the chip and completes the ROC.
 */
5631 static int wlcore_roc_completed(struct wl1271 *wl)
5635 wl1271_debug(DEBUG_MAC80211, "roc complete");
5637 mutex_lock(&wl->mutex);
5639 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5644 ret = wl1271_ps_elp_wakeup(wl);
5648 ret = __wlcore_roc_completed(wl);
5650 wl1271_ps_elp_sleep(wl);
5652 mutex_unlock(&wl->mutex);
/*
 * Delayed work scheduled when a ROC is started: complete the ROC and
 * notify mac80211 that the remain-on-channel period has expired.
 */
5657 static void wlcore_roc_complete_work(struct work_struct *work)
5659 struct delayed_work *dwork;
5663 dwork = container_of(work, struct delayed_work, work);
5664 wl = container_of(dwork, struct wl1271, roc_complete_work);
5666 ret = wlcore_roc_completed(wl);
5668 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 .cancel_remain_on_channel op: flush TX, cancel the pending
 * roc_complete_work and complete the ROC synchronously.
 */
5671 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5673 struct wl1271 *wl = hw->priv;
5675 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5678 wl1271_tx_flush(wl);
5681 * we can't just flush_work here, because it might deadlock
5682 * (as we might get called from the same workqueue)
5684 cancel_delayed_work_sync(&wl->roc_complete_work);
5685 wlcore_roc_completed(wl);
/*
 * mac80211 .sta_rc_update op: react to peer bandwidth changes only. This
 * callback runs in atomic context, so the new bandwidth is stashed on the
 * vif and the actual FW update is deferred to rc_update_work.
 */
5690 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5691 struct ieee80211_vif *vif,
5692 struct ieee80211_sta *sta,
5695 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5697 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5699 if (!(changed & IEEE80211_RC_BW_CHANGED))
5702 /* this callback is atomic, so schedule a new work */
5703 wlvif->rc_update_bw = sta->bandwidth;
5704 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 .sta_statistics op: fill in the signal strength by querying the
 * FW for the average RSSI. On any failure the signal field is simply left
 * unfilled.
 */
5707 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5708 struct ieee80211_vif *vif,
5709 struct ieee80211_sta *sta,
5710 struct station_info *sinfo)
5712 struct wl1271 *wl = hw->priv;
5713 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5717 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5719 mutex_lock(&wl->mutex);
5721 if (unlikely(wl->state != WLCORE_STATE_ON))
5724 ret = wl1271_ps_elp_wakeup(wl);
5728 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5732 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5733 sinfo->signal = rssi_dbm;
5736 wl1271_ps_elp_sleep(wl);
5739 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx_frames_pending op: report whether any frames are still
 * queued in the driver or in flight in the FW.
 */
5742 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5744 struct wl1271 *wl = hw->priv;
5747 mutex_lock(&wl->mutex);
5749 if (unlikely(wl->state != WLCORE_STATE_ON))
5752 /* packets are considered pending if in the TX queue or the FW */
5753 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5755 mutex_unlock(&wl->mutex);
5760 /* can't be const, mac80211 writes to this */
/* Legacy 2.4 GHz rate table: 11b (1-11 Mbps) + OFDM (6-54 Mbps) rates
 * mapped to the FW's CONF_HW_BIT_RATE_* bit values. */
5761 static struct ieee80211_rate wl1271_rates[] = {
5763 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5764 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5766 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5767 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5768 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5770 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5771 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5772 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5774 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5775 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5776 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5778 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5779 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5781 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5782 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5784 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5785 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5787 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5788 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5790 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5791 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5793 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5794 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5796 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5797 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5799 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5800 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5803 /* can't be const, mac80211 writes to this */
5804 static struct ieee80211_channel wl1271_channels[] = {
5805 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5806 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5807 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5808 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5809 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5810 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5811 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5812 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5813 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5814 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5815 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5816 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5817 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5818 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5821 /* can't be const, mac80211 writes to this */
5822 static struct ieee80211_supported_band wl1271_band_2ghz = {
5823 .channels = wl1271_channels,
5824 .n_channels = ARRAY_SIZE(wl1271_channels),
5825 .bitrates = wl1271_rates,
5826 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5829 /* 5 GHz data rates for WL1273 */
5830 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5832 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5833 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5835 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5836 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5838 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5839 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5841 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5842 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5844 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5845 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5847 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5848 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5850 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5851 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5853 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5854 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5857 /* 5 GHz band channels for WL1273 */
5858 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5859 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5861 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5862 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5863 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5864 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5865 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5866 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5867 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5868 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5869 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5870 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5871 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5872 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5873 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5874 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5875 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5876 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5877 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5878 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5879 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5880 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5881 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5882 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5883 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5884 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5885 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5886 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5887 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5888 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5889 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5892 static struct ieee80211_supported_band wl1271_band_5ghz = {
5893 .channels = wl1271_channels_5ghz,
5894 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5895 .bitrates = wl1271_rates_5ghz,
5896 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5899 static const struct ieee80211_ops wl1271_ops = {
5900 .start = wl1271_op_start,
5901 .stop = wlcore_op_stop,
5902 .add_interface = wl1271_op_add_interface,
5903 .remove_interface = wl1271_op_remove_interface,
5904 .change_interface = wl12xx_op_change_interface,
5906 .suspend = wl1271_op_suspend,
5907 .resume = wl1271_op_resume,
5909 .config = wl1271_op_config,
5910 .prepare_multicast = wl1271_op_prepare_multicast,
5911 .configure_filter = wl1271_op_configure_filter,
5913 .set_key = wlcore_op_set_key,
5914 .hw_scan = wl1271_op_hw_scan,
5915 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5916 .sched_scan_start = wl1271_op_sched_scan_start,
5917 .sched_scan_stop = wl1271_op_sched_scan_stop,
5918 .bss_info_changed = wl1271_op_bss_info_changed,
5919 .set_frag_threshold = wl1271_op_set_frag_threshold,
5920 .set_rts_threshold = wl1271_op_set_rts_threshold,
5921 .conf_tx = wl1271_op_conf_tx,
5922 .get_tsf = wl1271_op_get_tsf,
5923 .get_survey = wl1271_op_get_survey,
5924 .sta_state = wl12xx_op_sta_state,
5925 .ampdu_action = wl1271_op_ampdu_action,
5926 .tx_frames_pending = wl1271_tx_frames_pending,
5927 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5928 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5929 .channel_switch = wl12xx_op_channel_switch,
5930 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5931 .flush = wlcore_op_flush,
5932 .remain_on_channel = wlcore_op_remain_on_channel,
5933 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5934 .add_chanctx = wlcore_op_add_chanctx,
5935 .remove_chanctx = wlcore_op_remove_chanctx,
5936 .change_chanctx = wlcore_op_change_chanctx,
5937 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5938 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5939 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5940 .sta_rc_update = wlcore_op_sta_rc_update,
5941 .sta_statistics = wlcore_op_sta_statistics,
5942 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5946 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5952 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5953 wl1271_error("Illegal RX rate from HW: %d", rate);
5957 idx = wl->band_rate_to_idx[band][rate];
5958 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5959 wl1271_error("Unsupported RX rate from HW: %d", rate);
5966 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5970 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5973 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5974 wl1271_warning("NIC part of the MAC address wraps around!");
5976 for (i = 0; i < wl->num_mac_addr; i++) {
5977 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5978 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5979 wl->addresses[i].addr[2] = (u8) oui;
5980 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5981 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5982 wl->addresses[i].addr[5] = (u8) nic;
5986 /* we may be one address short at the most */
5987 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5990 * turn on the LAA bit in the first address and use it as
5993 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5994 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5995 memcpy(&wl->addresses[idx], &wl->addresses[0],
5996 sizeof(wl->addresses[0]));
5998 wl->addresses[idx].addr[0] |= BIT(1);
6001 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6002 wl->hw->wiphy->addresses = wl->addresses;
6005 static int wl12xx_get_hw_info(struct wl1271 *wl)
6009 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6013 wl->fuse_oui_addr = 0;
6014 wl->fuse_nic_addr = 0;
6016 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6020 if (wl->ops->get_mac)
6021 ret = wl->ops->get_mac(wl);
6027 static int wl1271_register_hw(struct wl1271 *wl)
6030 u32 oui_addr = 0, nic_addr = 0;
6032 if (wl->mac80211_registered)
6035 if (wl->nvs_len >= 12) {
6036 /* NOTE: The wl->nvs->nvs element must be first, in
6037 * order to simplify the casting, we assume it is at
6038 * the beginning of the wl->nvs structure.
6040 u8 *nvs_ptr = (u8 *)wl->nvs;
6043 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6045 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6048 /* if the MAC address is zeroed in the NVS derive from fuse */
6049 if (oui_addr == 0 && nic_addr == 0) {
6050 oui_addr = wl->fuse_oui_addr;
6051 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6052 nic_addr = wl->fuse_nic_addr + 1;
6055 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6057 ret = ieee80211_register_hw(wl->hw);
6059 wl1271_error("unable to register mac80211 hw: %d", ret);
6063 wl->mac80211_registered = true;
6065 wl1271_debugfs_init(wl);
6067 wl1271_notice("loaded");
6073 static void wl1271_unregister_hw(struct wl1271 *wl)
6076 wl1271_plt_stop(wl);
6078 ieee80211_unregister_hw(wl->hw);
6079 wl->mac80211_registered = false;
6083 static int wl1271_init_ieee80211(struct wl1271 *wl)
6086 static const u32 cipher_suites[] = {
6087 WLAN_CIPHER_SUITE_WEP40,
6088 WLAN_CIPHER_SUITE_WEP104,
6089 WLAN_CIPHER_SUITE_TKIP,
6090 WLAN_CIPHER_SUITE_CCMP,
6091 WL1271_CIPHER_SUITE_GEM,
6094 /* The tx descriptor buffer */
6095 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6097 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6098 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6101 /* FIXME: find a proper value */
6102 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6104 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6105 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6106 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6107 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6108 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6109 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6110 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6111 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6112 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6113 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6114 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6115 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6116 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6118 wl->hw->wiphy->cipher_suites = cipher_suites;
6119 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6121 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6122 BIT(NL80211_IFTYPE_AP) |
6123 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6124 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6125 BIT(NL80211_IFTYPE_P2P_GO);
6126 wl->hw->wiphy->max_scan_ssids = 1;
6127 wl->hw->wiphy->max_sched_scan_ssids = 16;
6128 wl->hw->wiphy->max_match_sets = 16;
6130 * Maximum length of elements in scanning probe request templates
6131 * should be the maximum length possible for a template, without
6132 * the IEEE80211 header of the template
6134 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6135 sizeof(struct ieee80211_header);
6137 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6138 sizeof(struct ieee80211_header);
6140 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6142 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6143 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6144 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6145 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6147 /* make sure all our channels fit in the scanned_ch bitmask */
6148 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6149 ARRAY_SIZE(wl1271_channels_5ghz) >
6150 WL1271_MAX_CHANNELS);
6152 * clear channel flags from the previous usage
6153 * and restore max_power & max_antenna_gain values.
6155 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6156 wl1271_band_2ghz.channels[i].flags = 0;
6157 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6158 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6161 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6162 wl1271_band_5ghz.channels[i].flags = 0;
6163 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6164 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6168 * We keep local copies of the band structs because we need to
6169 * modify them on a per-device basis.
6171 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6172 sizeof(wl1271_band_2ghz));
6173 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6174 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6175 sizeof(*wl->ht_cap));
6176 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6177 sizeof(wl1271_band_5ghz));
6178 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6179 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6180 sizeof(*wl->ht_cap));
6182 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6183 &wl->bands[IEEE80211_BAND_2GHZ];
6184 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6185 &wl->bands[IEEE80211_BAND_5GHZ];
6188 * allow 4 queues per mac address we support +
6189 * 1 cab queue per mac + one global offchannel Tx queue
6191 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6193 /* the last queue is the offchannel queue */
6194 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6195 wl->hw->max_rates = 1;
6197 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6199 /* the FW answers probe-requests in AP-mode */
6200 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6201 wl->hw->wiphy->probe_resp_offload =
6202 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6203 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6204 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6206 /* allowed interface combinations */
6207 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6208 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6210 /* register vendor commands */
6211 wlcore_set_vendor_commands(wl->hw->wiphy);
6213 SET_IEEE80211_DEV(wl->hw, wl->dev);
6215 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6216 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6218 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6223 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6226 struct ieee80211_hw *hw;
6231 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6233 wl1271_error("could not alloc ieee80211_hw");
6239 memset(wl, 0, sizeof(*wl));
6241 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6243 wl1271_error("could not alloc wl priv");
6245 goto err_priv_alloc;
6248 INIT_LIST_HEAD(&wl->wlvif_list);
6253 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6254 * we don't allocate any additional resource here, so that's fine.
6256 for (i = 0; i < NUM_TX_QUEUES; i++)
6257 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6258 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6260 skb_queue_head_init(&wl->deferred_rx_queue);
6261 skb_queue_head_init(&wl->deferred_tx_queue);
6263 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6264 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6265 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6266 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6267 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6268 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6269 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6271 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6272 if (!wl->freezable_wq) {
6279 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6280 wl->band = IEEE80211_BAND_2GHZ;
6281 wl->channel_type = NL80211_CHAN_NO_HT;
6283 wl->sg_enabled = true;
6284 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6285 wl->recovery_count = 0;
6288 wl->ap_fw_ps_map = 0;
6290 wl->system_hlid = WL12XX_SYSTEM_HLID;
6291 wl->active_sta_count = 0;
6292 wl->active_link_count = 0;
6294 init_waitqueue_head(&wl->fwlog_waitq);
6296 /* The system link is always allocated */
6297 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6299 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6300 for (i = 0; i < wl->num_tx_desc; i++)
6301 wl->tx_frames[i] = NULL;
6303 spin_lock_init(&wl->wl_lock);
6305 wl->state = WLCORE_STATE_OFF;
6306 wl->fw_type = WL12XX_FW_TYPE_NONE;
6307 mutex_init(&wl->mutex);
6308 mutex_init(&wl->flush_mutex);
6309 init_completion(&wl->nvs_loading_complete);
6311 order = get_order(aggr_buf_size);
6312 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6313 if (!wl->aggr_buf) {
6317 wl->aggr_buf_size = aggr_buf_size;
6319 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6320 if (!wl->dummy_packet) {
6325 /* Allocate one page for the FW log */
6326 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6329 goto err_dummy_packet;
6332 wl->mbox_size = mbox_size;
6333 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6339 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6340 if (!wl->buffer_32) {
6351 free_page((unsigned long)wl->fwlog);
6354 dev_kfree_skb(wl->dummy_packet);
6357 free_pages((unsigned long)wl->aggr_buf, order);
6360 destroy_workqueue(wl->freezable_wq);
6363 wl1271_debugfs_exit(wl);
6367 ieee80211_free_hw(hw);
6371 return ERR_PTR(ret);
6373 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6375 int wlcore_free_hw(struct wl1271 *wl)
6377 /* Unblock any fwlog readers */
6378 mutex_lock(&wl->mutex);
6379 wl->fwlog_size = -1;
6380 wake_up_interruptible_all(&wl->fwlog_waitq);
6381 mutex_unlock(&wl->mutex);
6383 wlcore_sysfs_free(wl);
6385 kfree(wl->buffer_32);
6387 free_page((unsigned long)wl->fwlog);
6388 dev_kfree_skb(wl->dummy_packet);
6389 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6391 wl1271_debugfs_exit(wl);
6395 wl->fw_type = WL12XX_FW_TYPE_NONE;
6399 kfree(wl->raw_fw_status);
6400 kfree(wl->fw_status);
6401 kfree(wl->tx_res_if);
6402 destroy_workqueue(wl->freezable_wq);
6405 ieee80211_free_hw(wl->hw);
6409 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6412 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6413 .flags = WIPHY_WOWLAN_ANY,
6414 .n_patterns = WL1271_MAX_RX_FILTERS,
6415 .pattern_min_len = 1,
6416 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6420 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6422 return IRQ_WAKE_THREAD;
6425 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6427 struct wl1271 *wl = context;
6428 struct platform_device *pdev = wl->pdev;
6429 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6430 struct resource *res;
6433 irq_handler_t hardirq_fn = NULL;
6436 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6438 wl1271_error("Could not allocate nvs data");
6441 wl->nvs_len = fw->size;
6443 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6449 ret = wl->ops->setup(wl);
6453 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6455 /* adjust some runtime configuration parameters */
6456 wlcore_adjust_conf(wl);
6458 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6460 wl1271_error("Could not get IRQ resource");
6464 wl->irq = res->start;
6465 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6466 wl->if_ops = pdev_data->if_ops;
6468 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6469 hardirq_fn = wlcore_hardirq;
6471 wl->irq_flags |= IRQF_ONESHOT;
6473 ret = wl12xx_set_power_on(wl);
6477 ret = wl12xx_get_hw_info(wl);
6479 wl1271_error("couldn't get hw info");
6480 wl1271_power_off(wl);
6484 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6485 wl->irq_flags, pdev->name, wl);
6487 wl1271_error("interrupt configuration failed");
6488 wl1271_power_off(wl);
6493 ret = enable_irq_wake(wl->irq);
6495 wl->irq_wake_enabled = true;
6496 device_init_wakeup(wl->dev, 1);
6497 if (pdev_data->pwr_in_suspend)
6498 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6501 disable_irq(wl->irq);
6502 wl1271_power_off(wl);
6504 ret = wl->ops->identify_chip(wl);
6508 ret = wl1271_init_ieee80211(wl);
6512 ret = wl1271_register_hw(wl);
6516 ret = wlcore_sysfs_init(wl);
6520 wl->initialized = true;
6524 wl1271_unregister_hw(wl);
6527 free_irq(wl->irq, wl);
6533 release_firmware(fw);
6534 complete_all(&wl->nvs_loading_complete);
6537 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6541 if (!wl->ops || !wl->ptable)
6544 wl->dev = &pdev->dev;
6546 platform_set_drvdata(pdev, wl);
6548 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6549 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6552 wl1271_error("request_firmware_nowait failed: %d", ret);
6553 complete_all(&wl->nvs_loading_complete);
6558 EXPORT_SYMBOL_GPL(wlcore_probe);
6560 int wlcore_remove(struct platform_device *pdev)
6562 struct wl1271 *wl = platform_get_drvdata(pdev);
6564 wait_for_completion(&wl->nvs_loading_complete);
6565 if (!wl->initialized)
6568 if (wl->irq_wake_enabled) {
6569 device_init_wakeup(wl->dev, 0);
6570 disable_irq_wake(wl->irq);
6572 wl1271_unregister_hw(wl);
6573 free_irq(wl->irq, wl);
6578 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Runtime-tunable debug bitmask shared with the wlcore debug helpers */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* Firmware-logger mode selection (string, parsed elsewhere in the driver) */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* -1 (the declared default) means "use the per-chip configuration value" */
module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);