3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
86 wl1271_info("Association completed.");
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
115 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
120 /* we should hold wl->mutex */
121 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
126 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
134 * this function is being called when the rx_streaming interval
135 * has beed changed or rx_streaming should be disabled
137 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
140 int period = wl->conf.rx_streaming.interval;
142 /* don't reconfigure if rx_streaming is disabled */
143 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
146 /* reconfigure/disable according to new streaming_period */
148 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
149 (wl->conf.rx_streaming.always ||
150 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 ret = wl1271_set_rx_streaming(wl, wlvif, true);
153 ret = wl1271_set_rx_streaming(wl, wlvif, false);
154 /* don't cancel_work_sync since we might deadlock */
155 del_timer_sync(&wlvif->rx_streaming_timer);
161 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
164 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
165 rx_streaming_enable_work);
166 struct wl1271 *wl = wlvif->wl;
168 mutex_lock(&wl->mutex);
170 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
171 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
172 (!wl->conf.rx_streaming.always &&
173 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
176 if (!wl->conf.rx_streaming.interval)
179 ret = wl1271_ps_elp_wakeup(wl);
183 ret = wl1271_set_rx_streaming(wl, wlvif, true);
187 /* stop it after some time of inactivity */
188 mod_timer(&wlvif->rx_streaming_timer,
189 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
192 wl1271_ps_elp_sleep(wl);
194 mutex_unlock(&wl->mutex);
197 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
200 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
201 rx_streaming_disable_work);
202 struct wl1271 *wl = wlvif->wl;
204 mutex_lock(&wl->mutex);
206 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
209 ret = wl1271_ps_elp_wakeup(wl);
213 ret = wl1271_set_rx_streaming(wl, wlvif, false);
218 wl1271_ps_elp_sleep(wl);
220 mutex_unlock(&wl->mutex);
223 static void wl1271_rx_streaming_timer(unsigned long data)
225 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
226 struct wl1271 *wl = wlvif->wl;
227 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
230 /* wl->mutex must be taken */
231 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
233 /* if the watchdog is not armed, don't do anything */
234 if (wl->tx_allocated_blocks == 0)
237 cancel_delayed_work(&wl->tx_watchdog_work);
238 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
239 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
309 if (!strcmp(fwlog_param, "continuous")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 } else if (!strcmp(fwlog_param, "ondemand")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
313 } else if (!strcmp(fwlog_param, "dbgpins")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
315 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
316 } else if (!strcmp(fwlog_param, "disable")) {
317 wl->conf.fwlog.mem_blocks = 0;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
320 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
324 if (bug_on_recovery != -1)
325 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
327 if (no_recovery != -1)
328 wl->conf.recovery.no_recovery = (u8) no_recovery;
331 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
332 struct wl12xx_vif *wlvif,
337 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
340 * Wake up from high level PS if the STA is asleep with too little
341 * packets in FW or if the STA is awake.
343 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
344 wl12xx_ps_link_end(wl, wlvif, hlid);
347 * Start high-level PS if the STA is asleep with enough blocks in FW.
348 * Make an exception if this is the only connected link. In this
349 * case FW-memory congestion is less of a problem.
350 * Note that a single connected STA means 3 active links, since we must
351 * account for the global and broadcast AP links. The "fw_ps" check
352 * assures us the third link is a STA connected to the AP. Otherwise
353 * the FW would not set the PSM bit.
355 else if (wl->active_link_count > 3 && fw_ps &&
356 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
357 wl12xx_ps_link_start(wl, wlvif, hlid, true);
360 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
361 struct wl12xx_vif *wlvif,
362 struct wl_fw_status_2 *status)
367 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
368 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
369 wl1271_debug(DEBUG_PSM,
370 "link ps prev 0x%x cur 0x%x changed 0x%x",
371 wl->ap_fw_ps_map, cur_fw_ps_map,
372 wl->ap_fw_ps_map ^ cur_fw_ps_map);
374 wl->ap_fw_ps_map = cur_fw_ps_map;
377 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
378 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
379 wl->links[hlid].allocated_pkts);
382 static int wlcore_fw_status(struct wl1271 *wl,
383 struct wl_fw_status_1 *status_1,
384 struct wl_fw_status_2 *status_2)
386 struct wl12xx_vif *wlvif;
388 u32 old_tx_blk_count = wl->tx_blocks_available;
389 int avail, freed_blocks;
393 struct wl1271_link *lnk;
395 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
396 sizeof(*status_2) + wl->fw_status_priv_len;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
403 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 "drv_rx_counter = %d, tx_results_counter = %d)",
406 status_1->fw_rx_counter,
407 status_1->drv_rx_counter,
408 status_1->tx_results_counter);
410 for (i = 0; i < NUM_TX_QUEUES; i++) {
411 /* prevent wrap-around in freed-packets counter */
412 wl->tx_allocated_pkts[i] -=
413 (status_2->counters.tx_released_pkts[i] -
414 wl->tx_pkts_freed[i]) & 0xff;
416 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
420 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
424 /* prevent wrap-around in freed-packets counter */
425 diff = (status_2->counters.tx_lnk_free_pkts[i] -
426 lnk->prev_freed_pkts) & 0xff;
431 lnk->allocated_pkts -= diff;
432 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
434 /* accumulate the prev_freed_pkts counter */
435 lnk->total_freed_pkts += diff;
438 /* prevent wrap-around in total blocks counter */
439 if (likely(wl->tx_blocks_freed <=
440 le32_to_cpu(status_2->total_released_blks)))
441 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
444 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 le32_to_cpu(status_2->total_released_blks);
447 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
449 wl->tx_allocated_blocks -= freed_blocks;
452 * If the FW freed some blocks:
453 * If we still have allocated blocks - re-arm the timer, Tx is
454 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 if (wl->tx_allocated_blocks)
458 wl12xx_rearm_tx_watchdog_locked(wl);
460 cancel_delayed_work(&wl->tx_watchdog_work);
463 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
466 * The FW might change the total number of TX memblocks before
467 * we get a notification about blocks being released. Thus, the
468 * available blocks calculation might yield a temporary result
469 * which is lower than the actual available blocks. Keeping in
470 * mind that only blocks that were allocated can be moved from
471 * TX to RX, tx_blocks_available should never decrease here.
473 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 /* if more blocks are available now, tx work can be scheduled */
477 if (wl->tx_blocks_available > old_tx_blk_count)
478 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
480 /* for AP update num of allocated TX blocks per link and ps status */
481 wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 wl12xx_irq_update_links_status(wl, wlvif, status_2);
485 /* update the host-chipset time offset */
487 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
488 (s64)le32_to_cpu(status_2->fw_localtime);
490 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
495 static void wl1271_flush_deferred_work(struct wl1271 *wl)
499 /* Pass all received frames to the network stack */
500 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
501 ieee80211_rx_ni(wl->hw, skb);
503 /* Return sent skbs to the network stack */
504 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
505 ieee80211_tx_status_ni(wl->hw, skb);
508 static void wl1271_netstack_work(struct work_struct *work)
511 container_of(work, struct wl1271, netstack_work);
514 wl1271_flush_deferred_work(wl);
515 } while (skb_queue_len(&wl->deferred_rx_queue));
518 #define WL1271_IRQ_MAX_LOOPS 256
520 static int wlcore_irq_locked(struct wl1271 *wl)
524 int loopcount = WL1271_IRQ_MAX_LOOPS;
526 unsigned int defer_count;
530 * In case edge triggered interrupt must be used, we cannot iterate
531 * more than once without introducing race conditions with the hardirq.
533 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
536 wl1271_debug(DEBUG_IRQ, "IRQ work");
538 if (unlikely(wl->state != WLCORE_STATE_ON))
541 ret = wl1271_ps_elp_wakeup(wl);
545 while (!done && loopcount--) {
547 * In order to avoid a race with the hardirq, clear the flag
548 * before acknowledging the chip. Since the mutex is held,
549 * wl1271_ps_elp_wakeup cannot be called concurrently.
551 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
552 smp_mb__after_clear_bit();
554 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
558 wlcore_hw_tx_immediate_compl(wl);
560 intr = le32_to_cpu(wl->fw_status_1->intr);
561 intr &= WLCORE_ALL_INTR_MASK;
567 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
568 wl1271_error("HW watchdog interrupt received! starting recovery.");
569 wl->watchdog_recovery = true;
572 /* restarting the chip. ignore any other interrupt. */
576 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
577 wl1271_error("SW watchdog interrupt received! "
578 "starting recovery.");
579 wl->watchdog_recovery = true;
582 /* restarting the chip. ignore any other interrupt. */
586 if (likely(intr & WL1271_ACX_INTR_DATA)) {
587 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
589 ret = wlcore_rx(wl, wl->fw_status_1);
593 /* Check if any tx blocks were freed */
594 spin_lock_irqsave(&wl->wl_lock, flags);
595 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
596 wl1271_tx_total_queue_count(wl) > 0) {
597 spin_unlock_irqrestore(&wl->wl_lock, flags);
599 * In order to avoid starvation of the TX path,
600 * call the work function directly.
602 ret = wlcore_tx_work_locked(wl);
606 spin_unlock_irqrestore(&wl->wl_lock, flags);
609 /* check for tx results */
610 ret = wlcore_hw_tx_delayed_compl(wl);
614 /* Make sure the deferred queues don't get too long */
615 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
616 skb_queue_len(&wl->deferred_rx_queue);
617 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
618 wl1271_flush_deferred_work(wl);
621 if (intr & WL1271_ACX_INTR_EVENT_A) {
622 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
623 ret = wl1271_event_handle(wl, 0);
628 if (intr & WL1271_ACX_INTR_EVENT_B) {
629 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
630 ret = wl1271_event_handle(wl, 1);
635 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
636 wl1271_debug(DEBUG_IRQ,
637 "WL1271_ACX_INTR_INIT_COMPLETE");
639 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
640 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 wl1271_ps_elp_sleep(wl);
649 static irqreturn_t wlcore_irq(int irq, void *cookie)
653 struct wl1271 *wl = cookie;
655 /* TX might be handled here, avoid redundant work */
656 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
657 cancel_work_sync(&wl->tx_work);
659 mutex_lock(&wl->mutex);
661 ret = wlcore_irq_locked(wl);
663 wl12xx_queue_recovery_work(wl);
665 spin_lock_irqsave(&wl->wl_lock, flags);
666 /* In case TX was not handled here, queue TX work */
667 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
668 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
669 wl1271_tx_total_queue_count(wl) > 0)
670 ieee80211_queue_work(wl->hw, &wl->tx_work);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
673 mutex_unlock(&wl->mutex);
678 struct vif_counter_data {
681 struct ieee80211_vif *cur_vif;
682 bool cur_vif_running;
685 static void wl12xx_vif_count_iter(void *data, u8 *mac,
686 struct ieee80211_vif *vif)
688 struct vif_counter_data *counter = data;
691 if (counter->cur_vif == vif)
692 counter->cur_vif_running = true;
695 /* caller must not hold wl->mutex, as it might deadlock */
696 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
697 struct ieee80211_vif *cur_vif,
698 struct vif_counter_data *data)
700 memset(data, 0, sizeof(*data));
701 data->cur_vif = cur_vif;
703 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
704 wl12xx_vif_count_iter, data);
707 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
709 const struct firmware *fw;
711 enum wl12xx_fw_type fw_type;
715 fw_type = WL12XX_FW_TYPE_PLT;
716 fw_name = wl->plt_fw_name;
719 * we can't call wl12xx_get_vif_count() here because
720 * wl->mutex is taken, so use the cached last_vif_count value
722 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
723 fw_type = WL12XX_FW_TYPE_MULTI;
724 fw_name = wl->mr_fw_name;
726 fw_type = WL12XX_FW_TYPE_NORMAL;
727 fw_name = wl->sr_fw_name;
731 if (wl->fw_type == fw_type)
734 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
736 ret = request_firmware(&fw, fw_name, wl->dev);
739 wl1271_error("could not get firmware %s: %d", fw_name, ret);
744 wl1271_error("firmware size is not multiple of 32 bits: %zu",
751 wl->fw_type = WL12XX_FW_TYPE_NONE;
752 wl->fw_len = fw->size;
753 wl->fw = vmalloc(wl->fw_len);
756 wl1271_error("could not allocate memory for the firmware");
761 memcpy(wl->fw, fw->data, wl->fw_len);
763 wl->fw_type = fw_type;
765 release_firmware(fw);
770 void wl12xx_queue_recovery_work(struct wl1271 *wl)
772 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
774 /* Avoid a recursive recovery */
775 if (wl->state == WLCORE_STATE_ON) {
776 wl->state = WLCORE_STATE_RESTARTING;
777 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
778 wlcore_disable_interrupts_nosync(wl);
779 ieee80211_queue_work(wl->hw, &wl->recovery_work);
783 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
787 /* The FW log is a length-value list, find where the log end */
788 while (len < maxlen) {
789 if (memblock[len] == 0)
791 if (len + memblock[len] + 1 > maxlen)
793 len += memblock[len] + 1;
796 /* Make sure we have enough room */
797 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
799 /* Fill the FW log file, consumed by the sysfs fwlog entry */
800 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
801 wl->fwlog_size += len;
806 #define WLCORE_FW_LOG_END 0x2000000
808 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
816 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
817 (wl->conf.fwlog.mem_blocks == 0))
820 wl1271_info("Reading FW panic log");
822 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
827 * Make sure the chip is awake and the logger isn't active.
828 * Do not send a stop fwlog command if the fw is hanged or if
829 * dbgpins are used (due to some fw bug).
831 if (wl1271_ps_elp_wakeup(wl))
833 if (!wl->watchdog_recovery &&
834 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
835 wl12xx_cmd_stop_fwlog(wl);
837 /* Read the first memory block address */
838 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
842 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
846 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
847 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
848 end_of_log = WLCORE_FW_LOG_END;
850 offset = sizeof(addr);
854 /* Traverse the memory blocks linked list */
856 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
857 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
863 * Memory blocks are linked to one another. The first 4 bytes
864 * of each memory block hold the hardware address of the next
865 * one. The last memory block points to the first one in
866 * on demand mode and is equal to 0x2000000 in continuous mode.
868 addr = le32_to_cpup((__le32 *)block);
869 if (!wl12xx_copy_fwlog(wl, block + offset,
870 WL12XX_HW_BLOCK_SIZE - offset))
872 } while (addr && (addr != end_of_log));
874 wake_up_interruptible(&wl->fwlog_waitq);
880 static void wlcore_print_recovery(struct wl1271 *wl)
886 wl1271_info("Hardware recovery in progress. FW ver: %s",
887 wl->chip.fw_ver_str);
889 /* change partitions momentarily so we can read the FW pc */
890 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
894 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
898 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
902 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
903 pc, hint_sts, ++wl->recovery_count);
905 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
909 static void wl1271_recovery_work(struct work_struct *work)
912 container_of(work, struct wl1271, recovery_work);
913 struct wl12xx_vif *wlvif;
914 struct ieee80211_vif *vif;
916 mutex_lock(&wl->mutex);
918 if (wl->state == WLCORE_STATE_OFF || wl->plt)
921 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
922 wl12xx_read_fwlog_panic(wl);
923 wlcore_print_recovery(wl);
926 BUG_ON(wl->conf.recovery.bug_on_recovery &&
927 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
929 if (wl->conf.recovery.no_recovery) {
930 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
934 /* Prevent spurious TX during FW restart */
935 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
937 /* reboot the chipset */
938 while (!list_empty(&wl->wlvif_list)) {
939 wlvif = list_first_entry(&wl->wlvif_list,
940 struct wl12xx_vif, list);
941 vif = wl12xx_wlvif_to_vif(wlvif);
942 __wl1271_op_remove_interface(wl, vif, false);
945 wlcore_op_stop_locked(wl);
947 ieee80211_restart_hw(wl->hw);
950 * Its safe to enable TX now - the queues are stopped after a request
953 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
956 wl->watchdog_recovery = false;
957 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
958 mutex_unlock(&wl->mutex);
961 static int wlcore_fw_wakeup(struct wl1271 *wl)
963 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
966 static int wl1271_setup(struct wl1271 *wl)
968 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
969 sizeof(*wl->fw_status_2) +
970 wl->fw_status_priv_len, GFP_KERNEL);
971 if (!wl->fw_status_1)
974 wl->fw_status_2 = (struct wl_fw_status_2 *)
975 (((u8 *) wl->fw_status_1) +
976 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
978 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
979 if (!wl->tx_res_if) {
980 kfree(wl->fw_status_1);
987 static int wl12xx_set_power_on(struct wl1271 *wl)
991 msleep(WL1271_PRE_POWER_ON_SLEEP);
992 ret = wl1271_power_on(wl);
995 msleep(WL1271_POWER_ON_SLEEP);
999 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1003 /* ELP module wake up */
1004 ret = wlcore_fw_wakeup(wl);
1012 wl1271_power_off(wl);
1016 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1020 ret = wl12xx_set_power_on(wl);
1025 * For wl127x based devices we could use the default block
1026 * size (512 bytes), but due to a bug in the sdio driver, we
1027 * need to set it explicitly after the chip is powered on. To
1028 * simplify the code and since the performance impact is
1029 * negligible, we use the same block size for all different
1032 * Check if the bus supports blocksize alignment and, if it
1033 * doesn't, make sure we don't have the quirk.
1035 if (!wl1271_set_block_size(wl))
1036 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1038 /* TODO: make sure the lower driver has set things up correctly */
1040 ret = wl1271_setup(wl);
1044 ret = wl12xx_fetch_firmware(wl, plt);
1052 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1054 int retries = WL1271_BOOT_RETRIES;
1055 struct wiphy *wiphy = wl->hw->wiphy;
1057 static const char* const PLT_MODE[] = {
1065 mutex_lock(&wl->mutex);
1067 wl1271_notice("power up");
1069 if (wl->state != WLCORE_STATE_OFF) {
1070 wl1271_error("cannot go into PLT state because not "
1071 "in off state: %d", wl->state);
1076 /* Indicate to lower levels that we are now in PLT mode */
1078 wl->plt_mode = plt_mode;
1082 ret = wl12xx_chip_wakeup(wl, true);
1086 ret = wl->ops->plt_init(wl);
1090 wl->state = WLCORE_STATE_ON;
1091 wl1271_notice("firmware booted in PLT mode %s (%s)",
1093 wl->chip.fw_ver_str);
1095 /* update hw/fw version info in wiphy struct */
1096 wiphy->hw_version = wl->chip.id;
1097 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1098 sizeof(wiphy->fw_version));
1103 wl1271_power_off(wl);
1107 wl->plt_mode = PLT_OFF;
1109 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1110 WL1271_BOOT_RETRIES);
1112 mutex_unlock(&wl->mutex);
1117 int wl1271_plt_stop(struct wl1271 *wl)
1121 wl1271_notice("power down");
1124 * Interrupts must be disabled before setting the state to OFF.
1125 * Otherwise, the interrupt handler might be called and exit without
1126 * reading the interrupt status.
1128 wlcore_disable_interrupts(wl);
1129 mutex_lock(&wl->mutex);
1131 mutex_unlock(&wl->mutex);
1134 * This will not necessarily enable interrupts as interrupts
1135 * may have been disabled when op_stop was called. It will,
1136 * however, balance the above call to disable_interrupts().
1138 wlcore_enable_interrupts(wl);
1140 wl1271_error("cannot power down because not in PLT "
1141 "state: %d", wl->state);
1146 mutex_unlock(&wl->mutex);
1148 wl1271_flush_deferred_work(wl);
1149 cancel_work_sync(&wl->netstack_work);
1150 cancel_work_sync(&wl->recovery_work);
1151 cancel_delayed_work_sync(&wl->elp_work);
1152 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1154 mutex_lock(&wl->mutex);
1155 wl1271_power_off(wl);
1157 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1158 wl->state = WLCORE_STATE_OFF;
1160 wl->plt_mode = PLT_OFF;
1162 mutex_unlock(&wl->mutex);
1168 static void wl1271_op_tx(struct ieee80211_hw *hw,
1169 struct ieee80211_tx_control *control,
1170 struct sk_buff *skb)
1172 struct wl1271 *wl = hw->priv;
1173 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1174 struct ieee80211_vif *vif = info->control.vif;
1175 struct wl12xx_vif *wlvif = NULL;
1176 unsigned long flags;
1181 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1182 ieee80211_free_txskb(hw, skb);
1186 wlvif = wl12xx_vif_to_data(vif);
1187 mapping = skb_get_queue_mapping(skb);
1188 q = wl1271_tx_get_queue(mapping);
1190 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1192 spin_lock_irqsave(&wl->wl_lock, flags);
1195 * drop the packet if the link is invalid or the queue is stopped
1196 * for any reason but watermark. Watermark is a "soft"-stop so we
1197 * allow these packets through.
1199 if (hlid == WL12XX_INVALID_LINK_ID ||
1200 (!test_bit(hlid, wlvif->links_map)) ||
1201 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1202 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1203 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1204 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1205 ieee80211_free_txskb(hw, skb);
1209 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1211 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1213 wl->tx_queue_count[q]++;
1214 wlvif->tx_queue_count[q]++;
1217 * The workqueue is slow to process the tx_queue and we need stop
1218 * the queue here, otherwise the queue will get too long.
1220 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1223 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1224 wlcore_stop_queue_locked(wl, wlvif, q,
1225 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1229 * The chip specific setup must run before the first TX packet -
1230 * before that, the tx_work will not be initialized!
1233 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1234 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1235 ieee80211_queue_work(wl->hw, &wl->tx_work);
1238 spin_unlock_irqrestore(&wl->wl_lock, flags);
1241 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1243 unsigned long flags;
1246 /* no need to queue a new dummy packet if one is already pending */
1247 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1250 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1252 spin_lock_irqsave(&wl->wl_lock, flags);
1253 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1254 wl->tx_queue_count[q]++;
1255 spin_unlock_irqrestore(&wl->wl_lock, flags);
1257 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1258 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1259 return wlcore_tx_work_locked(wl);
1262 * If the FW TX is busy, TX work will be scheduled by the threaded
1263 * interrupt handler function
1269 * The size of the dummy packet should be at least 1400 bytes. However, in
1270 * order to minimize the number of bus transactions, aligning it to 512 bytes
1271 * boundaries could be beneficial, performance wise
1273 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1275 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1277 struct sk_buff *skb;
1278 struct ieee80211_hdr_3addr *hdr;
1279 unsigned int dummy_packet_size;
1281 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1282 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1284 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1286 wl1271_warning("Failed to allocate a dummy packet skb");
1290 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1292 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1293 memset(hdr, 0, sizeof(*hdr));
1294 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1295 IEEE80211_STYPE_NULLFUNC |
1296 IEEE80211_FCTL_TODS);
1298 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1300 /* Dummy packets require the TID to be management */
1301 skb->priority = WL1271_TID_MGMT;
1303 /* Initialize all fields that might be used */
1304 skb_set_queue_mapping(skb, 0);
1305 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1313 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1315 int num_fields = 0, in_field = 0, fields_size = 0;
1316 int i, pattern_len = 0;
1319 wl1271_warning("No mask in WoWLAN pattern");
1324 * The pattern is broken up into segments of bytes at different offsets
1325 * that need to be checked by the FW filter. Each segment is called
1326 * a field in the FW API. We verify that the total number of fields
1327 * required for this pattern won't exceed FW limits (8)
1328 * as well as the total fields buffer won't exceed the FW limit.
1329 * Note that if there's a pattern which crosses Ethernet/IP header
1330 * boundary a new field is required.
1332 for (i = 0; i < p->pattern_len; i++) {
1333 if (test_bit(i, (unsigned long *)p->mask)) {
1338 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1340 fields_size += pattern_len +
1341 RX_FILTER_FIELD_OVERHEAD;
1349 fields_size += pattern_len +
1350 RX_FILTER_FIELD_OVERHEAD;
1357 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1361 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1362 wl1271_warning("RX Filter too complex. Too many segments");
1366 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1367 wl1271_warning("RX filter pattern is too big");
1374 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1376 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1379 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1386 for (i = 0; i < filter->num_fields; i++)
1387 kfree(filter->fields[i].pattern);
1392 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1393 u16 offset, u8 flags,
1394 u8 *pattern, u8 len)
1396 struct wl12xx_rx_filter_field *field;
1398 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1399 wl1271_warning("Max fields per RX filter. can't alloc another");
1403 field = &filter->fields[filter->num_fields];
1405 field->pattern = kzalloc(len, GFP_KERNEL);
1406 if (!field->pattern) {
1407 wl1271_warning("Failed to allocate RX filter pattern");
1411 filter->num_fields++;
1413 field->offset = cpu_to_le16(offset);
1414 field->flags = flags;
1416 memcpy(field->pattern, pattern, len);
1421 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1423 int i, fields_size = 0;
1425 for (i = 0; i < filter->num_fields; i++)
1426 fields_size += filter->fields[i].len +
1427 sizeof(struct wl12xx_rx_filter_field) -
1433 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1437 struct wl12xx_rx_filter_field *field;
1439 for (i = 0; i < filter->num_fields; i++) {
1440 field = (struct wl12xx_rx_filter_field *)buf;
1442 field->offset = filter->fields[i].offset;
1443 field->flags = filter->fields[i].flags;
1444 field->len = filter->fields[i].len;
1446 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1447 buf += sizeof(struct wl12xx_rx_filter_field) -
1448 sizeof(u8 *) + field->len;
1453 * Allocates an RX filter returned through f
1454 * which needs to be freed using rx_filter_free()
1456 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1457 struct cfg80211_wowlan_trig_pkt_pattern *p,
1458 struct wl12xx_rx_filter **f)
1461 struct wl12xx_rx_filter *filter;
1465 filter = wl1271_rx_filter_alloc();
1467 wl1271_warning("Failed to alloc rx filter");
1473 while (i < p->pattern_len) {
1474 if (!test_bit(i, (unsigned long *)p->mask)) {
1479 for (j = i; j < p->pattern_len; j++) {
1480 if (!test_bit(j, (unsigned long *)p->mask))
1483 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1484 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1488 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1490 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1492 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1493 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1498 ret = wl1271_rx_filter_alloc_field(filter,
1501 &p->pattern[i], len);
1508 filter->action = FILTER_SIGNAL;
1514 wl1271_rx_filter_free(filter);
1520 static int wl1271_configure_wowlan(struct wl1271 *wl,
1521 struct cfg80211_wowlan *wow)
1525 if (!wow || wow->any || !wow->n_patterns) {
1526 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1531 ret = wl1271_rx_filter_clear_all(wl);
1538 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1541 /* Validate all incoming patterns before clearing current FW state */
1542 for (i = 0; i < wow->n_patterns; i++) {
1543 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1545 wl1271_warning("Bad wowlan pattern %d", i);
1550 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1554 ret = wl1271_rx_filter_clear_all(wl);
1558 /* Translate WoWLAN patterns into filters */
1559 for (i = 0; i < wow->n_patterns; i++) {
1560 struct cfg80211_wowlan_trig_pkt_pattern *p;
1561 struct wl12xx_rx_filter *filter = NULL;
1563 p = &wow->patterns[i];
1565 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1567 wl1271_warning("Failed to create an RX filter from "
1568 "wowlan pattern %d", i);
1572 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1574 wl1271_rx_filter_free(filter);
1579 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1585 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1586 struct wl12xx_vif *wlvif,
1587 struct cfg80211_wowlan *wow)
1591 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1594 ret = wl1271_ps_elp_wakeup(wl);
1598 ret = wl1271_configure_wowlan(wl, wow);
1602 if ((wl->conf.conn.suspend_wake_up_event ==
1603 wl->conf.conn.wake_up_event) &&
1604 (wl->conf.conn.suspend_listen_interval ==
1605 wl->conf.conn.listen_interval))
1608 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1609 wl->conf.conn.suspend_wake_up_event,
1610 wl->conf.conn.suspend_listen_interval);
1613 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1616 wl1271_ps_elp_sleep(wl);
1622 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1623 struct wl12xx_vif *wlvif)
1627 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1630 ret = wl1271_ps_elp_wakeup(wl);
1634 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1636 wl1271_ps_elp_sleep(wl);
1642 static int wl1271_configure_suspend(struct wl1271 *wl,
1643 struct wl12xx_vif *wlvif,
1644 struct cfg80211_wowlan *wow)
1646 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1647 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1648 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1649 return wl1271_configure_suspend_ap(wl, wlvif);
1653 static void wl1271_configure_resume(struct wl1271 *wl,
1654 struct wl12xx_vif *wlvif)
1657 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1658 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1660 if ((!is_ap) && (!is_sta))
1663 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1666 ret = wl1271_ps_elp_wakeup(wl);
1671 wl1271_configure_wowlan(wl, NULL);
1673 if ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval))
1679 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 wl->conf.conn.wake_up_event,
1681 wl->conf.conn.listen_interval);
1684 wl1271_error("resume: wake up conditions failed: %d",
1688 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1692 wl1271_ps_elp_sleep(wl);
1695 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1696 struct cfg80211_wowlan *wow)
1698 struct wl1271 *wl = hw->priv;
1699 struct wl12xx_vif *wlvif;
1702 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1705 /* we want to perform the recovery before suspending */
1706 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1707 wl1271_warning("postponing suspend to perform recovery");
1711 wl1271_tx_flush(wl);
1713 mutex_lock(&wl->mutex);
1714 wl->wow_enabled = true;
1715 wl12xx_for_each_wlvif(wl, wlvif) {
1716 ret = wl1271_configure_suspend(wl, wlvif, wow);
1718 mutex_unlock(&wl->mutex);
1719 wl1271_warning("couldn't prepare device to suspend");
1723 mutex_unlock(&wl->mutex);
1724 /* flush any remaining work */
1725 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1728 * disable and re-enable interrupts in order to flush
1731 wlcore_disable_interrupts(wl);
1734 * set suspended flag to avoid triggering a new threaded_irq
1735 * work. no need for spinlock as interrupts are disabled.
1737 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1739 wlcore_enable_interrupts(wl);
1740 flush_work(&wl->tx_work);
1741 flush_delayed_work(&wl->elp_work);
1746 static int wl1271_op_resume(struct ieee80211_hw *hw)
1748 struct wl1271 *wl = hw->priv;
1749 struct wl12xx_vif *wlvif;
1750 unsigned long flags;
1751 bool run_irq_work = false, pending_recovery;
1754 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1756 WARN_ON(!wl->wow_enabled);
1759 * re-enable irq_work enqueuing, and call irq_work directly if
1760 * there is a pending work.
1762 spin_lock_irqsave(&wl->wl_lock, flags);
1763 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1764 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1765 run_irq_work = true;
1766 spin_unlock_irqrestore(&wl->wl_lock, flags);
1768 mutex_lock(&wl->mutex);
1770 /* test the recovery flag before calling any SDIO functions */
1771 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1775 wl1271_debug(DEBUG_MAC80211,
1776 "run postponed irq_work directly");
1778 /* don't talk to the HW if recovery is pending */
1779 if (!pending_recovery) {
1780 ret = wlcore_irq_locked(wl);
1782 wl12xx_queue_recovery_work(wl);
1785 wlcore_enable_interrupts(wl);
1788 if (pending_recovery) {
1789 wl1271_warning("queuing forgotten recovery on resume");
1790 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 wl1271_configure_resume(wl, wlvif);
1799 wl->wow_enabled = false;
1800 mutex_unlock(&wl->mutex);
1806 static int wl1271_op_start(struct ieee80211_hw *hw)
1808 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1811 * We have to delay the booting of the hardware because
1812 * we need to know the local MAC address before downloading and
1813 * initializing the firmware. The MAC address cannot be changed
1814 * after boot, and without the proper MAC address, the firmware
1815 * will not function properly.
1817 * The MAC address is first known when the corresponding interface
1818 * is added. That is where we will initialize the hardware.
1824 static void wlcore_op_stop_locked(struct wl1271 *wl)
1828 if (wl->state == WLCORE_STATE_OFF) {
1829 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1831 wlcore_enable_interrupts(wl);
1837 * this must be before the cancel_work calls below, so that the work
1838 * functions don't perform further work.
1840 wl->state = WLCORE_STATE_OFF;
1843 * Use the nosync variant to disable interrupts, so the mutex could be
1844 * held while doing so without deadlocking.
1846 wlcore_disable_interrupts_nosync(wl);
1848 mutex_unlock(&wl->mutex);
1850 wlcore_synchronize_interrupts(wl);
1851 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1852 cancel_work_sync(&wl->recovery_work);
1853 wl1271_flush_deferred_work(wl);
1854 cancel_delayed_work_sync(&wl->scan_complete_work);
1855 cancel_work_sync(&wl->netstack_work);
1856 cancel_work_sync(&wl->tx_work);
1857 cancel_delayed_work_sync(&wl->elp_work);
1858 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1860 /* let's notify MAC80211 about the remaining pending TX frames */
1861 mutex_lock(&wl->mutex);
1862 wl12xx_tx_reset(wl);
1864 wl1271_power_off(wl);
1866 * In case a recovery was scheduled, interrupts were disabled to avoid
1867 * an interrupt storm. Now that the power is down, it is safe to
1868 * re-enable interrupts to balance the disable depth
1870 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1871 wlcore_enable_interrupts(wl);
1873 wl->band = IEEE80211_BAND_2GHZ;
1876 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1877 wl->channel_type = NL80211_CHAN_NO_HT;
1878 wl->tx_blocks_available = 0;
1879 wl->tx_allocated_blocks = 0;
1880 wl->tx_results_count = 0;
1881 wl->tx_packets_count = 0;
1882 wl->time_offset = 0;
1883 wl->ap_fw_ps_map = 0;
1885 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1886 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1887 memset(wl->links_map, 0, sizeof(wl->links_map));
1888 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1889 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1890 wl->active_sta_count = 0;
1891 wl->active_link_count = 0;
1893 /* The system link is always allocated */
1894 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1895 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1896 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1899 * this is performed after the cancel_work calls and the associated
1900 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1901 * get executed before all these vars have been reset.
1905 wl->tx_blocks_freed = 0;
1907 for (i = 0; i < NUM_TX_QUEUES; i++) {
1908 wl->tx_pkts_freed[i] = 0;
1909 wl->tx_allocated_pkts[i] = 0;
1912 wl1271_debugfs_reset(wl);
1914 kfree(wl->fw_status_1);
1915 wl->fw_status_1 = NULL;
1916 wl->fw_status_2 = NULL;
1917 kfree(wl->tx_res_if);
1918 wl->tx_res_if = NULL;
1919 kfree(wl->target_mem_map);
1920 wl->target_mem_map = NULL;
1923 * FW channels must be re-calibrated after recovery,
1924 * clear the last Reg-Domain channel configuration.
1926 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1929 static void wlcore_op_stop(struct ieee80211_hw *hw)
1931 struct wl1271 *wl = hw->priv;
1933 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1935 mutex_lock(&wl->mutex);
1937 wlcore_op_stop_locked(wl);
1939 mutex_unlock(&wl->mutex);
1942 static void wlcore_channel_switch_work(struct work_struct *work)
1944 struct delayed_work *dwork;
1946 struct ieee80211_vif *vif;
1947 struct wl12xx_vif *wlvif;
1950 dwork = container_of(work, struct delayed_work, work);
1951 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1954 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1956 mutex_lock(&wl->mutex);
1958 if (unlikely(wl->state != WLCORE_STATE_ON))
1961 /* check the channel switch is still ongoing */
1962 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1965 vif = wl12xx_wlvif_to_vif(wlvif);
1966 ieee80211_chswitch_done(vif, false);
1968 ret = wl1271_ps_elp_wakeup(wl);
1972 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1974 wl1271_ps_elp_sleep(wl);
1976 mutex_unlock(&wl->mutex);
1979 static void wlcore_connection_loss_work(struct work_struct *work)
1981 struct delayed_work *dwork;
1983 struct ieee80211_vif *vif;
1984 struct wl12xx_vif *wlvif;
1986 dwork = container_of(work, struct delayed_work, work);
1987 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1990 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1992 mutex_lock(&wl->mutex);
1994 if (unlikely(wl->state != WLCORE_STATE_ON))
1997 /* Call mac80211 connection loss */
1998 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2001 vif = wl12xx_wlvif_to_vif(wlvif);
2002 ieee80211_connection_loss(vif);
2004 mutex_unlock(&wl->mutex);
2007 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2009 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2010 WL12XX_MAX_RATE_POLICIES);
2011 if (policy >= WL12XX_MAX_RATE_POLICIES)
2014 __set_bit(policy, wl->rate_policies_map);
2019 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2021 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2024 __clear_bit(*idx, wl->rate_policies_map);
2025 *idx = WL12XX_MAX_RATE_POLICIES;
2028 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2030 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2031 WLCORE_MAX_KLV_TEMPLATES);
2032 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2035 __set_bit(policy, wl->klv_templates_map);
2040 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2042 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2045 __clear_bit(*idx, wl->klv_templates_map);
2046 *idx = WLCORE_MAX_KLV_TEMPLATES;
2049 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2051 switch (wlvif->bss_type) {
2052 case BSS_TYPE_AP_BSS:
2054 return WL1271_ROLE_P2P_GO;
2056 return WL1271_ROLE_AP;
2058 case BSS_TYPE_STA_BSS:
2060 return WL1271_ROLE_P2P_CL;
2062 return WL1271_ROLE_STA;
2065 return WL1271_ROLE_IBSS;
2068 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2070 return WL12XX_INVALID_ROLE_TYPE;
2073 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2075 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2078 /* clear everything but the persistent data */
2079 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2081 switch (ieee80211_vif_type_p2p(vif)) {
2082 case NL80211_IFTYPE_P2P_CLIENT:
2085 case NL80211_IFTYPE_STATION:
2086 wlvif->bss_type = BSS_TYPE_STA_BSS;
2088 case NL80211_IFTYPE_ADHOC:
2089 wlvif->bss_type = BSS_TYPE_IBSS;
2091 case NL80211_IFTYPE_P2P_GO:
2094 case NL80211_IFTYPE_AP:
2095 wlvif->bss_type = BSS_TYPE_AP_BSS;
2098 wlvif->bss_type = MAX_BSS_TYPE;
2102 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2103 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2104 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2106 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2107 wlvif->bss_type == BSS_TYPE_IBSS) {
2108 /* init sta/ibss data */
2109 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2110 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2111 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2112 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2113 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2114 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2115 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2116 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2119 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2120 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2121 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2122 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2123 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2124 wl12xx_allocate_rate_policy(wl,
2125 &wlvif->ap.ucast_rate_idx[i]);
2126 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2128 * TODO: check if basic_rate shouldn't be
2129 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2130 * instead (the same thing for STA above).
2132 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2133 /* TODO: this seems to be used only for STA, check it */
2134 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2137 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2138 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2139 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2142 * mac80211 configures some values globally, while we treat them
2143 * per-interface. thus, on init, we have to copy them from wl
2145 wlvif->band = wl->band;
2146 wlvif->channel = wl->channel;
2147 wlvif->power_level = wl->power_level;
2148 wlvif->channel_type = wl->channel_type;
2150 INIT_WORK(&wlvif->rx_streaming_enable_work,
2151 wl1271_rx_streaming_enable_work);
2152 INIT_WORK(&wlvif->rx_streaming_disable_work,
2153 wl1271_rx_streaming_disable_work);
2154 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2155 wlcore_channel_switch_work);
2156 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2157 wlcore_connection_loss_work);
2158 INIT_LIST_HEAD(&wlvif->list);
2160 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2161 (unsigned long) wlvif);
2165 static int wl12xx_init_fw(struct wl1271 *wl)
2167 int retries = WL1271_BOOT_RETRIES;
2168 bool booted = false;
2169 struct wiphy *wiphy = wl->hw->wiphy;
2174 ret = wl12xx_chip_wakeup(wl, false);
2178 ret = wl->ops->boot(wl);
2182 ret = wl1271_hw_init(wl);
2190 mutex_unlock(&wl->mutex);
2191 /* Unlocking the mutex in the middle of handling is
2192 inherently unsafe. In this case we deem it safe to do,
2193 because we need to let any possibly pending IRQ out of
2194 the system (and while we are WLCORE_STATE_OFF the IRQ
2195 work function will not do anything.) Also, any other
2196 possible concurrent operations will fail due to the
2197 current state, hence the wl1271 struct should be safe. */
2198 wlcore_disable_interrupts(wl);
2199 wl1271_flush_deferred_work(wl);
2200 cancel_work_sync(&wl->netstack_work);
2201 mutex_lock(&wl->mutex);
2203 wl1271_power_off(wl);
2207 wl1271_error("firmware boot failed despite %d retries",
2208 WL1271_BOOT_RETRIES);
2212 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2214 /* update hw/fw version info in wiphy struct */
2215 wiphy->hw_version = wl->chip.id;
2216 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2217 sizeof(wiphy->fw_version));
2220 * Now we know if 11a is supported (info from the NVS), so disable
2221 * 11a channels if not supported
2223 if (!wl->enable_11a)
2224 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2226 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2227 wl->enable_11a ? "" : "not ");
2229 wl->state = WLCORE_STATE_ON;
2234 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2236 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2240 * Check whether a fw switch (i.e. moving from one loaded
2241 * fw to another) is needed. This function is also responsible
2242 * for updating wl->last_vif_count, so it must be called before
2243 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2246 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2247 struct vif_counter_data vif_counter_data,
2250 enum wl12xx_fw_type current_fw = wl->fw_type;
2251 u8 vif_count = vif_counter_data.counter;
2253 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2256 /* increase the vif count if this is a new vif */
2257 if (add && !vif_counter_data.cur_vif_running)
2260 wl->last_vif_count = vif_count;
2262 /* no need for fw change if the device is OFF */
2263 if (wl->state == WLCORE_STATE_OFF)
2266 /* no need for fw change if a single fw is used */
2267 if (!wl->mr_fw_name)
2270 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2272 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2279 * Enter "forced psm". Make sure the sta is in psm against the ap,
2280 * to make the fw switch a bit more disconnection-persistent.
2282 static void wl12xx_force_active_psm(struct wl1271 *wl)
2284 struct wl12xx_vif *wlvif;
2286 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2287 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2291 struct wlcore_hw_queue_iter_data {
2292 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2294 struct ieee80211_vif *vif;
2295 /* is the current vif among those iterated */
2299 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2300 struct ieee80211_vif *vif)
2302 struct wlcore_hw_queue_iter_data *iter_data = data;
2304 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2307 if (iter_data->cur_running || vif == iter_data->vif) {
2308 iter_data->cur_running = true;
2312 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2315 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2316 struct wl12xx_vif *wlvif)
2318 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2319 struct wlcore_hw_queue_iter_data iter_data = {};
2322 iter_data.vif = vif;
2324 /* mark all bits taken by active interfaces */
2325 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2326 IEEE80211_IFACE_ITER_RESUME_ALL,
2327 wlcore_hw_queue_iter, &iter_data);
2329 /* the current vif is already running in mac80211 (resume/recovery) */
2330 if (iter_data.cur_running) {
2331 wlvif->hw_queue_base = vif->hw_queue[0];
2332 wl1271_debug(DEBUG_MAC80211,
2333 "using pre-allocated hw queue base %d",
2334 wlvif->hw_queue_base);
2336 /* interface type might have changed type */
2337 goto adjust_cab_queue;
2340 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2341 WLCORE_NUM_MAC_ADDRESSES);
2342 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2345 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2346 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2347 wlvif->hw_queue_base);
2349 for (i = 0; i < NUM_TX_QUEUES; i++) {
2350 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2351 /* register hw queues in mac80211 */
2352 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2356 /* the last places are reserved for cab queues per interface */
2357 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2358 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2359 wlvif->hw_queue_base / NUM_TX_QUEUES;
2361 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2366 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2367 struct ieee80211_vif *vif)
2369 struct wl1271 *wl = hw->priv;
2370 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2371 struct vif_counter_data vif_count;
2375 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2376 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2378 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2379 ieee80211_vif_type_p2p(vif), vif->addr);
2381 wl12xx_get_vif_count(hw, vif, &vif_count);
2383 mutex_lock(&wl->mutex);
2384 ret = wl1271_ps_elp_wakeup(wl);
2389 * in some very corner case HW recovery scenarios its possible to
2390 * get here before __wl1271_op_remove_interface is complete, so
2391 * opt out if that is the case.
2393 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2394 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2400 ret = wl12xx_init_vif_data(wl, vif);
2405 role_type = wl12xx_get_role_type(wl, wlvif);
2406 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2411 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2415 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2416 wl12xx_force_active_psm(wl);
2417 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2418 mutex_unlock(&wl->mutex);
2419 wl1271_recovery_work(&wl->recovery_work);
2424 * TODO: after the nvs issue will be solved, move this block
2425 * to start(), and make sure here the driver is ON.
2427 if (wl->state == WLCORE_STATE_OFF) {
2429 * we still need this in order to configure the fw
2430 * while uploading the nvs
2432 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2434 ret = wl12xx_init_fw(wl);
2439 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2440 role_type, &wlvif->role_id);
2444 ret = wl1271_init_vif_specific(wl, vif);
2448 list_add(&wlvif->list, &wl->wlvif_list);
2449 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2451 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2456 wl1271_ps_elp_sleep(wl);
2458 mutex_unlock(&wl->mutex);
2463 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2464 struct ieee80211_vif *vif,
2465 bool reset_tx_queues)
2467 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2469 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2471 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2473 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2476 /* because of hardware recovery, we may get here twice */
2477 if (wl->state == WLCORE_STATE_OFF)
2480 wl1271_info("down");
2482 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2483 wl->scan_wlvif == wlvif) {
2485 * Rearm the tx watchdog just before idling scan. This
2486 * prevents just-finished scans from triggering the watchdog
2488 wl12xx_rearm_tx_watchdog_locked(wl);
2490 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2491 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2492 wl->scan_wlvif = NULL;
2493 wl->scan.req = NULL;
2494 ieee80211_scan_completed(wl->hw, true);
2497 if (wl->sched_vif == wlvif) {
2498 ieee80211_sched_scan_stopped(wl->hw);
2499 wl->sched_vif = NULL;
2502 if (wl->roc_vif == vif) {
2504 ieee80211_remain_on_channel_expired(wl->hw);
2507 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2508 /* disable active roles */
2509 ret = wl1271_ps_elp_wakeup(wl);
2513 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2514 wlvif->bss_type == BSS_TYPE_IBSS) {
2515 if (wl12xx_dev_role_started(wlvif))
2516 wl12xx_stop_dev(wl, wlvif);
2519 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2523 wl1271_ps_elp_sleep(wl);
2526 /* clear all hlids (except system_hlid) */
2527 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2529 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2530 wlvif->bss_type == BSS_TYPE_IBSS) {
2531 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2532 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2533 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2534 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2535 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2537 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2538 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2539 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2540 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2541 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2542 wl12xx_free_rate_policy(wl,
2543 &wlvif->ap.ucast_rate_idx[i]);
2544 wl1271_free_ap_keys(wl, wlvif);
2547 dev_kfree_skb(wlvif->probereq);
2548 wlvif->probereq = NULL;
2549 wl12xx_tx_reset_wlvif(wl, wlvif);
2550 if (wl->last_wlvif == wlvif)
2551 wl->last_wlvif = NULL;
2552 list_del(&wlvif->list);
2553 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2554 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2555 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2563 * Last AP, have more stations. Configure sleep auth according to STA.
2564 * Don't do thin on unintended recovery.
2566 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2567 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2570 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2571 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2572 /* Configure for power according to debugfs */
2573 if (sta_auth != WL1271_PSM_ILLEGAL)
2574 wl1271_acx_sleep_auth(wl, sta_auth);
2575 /* Configure for ELP power saving */
2577 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2581 mutex_unlock(&wl->mutex);
2583 del_timer_sync(&wlvif->rx_streaming_timer);
2584 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2585 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2586 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2588 mutex_lock(&wl->mutex);
2591 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2592 struct ieee80211_vif *vif)
2594 struct wl1271 *wl = hw->priv;
2595 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2596 struct wl12xx_vif *iter;
2597 struct vif_counter_data vif_count;
2599 wl12xx_get_vif_count(hw, vif, &vif_count);
2600 mutex_lock(&wl->mutex);
2602 if (wl->state == WLCORE_STATE_OFF ||
2603 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2607 * wl->vif can be null here if someone shuts down the interface
2608 * just when hardware recovery has been started.
2610 wl12xx_for_each_wlvif(wl, iter) {
2614 __wl1271_op_remove_interface(wl, vif, true);
2617 WARN_ON(iter != wlvif);
2618 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2619 wl12xx_force_active_psm(wl);
2620 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2621 wl12xx_queue_recovery_work(wl);
2624 mutex_unlock(&wl->mutex);
2627 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2628 struct ieee80211_vif *vif,
2629 enum nl80211_iftype new_type, bool p2p)
2631 struct wl1271 *wl = hw->priv;
2634 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2635 wl1271_op_remove_interface(hw, vif);
2637 vif->type = new_type;
2639 ret = wl1271_op_add_interface(hw, vif);
2641 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2645 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2648 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2651 * One of the side effects of the JOIN command is that is clears
2652 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2653 * to a WPA/WPA2 access point will therefore kill the data-path.
2654 * Currently the only valid scenario for JOIN during association
2655 * is on roaming, in which case we will also be given new keys.
2656 * Keep the below message for now, unless it starts bothering
2657 * users who really like to roam a lot :)
2659 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2660 wl1271_info("JOIN while associated.");
2662 /* clear encryption type */
2663 wlvif->encryption_type = KEY_NONE;
2666 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2668 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2670 * TODO: this is an ugly workaround for wl12xx fw
2671 * bug - we are not able to tx/rx after the first
2672 * start_sta, so make dummy start+stop calls,
2673 * and then call start_sta again.
2674 * this should be fixed in the fw.
2676 wl12xx_cmd_role_start_sta(wl, wlvif);
2677 wl12xx_cmd_role_stop_sta(wl, wlvif);
2680 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2686 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2690 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2694 wl1271_error("No SSID in IEs!");
2699 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2700 wl1271_error("SSID is too long!");
2704 wlvif->ssid_len = ssid_len;
2705 memcpy(wlvif->ssid, ptr+2, ssid_len);
2709 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2711 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2712 struct sk_buff *skb;
2715 /* we currently only support setting the ssid from the ap probe req */
2716 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2719 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2723 ieoffset = offsetof(struct ieee80211_mgmt,
2724 u.probe_req.variable);
2725 wl1271_ssid_set(wlvif, skb, ieoffset);
2731 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2732 struct ieee80211_bss_conf *bss_conf,
2738 wlvif->aid = bss_conf->aid;
2739 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2740 wlvif->beacon_int = bss_conf->beacon_int;
2741 wlvif->wmm_enabled = bss_conf->qos;
2743 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2746 * with wl1271, we don't need to update the
2747 * beacon_int and dtim_period, because the firmware
2748 * updates it by itself when the first beacon is
2749 * received after a join.
2751 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2756 * Get a template for hardware connection maintenance
2758 dev_kfree_skb(wlvif->probereq);
2759 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2762 ieoffset = offsetof(struct ieee80211_mgmt,
2763 u.probe_req.variable);
2764 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2766 /* enable the connection monitoring feature */
2767 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2772 * The join command disable the keep-alive mode, shut down its process,
2773 * and also clear the template config, so we need to reset it all after
2774 * the join. The acx_aid starts the keep-alive process, and the order
2775 * of the commands below is relevant.
2777 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2781 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2785 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2789 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2790 wlvif->sta.klv_template_id,
2791 ACX_KEEP_ALIVE_TPL_VALID);
2796 * The default fw psm configuration is AUTO, while mac80211 default
2797 * setting is off (ACTIVE), so sync the fw with the correct value.
2799 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2805 wl1271_tx_enabled_rates_get(wl,
2808 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2816 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2819 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2821 /* make sure we are connected (sta) joined */
2823 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2826 /* make sure we are joined (ibss) */
2828 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2832 /* use defaults when not associated */
2835 /* free probe-request template */
2836 dev_kfree_skb(wlvif->probereq);
2837 wlvif->probereq = NULL;
2839 /* disable connection monitor features */
2840 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2844 /* Disable the keep-alive feature */
2845 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2850 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2851 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2853 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2854 ieee80211_chswitch_done(vif, false);
2855 cancel_delayed_work(&wlvif->channel_switch_work);
2858 /* invalidate keep-alive template */
2859 wl1271_acx_keep_alive_config(wl, wlvif,
2860 wlvif->sta.klv_template_id,
2861 ACX_KEEP_ALIVE_TPL_INVALID);
2866 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2868 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2869 wlvif->rate_set = wlvif->basic_rate_set;
2872 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2873 struct ieee80211_conf *conf, u32 changed)
2877 if (conf->power_level != wlvif->power_level) {
2878 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2882 wlvif->power_level = conf->power_level;
2888 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2890 struct wl1271 *wl = hw->priv;
2891 struct wl12xx_vif *wlvif;
2892 struct ieee80211_conf *conf = &hw->conf;
2895 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2897 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2899 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2902 mutex_lock(&wl->mutex);
2904 if (changed & IEEE80211_CONF_CHANGE_POWER)
2905 wl->power_level = conf->power_level;
2907 if (unlikely(wl->state != WLCORE_STATE_ON))
2910 ret = wl1271_ps_elp_wakeup(wl);
2914 /* configure each interface */
2915 wl12xx_for_each_wlvif(wl, wlvif) {
2916 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2922 wl1271_ps_elp_sleep(wl);
2925 mutex_unlock(&wl->mutex);
2930 struct wl1271_filter_params {
2933 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2936 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2937 struct netdev_hw_addr_list *mc_list)
2939 struct wl1271_filter_params *fp;
2940 struct netdev_hw_addr *ha;
2942 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2944 wl1271_error("Out of memory setting filters.");
2948 /* update multicast filtering parameters */
2949 fp->mc_list_length = 0;
2950 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2951 fp->enabled = false;
2954 netdev_hw_addr_list_for_each(ha, mc_list) {
2955 memcpy(fp->mc_list[fp->mc_list_length],
2956 ha->addr, ETH_ALEN);
2957 fp->mc_list_length++;
2961 return (u64)(unsigned long)fp;
2964 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2967 FIF_BCN_PRBRESP_PROMISC | \
2971 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2972 unsigned int changed,
2973 unsigned int *total, u64 multicast)
2975 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2976 struct wl1271 *wl = hw->priv;
2977 struct wl12xx_vif *wlvif;
2981 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2982 " total %x", changed, *total);
2984 mutex_lock(&wl->mutex);
2986 *total &= WL1271_SUPPORTED_FILTERS;
2987 changed &= WL1271_SUPPORTED_FILTERS;
2989 if (unlikely(wl->state != WLCORE_STATE_ON))
2992 ret = wl1271_ps_elp_wakeup(wl);
2996 wl12xx_for_each_wlvif(wl, wlvif) {
2997 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2998 if (*total & FIF_ALLMULTI)
2999 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3003 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3006 fp->mc_list_length);
3013 * the fw doesn't provide an api to configure the filters. instead,
3014 * the filters configuration is based on the active roles / ROC
3019 wl1271_ps_elp_sleep(wl);
3022 mutex_unlock(&wl->mutex);
3026 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3027 u8 id, u8 key_type, u8 key_size,
3028 const u8 *key, u8 hlid, u32 tx_seq_32,
3031 struct wl1271_ap_key *ap_key;
3034 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3036 if (key_size > MAX_KEY_SIZE)
3040 * Find next free entry in ap_keys. Also check we are not replacing
3043 for (i = 0; i < MAX_NUM_KEYS; i++) {
3044 if (wlvif->ap.recorded_keys[i] == NULL)
3047 if (wlvif->ap.recorded_keys[i]->id == id) {
3048 wl1271_warning("trying to record key replacement");
3053 if (i == MAX_NUM_KEYS)
3056 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3061 ap_key->key_type = key_type;
3062 ap_key->key_size = key_size;
3063 memcpy(ap_key->key, key, key_size);
3064 ap_key->hlid = hlid;
3065 ap_key->tx_seq_32 = tx_seq_32;
3066 ap_key->tx_seq_16 = tx_seq_16;
3068 wlvif->ap.recorded_keys[i] = ap_key;
3072 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3076 for (i = 0; i < MAX_NUM_KEYS; i++) {
3077 kfree(wlvif->ap.recorded_keys[i]);
3078 wlvif->ap.recorded_keys[i] = NULL;
3082 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3085 struct wl1271_ap_key *key;
3086 bool wep_key_added = false;
3088 for (i = 0; i < MAX_NUM_KEYS; i++) {
3090 if (wlvif->ap.recorded_keys[i] == NULL)
3093 key = wlvif->ap.recorded_keys[i];
3095 if (hlid == WL12XX_INVALID_LINK_ID)
3096 hlid = wlvif->ap.bcast_hlid;
3098 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3099 key->id, key->key_type,
3100 key->key_size, key->key,
3101 hlid, key->tx_seq_32,
3106 if (key->key_type == KEY_WEP)
3107 wep_key_added = true;
3110 if (wep_key_added) {
3111 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3112 wlvif->ap.bcast_hlid);
3118 wl1271_free_ap_keys(wl, wlvif);
3122 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3123 u16 action, u8 id, u8 key_type,
3124 u8 key_size, const u8 *key, u32 tx_seq_32,
3125 u16 tx_seq_16, struct ieee80211_sta *sta)
3128 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3131 struct wl1271_station *wl_sta;
3135 wl_sta = (struct wl1271_station *)sta->drv_priv;
3136 hlid = wl_sta->hlid;
3138 hlid = wlvif->ap.bcast_hlid;
3141 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3143 * We do not support removing keys after AP shutdown.
3144 * Pretend we do to make mac80211 happy.
3146 if (action != KEY_ADD_OR_REPLACE)
3149 ret = wl1271_record_ap_key(wl, wlvif, id,
3151 key, hlid, tx_seq_32,
3154 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3155 id, key_type, key_size,
3156 key, hlid, tx_seq_32,
3164 static const u8 bcast_addr[ETH_ALEN] = {
3165 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3168 addr = sta ? sta->addr : bcast_addr;
3170 if (is_zero_ether_addr(addr)) {
3171 /* We dont support TX only encryption */
3175 /* The wl1271 does not allow to remove unicast keys - they
3176 will be cleared automatically on next CMD_JOIN. Ignore the
3177 request silently, as we dont want the mac80211 to emit
3178 an error message. */
3179 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3182 /* don't remove key if hlid was already deleted */
3183 if (action == KEY_REMOVE &&
3184 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3187 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3188 id, key_type, key_size,
3189 key, addr, tx_seq_32,
3194 /* the default WEP key needs to be configured at least once */
3195 if (key_type == KEY_WEP) {
3196 ret = wl12xx_cmd_set_default_wep_key(wl,
3207 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3208 struct ieee80211_vif *vif,
3209 struct ieee80211_sta *sta,
3210 struct ieee80211_key_conf *key_conf)
3212 struct wl1271 *wl = hw->priv;
3214 bool might_change_spare =
3215 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3216 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3218 if (might_change_spare) {
3220 * stop the queues and flush to ensure the next packets are
3221 * in sync with FW spare block accounting
3223 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3224 wl1271_tx_flush(wl);
3227 mutex_lock(&wl->mutex);
3229 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3231 goto out_wake_queues;
3234 ret = wl1271_ps_elp_wakeup(wl);
3236 goto out_wake_queues;
3238 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3240 wl1271_ps_elp_sleep(wl);
3243 if (might_change_spare)
3244 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3246 mutex_unlock(&wl->mutex);
3251 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3252 struct ieee80211_vif *vif,
3253 struct ieee80211_sta *sta,
3254 struct ieee80211_key_conf *key_conf)
3256 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3263 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3265 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3266 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3267 key_conf->cipher, key_conf->keyidx,
3268 key_conf->keylen, key_conf->flags);
3269 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3271 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3273 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3274 hlid = wl_sta->hlid;
3276 hlid = wlvif->ap.bcast_hlid;
3279 hlid = wlvif->sta.hlid;
3281 if (hlid != WL12XX_INVALID_LINK_ID) {
3282 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3283 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3284 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3287 switch (key_conf->cipher) {
3288 case WLAN_CIPHER_SUITE_WEP40:
3289 case WLAN_CIPHER_SUITE_WEP104:
3292 key_conf->hw_key_idx = key_conf->keyidx;
3294 case WLAN_CIPHER_SUITE_TKIP:
3295 key_type = KEY_TKIP;
3296 key_conf->hw_key_idx = key_conf->keyidx;
3298 case WLAN_CIPHER_SUITE_CCMP:
3300 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3302 case WL1271_CIPHER_SUITE_GEM:
3306 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3313 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3314 key_conf->keyidx, key_type,
3315 key_conf->keylen, key_conf->key,
3316 tx_seq_32, tx_seq_16, sta);
3318 wl1271_error("Could not add or replace key");
3323 * reconfiguring arp response if the unicast (or common)
3324 * encryption key type was changed
3326 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3327 (sta || key_type == KEY_WEP) &&
3328 wlvif->encryption_type != key_type) {
3329 wlvif->encryption_type = key_type;
3330 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3332 wl1271_warning("build arp rsp failed: %d", ret);
3339 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3340 key_conf->keyidx, key_type,
3341 key_conf->keylen, key_conf->key,
3344 wl1271_error("Could not remove key");
3350 wl1271_error("Unsupported key cmd 0x%x", cmd);
3356 EXPORT_SYMBOL_GPL(wlcore_set_key);
3358 void wlcore_regdomain_config(struct wl1271 *wl)
3362 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3365 mutex_lock(&wl->mutex);
3366 ret = wl1271_ps_elp_wakeup(wl);
3370 ret = wlcore_cmd_regdomain_config_locked(wl);
3372 wl12xx_queue_recovery_work(wl);
3376 wl1271_ps_elp_sleep(wl);
3378 mutex_unlock(&wl->mutex);
3381 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3382 struct ieee80211_vif *vif,
3383 struct cfg80211_scan_request *req)
3385 struct wl1271 *wl = hw->priv;
3390 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3393 ssid = req->ssids[0].ssid;
3394 len = req->ssids[0].ssid_len;
3397 mutex_lock(&wl->mutex);
3399 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3401 * We cannot return -EBUSY here because cfg80211 will expect
3402 * a call to ieee80211_scan_completed if we do - in this case
3403 * there won't be any call.
3409 ret = wl1271_ps_elp_wakeup(wl);
3413 /* fail if there is any role in ROC */
3414 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3415 /* don't allow scanning right now */
3420 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3422 wl1271_ps_elp_sleep(wl);
3424 mutex_unlock(&wl->mutex);
3429 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3430 struct ieee80211_vif *vif)
3432 struct wl1271 *wl = hw->priv;
3433 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3436 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3438 mutex_lock(&wl->mutex);
3440 if (unlikely(wl->state != WLCORE_STATE_ON))
3443 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3446 ret = wl1271_ps_elp_wakeup(wl);
3450 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3451 ret = wl->ops->scan_stop(wl, wlvif);
3457 * Rearm the tx watchdog just before idling scan. This
3458 * prevents just-finished scans from triggering the watchdog
3460 wl12xx_rearm_tx_watchdog_locked(wl);
3462 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3463 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3464 wl->scan_wlvif = NULL;
3465 wl->scan.req = NULL;
3466 ieee80211_scan_completed(wl->hw, true);
3469 wl1271_ps_elp_sleep(wl);
3471 mutex_unlock(&wl->mutex);
3473 cancel_delayed_work_sync(&wl->scan_complete_work);
3476 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3477 struct ieee80211_vif *vif,
3478 struct cfg80211_sched_scan_request *req,
3479 struct ieee80211_sched_scan_ies *ies)
3481 struct wl1271 *wl = hw->priv;
3482 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3485 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3487 mutex_lock(&wl->mutex);
3489 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3494 ret = wl1271_ps_elp_wakeup(wl);
3498 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3502 wl->sched_vif = wlvif;
3505 wl1271_ps_elp_sleep(wl);
3507 mutex_unlock(&wl->mutex);
3511 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3512 struct ieee80211_vif *vif)
3514 struct wl1271 *wl = hw->priv;
3515 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3518 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3520 mutex_lock(&wl->mutex);
3522 if (unlikely(wl->state != WLCORE_STATE_ON))
3525 ret = wl1271_ps_elp_wakeup(wl);
3529 wl->ops->sched_scan_stop(wl, wlvif);
3531 wl1271_ps_elp_sleep(wl);
3533 mutex_unlock(&wl->mutex);
3536 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3538 struct wl1271 *wl = hw->priv;
3541 mutex_lock(&wl->mutex);
3543 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3548 ret = wl1271_ps_elp_wakeup(wl);
3552 ret = wl1271_acx_frag_threshold(wl, value);
3554 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3556 wl1271_ps_elp_sleep(wl);
3559 mutex_unlock(&wl->mutex);
3564 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3566 struct wl1271 *wl = hw->priv;
3567 struct wl12xx_vif *wlvif;
3570 mutex_lock(&wl->mutex);
3572 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3577 ret = wl1271_ps_elp_wakeup(wl);
3581 wl12xx_for_each_wlvif(wl, wlvif) {
3582 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3584 wl1271_warning("set rts threshold failed: %d", ret);
3586 wl1271_ps_elp_sleep(wl);
3589 mutex_unlock(&wl->mutex);
3594 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3597 const u8 *next, *end = skb->data + skb->len;
3598 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3599 skb->len - ieoffset);
3604 memmove(ie, next, end - next);
3605 skb_trim(skb, skb->len - len);
3608 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3609 unsigned int oui, u8 oui_type,
3613 const u8 *next, *end = skb->data + skb->len;
3614 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3615 skb->data + ieoffset,
3616 skb->len - ieoffset);
3621 memmove(ie, next, end - next);
3622 skb_trim(skb, skb->len - len);
3625 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3626 struct ieee80211_vif *vif)
3628 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3629 struct sk_buff *skb;
3632 skb = ieee80211_proberesp_get(wl->hw, vif);
3636 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3637 CMD_TEMPL_AP_PROBE_RESPONSE,
3646 wl1271_debug(DEBUG_AP, "probe response updated");
3647 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3653 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3654 struct ieee80211_vif *vif,
3656 size_t probe_rsp_len,
3659 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3660 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3661 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3662 int ssid_ie_offset, ie_offset, templ_len;
3665 /* no need to change probe response if the SSID is set correctly */
3666 if (wlvif->ssid_len > 0)
3667 return wl1271_cmd_template_set(wl, wlvif->role_id,
3668 CMD_TEMPL_AP_PROBE_RESPONSE,
3673 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3674 wl1271_error("probe_rsp template too big");
3678 /* start searching from IE offset */
3679 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3681 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3682 probe_rsp_len - ie_offset);
3684 wl1271_error("No SSID in beacon!");
3688 ssid_ie_offset = ptr - probe_rsp_data;
3689 ptr += (ptr[1] + 2);
3691 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3693 /* insert SSID from bss_conf */
3694 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3695 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3696 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3697 bss_conf->ssid, bss_conf->ssid_len);
3698 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3700 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3701 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3702 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3704 return wl1271_cmd_template_set(wl, wlvif->role_id,
3705 CMD_TEMPL_AP_PROBE_RESPONSE,
3711 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3712 struct ieee80211_vif *vif,
3713 struct ieee80211_bss_conf *bss_conf,
3716 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3719 if (changed & BSS_CHANGED_ERP_SLOT) {
3720 if (bss_conf->use_short_slot)
3721 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3723 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3725 wl1271_warning("Set slot time failed %d", ret);
3730 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3731 if (bss_conf->use_short_preamble)
3732 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3734 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3737 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3738 if (bss_conf->use_cts_prot)
3739 ret = wl1271_acx_cts_protect(wl, wlvif,
3742 ret = wl1271_acx_cts_protect(wl, wlvif,
3743 CTSPROTECT_DISABLE);
3745 wl1271_warning("Set ctsprotect failed %d", ret);
3754 static int wlcore_set_beacon_template(struct wl1271 *wl,
3755 struct ieee80211_vif *vif,
3758 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3759 struct ieee80211_hdr *hdr;
3762 int ieoffset = offsetof(struct ieee80211_mgmt,
3764 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3772 wl1271_debug(DEBUG_MASTER, "beacon updated");
3774 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3776 dev_kfree_skb(beacon);
3779 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3780 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3782 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3787 dev_kfree_skb(beacon);
3791 wlvif->wmm_enabled =
3792 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3793 WLAN_OUI_TYPE_MICROSOFT_WMM,
3794 beacon->data + ieoffset,
3795 beacon->len - ieoffset);
3798 * In case we already have a probe-resp beacon set explicitly
3799 * by usermode, don't use the beacon data.
3801 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3804 /* remove TIM ie from probe response */
3805 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3808 * remove p2p ie from probe response.
3809 * the fw reponds to probe requests that don't include
3810 * the p2p ie. probe requests with p2p ie will be passed,
3811 * and will be responded by the supplicant (the spec
3812 * forbids including the p2p ie when responding to probe
3813 * requests that didn't include it).
3815 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3816 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3818 hdr = (struct ieee80211_hdr *) beacon->data;
3819 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3820 IEEE80211_STYPE_PROBE_RESP);
3822 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3827 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3828 CMD_TEMPL_PROBE_RESPONSE,
3833 dev_kfree_skb(beacon);
3841 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3842 struct ieee80211_vif *vif,
3843 struct ieee80211_bss_conf *bss_conf,
3846 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3847 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3850 if (changed & BSS_CHANGED_BEACON_INT) {
3851 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3852 bss_conf->beacon_int);
3854 wlvif->beacon_int = bss_conf->beacon_int;
3857 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3858 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3860 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3863 if (changed & BSS_CHANGED_BEACON) {
3864 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3871 wl1271_error("beacon info change failed: %d", ret);
3875 /* AP mode changes */
3876 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3877 struct ieee80211_vif *vif,
3878 struct ieee80211_bss_conf *bss_conf,
3881 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3884 if (changed & BSS_CHANGED_BASIC_RATES) {
3885 u32 rates = bss_conf->basic_rates;
3887 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3889 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3890 wlvif->basic_rate_set);
3892 ret = wl1271_init_ap_rates(wl, wlvif);
3894 wl1271_error("AP rate policy change failed %d", ret);
3898 ret = wl1271_ap_init_templates(wl, vif);
3902 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3906 ret = wlcore_set_beacon_template(wl, vif, true);
3911 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3915 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3916 if (bss_conf->enable_beacon) {
3917 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3918 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3922 ret = wl1271_ap_init_hwenc(wl, wlvif);
3926 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3927 wl1271_debug(DEBUG_AP, "started AP");
3930 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3931 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3935 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3936 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3938 wl1271_debug(DEBUG_AP, "stopped AP");
3943 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3947 /* Handle HT information change */
3948 if ((changed & BSS_CHANGED_HT) &&
3949 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3950 ret = wl1271_acx_set_ht_information(wl, wlvif,
3951 bss_conf->ht_operation_mode);
3953 wl1271_warning("Set ht information failed %d", ret);
3962 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3963 struct ieee80211_bss_conf *bss_conf,
3969 wl1271_debug(DEBUG_MAC80211,
3970 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3971 bss_conf->bssid, bss_conf->aid,
3972 bss_conf->beacon_int,
3973 bss_conf->basic_rates, sta_rate_set);
3975 wlvif->beacon_int = bss_conf->beacon_int;
3976 rates = bss_conf->basic_rates;
3977 wlvif->basic_rate_set =
3978 wl1271_tx_enabled_rates_get(wl, rates,
3981 wl1271_tx_min_rate_get(wl,
3982 wlvif->basic_rate_set);
3986 wl1271_tx_enabled_rates_get(wl,
3990 /* we only support sched_scan while not connected */
3991 if (wl->sched_vif == wlvif)
3992 wl->ops->sched_scan_stop(wl, wlvif);
3994 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3998 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4002 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4006 wlcore_set_ssid(wl, wlvif);
4008 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4013 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4017 /* revert back to minimum rates for the current band */
4018 wl1271_set_band_rate(wl, wlvif);
4019 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4021 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4025 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4026 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4027 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4032 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4035 /* STA/IBSS mode changes */
4036 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4037 struct ieee80211_vif *vif,
4038 struct ieee80211_bss_conf *bss_conf,
4041 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4042 bool do_join = false;
4043 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4044 bool ibss_joined = false;
4045 u32 sta_rate_set = 0;
4047 struct ieee80211_sta *sta;
4048 bool sta_exists = false;
4049 struct ieee80211_sta_ht_cap sta_ht_cap;
4052 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4058 if (changed & BSS_CHANGED_IBSS) {
4059 if (bss_conf->ibss_joined) {
4060 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4063 wlcore_unset_assoc(wl, wlvif);
4064 wl12xx_cmd_role_stop_sta(wl, wlvif);
4068 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4071 /* Need to update the SSID (for filtering etc) */
4072 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4075 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4076 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4077 bss_conf->enable_beacon ? "enabled" : "disabled");
4082 if (changed & BSS_CHANGED_CQM) {
4083 bool enable = false;
4084 if (bss_conf->cqm_rssi_thold)
4086 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4087 bss_conf->cqm_rssi_thold,
4088 bss_conf->cqm_rssi_hyst);
4091 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4094 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4095 BSS_CHANGED_ASSOC)) {
4097 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4099 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4101 /* save the supp_rates of the ap */
4102 sta_rate_set = sta->supp_rates[wlvif->band];
4103 if (sta->ht_cap.ht_supported)
4105 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4106 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4107 sta_ht_cap = sta->ht_cap;
4114 if (changed & BSS_CHANGED_BSSID) {
4115 if (!is_zero_ether_addr(bss_conf->bssid)) {
4116 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4121 /* Need to update the BSSID (for filtering etc) */
4124 ret = wlcore_clear_bssid(wl, wlvif);
4130 if (changed & BSS_CHANGED_IBSS) {
4131 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4132 bss_conf->ibss_joined);
4134 if (bss_conf->ibss_joined) {
4135 u32 rates = bss_conf->basic_rates;
4136 wlvif->basic_rate_set =
4137 wl1271_tx_enabled_rates_get(wl, rates,
4140 wl1271_tx_min_rate_get(wl,
4141 wlvif->basic_rate_set);
4143 /* by default, use 11b + OFDM rates */
4144 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4145 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4151 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4156 ret = wlcore_join(wl, wlvif);
4158 wl1271_warning("cmd join failed %d", ret);
4163 if (changed & BSS_CHANGED_ASSOC) {
4164 if (bss_conf->assoc) {
4165 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4170 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4171 wl12xx_set_authorized(wl, wlvif);
4173 wlcore_unset_assoc(wl, wlvif);
4177 if (changed & BSS_CHANGED_PS) {
4178 if ((bss_conf->ps) &&
4179 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4180 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4184 if (wl->conf.conn.forced_ps) {
4185 ps_mode = STATION_POWER_SAVE_MODE;
4186 ps_mode_str = "forced";
4188 ps_mode = STATION_AUTO_PS_MODE;
4189 ps_mode_str = "auto";
4192 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4194 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4196 wl1271_warning("enter %s ps failed %d",
4198 } else if (!bss_conf->ps &&
4199 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4200 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4202 ret = wl1271_ps_set_mode(wl, wlvif,
4203 STATION_ACTIVE_MODE);
4205 wl1271_warning("exit auto ps failed %d", ret);
4209 /* Handle new association with HT. Do this after join. */
4211 (changed & BSS_CHANGED_HT)) {
4213 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4215 ret = wlcore_hw_set_peer_cap(wl,
4221 wl1271_warning("Set ht cap failed %d", ret);
4227 ret = wl1271_acx_set_ht_information(wl, wlvif,
4228 bss_conf->ht_operation_mode);
4230 wl1271_warning("Set ht information failed %d",
4237 /* Handle arp filtering. Done after join. */
4238 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4239 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4240 __be32 addr = bss_conf->arp_addr_list[0];
4241 wlvif->sta.qos = bss_conf->qos;
4242 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4244 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4245 wlvif->ip_addr = addr;
4247 * The template should have been configured only upon
4248 * association. however, it seems that the correct ip
4249 * isn't being set (when sending), so we have to
4250 * reconfigure the template upon every ip change.
4252 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4254 wl1271_warning("build arp rsp failed: %d", ret);
4258 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4259 (ACX_ARP_FILTER_ARP_FILTERING |
4260 ACX_ARP_FILTER_AUTO_ARP),
4264 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4275 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4276 struct ieee80211_vif *vif,
4277 struct ieee80211_bss_conf *bss_conf,
4280 struct wl1271 *wl = hw->priv;
4281 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4282 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4285 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4286 wlvif->role_id, (int)changed);
4289 * make sure to cancel pending disconnections if our association
4292 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4293 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4295 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4296 !bss_conf->enable_beacon)
4297 wl1271_tx_flush(wl);
4299 mutex_lock(&wl->mutex);
4301 if (unlikely(wl->state != WLCORE_STATE_ON))
4304 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4307 ret = wl1271_ps_elp_wakeup(wl);
4312 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4314 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4316 wl1271_ps_elp_sleep(wl);
4319 mutex_unlock(&wl->mutex);
4322 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4323 struct ieee80211_chanctx_conf *ctx)
4325 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4326 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4327 cfg80211_get_chandef_type(&ctx->def));
4331 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4332 struct ieee80211_chanctx_conf *ctx)
4334 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4335 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4336 cfg80211_get_chandef_type(&ctx->def));
4339 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4340 struct ieee80211_chanctx_conf *ctx,
4343 wl1271_debug(DEBUG_MAC80211,
4344 "mac80211 change chanctx %d (type %d) changed 0x%x",
4345 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4346 cfg80211_get_chandef_type(&ctx->def), changed);
4349 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4350 struct ieee80211_vif *vif,
4351 struct ieee80211_chanctx_conf *ctx)
4353 struct wl1271 *wl = hw->priv;
4354 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4355 int channel = ieee80211_frequency_to_channel(
4356 ctx->def.chan->center_freq);
4358 wl1271_debug(DEBUG_MAC80211,
4359 "mac80211 assign chanctx (role %d) %d (type %d)",
4360 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4362 mutex_lock(&wl->mutex);
4364 wlvif->band = ctx->def.chan->band;
4365 wlvif->channel = channel;
4366 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4368 /* update default rates according to the band */
4369 wl1271_set_band_rate(wl, wlvif);
4371 mutex_unlock(&wl->mutex);
4376 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4377 struct ieee80211_vif *vif,
4378 struct ieee80211_chanctx_conf *ctx)
4380 struct wl1271 *wl = hw->priv;
4381 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4383 wl1271_debug(DEBUG_MAC80211,
4384 "mac80211 unassign chanctx (role %d) %d (type %d)",
4386 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4387 cfg80211_get_chandef_type(&ctx->def));
4389 wl1271_tx_flush(wl);
4392 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4393 struct ieee80211_vif *vif, u16 queue,
4394 const struct ieee80211_tx_queue_params *params)
4396 struct wl1271 *wl = hw->priv;
4397 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4401 mutex_lock(&wl->mutex);
4403 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4406 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4408 ps_scheme = CONF_PS_SCHEME_LEGACY;
4410 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4413 ret = wl1271_ps_elp_wakeup(wl);
4418 * the txop is confed in units of 32us by the mac80211,
4421 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4422 params->cw_min, params->cw_max,
4423 params->aifs, params->txop << 5);
4427 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4428 CONF_CHANNEL_TYPE_EDCF,
4429 wl1271_tx_get_queue(queue),
4430 ps_scheme, CONF_ACK_POLICY_LEGACY,
4434 wl1271_ps_elp_sleep(wl);
4437 mutex_unlock(&wl->mutex);
4442 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4443 struct ieee80211_vif *vif)
4446 struct wl1271 *wl = hw->priv;
4447 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4448 u64 mactime = ULLONG_MAX;
4451 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4453 mutex_lock(&wl->mutex);
4455 if (unlikely(wl->state != WLCORE_STATE_ON))
4458 ret = wl1271_ps_elp_wakeup(wl);
4462 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4467 wl1271_ps_elp_sleep(wl);
4470 mutex_unlock(&wl->mutex);
4474 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4475 struct survey_info *survey)
4477 struct ieee80211_conf *conf = &hw->conf;
4482 survey->channel = conf->channel;
4487 static int wl1271_allocate_sta(struct wl1271 *wl,
4488 struct wl12xx_vif *wlvif,
4489 struct ieee80211_sta *sta)
4491 struct wl1271_station *wl_sta;
4495 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4496 wl1271_warning("could not allocate HLID - too much stations");
4500 wl_sta = (struct wl1271_station *)sta->drv_priv;
4501 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4503 wl1271_warning("could not allocate HLID - too many links");
4507 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4508 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4509 wl->active_sta_count++;
4513 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4515 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4518 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4519 __clear_bit(hlid, &wl->ap_ps_map);
4520 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4521 wl12xx_free_link(wl, wlvif, &hlid);
4522 wl->active_sta_count--;
4525 * rearm the tx watchdog when the last STA is freed - give the FW a
4526 * chance to return STA-buffered packets before complaining.
4528 if (wl->active_sta_count == 0)
4529 wl12xx_rearm_tx_watchdog_locked(wl);
4532 static int wl12xx_sta_add(struct wl1271 *wl,
4533 struct wl12xx_vif *wlvif,
4534 struct ieee80211_sta *sta)
4536 struct wl1271_station *wl_sta;
4540 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4542 ret = wl1271_allocate_sta(wl, wlvif, sta);
4546 wl_sta = (struct wl1271_station *)sta->drv_priv;
4547 hlid = wl_sta->hlid;
4549 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4551 wl1271_free_sta(wl, wlvif, hlid);
4556 static int wl12xx_sta_remove(struct wl1271 *wl,
4557 struct wl12xx_vif *wlvif,
4558 struct ieee80211_sta *sta)
4560 struct wl1271_station *wl_sta;
4563 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4565 wl_sta = (struct wl1271_station *)sta->drv_priv;
4567 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4570 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4574 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4578 static void wlcore_roc_if_possible(struct wl1271 *wl,
4579 struct wl12xx_vif *wlvif)
4581 if (find_first_bit(wl->roc_map,
4582 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4585 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4588 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4591 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4592 struct wl12xx_vif *wlvif,
4593 struct wl1271_station *wl_sta,
4596 if (in_connection) {
4597 if (WARN_ON(wl_sta->in_connection))
4599 wl_sta->in_connection = true;
4600 if (!wlvif->inconn_count++)
4601 wlcore_roc_if_possible(wl, wlvif);
4603 if (!wl_sta->in_connection)
4606 wl_sta->in_connection = false;
4607 wlvif->inconn_count--;
4608 if (WARN_ON(wlvif->inconn_count < 0))
4611 if (!wlvif->inconn_count)
4612 if (test_bit(wlvif->role_id, wl->roc_map))
4613 wl12xx_croc(wl, wlvif->role_id);
4617 static int wl12xx_update_sta_state(struct wl1271 *wl,
4618 struct wl12xx_vif *wlvif,
4619 struct ieee80211_sta *sta,
4620 enum ieee80211_sta_state old_state,
4621 enum ieee80211_sta_state new_state)
4623 struct wl1271_station *wl_sta;
4624 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4625 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4628 wl_sta = (struct wl1271_station *)sta->drv_priv;
4630 /* Add station (AP mode) */
4632 old_state == IEEE80211_STA_NOTEXIST &&
4633 new_state == IEEE80211_STA_NONE) {
4634 ret = wl12xx_sta_add(wl, wlvif, sta);
4638 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4641 /* Remove station (AP mode) */
4643 old_state == IEEE80211_STA_NONE &&
4644 new_state == IEEE80211_STA_NOTEXIST) {
4646 wl12xx_sta_remove(wl, wlvif, sta);
4648 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4651 /* Authorize station (AP mode) */
4653 new_state == IEEE80211_STA_AUTHORIZED) {
4654 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4658 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4663 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4666 /* Authorize station */
4668 new_state == IEEE80211_STA_AUTHORIZED) {
4669 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4670 ret = wl12xx_set_authorized(wl, wlvif);
4676 old_state == IEEE80211_STA_AUTHORIZED &&
4677 new_state == IEEE80211_STA_ASSOC) {
4678 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4679 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4682 /* clear ROCs on failure or authorization */
4684 (new_state == IEEE80211_STA_AUTHORIZED ||
4685 new_state == IEEE80211_STA_NOTEXIST)) {
4686 if (test_bit(wlvif->role_id, wl->roc_map))
4687 wl12xx_croc(wl, wlvif->role_id);
4691 old_state == IEEE80211_STA_NOTEXIST &&
4692 new_state == IEEE80211_STA_NONE) {
4693 if (find_first_bit(wl->roc_map,
4694 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4695 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4696 wl12xx_roc(wl, wlvif, wlvif->role_id,
4697 wlvif->band, wlvif->channel);
4703 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4704 struct ieee80211_vif *vif,
4705 struct ieee80211_sta *sta,
4706 enum ieee80211_sta_state old_state,
4707 enum ieee80211_sta_state new_state)
4709 struct wl1271 *wl = hw->priv;
4710 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4713 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4714 sta->aid, old_state, new_state);
4716 mutex_lock(&wl->mutex);
4718 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4723 ret = wl1271_ps_elp_wakeup(wl);
4727 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4729 wl1271_ps_elp_sleep(wl);
4731 mutex_unlock(&wl->mutex);
4732 if (new_state < old_state)
4737 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4738 struct ieee80211_vif *vif,
4739 enum ieee80211_ampdu_mlme_action action,
4740 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4743 struct wl1271 *wl = hw->priv;
4744 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4746 u8 hlid, *ba_bitmap;
4748 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4751 /* sanity check - the fields in FW are only 8bits wide */
4752 if (WARN_ON(tid > 0xFF))
4755 mutex_lock(&wl->mutex);
4757 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4762 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4763 hlid = wlvif->sta.hlid;
4764 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4765 struct wl1271_station *wl_sta;
4767 wl_sta = (struct wl1271_station *)sta->drv_priv;
4768 hlid = wl_sta->hlid;
4774 ba_bitmap = &wl->links[hlid].ba_bitmap;
4776 ret = wl1271_ps_elp_wakeup(wl);
4780 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4784 case IEEE80211_AMPDU_RX_START:
4785 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4790 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4792 wl1271_error("exceeded max RX BA sessions");
4796 if (*ba_bitmap & BIT(tid)) {
4798 wl1271_error("cannot enable RX BA session on active "
4803 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4806 *ba_bitmap |= BIT(tid);
4807 wl->ba_rx_session_count++;
4811 case IEEE80211_AMPDU_RX_STOP:
4812 if (!(*ba_bitmap & BIT(tid))) {
4814 * this happens on reconfig - so only output a debug
4815 * message for now, and don't fail the function.
4817 wl1271_debug(DEBUG_MAC80211,
4818 "no active RX BA session on tid: %d",
4824 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4827 *ba_bitmap &= ~BIT(tid);
4828 wl->ba_rx_session_count--;
4833 * The BA initiator session management in FW independently.
4834 * Falling break here on purpose for all TX APDU commands.
4836 case IEEE80211_AMPDU_TX_START:
4837 case IEEE80211_AMPDU_TX_STOP_CONT:
4838 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4839 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4840 case IEEE80211_AMPDU_TX_OPERATIONAL:
4845 wl1271_error("Incorrect ampdu action id=%x\n", action);
4849 wl1271_ps_elp_sleep(wl);
4852 mutex_unlock(&wl->mutex);
4857 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4858 struct ieee80211_vif *vif,
4859 const struct cfg80211_bitrate_mask *mask)
4861 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4862 struct wl1271 *wl = hw->priv;
4865 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4866 mask->control[NL80211_BAND_2GHZ].legacy,
4867 mask->control[NL80211_BAND_5GHZ].legacy);
4869 mutex_lock(&wl->mutex);
4871 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4872 wlvif->bitrate_masks[i] =
4873 wl1271_tx_enabled_rates_get(wl,
4874 mask->control[i].legacy,
4877 if (unlikely(wl->state != WLCORE_STATE_ON))
4880 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4881 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4883 ret = wl1271_ps_elp_wakeup(wl);
4887 wl1271_set_band_rate(wl, wlvif);
4889 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4890 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4892 wl1271_ps_elp_sleep(wl);
4895 mutex_unlock(&wl->mutex);
4900 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4901 struct ieee80211_channel_switch *ch_switch)
4903 struct wl1271 *wl = hw->priv;
4904 struct wl12xx_vif *wlvif;
4907 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4909 wl1271_tx_flush(wl);
4911 mutex_lock(&wl->mutex);
4913 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4914 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4915 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4916 ieee80211_chswitch_done(vif, false);
4919 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4923 ret = wl1271_ps_elp_wakeup(wl);
4927 /* TODO: change mac80211 to pass vif as param */
4928 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4929 unsigned long delay_usec;
4931 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4935 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4937 /* indicate failure 5 seconds after channel switch time */
4938 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4940 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4941 usecs_to_jiffies(delay_usec) +
4942 msecs_to_jiffies(5000));
4946 wl1271_ps_elp_sleep(wl);
4949 mutex_unlock(&wl->mutex);
4952 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4954 struct wl1271 *wl = hw->priv;
4956 wl1271_tx_flush(wl);
4959 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4960 struct ieee80211_vif *vif,
4961 struct ieee80211_channel *chan,
4964 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4965 struct wl1271 *wl = hw->priv;
4966 int channel, ret = 0;
4968 channel = ieee80211_frequency_to_channel(chan->center_freq);
4970 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4971 channel, wlvif->role_id);
4973 mutex_lock(&wl->mutex);
4975 if (unlikely(wl->state != WLCORE_STATE_ON))
4978 /* return EBUSY if we can't ROC right now */
4979 if (WARN_ON(wl->roc_vif ||
4980 find_first_bit(wl->roc_map,
4981 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4986 ret = wl1271_ps_elp_wakeup(wl);
4990 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4995 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4996 msecs_to_jiffies(duration));
4998 wl1271_ps_elp_sleep(wl);
5000 mutex_unlock(&wl->mutex);
5004 static int __wlcore_roc_completed(struct wl1271 *wl)
5006 struct wl12xx_vif *wlvif;
5009 /* already completed */
5010 if (unlikely(!wl->roc_vif))
5013 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5015 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5018 ret = wl12xx_stop_dev(wl, wlvif);
5027 static int wlcore_roc_completed(struct wl1271 *wl)
5031 wl1271_debug(DEBUG_MAC80211, "roc complete");
5033 mutex_lock(&wl->mutex);
5035 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5040 ret = wl1271_ps_elp_wakeup(wl);
5044 ret = __wlcore_roc_completed(wl);
5046 wl1271_ps_elp_sleep(wl);
5048 mutex_unlock(&wl->mutex);
5053 static void wlcore_roc_complete_work(struct work_struct *work)
5055 struct delayed_work *dwork;
5059 dwork = container_of(work, struct delayed_work, work);
5060 wl = container_of(dwork, struct wl1271, roc_complete_work);
5062 ret = wlcore_roc_completed(wl);
5064 ieee80211_remain_on_channel_expired(wl->hw);
5067 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5069 struct wl1271 *wl = hw->priv;
5071 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5074 wl1271_tx_flush(wl);
5077 * we can't just flush_work here, because it might deadlock
5078 * (as we might get called from the same workqueue)
5080 cancel_delayed_work_sync(&wl->roc_complete_work);
5081 wlcore_roc_completed(wl);
5086 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5087 struct ieee80211_vif *vif,
5088 struct ieee80211_sta *sta,
5091 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5092 struct wl1271 *wl = hw->priv;
5094 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5097 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5098 struct ieee80211_vif *vif,
5099 struct ieee80211_sta *sta,
5102 struct wl1271 *wl = hw->priv;
5103 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5106 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5108 mutex_lock(&wl->mutex);
5110 if (unlikely(wl->state != WLCORE_STATE_ON))
5113 ret = wl1271_ps_elp_wakeup(wl);
5117 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5122 wl1271_ps_elp_sleep(wl);
5125 mutex_unlock(&wl->mutex);
5130 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5132 struct wl1271 *wl = hw->priv;
5135 mutex_lock(&wl->mutex);
5137 if (unlikely(wl->state != WLCORE_STATE_ON))
5140 /* packets are considered pending if in the TX queue or the FW */
5141 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5143 mutex_unlock(&wl->mutex);
5148 /* can't be const, mac80211 writes to this */
5149 static struct ieee80211_rate wl1271_rates[] = {
5151 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5152 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5154 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5155 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5156 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5158 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5159 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5160 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5162 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5163 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5164 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5166 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5167 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5169 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5170 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5172 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5173 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5175 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5176 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5178 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5179 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5181 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5182 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5184 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5185 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5187 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5188 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5191 /* can't be const, mac80211 writes to this */
5192 static struct ieee80211_channel wl1271_channels[] = {
5193 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5194 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5195 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5196 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5197 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5198 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5199 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5200 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5201 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5202 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5203 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5204 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5205 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5206 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5209 /* can't be const, mac80211 writes to this */
5210 static struct ieee80211_supported_band wl1271_band_2ghz = {
5211 .channels = wl1271_channels,
5212 .n_channels = ARRAY_SIZE(wl1271_channels),
5213 .bitrates = wl1271_rates,
5214 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5217 /* 5 GHz data rates for WL1273 */
5218 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5220 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5221 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5223 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5224 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5226 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5227 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5229 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5230 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5232 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5233 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5235 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5236 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5238 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5239 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5241 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5242 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5245 /* 5 GHz band channels for WL1273 */
5246 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5247 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5248 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5249 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5250 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5251 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5252 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5253 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5254 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5255 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5256 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5257 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5258 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5259 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5260 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5261 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5262 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5263 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5264 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5265 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5266 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5267 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5268 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5269 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5270 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5271 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5272 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5273 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5274 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5275 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5276 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5277 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5278 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5279 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5280 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5283 static struct ieee80211_supported_band wl1271_band_5ghz = {
5284 .channels = wl1271_channels_5ghz,
5285 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5286 .bitrates = wl1271_rates_5ghz,
5287 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5290 static const struct ieee80211_ops wl1271_ops = {
5291 .start = wl1271_op_start,
5292 .stop = wlcore_op_stop,
5293 .add_interface = wl1271_op_add_interface,
5294 .remove_interface = wl1271_op_remove_interface,
5295 .change_interface = wl12xx_op_change_interface,
5297 .suspend = wl1271_op_suspend,
5298 .resume = wl1271_op_resume,
5300 .config = wl1271_op_config,
5301 .prepare_multicast = wl1271_op_prepare_multicast,
5302 .configure_filter = wl1271_op_configure_filter,
5304 .set_key = wlcore_op_set_key,
5305 .hw_scan = wl1271_op_hw_scan,
5306 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5307 .sched_scan_start = wl1271_op_sched_scan_start,
5308 .sched_scan_stop = wl1271_op_sched_scan_stop,
5309 .bss_info_changed = wl1271_op_bss_info_changed,
5310 .set_frag_threshold = wl1271_op_set_frag_threshold,
5311 .set_rts_threshold = wl1271_op_set_rts_threshold,
5312 .conf_tx = wl1271_op_conf_tx,
5313 .get_tsf = wl1271_op_get_tsf,
5314 .get_survey = wl1271_op_get_survey,
5315 .sta_state = wl12xx_op_sta_state,
5316 .ampdu_action = wl1271_op_ampdu_action,
5317 .tx_frames_pending = wl1271_tx_frames_pending,
5318 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5319 .channel_switch = wl12xx_op_channel_switch,
5320 .flush = wlcore_op_flush,
5321 .remain_on_channel = wlcore_op_remain_on_channel,
5322 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5323 .add_chanctx = wlcore_op_add_chanctx,
5324 .remove_chanctx = wlcore_op_remove_chanctx,
5325 .change_chanctx = wlcore_op_change_chanctx,
5326 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5327 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5328 .sta_rc_update = wlcore_op_sta_rc_update,
5329 .get_rssi = wlcore_op_get_rssi,
5330 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5334 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5340 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5341 wl1271_error("Illegal RX rate from HW: %d", rate);
5345 idx = wl->band_rate_to_idx[band][rate];
5346 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5347 wl1271_error("Unsupported RX rate from HW: %d", rate);
5354 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5355 struct device_attribute *attr,
5358 struct wl1271 *wl = dev_get_drvdata(dev);
5363 mutex_lock(&wl->mutex);
5364 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5366 mutex_unlock(&wl->mutex);
5372 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5373 struct device_attribute *attr,
5374 const char *buf, size_t count)
5376 struct wl1271 *wl = dev_get_drvdata(dev);
5380 ret = kstrtoul(buf, 10, &res);
5382 wl1271_warning("incorrect value written to bt_coex_mode");
5386 mutex_lock(&wl->mutex);
5390 if (res == wl->sg_enabled)
5393 wl->sg_enabled = res;
5395 if (unlikely(wl->state != WLCORE_STATE_ON))
5398 ret = wl1271_ps_elp_wakeup(wl);
5402 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5403 wl1271_ps_elp_sleep(wl);
5406 mutex_unlock(&wl->mutex);
5410 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5411 wl1271_sysfs_show_bt_coex_state,
5412 wl1271_sysfs_store_bt_coex_state);
5414 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5415 struct device_attribute *attr,
5418 struct wl1271 *wl = dev_get_drvdata(dev);
5423 mutex_lock(&wl->mutex);
5424 if (wl->hw_pg_ver >= 0)
5425 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5427 len = snprintf(buf, len, "n/a\n");
5428 mutex_unlock(&wl->mutex);
5433 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5434 wl1271_sysfs_show_hw_pg_ver, NULL);
5436 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5437 struct bin_attribute *bin_attr,
5438 char *buffer, loff_t pos, size_t count)
5440 struct device *dev = container_of(kobj, struct device, kobj);
5441 struct wl1271 *wl = dev_get_drvdata(dev);
5445 ret = mutex_lock_interruptible(&wl->mutex);
5447 return -ERESTARTSYS;
5449 /* Let only one thread read the log at a time, blocking others */
5450 while (wl->fwlog_size == 0) {
5453 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5455 TASK_INTERRUPTIBLE);
5457 if (wl->fwlog_size != 0) {
5458 finish_wait(&wl->fwlog_waitq, &wait);
5462 mutex_unlock(&wl->mutex);
5465 finish_wait(&wl->fwlog_waitq, &wait);
5467 if (signal_pending(current))
5468 return -ERESTARTSYS;
5470 ret = mutex_lock_interruptible(&wl->mutex);
5472 return -ERESTARTSYS;
5475 /* Check if the fwlog is still valid */
5476 if (wl->fwlog_size < 0) {
5477 mutex_unlock(&wl->mutex);
5481 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5482 len = min(count, (size_t)wl->fwlog_size);
5483 wl->fwlog_size -= len;
5484 memcpy(buffer, wl->fwlog, len);
5486 /* Make room for new messages */
5487 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5489 mutex_unlock(&wl->mutex);
5494 static struct bin_attribute fwlog_attr = {
5495 .attr = {.name = "fwlog", .mode = S_IRUSR},
5496 .read = wl1271_sysfs_read_fwlog,
5499 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5503 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5506 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5507 wl1271_warning("NIC part of the MAC address wraps around!");
5509 for (i = 0; i < wl->num_mac_addr; i++) {
5510 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5511 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5512 wl->addresses[i].addr[2] = (u8) oui;
5513 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5514 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5515 wl->addresses[i].addr[5] = (u8) nic;
5519 /* we may be one address short at the most */
5520 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5523 * turn on the LAA bit in the first address and use it as
5526 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5527 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5528 memcpy(&wl->addresses[idx], &wl->addresses[0],
5529 sizeof(wl->addresses[0]));
5531 wl->addresses[idx].addr[2] |= BIT(1);
5534 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5535 wl->hw->wiphy->addresses = wl->addresses;
5538 static int wl12xx_get_hw_info(struct wl1271 *wl)
5542 ret = wl12xx_set_power_on(wl);
5546 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5550 wl->fuse_oui_addr = 0;
5551 wl->fuse_nic_addr = 0;
5553 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5557 if (wl->ops->get_mac)
5558 ret = wl->ops->get_mac(wl);
5561 wl1271_power_off(wl);
5565 static int wl1271_register_hw(struct wl1271 *wl)
5568 u32 oui_addr = 0, nic_addr = 0;
5570 if (wl->mac80211_registered)
5573 if (wl->nvs_len >= 12) {
5574 /* NOTE: The wl->nvs->nvs element must be first, in
5575 * order to simplify the casting, we assume it is at
5576 * the beginning of the wl->nvs structure.
5578 u8 *nvs_ptr = (u8 *)wl->nvs;
5581 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5583 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5586 /* if the MAC address is zeroed in the NVS derive from fuse */
5587 if (oui_addr == 0 && nic_addr == 0) {
5588 oui_addr = wl->fuse_oui_addr;
5589 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5590 nic_addr = wl->fuse_nic_addr + 1;
5593 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5595 ret = ieee80211_register_hw(wl->hw);
5597 wl1271_error("unable to register mac80211 hw: %d", ret);
5601 wl->mac80211_registered = true;
5603 wl1271_debugfs_init(wl);
5605 wl1271_notice("loaded");
5611 static void wl1271_unregister_hw(struct wl1271 *wl)
5614 wl1271_plt_stop(wl);
5616 ieee80211_unregister_hw(wl->hw);
5617 wl->mac80211_registered = false;
5621 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5624 .types = BIT(NL80211_IFTYPE_STATION),
5628 .types = BIT(NL80211_IFTYPE_AP) |
5629 BIT(NL80211_IFTYPE_P2P_GO) |
5630 BIT(NL80211_IFTYPE_P2P_CLIENT),
5634 static struct ieee80211_iface_combination
5635 wlcore_iface_combinations[] = {
5637 .max_interfaces = 3,
5638 .limits = wlcore_iface_limits,
5639 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5643 static int wl1271_init_ieee80211(struct wl1271 *wl)
5646 static const u32 cipher_suites[] = {
5647 WLAN_CIPHER_SUITE_WEP40,
5648 WLAN_CIPHER_SUITE_WEP104,
5649 WLAN_CIPHER_SUITE_TKIP,
5650 WLAN_CIPHER_SUITE_CCMP,
5651 WL1271_CIPHER_SUITE_GEM,
5654 /* The tx descriptor buffer */
5655 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5657 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5658 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5661 /* FIXME: find a proper value */
5662 wl->hw->channel_change_time = 10000;
5663 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5665 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5666 IEEE80211_HW_SUPPORTS_PS |
5667 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5668 IEEE80211_HW_SUPPORTS_UAPSD |
5669 IEEE80211_HW_HAS_RATE_CONTROL |
5670 IEEE80211_HW_CONNECTION_MONITOR |
5671 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5672 IEEE80211_HW_SPECTRUM_MGMT |
5673 IEEE80211_HW_AP_LINK_PS |
5674 IEEE80211_HW_AMPDU_AGGREGATION |
5675 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5676 IEEE80211_HW_QUEUE_CONTROL;
5678 wl->hw->wiphy->cipher_suites = cipher_suites;
5679 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5681 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5682 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5683 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5684 wl->hw->wiphy->max_scan_ssids = 1;
5685 wl->hw->wiphy->max_sched_scan_ssids = 16;
5686 wl->hw->wiphy->max_match_sets = 16;
5688 * Maximum length of elements in scanning probe request templates
5689 * should be the maximum length possible for a template, without
5690 * the IEEE80211 header of the template
5692 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5693 sizeof(struct ieee80211_header);
5695 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5696 sizeof(struct ieee80211_header);
5698 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5700 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5701 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5703 /* make sure all our channels fit in the scanned_ch bitmask */
5704 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5705 ARRAY_SIZE(wl1271_channels_5ghz) >
5706 WL1271_MAX_CHANNELS);
5708 * clear channel flags from the previous usage
5709 * and restore max_power & max_antenna_gain values.
5711 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5712 wl1271_band_2ghz.channels[i].flags = 0;
5713 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5714 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5717 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5718 wl1271_band_5ghz.channels[i].flags = 0;
5719 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5720 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5724 * We keep local copies of the band structs because we need to
5725 * modify them on a per-device basis.
5727 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5728 sizeof(wl1271_band_2ghz));
5729 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5730 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5731 sizeof(*wl->ht_cap));
5732 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5733 sizeof(wl1271_band_5ghz));
5734 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5735 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5736 sizeof(*wl->ht_cap));
5738 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5739 &wl->bands[IEEE80211_BAND_2GHZ];
5740 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5741 &wl->bands[IEEE80211_BAND_5GHZ];
5744 * allow 4 queues per mac address we support +
5745 * 1 cab queue per mac + one global offchannel Tx queue
5747 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5749 /* the last queue is the offchannel queue */
5750 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5751 wl->hw->max_rates = 1;
5753 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5755 /* the FW answers probe-requests in AP-mode */
5756 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5757 wl->hw->wiphy->probe_resp_offload =
5758 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5759 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5760 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5762 /* allowed interface combinations */
5763 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5764 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5765 wl->hw->wiphy->n_iface_combinations =
5766 ARRAY_SIZE(wlcore_iface_combinations);
5768 SET_IEEE80211_DEV(wl->hw, wl->dev);
5770 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5771 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5773 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5778 #define WL1271_DEFAULT_CHANNEL 0
5780 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5783 struct ieee80211_hw *hw;
5788 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5790 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5792 wl1271_error("could not alloc ieee80211_hw");
5798 memset(wl, 0, sizeof(*wl));
5800 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5802 wl1271_error("could not alloc wl priv");
5804 goto err_priv_alloc;
5807 INIT_LIST_HEAD(&wl->wlvif_list);
5811 for (i = 0; i < NUM_TX_QUEUES; i++)
5812 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5813 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5815 skb_queue_head_init(&wl->deferred_rx_queue);
5816 skb_queue_head_init(&wl->deferred_tx_queue);
5818 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5819 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5820 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5821 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5822 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5823 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5824 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5826 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5827 if (!wl->freezable_wq) {
5832 wl->channel = WL1271_DEFAULT_CHANNEL;
5834 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5835 wl->band = IEEE80211_BAND_2GHZ;
5836 wl->channel_type = NL80211_CHAN_NO_HT;
5838 wl->sg_enabled = true;
5839 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5840 wl->recovery_count = 0;
5843 wl->ap_fw_ps_map = 0;
5845 wl->platform_quirks = 0;
5846 wl->system_hlid = WL12XX_SYSTEM_HLID;
5847 wl->active_sta_count = 0;
5848 wl->active_link_count = 0;
5850 init_waitqueue_head(&wl->fwlog_waitq);
5852 /* The system link is always allocated */
5853 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5855 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5856 for (i = 0; i < wl->num_tx_desc; i++)
5857 wl->tx_frames[i] = NULL;
5859 spin_lock_init(&wl->wl_lock);
5861 wl->state = WLCORE_STATE_OFF;
5862 wl->fw_type = WL12XX_FW_TYPE_NONE;
5863 mutex_init(&wl->mutex);
5864 mutex_init(&wl->flush_mutex);
5865 init_completion(&wl->nvs_loading_complete);
5867 order = get_order(aggr_buf_size);
5868 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5869 if (!wl->aggr_buf) {
5873 wl->aggr_buf_size = aggr_buf_size;
5875 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5876 if (!wl->dummy_packet) {
5881 /* Allocate one page for the FW log */
5882 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5885 goto err_dummy_packet;
5888 wl->mbox_size = mbox_size;
5889 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5895 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5896 if (!wl->buffer_32) {
5907 free_page((unsigned long)wl->fwlog);
5910 dev_kfree_skb(wl->dummy_packet);
5913 free_pages((unsigned long)wl->aggr_buf, order);
5916 destroy_workqueue(wl->freezable_wq);
5919 wl1271_debugfs_exit(wl);
5923 ieee80211_free_hw(hw);
5927 return ERR_PTR(ret);
5929 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5931 int wlcore_free_hw(struct wl1271 *wl)
5933 /* Unblock any fwlog readers */
5934 mutex_lock(&wl->mutex);
5935 wl->fwlog_size = -1;
5936 wake_up_interruptible_all(&wl->fwlog_waitq);
5937 mutex_unlock(&wl->mutex);
5939 device_remove_bin_file(wl->dev, &fwlog_attr);
5941 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5943 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5944 kfree(wl->buffer_32);
5946 free_page((unsigned long)wl->fwlog);
5947 dev_kfree_skb(wl->dummy_packet);
5948 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5950 wl1271_debugfs_exit(wl);
5954 wl->fw_type = WL12XX_FW_TYPE_NONE;
5958 kfree(wl->fw_status_1);
5959 kfree(wl->tx_res_if);
5960 destroy_workqueue(wl->freezable_wq);
5963 ieee80211_free_hw(wl->hw);
5967 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5969 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5971 struct wl1271 *wl = cookie;
5972 unsigned long flags;
5974 wl1271_debug(DEBUG_IRQ, "IRQ");
5976 /* complete the ELP completion */
5977 spin_lock_irqsave(&wl->wl_lock, flags);
5978 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5979 if (wl->elp_compl) {
5980 complete(wl->elp_compl);
5981 wl->elp_compl = NULL;
5984 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5985 /* don't enqueue a work right now. mark it as pending */
5986 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5987 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5988 disable_irq_nosync(wl->irq);
5989 pm_wakeup_event(wl->dev, 0);
5990 spin_unlock_irqrestore(&wl->wl_lock, flags);
5993 spin_unlock_irqrestore(&wl->wl_lock, flags);
5995 return IRQ_WAKE_THREAD;
5998 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6000 struct wl1271 *wl = context;
6001 struct platform_device *pdev = wl->pdev;
6002 struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
6003 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6004 unsigned long irqflags;
6008 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6010 wl1271_error("Could not allocate nvs data");
6013 wl->nvs_len = fw->size;
6015 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6021 ret = wl->ops->setup(wl);
6025 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6027 /* adjust some runtime configuration parameters */
6028 wlcore_adjust_conf(wl);
6030 wl->irq = platform_get_irq(pdev, 0);
6031 wl->platform_quirks = pdata->platform_quirks;
6032 wl->if_ops = pdev_data->if_ops;
6034 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6035 irqflags = IRQF_TRIGGER_RISING;
6037 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6039 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
6043 wl1271_error("request_irq() failed: %d", ret);
6048 ret = enable_irq_wake(wl->irq);
6050 wl->irq_wake_enabled = true;
6051 device_init_wakeup(wl->dev, 1);
6052 if (pdata->pwr_in_suspend) {
6053 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6054 wl->hw->wiphy->wowlan.n_patterns =
6055 WL1271_MAX_RX_FILTERS;
6056 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6057 wl->hw->wiphy->wowlan.pattern_max_len =
6058 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6062 disable_irq(wl->irq);
6064 ret = wl12xx_get_hw_info(wl);
6066 wl1271_error("couldn't get hw info");
6070 ret = wl->ops->identify_chip(wl);
6074 ret = wl1271_init_ieee80211(wl);
6078 ret = wl1271_register_hw(wl);
6082 /* Create sysfs file to control bt coex state */
6083 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6085 wl1271_error("failed to create sysfs file bt_coex_state");
6089 /* Create sysfs file to get HW PG version */
6090 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6092 wl1271_error("failed to create sysfs file hw_pg_ver");
6093 goto out_bt_coex_state;
6096 /* Create sysfs file for the FW log */
6097 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6099 wl1271_error("failed to create sysfs file fwlog");
6103 wl->initialized = true;
6107 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6110 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6113 wl1271_unregister_hw(wl);
6116 free_irq(wl->irq, wl);
6122 release_firmware(fw);
6123 complete_all(&wl->nvs_loading_complete);
6126 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6130 if (!wl->ops || !wl->ptable)
6133 wl->dev = &pdev->dev;
6135 platform_set_drvdata(pdev, wl);
6137 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6138 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6141 wl1271_error("request_firmware_nowait failed: %d", ret);
6142 complete_all(&wl->nvs_loading_complete);
6147 EXPORT_SYMBOL_GPL(wlcore_probe);
6149 int wlcore_remove(struct platform_device *pdev)
6151 struct wl1271 *wl = platform_get_drvdata(pdev);
6153 wait_for_completion(&wl->nvs_loading_complete);
6154 if (!wl->initialized)
6157 if (wl->irq_wake_enabled) {
6158 device_init_wakeup(wl->dev, 0);
6159 disable_irq_wake(wl->irq);
6161 wl1271_unregister_hw(wl);
6162 free_irq(wl->irq, wl);
6167 EXPORT_SYMBOL_GPL(wlcore_remove);
6169 u32 wl12xx_debug_level = DEBUG_NONE;
6170 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6171 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6172 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6174 module_param_named(fwlog, fwlog_param, charp, 0);
6175 MODULE_PARM_DESC(fwlog,
6176 "FW logger options: continuous, ondemand, dbgpins or disable");
6178 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6179 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6181 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6182 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6184 MODULE_LICENSE("GPL");
6185 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6186 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6187 MODULE_FIRMWARE(WL12XX_NVS_NAME);