3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
/* NOTE(review): WL1271_BOOT_RETRIES is defined twice in this extract.
 * Identical redefinition is legal C, but one copy should be removed
 * once the full file can be inspected. */
56 #define WL1271_BOOT_RETRIES 3
/* Module parameters (consumed by wlcore_adjust_conf()):
 * - fwlog_param: FW logger mode ("continuous"/"ondemand"/"dbgpins"/"disable")
 * - bug_on_recovery / no_recovery: -1 means "not set on the command line",
 *   any other value overrides the compiled-in recovery config. */
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
/* Forward declarations for file-local helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Move an associated STA link to the authorized FW state.
 * Only valid for STA-type BSS interfaces; bails out unless the vif is
 * associated, and uses test_and_set_bit() so the peer-state command is
 * sent to the FW at most once per association.
 * (Some statements are elided in this extract — error paths not shown.)
 */
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
/* Programming error if called on a non-STA interface. */
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* Already sent the state to FW — nothing more to do. */
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
86 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory-change notifier.
 * Walks the 5 GHz band and marks radar channels as passive-scan/no-IBSS,
 * then pushes the updated regulatory domain to the FW if the chip is up.
 */
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
/* Skip channels the regulatory core disabled outright. */
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
/* Radar channels: forbid active probing and IBSS operation. */
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
/* Only talk to the FW when the chip is fully operational. */
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming for a vif via an ACX command and keep
 * the WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the result.
 * Caller must hold wl->mutex.
 */
115 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
120 /* we should hold wl->mutex */
121 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
126 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
/*
 * Re-evaluate RX streaming after the configured interval changed or
 * streaming should be turned off. Re-enables streaming only when the
 * interval is non-zero, the STA is associated, and either "always" is
 * configured or soft-gemini (BT coex) is active; otherwise disables it.
 */
134 * this function is being called when the rx_streaming interval
135 * has beed changed or rx_streaming should be disabled
137 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
140 int period = wl->conf.rx_streaming.interval;
142 /* don't reconfigure if rx_streaming is disabled */
143 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
146 /* reconfigure/disable according to new streaming_period */
148 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
149 (wl->conf.rx_streaming.always ||
150 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 ret = wl1271_set_rx_streaming(wl, wlvif, true);
153 ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* Timer disarm: sync variant is safe here, but the work item itself
 * must not be cancel_work_sync'd — see comment below. */
154 /* don't cancel_work_sync since we might deadlock */
155 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif.
 * Skips silently when streaming is already on, the STA is not associated,
 * the feature is not forced ("always") and coex is inactive, or the
 * configured interval is zero. On success arms the inactivity timer that
 * will schedule the matching disable work.
 */
161 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
164 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
165 rx_streaming_enable_work);
166 struct wl1271 *wl = wlvif->wl;
168 mutex_lock(&wl->mutex);
170 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
171 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
172 (!wl->conf.rx_streaming.always &&
173 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
176 if (!wl->conf.rx_streaming.interval)
/* Chip may be in ELP (low power); wake it before issuing commands. */
179 ret = wl1271_ps_elp_wakeup(wl);
183 ret = wl1271_set_rx_streaming(wl, wlvif, true);
187 /* stop it after some time of inactivity */
188 mod_timer(&wlvif->rx_streaming_timer,
189 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
192 wl1271_ps_elp_sleep(wl);
194 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (counterpart of the
 * enable work above, queued from the inactivity timer). No-op when
 * streaming is not currently started.
 */
197 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
200 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
201 rx_streaming_disable_work);
202 struct wl1271 *wl = wlvif->wl;
204 mutex_lock(&wl->mutex);
206 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/* Wake the chip from ELP before sending the disable command. */
209 ret = wl1271_ps_elp_wakeup(wl);
213 ret = wl1271_set_rx_streaming(wl, wlvif, false);
218 wl1271_ps_elp_sleep(wl);
220 mutex_unlock(&wl->mutex);
/*
 * RX-streaming inactivity timer callback (timer context): defer the
 * actual FW command to the disable work item, which may sleep.
 */
223 static void wl1271_rx_streaming_timer(unsigned long data)
225 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
226 struct wl1271 *wl = wlvif->wl;
227 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
/*
 * Restart the TX-stuck watchdog, but only while TX blocks are actually
 * allocated in the FW (i.e. there is outstanding TX to watch).
 * Caller must hold wl->mutex.
 */
230 /* wl->mutex must be taken */
231 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
233 /* if the watchdog is not armed, don't do anything */
234 if (wl->tx_allocated_blocks == 0)
/* cancel (non-sync) + requeue effectively resets the timeout window. */
237 cancel_delayed_work(&wl->tx_watchdog_work);
238 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
239 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * TX watchdog: fires tx_watchdog_timeout ms after being armed. If the
 * FW still holds allocated TX blocks and there is a benign explanation
 * (ROC in progress, scan in progress, AP buffering for PS stations) the
 * timer is simply re-armed; otherwise TX is declared stuck and a full
 * FW recovery is queued.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
/* Chip went down (or is restarting) in the meantime — nothing to do. */
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
/* Any bit set in roc_map means a remain-on-channel is active. */
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation found — escalate to FW recovery. */
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Apply optional module parameters on top of the compiled-in wl->conf:
 * fwlog_param selects the FW logger mode/output, and bug_on_recovery /
 * no_recovery override the recovery policy when set (!= -1).
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
309 if (!strcmp(fwlog_param, "continuous")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 } else if (!strcmp(fwlog_param, "ondemand")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
/* "dbgpins" implies continuous mode routed to the debug pins. */
313 } else if (!strcmp(fwlog_param, "dbgpins")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
315 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
316 } else if (!strcmp(fwlog_param, "disable")) {
317 wl->conf.fwlog.mem_blocks = 0;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
320 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/* -1 == parameter not supplied; keep the compiled-in default. */
324 if (bug_on_recovery != -1)
325 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
327 if (no_recovery != -1)
328 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate host-side (high level) PS for one AP link based on the FW PS
 * bitmap and the number of packets the FW holds for that link: end PS
 * when the STA is awake or has few queued packets, start it when the
 * STA sleeps with enough backlog and more than one real STA is linked.
 */
331 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
332 struct wl12xx_vif *wlvif,
/* Per-link PS bit as last reported by the FW. */
337 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
340 * Wake up from high level PS if the STA is asleep with too little
341 * packets in FW or if the STA is awake.
343 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
344 wl12xx_ps_link_end(wl, wlvif, hlid);
347 * Start high-level PS if the STA is asleep with enough blocks in FW.
348 * Make an exception if this is the only connected link. In this
349 * case FW-memory congestion is less of a problem.
350 * Note that a single connected STA means 3 active links, since we must
351 * account for the global and broadcast AP links. The "fw_ps" check
352 * assures us the third link is a STA connected to the AP. Otherwise
353 * the FW would not set the PSM bit.
355 else if (wl->active_link_count > 3 && fw_ps &&
356 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
357 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached AP link-PS bitmap from the FW status block and
 * re-regulate PS for every station link of this AP vif.
 */
360 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
361 struct wl12xx_vif *wlvif,
362 struct wl_fw_status_2 *status)
367 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
/* Log the delta only when the FW-reported PS map actually changed. */
368 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
369 wl1271_debug(DEBUG_PSM,
370 "link ps prev 0x%x cur 0x%x changed 0x%x",
371 wl->ap_fw_ps_map, cur_fw_ps_map,
372 wl->ap_fw_ps_map ^ cur_fw_ps_map);
374 wl->ap_fw_ps_map = cur_fw_ps_map;
/* Walk every connected STA link and apply the PS policy. */
377 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
378 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
379 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status block over the bus and reconcile the driver's TX
 * accounting with it: per-queue and per-link freed-packet counters
 * (with 8-bit wrap handling), total freed blocks (with 32-bit wrap
 * handling), available TX blocks, the TX watchdog, AP link PS state,
 * and the host/chip time offset.
 */
382 static int wlcore_fw_status(struct wl1271 *wl,
383 struct wl_fw_status_1 *status_1,
384 struct wl_fw_status_2 *status_2)
386 struct wl12xx_vif *wlvif;
388 u32 old_tx_blk_count = wl->tx_blocks_available;
389 int avail, freed_blocks;
393 struct wl1271_link *lnk;
/* Total size of the two status structs as laid out in chip memory. */
395 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
396 sizeof(*status_2) + wl->fw_status_priv_len;
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
403 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 "drv_rx_counter = %d, tx_results_counter = %d)",
406 status_1->fw_rx_counter,
407 status_1->drv_rx_counter,
408 status_1->tx_results_counter);
410 for (i = 0; i < NUM_TX_QUEUES; i++) {
411 /* prevent wrap-around in freed-packets counter */
/* FW counter is 8-bit; mask the delta with 0xff to handle wrap. */
412 wl->tx_allocated_pkts[i] -=
413 (status_2->counters.tx_released_pkts[i] -
414 wl->tx_pkts_freed[i]) & 0xff;
416 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
/* Same wrap-safe bookkeeping per active link. */
420 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
422 /* prevent wrap-around in freed-packets counter */
423 lnk->allocated_pkts -=
424 (status_2->counters.tx_lnk_free_pkts[i] -
425 lnk->prev_freed_pkts) & 0xff;
427 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
430 /* prevent wrap-around in total blocks counter */
431 if (likely(wl->tx_blocks_freed <=
432 le32_to_cpu(status_2->total_released_blks)))
433 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* 32-bit counter wrapped: add 2^32 to recover the true delta. */
436 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
437 le32_to_cpu(status_2->total_released_blks);
439 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
441 wl->tx_allocated_blocks -= freed_blocks;
444 * If the FW freed some blocks:
445 * If we still have allocated blocks - re-arm the timer, Tx is
446 * not stuck. Otherwise, cancel the timer (no Tx currently).
449 if (wl->tx_allocated_blocks)
450 wl12xx_rearm_tx_watchdog_locked(wl)_
452 cancel_delayed_work(&wl->tx_watchdog_work);
455 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
458 * The FW might change the total number of TX memblocks before
459 * we get a notification about blocks being released. Thus, the
460 * available blocks calculation might yield a temporary result
461 * which is lower than the actual available blocks. Keeping in
462 * mind that only blocks that were allocated can be moved from
463 * TX to RX, tx_blocks_available should never decrease here.
465 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
468 /* if more blocks are available now, tx work can be scheduled */
469 if (wl->tx_blocks_available > old_tx_blk_count)
470 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
472 /* for AP update num of allocated TX blocks per link and ps status */
473 wl12xx_for_each_wlvif_ap(wl, wlvif) {
474 wl12xx_irq_update_links_status(wl, wlvif, status_2);
477 /* update the host-chipset time offset */
/* >> 10 approximates ns -> us conversion (divide by 1024). */
479 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
480 (s64)le32_to_cpu(status_2->fw_localtime);
482 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
/*
 * Drain both deferred queues: hand received frames to mac80211 and
 * report TX status for completed skbs. Uses the *_ni variants, so this
 * must run in process context.
 */
487 static void wl1271_flush_deferred_work(struct wl1271 *wl)
491 /* Pass all received frames to the network stack */
492 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
493 ieee80211_rx_ni(wl->hw, skb);
495 /* Return sent skbs to the network stack */
496 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
497 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred RX/TX queues, looping until the
 * deferred RX queue stays empty (new frames may arrive while flushing).
 */
500 static void wl1271_netstack_work(struct work_struct *work)
503 container_of(work, struct wl1271, netstack_work);
506 wl1271_flush_deferred_work(wl);
507 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on IRQ service iterations to avoid monopolizing the CPU. */
510 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt service routine body; wl->mutex held by the caller.
 * Reads the FW status, then dispatches on the interrupt bits: watchdog
 * (HW or SW) triggers recovery and aborts the loop; DATA drives RX, the
 * TX path and delayed TX completion; EVENT_A/B run the event mailboxes.
 * Loops while more interrupts are pending, at most WL1271_IRQ_MAX_LOOPS
 * times (once only for edge-triggered IRQ platforms).
 */
512 static int wlcore_irq_locked(struct wl1271 *wl)
516 int loopcount = WL1271_IRQ_MAX_LOOPS;
518 unsigned int defer_count;
522 * In case edge triggered interrupt must be used, we cannot iterate
523 * more than once without introducing race conditions with the hardirq.
525 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
528 wl1271_debug(DEBUG_IRQ, "IRQ work");
530 if (unlikely(wl->state != WLCORE_STATE_ON))
/* Chip may be in ELP; wake it before any register access. */
533 ret = wl1271_ps_elp_wakeup(wl);
537 while (!done && loopcount--) {
539 * In order to avoid a race with the hardirq, clear the flag
540 * before acknowledging the chip. Since the mutex is held,
541 * wl1271_ps_elp_wakeup cannot be called concurrently.
543 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
544 smp_mb__after_clear_bit();
546 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
/* Give chips with immediate TX completion a chance to reap first. */
550 wlcore_hw_tx_immediate_compl(wl);
552 intr = le32_to_cpu(wl->fw_status_1->intr);
553 intr &= WLCORE_ALL_INTR_MASK;
559 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
560 wl1271_error("HW watchdog interrupt received! starting recovery.");
561 wl->watchdog_recovery = true;
564 /* restarting the chip. ignore any other interrupt. */
568 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
569 wl1271_error("SW watchdog interrupt received! "
570 "starting recovery.");
571 wl->watchdog_recovery = true;
574 /* restarting the chip. ignore any other interrupt. */
578 if (likely(intr & WL1271_ACX_INTR_DATA)) {
579 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
581 ret = wlcore_rx(wl, wl->fw_status_1);
585 /* Check if any tx blocks were freed */
586 spin_lock_irqsave(&wl->wl_lock, flags);
587 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
588 wl1271_tx_total_queue_count(wl) > 0) {
589 spin_unlock_irqrestore(&wl->wl_lock, flags);
591 * In order to avoid starvation of the TX path,
592 * call the work function directly.
594 ret = wlcore_tx_work_locked(wl);
598 spin_unlock_irqrestore(&wl->wl_lock, flags);
601 /* check for tx results */
602 ret = wlcore_hw_tx_delayed_compl(wl);
606 /* Make sure the deferred queues don't get too long */
607 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
608 skb_queue_len(&wl->deferred_rx_queue);
609 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
610 wl1271_flush_deferred_work(wl);
613 if (intr & WL1271_ACX_INTR_EVENT_A) {
614 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
615 ret = wl1271_event_handle(wl, 0);
620 if (intr & WL1271_ACX_INTR_EVENT_B) {
621 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
622 ret = wl1271_event_handle(wl, 1);
/* Remaining interrupt sources are informational only. */
627 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
628 wl1271_debug(DEBUG_IRQ,
629 "WL1271_ACX_INTR_INIT_COMPLETE");
631 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
632 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
635 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler: marks TX as pending so wlcore_irq_locked() can
 * handle it inline (avoiding a redundant tx_work run), services the
 * interrupt under wl->mutex, queues recovery on failure, and finally
 * re-queues tx_work if frames are still waiting and the FW isn't busy.
 */
641 static irqreturn_t wlcore_irq(int irq, void *cookie)
645 struct wl1271 *wl = cookie;
647 /* TX might be handled here, avoid redundant work */
648 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
649 cancel_work_sync(&wl->tx_work);
651 mutex_lock(&wl->mutex);
653 ret = wlcore_irq_locked(wl);
/* NOTE(review): queueing recovery appears conditional on ret in the
 * elided code — error-path lines are not visible in this extract. */
655 wl12xx_queue_recovery_work(wl);
657 spin_lock_irqsave(&wl->wl_lock, flags);
658 /* In case TX was not handled here, queue TX work */
659 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
660 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
661 wl1271_tx_total_queue_count(wl) > 0)
662 ieee80211_queue_work(wl->hw, &wl->tx_work);
663 spin_unlock_irqrestore(&wl->wl_lock, flags);
665 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces; cur_vif_running records
 * whether the interface of interest (cur_vif) is among them. */
670 struct vif_counter_data {
673 struct ieee80211_vif *cur_vif;
674 bool cur_vif_running;
/* Iterator callback for ieee80211_iterate_active_interfaces(): counts
 * vifs and flags when the tracked vif is encountered. */
677 static void wl12xx_vif_count_iter(void *data, u8 *mac,
678 struct ieee80211_vif *vif)
680 struct vif_counter_data *counter = data;
683 if (counter->cur_vif == vif)
684 counter->cur_vif_running = true;
/*
 * Fill *data with the number of active interfaces and whether cur_vif
 * is currently running. Iterates via mac80211, which takes its own
 * locks — hence the caller must NOT hold wl->mutex.
 */
687 /* caller must not hold wl->mutex, as it might deadlock */
688 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
689 struct ieee80211_vif *cur_vif,
690 struct vif_counter_data *data)
692 memset(data, 0, sizeof(*data));
693 data->cur_vif = cur_vif;
695 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
696 wl12xx_vif_count_iter, data);
/*
 * Select and load the proper firmware image: PLT firmware when plt is
 * set, otherwise multi-role FW if more than one vif was last counted
 * (and an MR image exists), else the single-role FW. Skips the load if
 * the right type is already cached; copies the image into vmalloc'd
 * wl->fw and releases the request_firmware() buffer.
 */
699 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
701 const struct firmware *fw;
703 enum wl12xx_fw_type fw_type;
707 fw_type = WL12XX_FW_TYPE_PLT;
708 fw_name = wl->plt_fw_name;
711 * we can't call wl12xx_get_vif_count() here because
712 * wl->mutex is taken, so use the cached last_vif_count value
714 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
715 fw_type = WL12XX_FW_TYPE_MULTI;
716 fw_name = wl->mr_fw_name;
718 fw_type = WL12XX_FW_TYPE_NORMAL;
719 fw_name = wl->sr_fw_name;
/* Right firmware already loaded — nothing to do. */
723 if (wl->fw_type == fw_type)
726 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
728 ret = request_firmware(&fw, fw_name, wl->dev);
731 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* FW image is downloaded in 32-bit words; size must be word-aligned. */
736 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate the cached type until the new image is fully in place. */
743 wl->fw_type = WL12XX_FW_TYPE_NONE;
744 wl->fw_len = fw->size;
745 wl->fw = vmalloc(wl->fw_len);
748 wl1271_error("could not allocate memory for the firmware");
753 memcpy(wl->fw, fw->data, wl->fw_len);
755 wl->fw_type = fw_type;
757 release_firmware(fw);
/*
 * Request an asynchronous FW recovery: mark the chip as RESTARTING,
 * disable interrupts and queue recovery_work. The state check makes
 * re-entry a no-op, preventing recursive recoveries. Warns if recovery
 * was not flagged as intended beforehand.
 */
762 void wl12xx_queue_recovery_work(struct wl1271 *wl)
764 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
766 /* Avoid a recursive recovery */
767 if (wl->state == WLCORE_STATE_ON) {
768 wl->state = WLCORE_STATE_RESTARTING;
769 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
770 wlcore_disable_interrupts_nosync(wl);
771 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one FW log memory block to the wl->fwlog buffer exposed via
 * sysfs. The block is a sequence of [length][data...] records; scanning
 * stops at a zero length byte or when a record would overrun maxlen.
 * The copy is then clamped to the remaining room in the PAGE_SIZE log
 * buffer. Returns (in elided code) the number of bytes consumed.
 */
775 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
779 /* The FW log is a length-value list, find where the log end */
780 while (len < maxlen) {
781 if (memblock[len] == 0)
783 if (len + memblock[len] + 1 > maxlen)
/* Advance past this record: 1 length byte + payload. */
785 len += memblock[len] + 1;
788 /* Make sure we have enough room */
789 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
791 /* Fill the FW log file, consumed by the sysfs fwlog entry */
792 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
793 wl->fwlog_size += len;
/* Sentinel next-block address marking the end of the continuous log. */
798 #define WLCORE_FW_LOG_END 0x2000000
/*
 * After a FW panic, walk the FW logger's linked list of memory blocks
 * in chip memory and copy their contents into the host fwlog buffer,
 * then wake any reader blocked on the sysfs fwlog entry. No-op when the
 * fwlog quirk is set or no log memory blocks are configured.
 */
800 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
808 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
809 (wl->conf.fwlog.mem_blocks == 0))
812 wl1271_info("Reading FW panic log");
814 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
819 * Make sure the chip is awake and the logger isn't active.
820 * Do not send a stop fwlog command if the fw is hanged or if
821 * dbgpins are used (due to some fw bug).
823 if (wl1271_ps_elp_wakeup(wl))
825 if (!wl->watchdog_recovery &&
826 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
827 wl12xx_cmd_stop_fwlog(wl);
829 /* Read the first memory block address */
830 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
834 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* Continuous mode prepends an RX descriptor before the log payload. */
838 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
839 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
840 end_of_log = WLCORE_FW_LOG_END;
842 offset = sizeof(addr);
846 /* Traverse the memory blocks linked list */
848 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
849 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
855 * Memory blocks are linked to one another. The first 4 bytes
856 * of each memory block hold the hardware address of the next
857 * one. The last memory block points to the first one in
858 * on demand mode and is equal to 0x2000000 in continuous mode.
860 addr = le32_to_cpup((__le32 *)block);
/* Stop once the host-side log buffer is full (copy returned 0). */
861 if (!wl12xx_copy_fwlog(wl, block + offset,
862 WL12XX_HW_BLOCK_SIZE - offset))
864 } while (addr && (addr != end_of_log));
866 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log diagnostic state at the start of recovery: FW version, the FW
 * program counter and pending-interrupt status registers (read under
 * the BOOT partition), and the running recovery count. Restores the
 * WORK partition before returning.
 */
872 static void wlcore_print_recovery(struct wl1271 *wl)
878 wl1271_info("Hardware recovery in progress. FW ver: %s",
879 wl->chip.fw_ver_str);
881 /* change partitions momentarily so we can read the FW pc */
882 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
886 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
890 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
894 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
895 pc, hint_sts, ++wl->recovery_count);
897 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery work item. For an unintended recovery it first dumps the
 * panic log and diagnostic registers. Honors the bug_on_recovery and
 * no_recovery configuration, advances per-vif TX security sequence
 * numbers (so PN/IV counters stay ahead of any FW-side progress),
 * stops the TX queues, tears down every interface, stops the chip and
 * finally asks mac80211 to restart the hardware.
 */
901 static void wl1271_recovery_work(struct work_struct *work)
904 container_of(work, struct wl1271, recovery_work);
905 struct wl12xx_vif *wlvif;
906 struct ieee80211_vif *vif;
908 mutex_lock(&wl->mutex);
/* Nothing to recover if already off or in PLT (production test) mode. */
910 if (wl->state == WLCORE_STATE_OFF || wl->plt)
913 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
914 wl12xx_read_fwlog_panic(wl);
915 wlcore_print_recovery(wl);
/* Optional debug aid: crash the kernel on unintended recoveries. */
918 BUG_ON(wl->conf.recovery.bug_on_recovery &&
919 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
921 if (wl->conf.recovery.no_recovery) {
922 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
927 * Advance security sequence number to overcome potential progress
928 * in the firmware during recovery. This doens't hurt if the network is
931 wl12xx_for_each_wlvif(wl, wlvif) {
932 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
933 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
934 wlvif->tx_security_seq +=
935 WL1271_TX_SQN_POST_RECOVERY_PADDING;
938 /* Prevent spurious TX during FW restart */
939 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
941 /* reboot the chipset */
/* Remove interfaces one at a time; list shrinks as we iterate. */
942 while (!list_empty(&wl->wlvif_list)) {
943 wlvif = list_first_entry(&wl->wlvif_list,
944 struct wl12xx_vif, list);
945 vif = wl12xx_wlvif_to_vif(wlvif);
946 __wl1271_op_remove_interface(wl, vif, false);
949 wlcore_op_stop_locked(wl);
951 ieee80211_restart_hw(wl->hw);
954 * Its safe to enable TX now - the queues are stopped after a request
957 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
960 wl->watchdog_recovery = false;
961 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
962 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP by writing WAKE_UP to the ELP control register. */
965 static int wlcore_fw_wakeup(struct wl1271 *wl)
967 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffer (status_1 and status_2 share a single
 * allocation; status_2 points just past the variable-length status_1)
 * and the TX result interface buffer. On tx_res_if allocation failure
 * the status buffer is freed again.
 */
970 static int wl1271_setup(struct wl1271 *wl)
972 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
973 sizeof(*wl->fw_status_2) +
974 wl->fw_status_priv_len, GFP_KERNEL);
975 if (!wl->fw_status_1)
/* status_2 lives immediately after status_1 in the same buffer. */
978 wl->fw_status_2 = (struct wl_fw_status_2 *)
979 (((u8 *) wl->fw_status_1) +
980 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
982 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
983 if (!wl->tx_res_if) {
984 kfree(wl->fw_status_1);
/*
 * Power the chip on: settle delay, power-on, another delay, I/O reset,
 * select the BOOT partition and wake the FW from ELP. The elided error
 * path powers the chip back off.
 */
991 static int wl12xx_set_power_on(struct wl1271 *wl)
995 msleep(WL1271_PRE_POWER_ON_SLEEP);
996 ret = wl1271_power_on(wl);
999 msleep(WL1271_POWER_ON_SLEEP);
1000 wl1271_io_reset(wl);
1003 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1007 /* ELP module wake up */
1008 ret = wlcore_fw_wakeup(wl);
/* Error path: undo the power-on. */
1016 wl1271_power_off(wl);
/*
 * Bring the chip to a bootable state: power on, normalize the bus block
 * size (clearing the TX blocksize-align quirk if the bus can't do it),
 * allocate driver buffers and fetch the appropriate firmware image.
 */
1020 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1024 ret = wl12xx_set_power_on(wl);
1029 * For wl127x based devices we could use the default block
1030 * size (512 bytes), but due to a bug in the sdio driver, we
1031 * need to set it explicitly after the chip is powered on. To
1032 * simplify the code and since the performance impact is
1033 * negligible, we use the same block size for all different
1036 * Check if the bus supports blocksize alignment and, if it
1037 * doesn't, make sure we don't have the quirk.
1039 if (!wl1271_set_block_size(wl))
1040 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1042 /* TODO: make sure the lower driver has set things up correctly */
1044 ret = wl1271_setup(wl);
1048 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter PLT (production line test) mode. Only legal from the OFF state.
 * Retries the chip wakeup + chip-specific plt_init sequence up to
 * WL1271_BOOT_RETRIES times; on success records the HW/FW version in
 * the wiphy, on total failure powers off and resets plt_mode.
 */
1056 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1058 int retries = WL1271_BOOT_RETRIES;
1059 struct wiphy *wiphy = wl->hw->wiphy;
/* Human-readable names for the PLT mode, indexed by plt_mode. */
1061 static const char* const PLT_MODE[] = {
1069 mutex_lock(&wl->mutex);
1071 wl1271_notice("power up");
1073 if (wl->state != WLCORE_STATE_OFF) {
1074 wl1271_error("cannot go into PLT state because not "
1075 "in off state: %d", wl->state);
1080 /* Indicate to lower levels that we are now in PLT mode */
1082 wl->plt_mode = plt_mode;
1086 ret = wl12xx_chip_wakeup(wl, true);
1090 ret = wl->ops->plt_init(wl);
1094 wl->state = WLCORE_STATE_ON;
1095 wl1271_notice("firmware booted in PLT mode %s (%s)",
1097 wl->chip.fw_ver_str);
1099 /* update hw/fw version info in wiphy struct */
1100 wiphy->hw_version = wl->chip.id;
1101 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1102 sizeof(wiphy->fw_version));
/* Per-attempt failure path: power off before the next retry. */
1107 wl1271_power_off(wl);
/* All retries exhausted: leave PLT mode and report the failure. */
1111 wl->plt_mode = PLT_OFF;
1113 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1114 WL1271_BOOT_RETRIES);
1116 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * before the state changes so the IRQ handler cannot race with the
 * shutdown; pending work items are flushed/cancelled outside wl->mutex,
 * then the chip is powered off and the state reset to OFF/PLT_OFF.
 */
1121 int wl1271_plt_stop(struct wl1271 *wl)
1125 wl1271_notice("power down");
1128 * Interrupts must be disabled before setting the state to OFF.
1129 * Otherwise, the interrupt handler might be called and exit without
1130 * reading the interrupt status.
1132 wlcore_disable_interrupts(wl);
1133 mutex_lock(&wl->mutex);
1135 mutex_unlock(&wl->mutex);
1138 * This will not necessarily enable interrupts as interrupts
1139 * may have been disabled when op_stop was called. It will,
1140 * however, balance the above call to disable_interrupts().
1142 wlcore_enable_interrupts(wl);
/* Error path: we were not actually in PLT state. */
1144 wl1271_error("cannot power down because not in PLT "
1145 "state: %d", wl->state);
1150 mutex_unlock(&wl->mutex);
/* Drain deferred queues and stop background work before power-off. */
1152 wl1271_flush_deferred_work(wl);
1153 cancel_work_sync(&wl->netstack_work);
1154 cancel_work_sync(&wl->recovery_work);
1155 cancel_delayed_work_sync(&wl->elp_work);
1156 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1158 mutex_lock(&wl->mutex);
1159 wl1271_power_off(wl);
1161 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1162 wl->state = WLCORE_STATE_OFF;
1164 wl->plt_mode = PLT_OFF;
1166 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point. Maps the skb to a queue and FW link (hlid),
 * drops frames with no vif, an invalid link, or a hard-stopped queue
 * (watermark stops are soft and still accepted), enqueues the frame on
 * the per-link queue, applies the high-watermark flow control, and
 * kicks tx_work unless the FW is busy or TX is already being handled
 * in the IRQ path.
 */
1172 static void wl1271_op_tx(struct ieee80211_hw *hw,
1173 struct ieee80211_tx_control *control,
1174 struct sk_buff *skb)
1176 struct wl1271 *wl = hw->priv;
1177 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1178 struct ieee80211_vif *vif = info->control.vif;
1179 struct wl12xx_vif *wlvif = NULL;
1180 unsigned long flags;
/* mac80211 can hand us frames with no vif (e.g. during teardown). */
1185 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1186 ieee80211_free_txskb(hw, skb);
1190 wlvif = wl12xx_vif_to_data(vif);
1191 mapping = skb_get_queue_mapping(skb);
1192 q = wl1271_tx_get_queue(mapping);
1194 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1196 spin_lock_irqsave(&wl->wl_lock, flags);
1199 * drop the packet if the link is invalid or the queue is stopped
1200 * for any reason but watermark. Watermark is a "soft"-stop so we
1201 * allow these packets through.
1203 if (hlid == WL12XX_INVALID_LINK_ID ||
1204 (!test_bit(hlid, wlvif->links_map)) ||
1205 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1206 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1207 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1208 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1209 ieee80211_free_txskb(hw, skb);
1213 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1215 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* Both global and per-vif counters track queue occupancy. */
1217 wl->tx_queue_count[q]++;
1218 wlvif->tx_queue_count[q]++;
1221 * The workqueue is slow to process the tx_queue and we need stop
1222 * the queue here, otherwise the queue will get too long.
1224 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1225 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1226 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1227 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1228 wlcore_stop_queue_locked(wl, wlvif, q,
1229 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1233 * The chip specific setup must run before the first TX packet -
1234 * before that, the tx_work will not be initialized!
/* TX_PENDING means the IRQ thread will handle TX — don't double-queue. */
1237 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1238 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1239 ieee80211_queue_work(wl->hw, &wl->tx_work);
1242 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet (requested by the FW when it is
 * low on RX memory blocks). Idempotent while a dummy packet is already
 * pending; sends it immediately via wlcore_tx_work_locked() unless the
 * FW TX path is busy, in which case the threaded IRQ will pick it up.
 */
1245 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1247 unsigned long flags;
1250 /* no need to queue a new dummy packet if one is already pending */
1251 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1254 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1256 spin_lock_irqsave(&wl->wl_lock, flags);
1257 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1258 wl->tx_queue_count[q]++;
1259 spin_unlock_irqrestore(&wl->wl_lock, flags);
1261 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1262 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1263 return wlcore_tx_work_locked(wl);
1266 * If the FW TX is busy, TX work will be scheduled by the threaded
1267 * interrupt handler function
1273 * The size of the dummy packet should be at least 1400 bytes. However, in
1274 * order to minimize the number of bus transactions, aligning it to 512 bytes
1275 * boundaries could be beneficial, performance wise
1277 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy packet requested by the FW: a zeroed QoS-less
 * null-function data frame (ToDS) padded out to the total dummy size,
 * with management TID priority and a cleared tx_info CB. Returns NULL
 * on skb allocation failure.
 */
1279 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1281 struct sk_buff *skb;
1282 struct ieee80211_hdr_3addr *hdr;
1283 unsigned int dummy_packet_size;
/* Payload size = total - HW TX descriptor - 802.11 3-addr header. */
1285 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1286 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1288 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1290 wl1271_warning("Failed to allocate a dummy packet skb");
/* Leave headroom for the HW descriptor the TX path will push. */
1294 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1296 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1297 memset(hdr, 0, sizeof(*hdr));
1298 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1299 IEEE80211_STYPE_NULLFUNC |
1300 IEEE80211_FCTL_TODS);
1302 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1304 /* Dummy packets require the TID to be management */
1305 skb->priority = WL1271_TID_MGMT;
1307 /* Initialize all fields that might be used */
1308 skb_set_queue_mapping(skb, 0);
1309 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN trigger pattern can be expressed as FW RX
 * filter fields: counts the contiguous masked segments (a segment that
 * crosses the Ethernet/IP header boundary splits into two fields) and
 * rejects patterns exceeding the FW's field-count or field-buffer-size
 * limits. A mask is mandatory.
 */
1317 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1319 int num_fields = 0, in_field = 0, fields_size = 0;
1320 int i, pattern_len = 0;
1323 wl1271_warning("No mask in WoWLAN pattern");
1328 * The pattern is broken up into segments of bytes at different offsets
1329 * that need to be checked by the FW filter. Each segment is called
1330 * a field in the FW API. We verify that the total number of fields
1331 * required for this pattern won't exceed FW limits (8)
1332 * as well as the total fields buffer won't exceed the FW limit.
1333 * Note that if there's a pattern which crosses Ethernet/IP header
1334 * boundary a new field is required.
1336 for (i = 0; i < p->pattern_len; i++) {
1337 if (test_bit(i, (unsigned long *)p->mask)) {
/* Segment straddles the ETH header boundary: close the field here. */
1342 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1344 fields_size += pattern_len +
1345 RX_FILTER_FIELD_OVERHEAD;
/* Unmasked byte ends the current segment (field). */
1353 fields_size += pattern_len +
1354 RX_FILTER_FIELD_OVERHEAD;
/* Account for a field still open at the end of the pattern. */
1361 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1365 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1366 wl1271_warning("RX Filter too complex. Too many segments");
1370 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1371 wl1271_warning("RX filter pattern is too big");
/* Allocate a zero-initialized RX filter; caller frees via wl1271_rx_filter_free(). */
1378 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1380 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including every per-field pattern buffer it owns. */
1383 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1390 for (i = 0; i < filter->num_fields; i++)
1391 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern of @len bytes) to @filter.
 * The pattern bytes are copied into a freshly allocated buffer owned by
 * the filter. Fails when the per-filter field limit is reached or the
 * pattern allocation fails (error-return lines elided in this excerpt).
 */
1396 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1397 u16 offset, u8 flags,
1398 u8 *pattern, u8 len)
1400 struct wl12xx_rx_filter_field *field;
1402 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1403 wl1271_warning("Max fields per RX filter. can't alloc another");
1407 field = &filter->fields[filter->num_fields];
1409 field->pattern = kzalloc(len, GFP_KERNEL);
1410 if (!field->pattern) {
1411 wl1271_warning("Failed to allocate RX filter pattern");
/* commit the slot only after the pattern buffer exists */
1415 filter->num_fields++;
1417 field->offset = cpu_to_le16(offset);
1418 field->flags = flags;
1420 memcpy(field->pattern, pattern, len);
/*
 * Total byte size of the filter's fields when flattened for the FW:
 * per field, the struct minus the pattern pointer plus the pattern bytes.
 */
1425 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1427 int i, fields_size = 0;
1429 for (i = 0; i < filter->num_fields; i++)
1430 fields_size += filter->fields[i].len +
1431 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into @buf in the FW wire layout: each
 * field header is followed inline by its pattern bytes (the in-memory
 * pattern pointer is replaced by the pattern data itself).
 * Caller must size @buf via wl1271_rx_filter_get_fields_size().
 */
1437 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1441 struct wl12xx_rx_filter_field *field;
1443 for (i = 0; i < filter->num_fields; i++) {
1444 field = (struct wl12xx_rx_filter_field *)buf;
1446 field->offset = filter->fields[i].offset;
1447 field->flags = filter->fields[i].flags;
1448 field->len = filter->fields[i].len;
/* copy pattern bytes in place of the pointer member */
1450 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1451 buf += sizeof(struct wl12xx_rx_filter_field) -
1452 sizeof(u8 *) + field->len;
1457 * Allocates an RX filter returned through f
1458 * which needs to be freed using rx_filter_free()
/*
 * Translate a (pre-validated) WoWLAN pattern into an RX filter: each run
 * of consecutive masked bytes becomes one field, split at the Ethernet
 * header boundary, with the offset rebased for IP-header fields.
 * On failure the partially built filter is freed before returning.
 */
1460 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1461 struct cfg80211_wowlan_trig_pkt_pattern *p,
1462 struct wl12xx_rx_filter **f)
1465 struct wl12xx_rx_filter *filter;
1469 filter = wl1271_rx_filter_alloc();
1471 wl1271_warning("Failed to alloc rx filter");
1477 while (i < p->pattern_len) {
/* skip unmasked bytes between fields */
1478 if (!test_bit(i, (unsigned long *)p->mask)) {
/* extend the field while bytes stay masked */
1483 for (j = i; j < p->pattern_len; j++) {
1484 if (!test_bit(j, (unsigned long *)p->mask))
/* a field may not straddle the Ethernet/IP header boundary */
1487 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1488 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1492 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1494 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields use an offset relative to the IP header start */
1496 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1497 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1502 ret = wl1271_rx_filter_alloc_field(filter,
1505 &p->pattern[i], len);
/* matching packets wake the host */
1512 filter->action = FILTER_SIGNAL;
/* error path: release the partly built filter */
1518 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the WoWLAN configuration.
 * With no patterns (or wow->any / wow == NULL) all filters are cleared
 * and the default action passes packets to the host. Otherwise every
 * pattern is validated first, then translated and enabled one by one,
 * and the default action is set to drop non-matching packets.
 */
1524 static int wl1271_configure_wowlan(struct wl1271 *wl,
1525 struct cfg80211_wowlan *wow)
1529 if (!wow || wow->any || !wow->n_patterns) {
1530 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1535 ret = wl1271_rx_filter_clear_all(wl);
1542 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1545 /* Validate all incoming patterns before clearing current FW state */
1546 for (i = 0; i < wow->n_patterns; i++) {
1547 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1549 wl1271_warning("Bad wowlan pattern %d", i);
/* reset FW filter state before installing the new set */
1554 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1558 ret = wl1271_rx_filter_clear_all(wl);
1562 /* Translate WoWLAN patterns into filters */
1563 for (i = 0; i < wow->n_patterns; i++) {
1564 struct cfg80211_wowlan_trig_pkt_pattern *p;
1565 struct wl12xx_rx_filter *filter = NULL;
1567 p = &wow->patterns[i];
1569 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1571 wl1271_warning("Failed to create an RX filter from "
1572 "wowlan pattern %d", i);
1576 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; free ours regardless of outcome */
1578 wl1271_rx_filter_free(filter);
/* anything not matching a pattern is dropped while suspended */
1583 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare an associated STA interface for suspend: wake the chip from
 * ELP, install WoWLAN filters, and switch to the suspend wake-up
 * conditions/listen interval when they differ from the normal ones.
 * A non-associated STA needs no configuration.
 */
1589 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1590 struct wl12xx_vif *wlvif,
1591 struct cfg80211_wowlan *wow)
1595 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1598 ret = wl1271_ps_elp_wakeup(wl);
1602 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend settings equal the active ones */
1606 if ((wl->conf.conn.suspend_wake_up_event ==
1607 wl->conf.conn.wake_up_event) &&
1608 (wl->conf.conn.suspend_listen_interval ==
1609 wl->conf.conn.listen_interval))
1612 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1613 wl->conf.conn.suspend_wake_up_event,
1614 wl->conf.conn.suspend_listen_interval);
1617 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1620 wl1271_ps_elp_sleep(wl);
/*
 * Prepare a started AP interface for suspend: enable beacon filtering
 * so beacons do not wake the host. No-op if the AP was never started.
 */
1626 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1627 struct wl12xx_vif *wlvif)
1631 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1634 ret = wl1271_ps_elp_wakeup(wl);
1638 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1640 wl1271_ps_elp_sleep(wl);
/* Dispatch suspend configuration by interface role (STA vs. AP). */
1646 static int wl1271_configure_suspend(struct wl1271 *wl,
1647 struct wl12xx_vif *wlvif,
1648 struct cfg80211_wowlan *wow)
1650 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1651 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1652 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1653 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo suspend-time configuration on resume: clear WoWLAN filters and
 * restore normal wake-up conditions for an associated STA, and disable
 * beacon filtering again for an AP. Other role types need nothing.
 */
1657 static void wl1271_configure_resume(struct wl1271 *wl,
1658 struct wl12xx_vif *wlvif)
1661 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1662 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1664 if ((!is_ap) && (!is_sta))
1667 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1670 ret = wl1271_ps_elp_wakeup(wl);
/* passing NULL clears all WoWLAN filtering */
1675 wl1271_configure_wowlan(wl, NULL);
/* skip the ACX when suspend settings equal the active ones */
1677 if ((wl->conf.conn.suspend_wake_up_event ==
1678 wl->conf.conn.wake_up_event) &&
1679 (wl->conf.conn.suspend_listen_interval ==
1680 wl->conf.conn.listen_interval))
1683 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1684 wl->conf.conn.wake_up_event,
1685 wl->conf.conn.listen_interval);
1688 wl1271_error("resume: wake up conditions failed: %d",
/* AP path: re-enable beacon delivery to the host */
1692 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1696 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 suspend handler. Refuses to suspend while a recovery is
 * pending, flushes TX, configures every vif for suspend, then quiesces
 * the driver: interrupts are bounced (disable + re-enable) around
 * setting WL1271_FLAG_SUSPENDED so no new threaded-irq work is queued,
 * and remaining work items are flushed.
 */
1699 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1700 struct cfg80211_wowlan *wow)
1702 struct wl1271 *wl = hw->priv;
1703 struct wl12xx_vif *wlvif;
1706 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1709 /* we want to perform the recovery before suspending */
1710 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1711 wl1271_warning("postponing suspend to perform recovery");
1715 wl1271_tx_flush(wl);
1717 mutex_lock(&wl->mutex);
1718 wl->wow_enabled = true;
1719 wl12xx_for_each_wlvif(wl, wlvif) {
1720 ret = wl1271_configure_suspend(wl, wlvif, wow);
1722 mutex_unlock(&wl->mutex);
1723 wl1271_warning("couldn't prepare device to suspend");
1727 mutex_unlock(&wl->mutex);
1728 /* flush any remaining work */
1729 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1732 * disable and re-enable interrupts in order to flush
1735 wlcore_disable_interrupts(wl);
1738 * set suspended flag to avoid triggering a new threaded_irq
1739 * work. no need for spinlock as interrupts are disabled.
1741 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1743 wlcore_enable_interrupts(wl);
1744 flush_work(&wl->tx_work);
1745 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 resume handler. Clears the suspended flag under the spinlock,
 * runs any irq work that was postponed during suspend (unless a recovery
 * is pending, in which case the recovery work is queued instead — the HW
 * must not be touched), then restores per-vif resume configuration.
 */
1750 static int wl1271_op_resume(struct ieee80211_hw *hw)
1752 struct wl1271 *wl = hw->priv;
1753 struct wl12xx_vif *wlvif;
1754 unsigned long flags;
1755 bool run_irq_work = false, pending_recovery;
1758 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1760 WARN_ON(!wl->wow_enabled);
1763 * re-enable irq_work enqueuing, and call irq_work directly if
1764 * there is a pending work.
1766 spin_lock_irqsave(&wl->wl_lock, flags);
1767 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1768 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1769 run_irq_work = true;
1770 spin_unlock_irqrestore(&wl->wl_lock, flags);
1772 mutex_lock(&wl->mutex);
1774 /* test the recovery flag before calling any SDIO functions */
1775 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1779 wl1271_debug(DEBUG_MAC80211,
1780 "run postponed irq_work directly");
1782 /* don't talk to the HW if recovery is pending */
1783 if (!pending_recovery) {
1784 ret = wlcore_irq_locked(wl);
1786 wl12xx_queue_recovery_work(wl);
1789 wlcore_enable_interrupts(wl);
1792 if (pending_recovery) {
1793 wl1271_warning("queuing forgotten recovery on resume");
1794 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1798 wl12xx_for_each_wlvif(wl, wlvif) {
1799 wl1271_configure_resume(wl, wlvif);
1803 wl->wow_enabled = false;
1804 mutex_unlock(&wl->mutex);
/*
 * mac80211 start handler. Intentionally does no HW bring-up: booting is
 * deferred to add_interface, because the firmware needs the MAC address
 * (first known when an interface is added) before download/init.
 */
1810 static int wl1271_op_start(struct ieee80211_hw *hw)
1812 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1815 * We have to delay the booting of the hardware because
1816 * we need to know the local MAC address before downloading and
1817 * initializing the firmware. The MAC address cannot be changed
1818 * after boot, and without the proper MAC address, the firmware
1819 * will not function properly.
1821 * The MAC address is first known when the corresponding interface
1822 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held. Moves the state to OFF first so
 * work functions become no-ops, disables interrupts (nosync to avoid a
 * deadlock with the mutex), drops the mutex to cancel/flush outstanding
 * work, then powers off and resets all driver state back to defaults.
 * The recovery-in-progress interrupt-disable depth is rebalanced here.
 */
1828 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance interrupts if a recovery was pending */
1832 if (wl->state == WLCORE_STATE_OFF) {
1833 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1835 wlcore_enable_interrupts(wl);
1841 * this must be before the cancel_work calls below, so that the work
1842 * functions don't perform further work.
1844 wl->state = WLCORE_STATE_OFF;
1847 * Use the nosync variant to disable interrupts, so the mutex could be
1848 * held while doing so without deadlocking.
1850 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while synchronizing/cancelling works to avoid deadlock */
1852 mutex_unlock(&wl->mutex);
1854 wlcore_synchronize_interrupts(wl);
1855 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1856 cancel_work_sync(&wl->recovery_work);
1857 wl1271_flush_deferred_work(wl);
1858 cancel_delayed_work_sync(&wl->scan_complete_work);
1859 cancel_work_sync(&wl->netstack_work);
1860 cancel_work_sync(&wl->tx_work);
1861 cancel_delayed_work_sync(&wl->elp_work);
1862 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1864 /* let's notify MAC80211 about the remaining pending TX frames */
1865 mutex_lock(&wl->mutex);
1866 wl12xx_tx_reset(wl);
1868 wl1271_power_off(wl);
1870 * In case a recovery was scheduled, interrupts were disabled to avoid
1871 * an interrupt storm. Now that the power is down, it is safe to
1872 * re-enable interrupts to balance the disable depth
1874 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1875 wlcore_enable_interrupts(wl);
/* reset driver state to post-probe defaults */
1877 wl->band = IEEE80211_BAND_2GHZ;
1880 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1881 wl->channel_type = NL80211_CHAN_NO_HT;
1882 wl->tx_blocks_available = 0;
1883 wl->tx_allocated_blocks = 0;
1884 wl->tx_results_count = 0;
1885 wl->tx_packets_count = 0;
1886 wl->time_offset = 0;
1887 wl->ap_fw_ps_map = 0;
1889 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1890 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1891 memset(wl->links_map, 0, sizeof(wl->links_map));
1892 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1893 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1894 wl->active_sta_count = 0;
1895 wl->active_link_count = 0;
1897 /* The system link is always allocated */
1898 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1899 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1900 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1903 * this is performed after the cancel_work calls and the associated
1904 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1905 * get executed before all these vars have been reset.
1909 wl->tx_blocks_freed = 0;
1911 for (i = 0; i < NUM_TX_QUEUES; i++) {
1912 wl->tx_pkts_freed[i] = 0;
1913 wl->tx_allocated_pkts[i] = 0;
1916 wl1271_debugfs_reset(wl);
/* fw_status_2 shares fw_status_1's allocation, so only one kfree */
1918 kfree(wl->fw_status_1);
1919 wl->fw_status_1 = NULL;
1920 wl->fw_status_2 = NULL;
1921 kfree(wl->tx_res_if);
1922 wl->tx_res_if = NULL;
1923 kfree(wl->target_mem_map);
1924 wl->target_mem_map = NULL;
1927 * FW channels must be re-calibrated after recovery,
1928 * clear the last Reg-Domain channel configuration.
1930 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop handler: take the mutex and run the locked stop path. */
1933 static void wlcore_op_stop(struct ieee80211_hw *hw)
1935 struct wl1271 *wl = hw->priv;
1937 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1939 mutex_lock(&wl->mutex);
1941 wlcore_op_stop_locked(wl);
1943 mutex_unlock(&wl->mutex);
/*
 * Delayed-work timeout for a channel switch that never completed:
 * report failure to mac80211 via ieee80211_chswitch_done(vif, false)
 * and tell the FW to stop the channel switch. Bails out if the device
 * is no longer ON or the switch already finished.
 */
1946 static void wlcore_channel_switch_work(struct work_struct *work)
1948 struct delayed_work *dwork;
1950 struct ieee80211_vif *vif;
1951 struct wl12xx_vif *wlvif;
1954 dwork = container_of(work, struct delayed_work, work);
1955 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1958 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1960 mutex_lock(&wl->mutex);
1962 if (unlikely(wl->state != WLCORE_STATE_ON))
1965 /* check the channel switch is still ongoing */
1966 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1969 vif = wl12xx_wlvif_to_vif(wlvif);
1970 ieee80211_chswitch_done(vif, false);
1972 ret = wl1271_ps_elp_wakeup(wl);
1976 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1978 wl1271_ps_elp_sleep(wl);
1980 mutex_unlock(&wl->mutex);
/*
 * Delayed work reporting a connection loss to mac80211 for an
 * associated STA vif; no-op when the device is off or the vif is
 * no longer associated.
 */
1983 static void wlcore_connection_loss_work(struct work_struct *work)
1985 struct delayed_work *dwork;
1987 struct ieee80211_vif *vif;
1988 struct wl12xx_vif *wlvif;
1990 dwork = container_of(work, struct delayed_work, work);
1991 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1994 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1996 mutex_lock(&wl->mutex);
1998 if (unlikely(wl->state != WLCORE_STATE_ON))
2001 /* Call mac80211 connection loss */
2002 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2005 vif = wl12xx_wlvif_to_vif(wlvif);
2006 ieee80211_connection_loss(vif);
2008 mutex_unlock(&wl->mutex);
/*
 * Claim the first free slot in the rate-policies bitmap and return its
 * index through @idx; fails when all policies are in use (the failure
 * return line is elided in this excerpt).
 */
2011 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2013 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2014 WL12XX_MAX_RATE_POLICIES);
2015 if (policy >= WL12XX_MAX_RATE_POLICIES)
2018 __set_bit(policy, wl->rate_policies_map);
/* Release a rate-policy slot and mark *idx invalid (set to the max). */
2023 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2025 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2028 __clear_bit(*idx, wl->rate_policies_map);
2029 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Claim the first free keep-alive (KLV) template slot and return its
 * index through @idx; fails when all templates are in use (the failure
 * return line is elided in this excerpt).
 */
2032 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2034 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2035 WLCORE_MAX_KLV_TEMPLATES);
2036 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2039 __set_bit(policy, wl->klv_templates_map);
/* Release a KLV template slot and mark *idx invalid (set to the max). */
2044 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2046 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2049 __clear_bit(*idx, wl->klv_templates_map);
2050 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type (and, per the branch structure, presumably its
 * p2p flag — the tested condition lines are elided here) to a FW role:
 * AP/P2P-GO, STA/P2P-CL, or IBSS. Returns WL12XX_INVALID_ROLE_TYPE for
 * an unknown bss_type.
 */
2053 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2055 switch (wlvif->bss_type) {
2056 case BSS_TYPE_AP_BSS:
2058 return WL1271_ROLE_P2P_GO;
2060 return WL1271_ROLE_AP;
2062 case BSS_TYPE_STA_BSS:
2064 return WL1271_ROLE_P2P_CL;
2066 return WL1271_ROLE_STA;
2069 return WL1271_ROLE_IBSS;
2072 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2074 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver data: wipe all non-persistent fields,
 * derive bss_type from the mac80211 interface type, allocate role-type
 * specific rate policies (plus a KLV template for STA/IBSS), copy the
 * global band/channel/power settings into the vif, and set up the vif's
 * work items and RX-streaming timer.
 */
2077 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2079 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2082 /* clear everything but the persistent data */
2083 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2085 switch (ieee80211_vif_type_p2p(vif)) {
2086 case NL80211_IFTYPE_P2P_CLIENT:
2089 case NL80211_IFTYPE_STATION:
2090 wlvif->bss_type = BSS_TYPE_STA_BSS;
2092 case NL80211_IFTYPE_ADHOC:
2093 wlvif->bss_type = BSS_TYPE_IBSS;
2095 case NL80211_IFTYPE_P2P_GO:
2098 case NL80211_IFTYPE_AP:
2099 wlvif->bss_type = BSS_TYPE_AP_BSS;
2102 wlvif->bss_type = MAX_BSS_TYPE;
/* roles/links start out unassigned */
2106 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2107 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2108 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2110 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2111 wlvif->bss_type == BSS_TYPE_IBSS) {
2112 /* init sta/ibss data */
2113 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2114 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2115 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2116 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2117 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2118 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2119 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2120 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links and per-AC unicast rate policies */
2123 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2124 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2125 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2126 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2127 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2128 wl12xx_allocate_rate_policy(wl,
2129 &wlvif->ap.ucast_rate_idx[i]);
2130 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2132 * TODO: check if basic_rate shouldn't be
2133 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2134 * instead (the same thing for STA above).
2136 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2137 /* TODO: this seems to be used only for STA, check it */
2138 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2141 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2142 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2143 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2146 * mac80211 configures some values globally, while we treat them
2147 * per-interface. thus, on init, we have to copy them from wl
2149 wlvif->band = wl->band;
2150 wlvif->channel = wl->channel;
2151 wlvif->power_level = wl->power_level;
2152 wlvif->channel_type = wl->channel_type;
2154 INIT_WORK(&wlvif->rx_streaming_enable_work,
2155 wl1271_rx_streaming_enable_work);
2156 INIT_WORK(&wlvif->rx_streaming_disable_work,
2157 wl1271_rx_streaming_disable_work);
2158 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2159 wlcore_channel_switch_work);
2160 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2161 wlcore_connection_loss_work);
2162 INIT_LIST_HEAD(&wlvif->list);
2164 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2165 (unsigned long) wlvif);
/*
 * Boot the chip and firmware, retrying up to WL1271_BOOT_RETRIES times
 * (wakeup -> ops->boot -> hw_init). On a failed attempt the mutex is
 * deliberately dropped to drain pending IRQ work (safe while the state
 * is OFF) before powering off and retrying. On success, publishes the
 * hw/fw version to wiphy, disables 11a channels if unsupported by the
 * NVS, and moves the driver state to ON.
 */
2169 static int wl12xx_init_fw(struct wl1271 *wl)
2171 int retries = WL1271_BOOT_RETRIES;
2172 bool booted = false;
2173 struct wiphy *wiphy = wl->hw->wiphy;
2178 ret = wl12xx_chip_wakeup(wl, false);
2182 ret = wl->ops->boot(wl);
2186 ret = wl1271_hw_init(wl);
/* failed attempt: drain IRQ machinery, then power off and retry */
2194 mutex_unlock(&wl->mutex);
2195 /* Unlocking the mutex in the middle of handling is
2196 inherently unsafe. In this case we deem it safe to do,
2197 because we need to let any possibly pending IRQ out of
2198 the system (and while we are WLCORE_STATE_OFF the IRQ
2199 work function will not do anything.) Also, any other
2200 possible concurrent operations will fail due to the
2201 current state, hence the wl1271 struct should be safe. */
2202 wlcore_disable_interrupts(wl);
2203 wl1271_flush_deferred_work(wl);
2204 cancel_work_sync(&wl->netstack_work);
2205 mutex_lock(&wl->mutex);
2207 wl1271_power_off(wl);
2211 wl1271_error("firmware boot failed despite %d retries",
2212 WL1271_BOOT_RETRIES);
2216 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2218 /* update hw/fw version info in wiphy struct */
2219 wiphy->hw_version = wl->chip.id;
2220 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2221 sizeof(wiphy->fw_version));
2224 * Now we know if 11a is supported (info from the NVS), so disable
2225 * 11a channels if not supported
2227 if (!wl->enable_11a)
2228 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2230 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2231 wl->enable_11a ? "" : "not ");
2233 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid (started) link id. */
2238 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2240 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2244 * Check whether a fw switch (i.e. moving from one loaded
2245 * fw to another) is needed. This function is also responsible
2246 * for updating wl->last_vif_count, so it must be called before
2247 * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Decide whether to switch between the single-role and multi-role FW
 * based on the (possibly incremented) vif count. Never switches while a
 * vif change is in progress, while the device is OFF, or when only one
 * FW image exists.
 */
2250 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2251 struct vif_counter_data vif_counter_data,
2254 enum wl12xx_fw_type current_fw = wl->fw_type;
2255 u8 vif_count = vif_counter_data.counter;
2257 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2260 /* increase the vif count if this is a new vif */
2261 if (add && !vif_counter_data.cur_vif_running)
2264 wl->last_vif_count = vif_count;
2266 /* no need for fw change if the device is OFF */
2267 if (wl->state == WLCORE_STATE_OFF)
2270 /* no need for fw change if a single fw is used */
2271 if (!wl->mr_fw_name)
/* multi-role fw needed for >1 vifs; single-role otherwise */
2274 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2276 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2283 * Enter "forced psm". Make sure the sta is in psm against the ap,
2284 * to make the fw switch a bit more disconnection-persistent.
/* Put every STA vif into FW power-save mode before a FW switch. */
2286 static void wl12xx_force_active_psm(struct wl1271 *wl)
2288 struct wl12xx_vif *wlvif;
2290 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2291 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * Accumulator for the active-interface iteration below: a bitmap of
 * claimed hw-queue bases, the vif being (re)added, and whether that vif
 * is already running in mac80211.
 */
2295 struct wlcore_hw_queue_iter_data {
2296 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2298 struct ieee80211_vif *vif;
2299 /* is the current vif among those iterated */
/*
 * Per-interface iterator callback: mark each active vif's hw-queue base
 * as taken, and flag when the vif currently being added is encountered
 * (it keeps its pre-allocated base instead of claiming a new one).
 */
2303 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2304 struct ieee80211_vif *vif)
2306 struct wlcore_hw_queue_iter_data *iter_data = data;
2308 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2311 if (iter_data->cur_running || vif == iter_data->vif) {
2312 iter_data->cur_running = true;
2316 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to the vif. If the
 * vif is already running (resume/recovery) its previous base is reused;
 * otherwise the first free base slot is claimed. AP vifs additionally
 * get a CAB (content-after-beacon) queue from the reserved tail range.
 */
2319 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2320 struct wl12xx_vif *wlvif)
2322 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2323 struct wlcore_hw_queue_iter_data iter_data = {};
2326 iter_data.vif = vif;
2328 /* mark all bits taken by active interfaces */
2329 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2330 IEEE80211_IFACE_ITER_RESUME_ALL,
2331 wlcore_hw_queue_iter, &iter_data);
2333 /* the current vif is already running in mac80211 (resume/recovery) */
2334 if (iter_data.cur_running) {
2335 wlvif->hw_queue_base = vif->hw_queue[0];
2336 wl1271_debug(DEBUG_MAC80211,
2337 "using pre-allocated hw queue base %d",
2338 wlvif->hw_queue_base);
2340 /* interface type might have changed type */
2341 goto adjust_cab_queue;
2344 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2345 WLCORE_NUM_MAC_ADDRESSES);
2346 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2349 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2350 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2351 wlvif->hw_queue_base);
2353 for (i = 0; i < NUM_TX_QUEUES; i++) {
2354 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2355 /* register hw queues in mac80211 */
2356 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2360 /* the last places are reserved for cab queues per interface */
2361 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2362 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2363 wlvif->hw_queue_base / NUM_TX_QUEUES;
2365 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface handler. Initializes vif data, resolves the FW
 * role type, allocates hw queues, switches FW image if the vif count
 * requires it (via a deliberate recovery), boots the firmware on first
 * interface (the MAC address is only known here — see wl1271_op_start),
 * then enables the role and performs vif-specific init.
 */
2370 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2371 struct ieee80211_vif *vif)
2373 struct wl1271 *wl = hw->priv;
2374 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2375 struct vif_counter_data vif_count;
2379 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2380 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2382 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2383 ieee80211_vif_type_p2p(vif), vif->addr);
2385 wl12xx_get_vif_count(hw, vif, &vif_count);
2387 mutex_lock(&wl->mutex);
2388 ret = wl1271_ps_elp_wakeup(wl);
2393 * in some very corner case HW recovery scenarios its possible to
2394 * get here before __wl1271_op_remove_interface is complete, so
2395 * opt out if that is the case.
2397 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2398 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2404 ret = wl12xx_init_vif_data(wl, vif);
2409 role_type = wl12xx_get_role_type(wl, wlvif);
2410 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2415 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* vif count demands the other FW image: restart via intended recovery */
2419 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2420 wl12xx_force_active_psm(wl);
2421 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2422 mutex_unlock(&wl->mutex);
2423 wl1271_recovery_work(&wl->recovery_work);
2428 * TODO: after the nvs issue will be solved, move this block
2429 * to start(), and make sure here the driver is ON.
2431 if (wl->state == WLCORE_STATE_OFF) {
2433 * we still need this in order to configure the fw
2434 * while uploading the nvs
2436 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2438 ret = wl12xx_init_fw(wl);
2443 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2444 role_type, &wlvif->role_id);
2448 ret = wl1271_init_vif_specific(wl, vif);
2452 list_add(&wlvif->list, &wl->wlvif_list);
2453 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2455 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2460 wl1271_ps_elp_sleep(wl);
2462 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held. Idles any scan owned by the vif,
 * notifies mac80211 about stopped sched-scan/ROC, disables the FW roles
 * (unless a recovery is in progress), frees rate policies and the KLV
 * template, resets the vif's TX state, unlinks it, and — when the last
 * AP goes away while STAs remain — reconfigures sleep auth for STA
 * operation. Drops the mutex at the end to cancel the vif's works.
 */
2467 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2468 struct ieee80211_vif *vif,
2469 bool reset_tx_queues)
2471 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2473 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2475 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2477 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2480 /* because of hardware recovery, we may get here twice */
2481 if (wl->state == WLCORE_STATE_OFF)
2484 wl1271_info("down");
2486 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2487 wl->scan_wlvif == wlvif) {
2489 * Rearm the tx watchdog just before idling scan. This
2490 * prevents just-finished scans from triggering the watchdog
2492 wl12xx_rearm_tx_watchdog_locked(wl);
2494 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2495 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2496 wl->scan_wlvif = NULL;
2497 wl->scan.req = NULL;
2498 ieee80211_scan_completed(wl->hw, true);
2501 if (wl->sched_vif == wlvif) {
2502 ieee80211_sched_scan_stopped(wl->hw);
2503 wl->sched_vif = NULL;
2506 if (wl->roc_vif == vif) {
2508 ieee80211_remain_on_channel_expired(wl->hw);
/* only talk to the FW when no recovery is in progress */
2511 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2512 /* disable active roles */
2513 ret = wl1271_ps_elp_wakeup(wl);
2517 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2518 wlvif->bss_type == BSS_TYPE_IBSS) {
2519 if (wl12xx_dev_role_started(wlvif))
2520 wl12xx_stop_dev(wl, wlvif);
2523 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2527 wl1271_ps_elp_sleep(wl);
2530 /* clear all hlids (except system_hlid) */
2531 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2533 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2534 wlvif->bss_type == BSS_TYPE_IBSS) {
2535 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2536 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2537 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2538 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2539 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2541 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2542 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2543 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2544 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2545 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2546 wl12xx_free_rate_policy(wl,
2547 &wlvif->ap.ucast_rate_idx[i]);
2548 wl1271_free_ap_keys(wl, wlvif);
2551 dev_kfree_skb(wlvif->probereq);
2552 wlvif->probereq = NULL;
2553 wl12xx_tx_reset_wlvif(wl, wlvif);
2554 if (wl->last_wlvif == wlvif)
2555 wl->last_wlvif = NULL;
2556 list_del(&wlvif->list);
2557 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2558 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2559 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2567 * Last AP, have more stations. Configure sleep auth according to STA.
2568 * Don't do this on unintended recovery.
2570 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2571 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2574 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2575 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2576 /* Configure for power according to debugfs */
2577 if (sta_auth != WL1271_PSM_ILLEGAL)
2578 wl1271_acx_sleep_auth(wl, sta_auth);
2579 /* Configure for ELP power saving */
2581 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex so the vif's works can be cancelled synchronously */
2585 mutex_unlock(&wl->mutex);
2587 del_timer_sync(&wlvif->rx_streaming_timer);
2588 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2589 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2590 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2592 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface handler: verify the vif is still tracked
 * (it may vanish under a concurrent HW recovery), run the internal
 * teardown, and trigger a FW switch recovery if the reduced vif count
 * calls for the other FW image.
 */
2595 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2596 struct ieee80211_vif *vif)
2598 struct wl1271 *wl = hw->priv;
2599 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2600 struct wl12xx_vif *iter;
2601 struct vif_counter_data vif_count;
2603 wl12xx_get_vif_count(hw, vif, &vif_count);
2604 mutex_lock(&wl->mutex);
2606 if (wl->state == WLCORE_STATE_OFF ||
2607 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2611 * wl->vif can be null here if someone shuts down the interface
2612 * just when hardware recovery has been started.
2614 wl12xx_for_each_wlvif(wl, iter) {
2618 __wl1271_op_remove_interface(wl, vif, true);
2621 WARN_ON(iter != wlvif);
2622 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2623 wl12xx_force_active_psm(wl);
2624 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2625 wl12xx_queue_recovery_work(wl);
2628 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface handler, implemented as remove + re-add
 * with the new type. The VIF_CHANGE_IN_PROGRESS flag suppresses a FW
 * image switch during the transient remove/add sequence.
 */
2631 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2632 struct ieee80211_vif *vif,
2633 enum nl80211_iftype new_type, bool p2p)
2635 struct wl1271 *wl = hw->priv;
2638 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2639 wl1271_op_remove_interface(hw, vif);
2641 vif->type = new_type;
2643 ret = wl1271_op_add_interface(hw, vif);
2645 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue a JOIN by starting the appropriate FW role (IBSS or STA).
 * JOIN clears the chipset's WPA/WPA2 keys, so joining while associated
 * is only valid on roaming (new keys follow); a message is logged.
 * Includes a start/stop/start workaround for a wl12xx FW bug where
 * TX/RX is dead after the first start_sta.
 */
2649 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2652 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2655 * One of the side effects of the JOIN command is that is clears
2656 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2657 * to a WPA/WPA2 access point will therefore kill the data-path.
2658 * Currently the only valid scenario for JOIN during association
2659 * is on roaming, in which case we will also be given new keys.
2660 * Keep the below message for now, unless it starts bothering
2661 * users who really like to roam a lot :)
2663 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2664 wl1271_info("JOIN while associated.");
2666 /* clear encryption type */
2667 wlvif->encryption_type = KEY_NONE;
2670 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2672 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2674 * TODO: this is an ugly workaround for wl12xx fw
2675 * bug - we are not able to tx/rx after the first
2676 * start_sta, so make dummy start+stop calls,
2677 * and then call start_sta again.
2678 * this should be fixed in the fw.
2680 wl12xx_cmd_role_start_sta(wl, wlvif);
2681 wl12xx_cmd_role_stop_sta(wl, wlvif);
2684 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a frame's IE area (starting at @offset into
 * the skb) and store it in the vif. Fails when no SSID IE is present or
 * its length exceeds IEEE80211_MAX_SSID_LEN (return lines elided in
 * this excerpt).
 */
2690 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2694 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2698 wl1271_error("No SSID in IEs!");
2703 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2704 wl1271_error("SSID is too long!");
2708 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id + length bytes) */
2709 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Fill in the vif's SSID from the AP probe request mac80211 holds for
 * it. STA-only: that probe request is the only supported SSID source.
 */
2713 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2715 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2716 struct sk_buff *skb;
2719 /* we currently only support setting the ssid from the ap probe req */
2720 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2723 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2727 ieoffset = offsetof(struct ieee80211_mgmt,
2728 u.probe_req.variable);
2729 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * wlcore_set_assoc - configure firmware state after association:
 * cache aid/beacon_int/wmm from bss_conf, build the ps-poll and AP
 * probe-request templates, enable connection monitoring and keep-alive,
 * and sync PS mode and rate policies with mac80211 defaults.
 * NOTE(review): excerpt elides the error-check lines between commands.
 */
2735 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2736 struct ieee80211_bss_conf *bss_conf,
2742 wlvif->aid = bss_conf->aid;
2743 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2744 wlvif->beacon_int = bss_conf->beacon_int;
2745 wlvif->wmm_enabled = bss_conf->qos;
2747 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2750 * with wl1271, we don't need to update the
2751 * beacon_int and dtim_period, because the firmware
2752 * updates it by itself when the first beacon is
2753 * received after a join.
2755 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2760 * Get a template for hardware connection maintenance
2762 dev_kfree_skb(wlvif->probereq);
2763 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2766 ieoffset = offsetof(struct ieee80211_mgmt,
2767 u.probe_req.variable);
2768 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2770 /* enable the connection monitoring feature */
2771 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2776 * The join command disable the keep-alive mode, shut down its process,
2777 * and also clear the template config, so we need to reset it all after
2778 * the join. The acx_aid starts the keep-alive process, and the order
2779 * of the commands below is relevant.
2781 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2785 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2789 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2793 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2794 wlvif->sta.klv_template_id,
2795 ACX_KEEP_ALIVE_TPL_VALID);
2800 * The default fw psm configuration is AUTO, while mac80211 default
2801 * setting is off (ACTIVE), so sync the fw with the correct value.
2803 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2809 wl1271_tx_enabled_rates_get(wl,
2812 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * wlcore_unset_assoc - undo wlcore_set_assoc on disassociation:
 * drop the probe-request template, disable connection monitoring and
 * keep-alive, abort a pending channel switch, invalidate the keep-alive
 * template and reset the TX security counters.
 * NOTE(review): excerpt elides early-return and error-check lines.
 */
2820 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2823 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2825 /* make sure we are connected (sta) joined */
2827 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2830 /* make sure we are joined (ibss) */
2832 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2836 /* use defaults when not associated */
2839 /* free probe-request template */
2840 dev_kfree_skb(wlvif->probereq);
2841 wlvif->probereq = NULL;
2843 /* disable connection monitor features */
2844 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2848 /* Disable the keep-alive feature */
2849 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* a channel switch in progress must be aborted and reported to mac80211 */
2854 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2855 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2857 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2858 ieee80211_chswitch_done(vif, false);
2859 cancel_delayed_work(&wlvif->channel_switch_work);
2862 /* invalidate keep-alive template */
2863 wl1271_acx_keep_alive_config(wl, wlvif,
2864 wlvif->sta.klv_template_id,
2865 ACX_KEEP_ALIVE_TPL_INVALID);
2867 /* reset TX security counters on a clean disconnect */
2868 wlvif->tx_security_last_seq_lsb = 0;
2869 wlvif->tx_security_seq = 0;
/* Reset the vif's basic and current rate sets to the per-band defaults. */
2874 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2876 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2877 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * wl12xx_config_vif - apply hw-config changes to one vif.
 * Only visible handling here is TX power: push the new level to firmware
 * and cache it when it differs from the current one.
 */
2880 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2881 struct ieee80211_conf *conf, u32 changed)
2885 if (conf->power_level != wlvif->power_level) {
2886 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2890 wlvif->power_level = conf->power_level;
/*
 * wl1271_op_config - mac80211 .config callback.
 * Takes wl->mutex, wakes the chip out of ELP, applies the changed config
 * to every vif via wl12xx_config_vif(), then lets the chip sleep again.
 */
2896 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2898 struct wl1271 *wl = hw->priv;
2899 struct wl12xx_vif *wlvif;
2900 struct ieee80211_conf *conf = &hw->conf;
2903 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2905 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2907 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2910 mutex_lock(&wl->mutex);
2912 if (changed & IEEE80211_CONF_CHANGE_POWER)
2913 wl->power_level = conf->power_level;
/* bail out quietly when the core is not fully up (e.g. recovery) */
2915 if (unlikely(wl->state != WLCORE_STATE_ON))
2918 ret = wl1271_ps_elp_wakeup(wl);
2922 /* configure each interface */
2923 wl12xx_for_each_wlvif(wl, wlvif) {
2924 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2930 wl1271_ps_elp_sleep(wl);
2933 mutex_unlock(&wl->mutex);
/* Multicast filter snapshot built by prepare_multicast and consumed by
 * configure_filter.  NOTE(review): some members elided in this excerpt. */
2938 struct wl1271_filter_params {
2941 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * wl1271_op_prepare_multicast - mac80211 .prepare_multicast callback.
 * Snapshots the multicast address list into a heap-allocated
 * wl1271_filter_params (GFP_ATOMIC: may be called in atomic context) and
 * returns it encoded in the u64 cookie that configure_filter receives.
 * Lists longer than ACX_MC_ADDRESS_GROUP_MAX disable address filtering.
 */
2944 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2945 struct netdev_hw_addr_list *mc_list)
2947 struct wl1271_filter_params *fp;
2948 struct netdev_hw_addr *ha;
2950 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2952 wl1271_error("Out of memory setting filters.");
2956 /* update multicast filtering parameters */
2957 fp->mc_list_length = 0;
2958 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2959 fp->enabled = false;
2962 netdev_hw_addr_list_for_each(ha, mc_list) {
2963 memcpy(fp->mc_list[fp->mc_list_length],
2964 ha->addr, ETH_ALEN);
2965 fp->mc_list_length++;
2969 return (u64)(unsigned long)fp;
/* RX filter flags this driver supports; anything else is masked off in
 * configure_filter.  NOTE(review): continuation lines elided in excerpt. */
2972 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2975 FIF_BCN_PRBRESP_PROMISC | \
/*
 * wl1271_op_configure_filter - mac80211 .configure_filter callback.
 * Masks the requested flags to WL1271_SUPPORTED_FILTERS and programs the
 * multicast group-address table per non-AP vif (disabled under FIF_ALLMULTI,
 * otherwise loaded from the prepare_multicast snapshot in @multicast).
 * The fw has no direct filter API; filtering follows active roles/ROC.
 */
2979 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2980 unsigned int changed,
2981 unsigned int *total, u64 multicast)
2983 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2984 struct wl1271 *wl = hw->priv;
2985 struct wl12xx_vif *wlvif;
2989 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2990 " total %x", changed, *total);
2992 mutex_lock(&wl->mutex);
2994 *total &= WL1271_SUPPORTED_FILTERS;
2995 changed &= WL1271_SUPPORTED_FILTERS;
2997 if (unlikely(wl->state != WLCORE_STATE_ON))
3000 ret = wl1271_ps_elp_wakeup(wl);
3004 wl12xx_for_each_wlvif(wl, wlvif) {
3005 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3006 if (*total & FIF_ALLMULTI)
3007 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3011 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3014 fp->mc_list_length);
3021 * the fw doesn't provide an api to configure the filters. instead,
3022 * the filters configuration is based on the active roles / ROC
3027 wl1271_ps_elp_sleep(wl);
3030 mutex_unlock(&wl->mutex);
/*
 * wl1271_record_ap_key - queue an AP key for later installation.
 * Keys configured before the AP role is started cannot be pushed to fw
 * yet; they are recorded in wlvif->ap.recorded_keys and replayed by
 * wl1271_ap_init_hwenc() once the AP starts.  Rejects oversized keys and
 * warns on duplicate key ids.  NOTE(review): error-return lines elided.
 */
3034 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3035 u8 id, u8 key_type, u8 key_size,
3036 const u8 *key, u8 hlid, u32 tx_seq_32,
3039 struct wl1271_ap_key *ap_key;
3042 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3044 if (key_size > MAX_KEY_SIZE)
3048 * Find next free entry in ap_keys. Also check we are not replacing
3051 for (i = 0; i < MAX_NUM_KEYS; i++) {
3052 if (wlvif->ap.recorded_keys[i] == NULL)
3055 if (wlvif->ap.recorded_keys[i]->id == id) {
3056 wl1271_warning("trying to record key replacement");
3061 if (i == MAX_NUM_KEYS)
3064 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3069 ap_key->key_type = key_type;
3070 ap_key->key_size = key_size;
3071 memcpy(ap_key->key, key, key_size);
3072 ap_key->hlid = hlid;
3073 ap_key->tx_seq_32 = tx_seq_32;
3074 ap_key->tx_seq_16 = tx_seq_16;
3076 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded (not yet installed) AP keys and clear the slots so a
 * later record/replay cycle starts clean. */
3080 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3084 for (i = 0; i < MAX_NUM_KEYS; i++) {
3085 kfree(wlvif->ap.recorded_keys[i]);
3086 wlvif->ap.recorded_keys[i] = NULL;
/*
 * wl1271_ap_init_hwenc - replay keys recorded before AP start.
 * Installs each recorded key via cmd_set_ap_key (broadcast hlid used for
 * keys with no station link), sets the default WEP key if any WEP key was
 * added, and finally frees the recorded list.
 * NOTE(review): excerpt elides the per-command error checks.
 */
3090 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3093 struct wl1271_ap_key *key;
3094 bool wep_key_added = false;
3096 for (i = 0; i < MAX_NUM_KEYS; i++) {
3098 if (wlvif->ap.recorded_keys[i] == NULL)
3101 key = wlvif->ap.recorded_keys[i];
3103 if (hlid == WL12XX_INVALID_LINK_ID)
3104 hlid = wlvif->ap.bcast_hlid;
3106 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3107 key->id, key->key_type,
3108 key->key_size, key->key,
3109 hlid, key->tx_seq_32,
3114 if (key->key_type == KEY_WEP)
3115 wep_key_added = true;
3118 if (wep_key_added) {
3119 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3120 wlvif->ap.bcast_hlid);
3126 wl1271_free_ap_keys(wl, wlvif);
/*
 * wl1271_set_key - install or remove one key, AP or STA path.
 * AP: before AP start, record the key (removal is silently "accepted");
 * after start, push it with cmd_set_ap_key.  STA: unicast key removal is
 * ignored (chip clears them on the next CMD_JOIN), removal is also skipped
 * when the hlid is already gone; otherwise cmd_set_sta_key is used, and a
 * WEP key additionally sets the default key index.
 * NOTE(review): excerpt elides several braces/error paths.
 */
3130 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3131 u16 action, u8 id, u8 key_type,
3132 u8 key_size, const u8 *key, u32 tx_seq_32,
3133 u16 tx_seq_16, struct ieee80211_sta *sta)
3136 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3139 struct wl1271_station *wl_sta;
3143 wl_sta = (struct wl1271_station *)sta->drv_priv;
3144 hlid = wl_sta->hlid;
3146 hlid = wlvif->ap.bcast_hlid;
3149 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3151 * We do not support removing keys after AP shutdown.
3152 * Pretend we do to make mac80211 happy.
3154 if (action != KEY_ADD_OR_REPLACE)
3157 ret = wl1271_record_ap_key(wl, wlvif, id,
3159 key, hlid, tx_seq_32,
3162 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3163 id, key_type, key_size,
3164 key, hlid, tx_seq_32,
3172 static const u8 bcast_addr[ETH_ALEN] = {
3173 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3176 addr = sta ? sta->addr : bcast_addr;
3178 if (is_zero_ether_addr(addr)) {
3179 /* We dont support TX only encryption */
3183 /* The wl1271 does not allow to remove unicast keys - they
3184 will be cleared automatically on next CMD_JOIN. Ignore the
3185 request silently, as we dont want the mac80211 to emit
3186 an error message. */
3187 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3190 /* don't remove key if hlid was already deleted */
3191 if (action == KEY_REMOVE &&
3192 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3195 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3196 id, key_type, key_size,
3197 key, addr, tx_seq_32,
3202 /* the default WEP key needs to be configured at least once */
3203 if (key_type == KEY_WEP) {
3204 ret = wl12xx_cmd_set_default_wep_key(wl,
/*
 * wlcore_op_set_key - mac80211 .set_key callback.
 * GEM/TKIP ciphers can change the per-packet spare-block accounting, so
 * the TX queues are stopped and flushed before the key change and woken
 * afterwards.  The actual work is delegated to the hw-specific
 * wlcore_hw_set_key() under wl->mutex with the chip awake.
 */
3215 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3216 struct ieee80211_vif *vif,
3217 struct ieee80211_sta *sta,
3218 struct ieee80211_key_conf *key_conf)
3220 struct wl1271 *wl = hw->priv;
3222 bool might_change_spare =
3223 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3224 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3226 if (might_change_spare) {
3228 * stop the queues and flush to ensure the next packets are
3229 * in sync with FW spare block accounting
3231 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3232 wl1271_tx_flush(wl);
3235 mutex_lock(&wl->mutex);
3237 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3239 goto out_wake_queues;
3242 ret = wl1271_ps_elp_wakeup(wl);
3244 goto out_wake_queues;
3246 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3248 wl1271_ps_elp_sleep(wl);
3251 if (might_change_spare)
3252 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3254 mutex_unlock(&wl->mutex);
/*
 * wlcore_set_key - shared key-handling helper, exported for hw ops.
 * Maps the mac80211 cipher to a fw key type (WEP/TKIP/CCMP/GEM), pulls
 * TX sequence counters for ciphers that need them, then adds/replaces or
 * removes the key via wl1271_set_key().  A unicast (or WEP) key-type
 * change on a STA vif also rebuilds the ARP response template so its
 * encryption matches.  NOTE(review): case/brace lines elided in excerpt.
 */
3259 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3260 struct ieee80211_vif *vif,
3261 struct ieee80211_sta *sta,
3262 struct ieee80211_key_conf *key_conf)
3264 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3270 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3272 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3273 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3274 key_conf->cipher, key_conf->keyidx,
3275 key_conf->keylen, key_conf->flags);
3276 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3278 switch (key_conf->cipher) {
3279 case WLAN_CIPHER_SUITE_WEP40:
3280 case WLAN_CIPHER_SUITE_WEP104:
3283 key_conf->hw_key_idx = key_conf->keyidx;
3285 case WLAN_CIPHER_SUITE_TKIP:
3286 key_type = KEY_TKIP;
3288 key_conf->hw_key_idx = key_conf->keyidx;
3289 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3290 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3292 case WLAN_CIPHER_SUITE_CCMP:
3295 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3296 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3297 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3299 case WL1271_CIPHER_SUITE_GEM:
3301 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3302 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3305 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3312 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3313 key_conf->keyidx, key_type,
3314 key_conf->keylen, key_conf->key,
3315 tx_seq_32, tx_seq_16, sta);
3317 wl1271_error("Could not add or replace key");
3322 * reconfiguring arp response if the unicast (or common)
3323 * encryption key type was changed
3325 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3326 (sta || key_type == KEY_WEP) &&
3327 wlvif->encryption_type != key_type) {
3328 wlvif->encryption_type = key_type;
3329 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3331 wl1271_warning("build arp rsp failed: %d", ret);
3338 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3339 key_conf->keyidx, key_type,
3340 key_conf->keylen, key_conf->key,
3343 wl1271_error("Could not remove key");
3349 wl1271_error("Unsupported key cmd 0x%x", cmd);
3355 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * wlcore_regdomain_config - push the current regulatory domain to fw.
 * No-op unless the chip has WLCORE_QUIRK_REGDOMAIN_CONF; a failure of the
 * locked config command triggers recovery.
 */
3357 void wlcore_regdomain_config(struct wl1271 *wl)
3361 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3364 mutex_lock(&wl->mutex);
3365 ret = wl1271_ps_elp_wakeup(wl);
3369 ret = wlcore_cmd_regdomain_config_locked(wl);
3371 wl12xx_queue_recovery_work(wl);
3375 wl1271_ps_elp_sleep(wl);
3377 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_hw_scan - mac80211 .hw_scan callback.
 * Uses the first requested SSID (if any), refuses to scan while any role
 * is on-channel (ROC), and delegates to wlcore_scan() with the chip awake.
 */
3380 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3381 struct ieee80211_vif *vif,
3382 struct cfg80211_scan_request *req)
3384 struct wl1271 *wl = hw->priv;
3389 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3392 ssid = req->ssids[0].ssid;
3393 len = req->ssids[0].ssid_len;
3396 mutex_lock(&wl->mutex);
3398 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3400 * We cannot return -EBUSY here because cfg80211 will expect
3401 * a call to ieee80211_scan_completed if we do - in this case
3402 * there won't be any call.
3408 ret = wl1271_ps_elp_wakeup(wl);
3412 /* fail if there is any role in ROC */
3413 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3414 /* don't allow scanning right now */
3419 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3421 wl1271_ps_elp_sleep(wl);
3423 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_cancel_hw_scan - mac80211 .cancel_hw_scan callback.
 * Stops an in-flight scan in fw, rearms the TX watchdog, resets the scan
 * state machine and reports an aborted scan to mac80211, then cancels the
 * deferred scan-complete work outside the mutex.
 */
3428 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3429 struct ieee80211_vif *vif)
3431 struct wl1271 *wl = hw->priv;
3432 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3435 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3437 mutex_lock(&wl->mutex);
3439 if (unlikely(wl->state != WLCORE_STATE_ON))
3442 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3445 ret = wl1271_ps_elp_wakeup(wl);
3449 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3450 ret = wl->ops->scan_stop(wl, wlvif);
3456 * Rearm the tx watchdog just before idling scan. This
3457 * prevents just-finished scans from triggering the watchdog
3459 wl12xx_rearm_tx_watchdog_locked(wl);
3461 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3462 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3463 wl->scan_wlvif = NULL;
3464 wl->scan.req = NULL;
/* 'true' = scan was aborted */
3465 ieee80211_scan_completed(wl->hw, true);
3468 wl1271_ps_elp_sleep(wl);
3470 mutex_unlock(&wl->mutex);
3472 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * wl1271_op_sched_scan_start - mac80211 .sched_scan_start callback.
 * Delegates to the hw-specific sched_scan_start op and records the owning
 * vif in wl->sched_vif on success.
 */
3475 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3476 struct ieee80211_vif *vif,
3477 struct cfg80211_sched_scan_request *req,
3478 struct ieee80211_sched_scan_ies *ies)
3480 struct wl1271 *wl = hw->priv;
3481 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3484 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3486 mutex_lock(&wl->mutex);
3488 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3493 ret = wl1271_ps_elp_wakeup(wl);
3497 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3501 wl->sched_vif = wlvif;
3504 wl1271_ps_elp_sleep(wl);
3506 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_sched_scan_stop - mac80211 .sched_scan_stop callback.
 * Stops the hw-specific scheduled scan with the chip awake.
 */
3510 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3511 struct ieee80211_vif *vif)
3513 struct wl1271 *wl = hw->priv;
3514 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3517 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3519 mutex_lock(&wl->mutex);
3521 if (unlikely(wl->state != WLCORE_STATE_ON))
3524 ret = wl1271_ps_elp_wakeup(wl);
3528 wl->ops->sched_scan_stop(wl, wlvif);
3530 wl1271_ps_elp_sleep(wl);
3532 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_frag_threshold - mac80211 .set_frag_threshold callback.
 * Pushes the fragmentation threshold to fw via ACX; failure is logged as
 * a warning only.
 */
3535 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3537 struct wl1271 *wl = hw->priv;
3540 mutex_lock(&wl->mutex);
3542 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3547 ret = wl1271_ps_elp_wakeup(wl);
3551 ret = wl1271_acx_frag_threshold(wl, value);
3553 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3555 wl1271_ps_elp_sleep(wl);
3558 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_set_rts_threshold - mac80211 .set_rts_threshold callback.
 * The RTS threshold is per-role in fw, so it is applied to every vif.
 */
3563 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3565 struct wl1271 *wl = hw->priv;
3566 struct wl12xx_vif *wlvif;
3569 mutex_lock(&wl->mutex);
3571 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3576 ret = wl1271_ps_elp_wakeup(wl);
3580 wl12xx_for_each_wlvif(wl, wlvif) {
3581 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3583 wl1271_warning("set rts threshold failed: %d", ret);
3585 wl1271_ps_elp_sleep(wl);
3588 mutex_unlock(&wl->mutex);
/*
 * wl12xx_remove_ie - delete one information element from a frame in @skb.
 * Finds the IE by @eid starting at @ieoffset, shifts the remainder of the
 * frame over it, and trims the skb by the IE's length.
 */
3593 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3596 const u8 *next, *end = skb->data + skb->len;
3597 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3598 skb->len - ieoffset);
3603 memmove(ie, next, end - next);
3604 skb_trim(skb, skb->len - len);
/*
 * wl12xx_remove_vendor_ie - like wl12xx_remove_ie(), but matches a vendor
 * IE by OUI and OUI type via cfg80211_find_vendor_ie().
 */
3607 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3608 unsigned int oui, u8 oui_type,
3612 const u8 *next, *end = skb->data + skb->len;
3613 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3614 skb->data + ieoffset,
3615 skb->len - ieoffset);
3620 memmove(ie, next, end - next);
3621 skb_trim(skb, skb->len - len);
/*
 * wl1271_ap_set_probe_resp_tmpl - install the usermode-provided probe
 * response (from ieee80211_proberesp_get) as the AP probe-response
 * template and mark it set so beacon-derived templates are skipped.
 */
3624 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3625 struct ieee80211_vif *vif)
3627 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3628 struct sk_buff *skb;
3631 skb = ieee80211_proberesp_get(wl->hw, vif);
3635 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3636 CMD_TEMPL_AP_PROBE_RESPONSE,
3645 wl1271_debug(DEBUG_AP, "probe response updated")
3646 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * wl1271_ap_set_probe_resp_tmpl_legacy - build the AP probe-response
 * template for hidden-SSID setups.  If the vif already has a correct SSID
 * the raw data is used as-is; otherwise the SSID IE inside the beacon-
 * derived frame is replaced with the SSID from bss_conf before the
 * template is installed.  NOTE(review): bounds-check/return lines elided.
 */
3652 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3653 struct ieee80211_vif *vif,
3655 size_t probe_rsp_len,
3658 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3659 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3660 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3661 int ssid_ie_offset, ie_offset, templ_len;
3664 /* no need to change probe response if the SSID is set correctly */
3665 if (wlvif->ssid_len > 0)
3666 return wl1271_cmd_template_set(wl, wlvif->role_id,
3667 CMD_TEMPL_AP_PROBE_RESPONSE,
3672 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3673 wl1271_error("probe_rsp template too big");
3677 /* start searching from IE offset */
3678 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3680 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3681 probe_rsp_len - ie_offset);
3683 wl1271_error("No SSID in beacon!");
3687 ssid_ie_offset = ptr - probe_rsp_data;
/* skip the existing SSID IE (header + payload) */
3688 ptr += (ptr[1] + 2);
3690 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3692 /* insert SSID from bss_conf */
3693 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3694 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3695 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3696 bss_conf->ssid, bss_conf->ssid_len);
3697 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3699 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3700 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3701 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3703 return wl1271_cmd_template_set(wl, wlvif->role_id,
3704 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * wl1271_bss_erp_info_changed - apply ERP-related bss changes:
 * slot time (short/long), preamble (short/long) and CTS protection.
 * Failures are reported as warnings; preamble errors are not checked.
 */
3710 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3711 struct ieee80211_vif *vif,
3712 struct ieee80211_bss_conf *bss_conf,
3715 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3718 if (changed & BSS_CHANGED_ERP_SLOT) {
3719 if (bss_conf->use_short_slot)
3720 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3722 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3724 wl1271_warning("Set slot time failed %d", ret);
3729 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3730 if (bss_conf->use_short_preamble)
3731 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3733 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3736 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3737 if (bss_conf->use_cts_prot)
3738 ret = wl1271_acx_cts_protect(wl, wlvif,
3741 ret = wl1271_acx_cts_protect(wl, wlvif,
3742 CTSPROTECT_DISABLE);
3744 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * wlcore_set_beacon_template - install the beacon template and, unless a
 * usermode probe response was set, derive a probe-response template from
 * the beacon: strip the TIM and P2P IEs, rewrite the frame control to
 * PROBE_RESP, and push it (via the legacy hidden-SSID path when needed).
 * Also refreshes wmm_enabled from the beacon's WMM vendor IE.
 * NOTE(review): excerpt elides error checks and the trailing cleanup.
 */
3753 static int wlcore_set_beacon_template(struct wl1271 *wl,
3754 struct ieee80211_vif *vif,
3757 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3758 struct ieee80211_hdr *hdr;
3761 int ieoffset = offsetof(struct ieee80211_mgmt,
3763 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3771 wl1271_debug(DEBUG_MASTER, "beacon updated");
3773 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3775 dev_kfree_skb(beacon);
3778 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3779 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3781 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3786 dev_kfree_skb(beacon);
3790 wlvif->wmm_enabled =
3791 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3792 WLAN_OUI_TYPE_MICROSOFT_WMM,
3793 beacon->data + ieoffset,
3794 beacon->len - ieoffset);
3797 * In case we already have a probe-resp beacon set explicitly
3798 * by usermode, don't use the beacon data.
3800 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3803 /* remove TIM ie from probe response */
3804 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3807 * remove p2p ie from probe response.
3808 * the fw reponds to probe requests that don't include
3809 * the p2p ie. probe requests with p2p ie will be passed,
3810 * and will be responded by the supplicant (the spec
3811 * forbids including the p2p ie when responding to probe
3812 * requests that didn't include it).
3814 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3815 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3817 hdr = (struct ieee80211_hdr *) beacon->data;
3818 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3819 IEEE80211_STYPE_PROBE_RESP);
3821 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3826 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3827 CMD_TEMPL_PROBE_RESPONSE,
3832 dev_kfree_skb(beacon);
/*
 * wl1271_bss_beacon_info_changed - handle beacon-related bss changes:
 * cache a new beacon interval, refresh the AP probe-response template on
 * BSS_CHANGED_AP_PROBE_RESP, and reinstall the beacon template on
 * BSS_CHANGED_BEACON.  Errors are logged on the common exit path.
 */
3840 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3841 struct ieee80211_vif *vif,
3842 struct ieee80211_bss_conf *bss_conf,
3845 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3846 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3849 if (changed & BSS_CHANGED_BEACON_INT) {
3850 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3851 bss_conf->beacon_int);
3853 wlvif->beacon_int = bss_conf->beacon_int;
3856 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3857 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3859 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3862 if (changed & BSS_CHANGED_BEACON) {
3863 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3870 wl1271_error("beacon info change failed: %d", ret);
3874 /* AP mode changes */
/*
 * wl1271_bss_info_changed_ap - AP-side bss_info_changed handling:
 * rebuild rate policies and templates on basic-rate changes, start/stop
 * the AP role when beaconing is enabled/disabled, apply ERP changes, and
 * push HT operation mode when running on an HT channel.
 * NOTE(review): excerpt elides error checks between steps.
 */
3875 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3876 struct ieee80211_vif *vif,
3877 struct ieee80211_bss_conf *bss_conf,
3880 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3883 if (changed & BSS_CHANGED_BASIC_RATES) {
3884 u32 rates = bss_conf->basic_rates;
3886 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3888 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3889 wlvif->basic_rate_set);
3891 ret = wl1271_init_ap_rates(wl, wlvif);
3893 wl1271_error("AP rate policy change failed %d", ret);
3897 ret = wl1271_ap_init_templates(wl, vif);
3901 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3905 ret = wlcore_set_beacon_template(wl, vif, true);
3910 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3914 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3915 if (bss_conf->enable_beacon) {
3916 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3917 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* replay keys that were recorded before the AP started */
3921 ret = wl1271_ap_init_hwenc(wl, wlvif);
3925 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3926 wl1271_debug(DEBUG_AP, "started AP");
3929 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3930 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3934 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3935 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3937 wl1271_debug(DEBUG_AP, "stopped AP");
3942 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3946 /* Handle HT information change */
3947 if ((changed & BSS_CHANGED_HT) &&
3948 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3949 ret = wl1271_acx_set_ht_information(wl, wlvif,
3950 bss_conf->ht_operation_mode)
3952 wl1271_warning("Set ht information failed %d", ret);
/*
 * wlcore_set_bssid - configure state for a newly learned BSSID:
 * cache beacon interval and basic/peer rate sets, stop any scheduled scan
 * owned by this vif (sched_scan only runs while disconnected), update
 * rate policies, rebuild null-data templates, set the SSID, and mark the
 * vif in use.  NOTE(review): error checks elided in excerpt.
 */
3961 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3962 struct ieee80211_bss_conf *bss_conf,
3968 wl1271_debug(DEBUG_MAC80211,
3969 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3970 bss_conf->bssid, bss_conf->aid,
3971 bss_conf->beacon_int,
3972 bss_conf->basic_rates, sta_rate_set);
3974 wlvif->beacon_int = bss_conf->beacon_int;
3975 rates = bss_conf->basic_rates;
3976 wlvif->basic_rate_set =
3977 wl1271_tx_enabled_rates_get(wl, rates,
3980 wl1271_tx_min_rate_get(wl,
3981 wlvif->basic_rate_set);
3985 wl1271_tx_enabled_rates_get(wl,
3989 /* we only support sched_scan while not connected */
3990 if (wl->sched_vif == wlvif)
3991 wl->ops->sched_scan_stop(wl, wlvif);
3993 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3997 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4001 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4005 wlcore_set_ssid(wl, wlvif);
4007 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * wlcore_clear_bssid - drop the current BSSID: revert to the band's
 * minimum rates, refresh rate policies, stop the STA role if it was in
 * use, and clear the IN_USE flag.
 */
4012 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4016 /* revert back to minimum rates for the current band */
4017 wl1271_set_band_rate(wl, wlvif);
4018 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4020 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4024 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4025 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4026 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4031 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4034 /* STA/IBSS mode changes */
/*
 * wl1271_bss_info_changed_sta - STA/IBSS-side bss_info_changed handling.
 * Processes (in order): beacon info, IBSS join/leave, CQM RSSI triggers,
 * peer capability snapshot for the AP, BSSID set/clear, IBSS rates, ERP
 * changes, a JOIN when required, association set/unset, PS mode changes,
 * HT peer capabilities and HT information after join, and finally the
 * ARP filter/template reconfiguration.
 * NOTE(review): many error-check and brace lines elided in this excerpt;
 * flow between steps is only partially visible.
 */
4035 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4036 struct ieee80211_vif *vif,
4037 struct ieee80211_bss_conf *bss_conf,
4040 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4041 bool do_join = false;
4042 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4043 bool ibss_joined = false;
4044 u32 sta_rate_set = 0;
4046 struct ieee80211_sta *sta;
4047 bool sta_exists = false;
4048 struct ieee80211_sta_ht_cap sta_ht_cap;
4051 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4057 if (changed & BSS_CHANGED_IBSS) {
4058 if (bss_conf->ibss_joined) {
4059 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* leaving the IBSS: tear down assoc state and stop the role */
4062 wlcore_unset_assoc(wl, wlvif);
4063 wl12xx_cmd_role_stop_sta(wl, wlvif);
4067 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4070 /* Need to update the SSID (for filtering etc) */
4071 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4074 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4075 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4076 bss_conf->enable_beacon ? "enabled" : "disabled");
4081 if (changed & BSS_CHANGED_CQM) {
4082 bool enable = false;
4083 if (bss_conf->cqm_rssi_thold)
4085 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4086 bss_conf->cqm_rssi_thold,
4087 bss_conf->cqm_rssi_hyst);
4090 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
/* snapshot the AP's rates/HT caps under RCU for use after join */
4093 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4094 BSS_CHANGED_ASSOC)) {
4096 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4098 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4100 /* save the supp_rates of the ap */
4101 sta_rate_set = sta->supp_rates[wlvif->band];
4102 if (sta->ht_cap.ht_supported)
4104 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4105 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4106 sta_ht_cap = sta->ht_cap;
4113 if (changed & BSS_CHANGED_BSSID) {
4114 if (!is_zero_ether_addr(bss_conf->bssid)) {
4115 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4120 /* Need to update the BSSID (for filtering etc) */
4123 ret = wlcore_clear_bssid(wl, wlvif);
4129 if (changed & BSS_CHANGED_IBSS) {
4130 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4131 bss_conf->ibss_joined);
4133 if (bss_conf->ibss_joined) {
4134 u32 rates = bss_conf->basic_rates;
4135 wlvif->basic_rate_set =
4136 wl1271_tx_enabled_rates_get(wl, rates,
4139 wl1271_tx_min_rate_get(wl,
4140 wlvif->basic_rate_set);
4142 /* by default, use 11b + OFDM rates */
4143 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4144 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4150 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4155 ret = wlcore_join(wl, wlvif);
4157 wl1271_warning("cmd join failed %d", ret);
4162 if (changed & BSS_CHANGED_ASSOC) {
4163 if (bss_conf->assoc) {
4164 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4169 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4170 wl12xx_set_authorized(wl, wlvif);
4172 wlcore_unset_assoc(wl, wlvif);
4176 if (changed & BSS_CHANGED_PS) {
4177 if ((bss_conf->ps) &&
4178 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4179 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4183 if (wl->conf.conn.forced_ps) {
4184 ps_mode = STATION_POWER_SAVE_MODE;
4185 ps_mode_str = "forced";
4187 ps_mode = STATION_AUTO_PS_MODE;
4188 ps_mode_str = "auto";
4191 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4193 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4195 wl1271_warning("enter %s ps failed %d",
4197 } else if (!bss_conf->ps &&
4198 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4199 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4201 ret = wl1271_ps_set_mode(wl, wlvif,
4202 STATION_ACTIVE_MODE);
4204 wl1271_warning("exit auto ps failed %d", ret);
4208 /* Handle new association with HT. Do this after join. */
4210 (changed & BSS_CHANGED_HT)) {
4212 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4214 ret = wlcore_hw_set_peer_cap(wl,
4220 wl1271_warning("Set ht cap failed %d", ret);
4226 ret = wl1271_acx_set_ht_information(wl, wlvif,
4227 bss_conf->ht_operation_mode);
4229 wl1271_warning("Set ht information failed %d",
4236 /* Handle arp filtering. Done after join. */
4237 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4238 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4239 __be32 addr = bss_conf->arp_addr_list[0];
4240 wlvif->sta.qos = bss_conf->qos;
4241 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4243 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4244 wlvif->ip_addr = addr;
4246 * The template should have been configured only upon
4247 * association. however, it seems that the correct ip
4248 * isn't being set (when sending), so we have to
4249 * reconfigure the template upon every ip change.
4251 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4253 wl1271_warning("build arp rsp failed: %d", ret);
4257 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4258 (ACX_ARP_FILTER_ARP_FILTERING |
4259 ACX_ARP_FILTER_AUTO_ARP),
4263 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * wl1271_op_bss_info_changed - mac80211 .bss_info_changed callback.
 * Cancels a pending connection-loss work on STA association changes and
 * flushes TX before an AP stops beaconing (both outside the mutex), then
 * dispatches to the AP or STA/IBSS handler with the chip awake.
 */
4274 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4275 struct ieee80211_vif *vif,
4276 struct ieee80211_bss_conf *bss_conf,
4279 struct wl1271 *wl = hw->priv;
4280 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4281 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4284 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4285 wlvif->role_id, (int)changed);
4288 * make sure to cancel pending disconnections if our association
4291 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4292 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4294 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4295 !bss_conf->enable_beacon)
4296 wl1271_tx_flush(wl);
4298 mutex_lock(&wl->mutex);
4300 if (unlikely(wl->state != WLCORE_STATE_ON))
4303 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4306 ret = wl1271_ps_elp_wakeup(wl);
4311 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4313 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4315 wl1271_ps_elp_sleep(wl);
4318 mutex_unlock(&wl->mutex);
/*
 * Channel-context ops (add/remove/change): this driver has no real
 * chanctx bookkeeping, so these handlers only emit debug traces of the
 * requested channel and chandef type.
 */
4321 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4322 struct ieee80211_chanctx_conf *ctx)
4324 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4325 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4326 cfg80211_get_chandef_type(&ctx->def));
4330 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4331 struct ieee80211_chanctx_conf *ctx)
4333 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4334 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4335 cfg80211_get_chandef_type(&ctx->def));
4338 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4339 struct ieee80211_chanctx_conf *ctx,
4342 wl1271_debug(DEBUG_MAC80211,
4343 "mac80211 change chanctx %d (type %d) changed 0x%x",
4344 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4345 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * Record the assigned channel context on the vif (band, channel number,
 * channel type) under wl->mutex, then refresh the vif's default rate
 * set for the new band.
 */
4348 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4349 struct ieee80211_vif *vif,
4350 struct ieee80211_chanctx_conf *ctx)
4352 struct wl1271 *wl = hw->priv;
4353 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4354 int channel = ieee80211_frequency_to_channel(
4355 ctx->def.chan->center_freq);
4357 wl1271_debug(DEBUG_MAC80211,
4358 "mac80211 assign chanctx (role %d) %d (type %d)",
4359 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4361 mutex_lock(&wl->mutex);
4363 wlvif->band = ctx->def.chan->band;
4364 wlvif->channel = channel;
4365 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4367 /* update default rates according to the band */
4368 wl1271_set_band_rate(wl, wlvif);
4370 mutex_unlock(&wl->mutex);
/*
 * Channel context is being unassigned from the vif: trace it and flush
 * any pending TX so no frames go out on the old channel.
 */
4375 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4376 struct ieee80211_vif *vif,
4377 struct ieee80211_chanctx_conf *ctx)
4379 struct wl1271 *wl = hw->priv;
4380 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4382 wl1271_debug(DEBUG_MAC80211,
4383 "mac80211 unassign chanctx (role %d) %d (type %d)",
4385 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4386 cfg80211_get_chandef_type(&ctx->def));
4388 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx handler: program per-AC EDCA parameters (cw_min/max,
 * aifs, txop) and the matching TID configuration into firmware.
 * NOTE(review): the ps_scheme selection condition (presumably based on
 * params->uapsd) and the error-label lines are missing from this
 * extract — confirm against the full source.
 */
4391 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4392 struct ieee80211_vif *vif, u16 queue,
4393 const struct ieee80211_tx_queue_params *params)
4395 struct wl1271 *wl = hw->priv;
4396 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4400 mutex_lock(&wl->mutex);
4402 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4405 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4407 ps_scheme = CONF_PS_SCHEME_LEGACY;
4409 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4412 ret = wl1271_ps_elp_wakeup(wl);
/* mac80211 gives txop in 32us units; firmware wants microseconds (<<5) */
4417 * the txop is confed in units of 32us by the mac80211,
4420 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4421 params->cw_min, params->cw_max,
4422 params->aifs, params->txop << 5);
4426 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4427 CONF_CHANNEL_TYPE_EDCF,
4428 wl1271_tx_get_queue(queue),
4429 ps_scheme, CONF_ACK_POLICY_LEGACY,
4433 wl1271_ps_elp_sleep(wl);
4436 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf handler: query the firmware TSF via ACX.
 * Returns ULLONG_MAX (the initializer) if the chip is off or the
 * query fails before mactime is filled in.
 */
4441 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4442 struct ieee80211_vif *vif)
4445 struct wl1271 *wl = hw->priv;
4446 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4447 u64 mactime = ULLONG_MAX;
4450 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4452 mutex_lock(&wl->mutex);
4454 if (unlikely(wl->state != WLCORE_STATE_ON))
4457 ret = wl1271_ps_elp_wakeup(wl);
4461 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4466 wl1271_ps_elp_sleep(wl);
4469 mutex_unlock(&wl->mutex);
/*
 * Minimal survey implementation: report only the currently configured
 * channel (no noise/usage statistics from firmware).
 * NOTE(review): the idx bounds check is on a line missing from this
 * extract — presumably idx != 0 returns -ENOENT; verify.
 */
4473 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4474 struct survey_info *survey)
4476 struct ieee80211_conf *conf = &hw->conf;
4481 survey->channel = conf->channel;
/*
 * Allocate an HLID (host link ID) for a newly added AP-mode station:
 * enforce the AP_MAX_STATIONS cap, grab a link slot, mark it in the
 * vif's sta_hlid_map, record the MAC address and bump the active count.
 */
4486 static int wl1271_allocate_sta(struct wl1271 *wl,
4487 struct wl12xx_vif *wlvif,
4488 struct ieee80211_sta *sta)
4490 struct wl1271_station *wl_sta;
4494 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4495 wl1271_warning("could not allocate HLID - too much stations");
4499 wl_sta = (struct wl1271_station *)sta->drv_priv;
4500 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4502 wl1271_warning("could not allocate HLID - too many links");
4506 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4507 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4508 wl->active_sta_count++;
/*
 * Release a station's HLID: clear its bookkeeping bits (sta_hlid_map,
 * host and firmware PS maps), free the link and decrement the active
 * station count. No-op if the hlid was never allocated.
 */
4512 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4514 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4517 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4518 __clear_bit(hlid, &wl->ap_ps_map);
4519 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4520 wl12xx_free_link(wl, wlvif, &hlid);
4521 wl->active_sta_count--;
/* last STA gone: restart the TX watchdog so buffered frames can drain */
4524 * rearm the tx watchdog when the last STA is freed - give the FW a
4525 * chance to return STA-buffered packets before complaining.
4527 if (wl->active_sta_count == 0)
4528 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID for the station, then issue the
 * ADD_PEER firmware command; on command failure the HLID is freed again.
 */
4531 static int wl12xx_sta_add(struct wl1271 *wl,
4532 struct wl12xx_vif *wlvif,
4533 struct ieee80211_sta *sta)
4535 struct wl1271_station *wl_sta;
4539 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4541 ret = wl1271_allocate_sta(wl, wlvif, sta);
4545 wl_sta = (struct wl1271_station *)sta->drv_priv;
4546 hlid = wl_sta->hlid;
4548 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4550 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: sanity-check the station is tracked, send the
 * REMOVE_PEER command, then release its HLID.
 * NOTE(review): 'id' is declared/assigned (id = wl_sta->hlid) on lines
 * missing from this extract — do not flag it as undeclared.
 */
4555 static int wl12xx_sta_remove(struct wl1271 *wl,
4556 struct wl12xx_vif *wlvif,
4557 struct ieee80211_sta *sta)
4559 struct wl1271_station *wl_sta;
4562 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4564 wl_sta = (struct wl1271_station *)sta->drv_priv;
4566 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4569 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4573 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a ROC (remain-on-channel) on this vif's role unless some role
 * already holds a ROC (roc_map non-empty) — only one ROC at a time.
 */
4577 static void wlcore_roc_if_possible(struct wl1271 *wl,
4578 struct wl12xx_vif *wlvif)
4580 if (find_first_bit(wl->roc_map,
4581 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4584 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4587 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection (between auth and authorized).
 * First in-connection station triggers a ROC so the AP stays on channel;
 * when the count drops back to zero the ROC is cancelled (croc).
 */
4590 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4591 struct wl12xx_vif *wlvif,
4592 struct wl1271_station *wl_sta,
4595 if (in_connection) {
4596 if (WARN_ON(wl_sta->in_connection))
4598 wl_sta->in_connection = true;
4599 if (!wlvif->inconn_count++)
4600 wlcore_roc_if_possible(wl, wlvif);
4602 if (!wl_sta->in_connection)
4605 wl_sta->in_connection = false;
4606 wlvif->inconn_count--;
4607 if (WARN_ON(wlvif->inconn_count < 0))
4610 if (!wlvif->inconn_count)
4611 if (test_bit(wlvif->role_id, wl->roc_map))
4612 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core station state machine: translate mac80211 sta_state transitions
 * into firmware operations for AP mode (add/remove/authorize peer) and
 * STA mode (authorize/deauthorize, ROC management during connection).
 * NOTE(review): the is_ap/is_sta guards on several branches sit on lines
 * missing from this extract (numbering gaps, e.g. 4630, 4641, 4651) —
 * each branch is gated on the interface type in the full source; verify.
 */
4616 static int wl12xx_update_sta_state(struct wl1271 *wl,
4617 struct wl12xx_vif *wlvif,
4618 struct ieee80211_sta *sta,
4619 enum ieee80211_sta_state old_state,
4620 enum ieee80211_sta_state new_state)
4622 struct wl1271_station *wl_sta;
4623 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4624 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4627 wl_sta = (struct wl1271_station *)sta->drv_priv;
4629 /* Add station (AP mode) */
4631 old_state == IEEE80211_STA_NOTEXIST &&
4632 new_state == IEEE80211_STA_NONE) {
4633 ret = wl12xx_sta_add(wl, wlvif, sta);
4637 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4640 /* Remove station (AP mode) */
4642 old_state == IEEE80211_STA_NONE &&
4643 new_state == IEEE80211_STA_NOTEXIST) {
4645 wl12xx_sta_remove(wl, wlvif, sta);
4647 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4650 /* Authorize station (AP mode) */
4652 new_state == IEEE80211_STA_AUTHORIZED) {
4653 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
4657 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4662 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4665 /* Authorize station */
4667 new_state == IEEE80211_STA_AUTHORIZED) {
4668 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4669 ret = wl12xx_set_authorized(wl, wlvif);
/* STA mode: dropping back from AUTHORIZED to ASSOC clears auth flags */
4675 old_state == IEEE80211_STA_AUTHORIZED &&
4676 new_state == IEEE80211_STA_ASSOC) {
4677 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4678 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4681 /* clear ROCs on failure or authorization */
4683 (new_state == IEEE80211_STA_AUTHORIZED ||
4684 new_state == IEEE80211_STA_NOTEXIST)) {
4685 if (test_bit(wlvif->role_id, wl->roc_map))
4686 wl12xx_croc(wl, wlvif->role_id);
/* STA connecting: ROC on our channel if no other role holds one */
4690 old_state == IEEE80211_STA_NOTEXIST &&
4691 new_state == IEEE80211_STA_NONE) {
4692 if (find_first_bit(wl->roc_map,
4693 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4694 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4695 wl12xx_roc(wl, wlvif, wlvif->role_id,
4696 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: lock, wake the chip, delegate to
 * wl12xx_update_sta_state(), then sleep and unlock. For downward
 * transitions (new_state < old_state) the handler does not propagate
 * the error — mac80211 must not be blocked from tearing a station down.
 */
4702 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4703 struct ieee80211_vif *vif,
4704 struct ieee80211_sta *sta,
4705 enum ieee80211_sta_state old_state,
4706 enum ieee80211_sta_state new_state)
4708 struct wl1271 *wl = hw->priv;
4709 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4712 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4713 sta->aid, old_state, new_state);
4715 mutex_lock(&wl->mutex);
4717 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4722 ret = wl1271_ps_elp_wakeup(wl);
4726 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4728 wl1271_ps_elp_sleep(wl);
4730 mutex_unlock(&wl->mutex);
4731 if (new_state < old_state)
/*
 * mac80211 ampdu_action handler. Only RX block-ack sessions are driven
 * from the host: RX_START opens a BA receiver session in firmware
 * (bounded by ba_rx_session_count_max and a per-link tid bitmap),
 * RX_STOP tears it down. All TX AMPDU actions are managed autonomously
 * by the firmware and are rejected here (the full source returns
 * -EINVAL for them — the return lines are missing from this extract).
 */
4736 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4737 struct ieee80211_vif *vif,
4738 enum ieee80211_ampdu_mlme_action action,
4739 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4742 struct wl1271 *wl = hw->priv;
4743 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4745 u8 hlid, *ba_bitmap;
4747 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4750 /* sanity check - the fields in FW are only 8bits wide */
4751 if (WARN_ON(tid > 0xFF))
4754 mutex_lock(&wl->mutex);
4756 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the firmware link (hlid) for this BA session */
4761 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4762 hlid = wlvif->sta.hlid;
4763 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4764 struct wl1271_station *wl_sta;
4766 wl_sta = (struct wl1271_station *)sta->drv_priv;
4767 hlid = wl_sta->hlid;
4773 ba_bitmap = &wl->links[hlid].ba_bitmap;
4775 ret = wl1271_ps_elp_wakeup(wl);
4779 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4783 case IEEE80211_AMPDU_RX_START:
4784 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4789 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4791 wl1271_error("exceeded max RX BA sessions");
4795 if (*ba_bitmap & BIT(tid)) {
4797 wl1271_error("cannot enable RX BA session on active "
4802 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4805 *ba_bitmap |= BIT(tid);
4806 wl->ba_rx_session_count++;
4810 case IEEE80211_AMPDU_RX_STOP:
4811 if (!(*ba_bitmap & BIT(tid))) {
4813 * this happens on reconfig - so only output a debug
4814 * message for now, and don't fail the function.
4816 wl1271_debug(DEBUG_MAC80211,
4817 "no active RX BA session on tid: %d",
4823 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4826 *ba_bitmap &= ~BIT(tid);
4827 wl->ba_rx_session_count--;
4832 * The BA initiator session management in FW independently.
4833 * Falling break here on purpose for all TX APDU commands.
4835 case IEEE80211_AMPDU_TX_START:
4836 case IEEE80211_AMPDU_TX_STOP_CONT:
4837 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4838 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4839 case IEEE80211_AMPDU_TX_OPERATIONAL:
4844 wl1271_error("Incorrect ampdu action id=%x\n", action);
4848 wl1271_ps_elp_sleep(wl);
4851 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask handler: cache the per-band legacy rate
 * masks on the vif, and — for an unassociated STA — immediately push
 * the new default rate policy to firmware.
 */
4856 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4857 struct ieee80211_vif *vif,
4858 const struct cfg80211_bitrate_mask *mask)
4860 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4861 struct wl1271 *wl = hw->priv;
4864 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4865 mask->control[NL80211_BAND_2GHZ].legacy,
4866 mask->control[NL80211_BAND_5GHZ].legacy);
4868 mutex_lock(&wl->mutex);
/* remember the requested mask per band for later (re)association */
4870 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4871 wlvif->bitrate_masks[i] =
4872 wl1271_tx_enabled_rates_get(wl,
4873 mask->control[i].legacy,
4876 if (unlikely(wl->state != WLCORE_STATE_ON))
4879 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4880 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4882 ret = wl1271_ps_elp_wakeup(wl);
4886 wl1271_set_band_rate(wl, wlvif);
4888 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4889 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4891 wl1271_ps_elp_sleep(wl);
4894 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch handler: flush TX, then ask the hardware ops
 * to perform CSA on every STA vif. If the chip is already off, report
 * the switch as failed via ieee80211_chswitch_done(). A delayed work is
 * armed to flag failure ~5s after the expected switch time.
 */
4899 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4900 struct ieee80211_channel_switch *ch_switch)
4902 struct wl1271 *wl = hw->priv;
4903 struct wl12xx_vif *wlvif;
4906 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4908 wl1271_tx_flush(wl);
4910 mutex_lock(&wl->mutex);
4912 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4913 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4914 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4915 ieee80211_chswitch_done(vif, false);
4918 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4922 ret = wl1271_ps_elp_wakeup(wl);
4926 /* TODO: change mac80211 to pass vif as param */
4927 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4928 unsigned long delay_usec;
4930 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4934 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4936 /* indicate failure 5 seconds after channel switch time */
4937 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4939 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4940 usecs_to_jiffies(delay_usec) +
4941 msecs_to_jiffies(5000));
4945 wl1271_ps_elp_sleep(wl);
4948 mutex_unlock(&wl->mutex);
/* mac80211 flush handler: drain all queued TX (drop flag is ignored). */
4951 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4953 struct wl1271 *wl = hw->priv;
4955 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel handler: start the device role on the
 * requested channel and arm roc_complete_work for the given duration.
 * Only one ROC may be active at a time — busy if roc_vif is set or any
 * role already appears in roc_map.
 */
4958 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4959 struct ieee80211_vif *vif,
4960 struct ieee80211_channel *chan,
4963 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4964 struct wl1271 *wl = hw->priv;
4965 int channel, ret = 0;
4967 channel = ieee80211_frequency_to_channel(chan->center_freq);
4969 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4970 channel, wlvif->role_id);
4972 mutex_lock(&wl->mutex);
4974 if (unlikely(wl->state != WLCORE_STATE_ON))
4977 /* return EBUSY if we can't ROC right now */
4978 if (WARN_ON(wl->roc_vif ||
4979 find_first_bit(wl->roc_map,
4980 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4985 ret = wl1271_ps_elp_wakeup(wl);
4989 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* schedule automatic ROC completion when the duration expires */
4994 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4995 msecs_to_jiffies(duration));
4997 wl1271_ps_elp_sleep(wl);
4999 mutex_unlock(&wl->mutex);
/*
 * Locked helper: finish the active ROC by stopping the device role of
 * the ROC vif. Returns early if no ROC is active or the vif is no
 * longer initialized. Caller holds wl->mutex.
 */
5003 static int __wlcore_roc_completed(struct wl1271 *wl)
5005 struct wl12xx_vif *wlvif;
5008 /* already completed */
5009 if (unlikely(!wl->roc_vif))
5012 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5014 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5017 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Public ROC-completion path: take the mutex, wake the chip, run
 * __wlcore_roc_completed(), then sleep and unlock.
 */
5026 static int wlcore_roc_completed(struct wl1271 *wl)
5030 wl1271_debug(DEBUG_MAC80211, "roc complete");
5032 mutex_lock(&wl->mutex);
5034 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5039 ret = wl1271_ps_elp_wakeup(wl);
5043 ret = __wlcore_roc_completed(wl);
5045 wl1271_ps_elp_sleep(wl);
5047 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when the ROC duration elapses: complete the ROC
 * and notify mac80211 via ieee80211_remain_on_channel_expired().
 */
5052 static void wlcore_roc_complete_work(struct work_struct *work)
5054 struct delayed_work *dwork;
5058 dwork = container_of(work, struct delayed_work, work);
5059 wl = container_of(dwork, struct wl1271, roc_complete_work);
5061 ret = wlcore_roc_completed(wl);
5063 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel ROC: flush TX, cancel the pending completion work
 * (cancel_delayed_work_sync rather than flush_work — flushing could
 * deadlock if called from the same workqueue) and complete synchronously.
 */
5066 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5068 struct wl1271 *wl = hw->priv;
5070 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5073 wl1271_tx_flush(wl);
5076 * we can't just flush_work here, because it might deadlock
5077 * (as we might get called from the same workqueue)
5079 cancel_delayed_work_sync(&wl->roc_complete_work);
5080 wlcore_roc_completed(wl);
/* mac80211 sta_rc_update: thin pass-through to the hw-specific op. */
5085 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5086 struct ieee80211_vif *vif,
5087 struct ieee80211_sta *sta,
5090 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5091 struct wl1271 *wl = hw->priv;
5093 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 tx_frames_pending: true if frames wait either in the host
 * TX queues or inside the firmware (tx_frames_cnt).
 */
5096 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5098 struct wl1271 *wl = hw->priv;
5101 mutex_lock(&wl->mutex);
5103 if (unlikely(wl->state != WLCORE_STATE_ON))
5106 /* packets are considered pending if in the TX queue or the FW */
5107 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5109 mutex_unlock(&wl->mutex);
/* 2.4 GHz legacy rate table (CCK 1-11 Mbps + OFDM 6-54 Mbps); hw_value
 * maps to the firmware CONF_HW_BIT_RATE_* bitmask.
 * NOTE(review): .bitrate fields appear on lines missing from this
 * extract. */
5114 /* can't be const, mac80211 writes to this */
5115 static struct ieee80211_rate wl1271_rates[] = {
5117 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5118 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5120 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5121 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5122 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5124 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5125 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5126 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5128 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5129 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5130 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5132 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5133 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5135 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5136 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5138 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5139 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5141 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5142 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5144 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5145 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5147 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5148 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5150 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5151 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5153 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5154 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channel list (channels 1-14). */
5157 /* can't be const, mac80211 writes to this */
5158 static struct ieee80211_channel wl1271_channels[] = {
5159 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5160 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5161 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5162 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5163 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5164 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5165 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5166 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5167 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5168 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5169 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5170 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5171 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5172 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz band descriptor tying the channel and rate tables together. */
5175 /* can't be const, mac80211 writes to this */
5176 static struct ieee80211_supported_band wl1271_band_2ghz = {
5177 .channels = wl1271_channels,
5178 .n_channels = ARRAY_SIZE(wl1271_channels),
5179 .bitrates = wl1271_rates,
5180 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/* OFDM-only rate table for 5 GHz (no CCK rates on this band). */
5183 /* 5 GHz data rates for WL1273 */
5184 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5186 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5187 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5189 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5190 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5192 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5193 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5195 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5196 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5198 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5199 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5201 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5202 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5204 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5205 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5207 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5208 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel list, including DFS channels 52-140. */
5211 /* 5 GHz band channels for WL1273 */
5212 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5213 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5214 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5215 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5216 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5217 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5218 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5219 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5220 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5221 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5222 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5223 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5224 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5225 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5226 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5227 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5228 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5229 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5230 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5231 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5232 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5233 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5234 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5235 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5236 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5237 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5238 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5239 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5240 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5241 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5242 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5243 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5244 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5245 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5246 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor tying the 5 GHz channel and rate tables. */
5249 static struct ieee80211_supported_band wl1271_band_5ghz = {
5250 .channels = wl1271_channels_5ghz,
5251 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5252 .bitrates = wl1271_rates_5ghz,
5253 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for the wlcore driver — every handler
 * registered with ieee80211_register_hw() lives here.
 */
5256 static const struct ieee80211_ops wl1271_ops = {
5257 .start = wl1271_op_start,
5258 .stop = wlcore_op_stop,
5259 .add_interface = wl1271_op_add_interface,
5260 .remove_interface = wl1271_op_remove_interface,
5261 .change_interface = wl12xx_op_change_interface,
5263 .suspend = wl1271_op_suspend,
5264 .resume = wl1271_op_resume,
5266 .config = wl1271_op_config,
5267 .prepare_multicast = wl1271_op_prepare_multicast,
5268 .configure_filter = wl1271_op_configure_filter,
5270 .set_key = wlcore_op_set_key,
5271 .hw_scan = wl1271_op_hw_scan,
5272 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5273 .sched_scan_start = wl1271_op_sched_scan_start,
5274 .sched_scan_stop = wl1271_op_sched_scan_stop,
5275 .bss_info_changed = wl1271_op_bss_info_changed,
5276 .set_frag_threshold = wl1271_op_set_frag_threshold,
5277 .set_rts_threshold = wl1271_op_set_rts_threshold,
5278 .conf_tx = wl1271_op_conf_tx,
5279 .get_tsf = wl1271_op_get_tsf,
5280 .get_survey = wl1271_op_get_survey,
5281 .sta_state = wl12xx_op_sta_state,
5282 .ampdu_action = wl1271_op_ampdu_action,
5283 .tx_frames_pending = wl1271_tx_frames_pending,
5284 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5285 .channel_switch = wl12xx_op_channel_switch,
5286 .flush = wlcore_op_flush,
5287 .remain_on_channel = wlcore_op_remain_on_channel,
5288 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5289 .add_chanctx = wlcore_op_add_chanctx,
5290 .remove_chanctx = wlcore_op_remove_chanctx,
5291 .change_chanctx = wlcore_op_change_chanctx,
5292 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5293 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5294 .sta_rc_update = wlcore_op_sta_rc_update,
5295 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Map a hardware RX rate code to a mac80211 rate index via the per-band
 * lookup table, logging an error for out-of-range or unsupported codes.
 */
5299 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5305 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5306 wl1271_error("Illegal RX rate from HW: %d", rate);
5310 idx = wl->band_rate_to_idx[band][rate];
5311 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5312 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * sysfs 'bt_coex_state' show: print the current soft-gemini (BT coex)
 * enable state with a short legend, under wl->mutex.
 */
5319 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5320 struct device_attribute *attr,
5323 struct wl1271 *wl = dev_get_drvdata(dev);
5328 mutex_lock(&wl->mutex);
5329 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5331 mutex_unlock(&wl->mutex);
/*
 * sysfs 'bt_coex_state' store: parse 0/1, update wl->sg_enabled and —
 * if the chip is running — push the new soft-gemini setting to firmware.
 * No-op when the value doesn't change.
 */
5337 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5338 struct device_attribute *attr,
5339 const char *buf, size_t count)
5341 struct wl1271 *wl = dev_get_drvdata(dev);
5345 ret = kstrtoul(buf, 10, &res);
5347 wl1271_warning("incorrect value written to bt_coex_mode");
5351 mutex_lock(&wl->mutex);
5355 if (res == wl->sg_enabled)
5358 wl->sg_enabled = res;
5360 if (unlikely(wl->state != WLCORE_STATE_ON))
5363 ret = wl1271_ps_elp_wakeup(wl);
5367 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5368 wl1271_ps_elp_sleep(wl);
5371 mutex_unlock(&wl->mutex);
/* bt_coex_state: root-writable, world-readable sysfs attribute */
5375 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5376 wl1271_sysfs_show_bt_coex_state,
5377 wl1271_sysfs_store_bt_coex_state);
/*
 * sysfs 'hw_pg_ver' show: print the chip's PG (production/process grade)
 * version, or "n/a" when it hasn't been read yet (negative sentinel).
 */
5379 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5380 struct device_attribute *attr,
5383 struct wl1271 *wl = dev_get_drvdata(dev);
5388 mutex_lock(&wl->mutex);
5389 if (wl->hw_pg_ver >= 0)
5390 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5392 len = snprintf(buf, len, "n/a\n");
5393 mutex_unlock(&wl->mutex);
/* hw_pg_ver: read-only sysfs attribute */
5398 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5399 wl1271_sysfs_show_hw_pg_ver, NULL);
/*
 * sysfs binary 'fwlog' read: blocking, single-reader consumption of the
 * firmware log buffer. Waits (interruptibly, exclusive waiter) until
 * fwlog_size > 0, copies up to 'count' bytes out, and compacts the
 * remaining log to the front of the buffer. Seeking is unsupported —
 * consumed data is discarded.
 */
5401 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5402 struct bin_attribute *bin_attr,
5403 char *buffer, loff_t pos, size_t count)
5405 struct device *dev = container_of(kobj, struct device, kobj);
5406 struct wl1271 *wl = dev_get_drvdata(dev);
5410 ret = mutex_lock_interruptible(&wl->mutex);
5412 return -ERESTARTSYS;
5414 /* Let only one thread read the log at a time, blocking others */
5415 while (wl->fwlog_size == 0) {
5418 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5420 TASK_INTERRUPTIBLE);
5422 if (wl->fwlog_size != 0) {
5423 finish_wait(&wl->fwlog_waitq, &wait);
/* drop the mutex while sleeping so the writer can make progress */
5427 mutex_unlock(&wl->mutex);
5430 finish_wait(&wl->fwlog_waitq, &wait);
5432 if (signal_pending(current))
5433 return -ERESTARTSYS;
5435 ret = mutex_lock_interruptible(&wl->mutex);
5437 return -ERESTARTSYS;
5440 /* Check if the fwlog is still valid */
5441 if (wl->fwlog_size < 0) {
5442 mutex_unlock(&wl->mutex);
5446 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5447 len = min(count, (size_t)wl->fwlog_size);
5448 wl->fwlog_size -= len;
5449 memcpy(buffer, wl->fwlog, len);
5451 /* Make room for new messages */
5452 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5454 mutex_unlock(&wl->mutex);
/* fwlog: root-read-only binary sysfs attribute */
5459 static struct bin_attribute fwlog_attr = {
5460 .attr = {.name = "fwlog", .mode = S_IRUSR},
5461 .read = wl1271_sysfs_read_fwlog,
/*
 * Derive the wiphy's MAC address set from a base OUI+NIC pair: each
 * address i uses the same OUI with an incrementing NIC part (the
 * increment lines are not visible in this extract — the loop fills
 * addresses[i] from oui/nic bytes). If fewer real addresses exist than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is synthesized by copying the
 * first address and setting the locally-administered (LAA) bit.
 */
5464 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5468 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5471 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5472 wl1271_warning("NIC part of the MAC address wraps around!");
5474 for (i = 0; i < wl->num_mac_addr; i++) {
5475 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5476 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5477 wl->addresses[i].addr[2] = (u8) oui;
5478 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5479 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5480 wl->addresses[i].addr[5] = (u8) nic;
5484 /* we may be one address short at the most */
5485 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5488 * turn on the LAA bit in the first address and use it as
5491 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5492 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5493 memcpy(&wl->addresses[idx], &wl->addresses[0],
5494 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet group = locally-administered address bit */
5496 wl->addresses[idx].addr[2] |= BIT(1);
5499 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5500 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Probe-time hardware identification: power the chip on, read the chip
 * ID register, reset the fuse MAC fields, fetch the PG version and (if
 * the op exists) the fused MAC address, then power off again.
 */
5503 static int wl12xx_get_hw_info(struct wl1271 *wl)
5507 ret = wl12xx_set_power_on(wl);
5511 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5515 wl->fuse_oui_addr = 0;
5516 wl->fuse_nic_addr = 0;
5518 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5522 if (wl->ops->get_mac)
5523 ret = wl->ops->get_mac(wl);
5526 wl1271_power_off(wl);
/*
 * Register the device with mac80211: derive the MAC address set from
 * the NVS (byte layout below) or fall back to the fuse-programmed
 * address when the NVS one is all-zero, then call
 * ieee80211_register_hw() and initialize debugfs. Idempotent — returns
 * early if already registered.
 */
5530 static int wl1271_register_hw(struct wl1271 *wl)
5533 u32 oui_addr = 0, nic_addr = 0;
5535 if (wl->mac80211_registered)
5538 if (wl->nvs_len >= 12) {
5539 /* NOTE: The wl->nvs->nvs element must be first, in
5540 * order to simplify the casting, we assume it is at
5541 * the beginning of the wl->nvs structure.
5543 u8 *nvs_ptr = (u8 *)wl->nvs;
/* OUI from nvs bytes 11,10,6; NIC from bytes 5,4,3 (NVS layout) */
5546 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5548 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5551 /* if the MAC address is zeroed in the NVS derive from fuse */
5552 if (oui_addr == 0 && nic_addr == 0) {
5553 oui_addr = wl->fuse_oui_addr;
5554 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5555 nic_addr = wl->fuse_nic_addr + 1;
5558 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5560 ret = ieee80211_register_hw(wl->hw);
5562 wl1271_error("unable to register mac80211 hw: %d", ret);
5566 wl->mac80211_registered = true;
5568 wl1271_debugfs_init(wl);
5570 wl1271_notice("loaded");
/*
 * Unregister from mac80211; stops PLT mode first (the surrounding
 * condition for wl1271_plt_stop is on a line missing from this extract).
 */
5576 static void wl1271_unregister_hw(struct wl1271 *wl)
5579 wl1271_plt_stop(wl);
5581 ieee80211_unregister_hw(wl->hw);
5582 wl->mac80211_registered = false;
/*
 * Interface-combination advertisement: station interfaces plus one
 * AP/P2P-GO/P2P-client group, up to 3 interfaces total (per-limit max
 * counts are on lines missing from this extract).
 */
5586 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5589 .types = BIT(NL80211_IFTYPE_STATION),
5593 .types = BIT(NL80211_IFTYPE_AP) |
5594 BIT(NL80211_IFTYPE_P2P_GO) |
5595 BIT(NL80211_IFTYPE_P2P_CLIENT),
5599 static struct ieee80211_iface_combination
5600 wlcore_iface_combinations[] = {
5602 .max_interfaces = 3,
5603 .limits = wlcore_iface_limits,
5604 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * wl1271_init_ieee80211 - fill in the ieee80211_hw / wiphy
 * capabilities before registration: cipher suites, hw flags,
 * interface modes, scan limits, band/channel tables, queue layout
 * and probe-response offload support.
 */
5608 static int wl1271_init_ieee80211(struct wl1271 *wl)
/* Ciphers handled in hardware; GEM is a TI vendor-specific suite. */
5611 static const u32 cipher_suites[] = {
5612 WLAN_CIPHER_SUITE_WEP40,
5613 WLAN_CIPHER_SUITE_WEP104,
5614 WLAN_CIPHER_SUITE_TKIP,
5615 WLAN_CIPHER_SUITE_CCMP,
5616 WL1271_CIPHER_SUITE_GEM,
5619 /* The tx descriptor buffer */
5620 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom for the TKIP header in hardware. */
5622 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5623 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5626 /* FIXME: find a proper value */
5627 wl->hw->channel_change_time = 10000;
5628 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
/* Capability flags reported to mac80211. */
5630 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5631 IEEE80211_HW_SUPPORTS_PS |
5632 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5633 IEEE80211_HW_SUPPORTS_UAPSD |
5634 IEEE80211_HW_HAS_RATE_CONTROL |
5635 IEEE80211_HW_CONNECTION_MONITOR |
5636 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5637 IEEE80211_HW_SPECTRUM_MGMT |
5638 IEEE80211_HW_AP_LINK_PS |
5639 IEEE80211_HW_AMPDU_AGGREGATION |
5640 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5641 IEEE80211_HW_QUEUE_CONTROL;
5643 wl->hw->wiphy->cipher_suites = cipher_suites;
5644 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5646 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5647 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5648 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
/* Scan limits: one SSID per regular scan, 16 for scheduled scan. */
5649 wl->hw->wiphy->max_scan_ssids = 1;
5650 wl->hw->wiphy->max_sched_scan_ssids = 16;
5651 wl->hw->wiphy->max_match_sets = 16;
5653 * Maximum length of elements in scanning probe request templates
5654 * should be the maximum length possible for a template, without
5655 * the IEEE80211 header of the template
5657 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5658 sizeof(struct ieee80211_header);
5660 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5661 sizeof(struct ieee80211_header);
/* Remain-on-channel capped at 5000 ms. */
5663 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5665 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5666 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5668 /* make sure all our channels fit in the scanned_ch bitmask */
5669 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5670 ARRAY_SIZE(wl1271_channels_5ghz) >
5671 WL1271_MAX_CHANNELS);
5673 * clear channel flags from the previous usage
5674 * and restore max_power & max_antenna_gain values.
5676 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5677 wl1271_band_2ghz.channels[i].flags = 0;
5678 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5679 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5682 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5683 wl1271_band_5ghz.channels[i].flags = 0;
5684 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5685 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5689 * We keep local copies of the band structs because we need to
5690 * modify them on a per-device basis.
5692 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5693 sizeof(wl1271_band_2ghz));
/* Overlay the chip-specific HT capabilities onto each band copy. */
5694 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5695 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5696 sizeof(*wl->ht_cap));
5697 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5698 sizeof(wl1271_band_5ghz));
5699 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5700 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5701 sizeof(*wl->ht_cap));
5703 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5704 &wl->bands[IEEE80211_BAND_2GHZ];
5705 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5706 &wl->bands[IEEE80211_BAND_5GHZ];
5709 * allow 4 queues per mac address we support +
5710 * 1 cab queue per mac + one global offchannel Tx queue
5712 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5714 /* the last queue is the offchannel queue */
5715 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5716 wl->hw->max_rates = 1;
5718 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5720 /* the FW answers probe-requests in AP-mode */
5721 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5722 wl->hw->wiphy->probe_resp_offload =
5723 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5724 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5725 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5727 /* allowed interface combinations */
/* Patch the runtime channel count into the static combination table. */
5728 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5729 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5730 wl->hw->wiphy->n_iface_combinations =
5731 ARRAY_SIZE(wlcore_iface_combinations);
5733 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* Per-station / per-vif private data sizes for mac80211 allocation. */
5735 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5736 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5738 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/* Default operating channel index until mac80211 configures one. */
5743 #define WL1271_DEFAULT_CHANNEL 0
/*
 * wlcore_alloc_hw - allocate and initialize the ieee80211_hw and the
 * embedded struct wl1271.
 *
 * Allocates the mac80211 hw with room for struct wl1271, the
 * chip-private area (priv_size), the TX aggregation buffer
 * (aggr_buf_size), the dummy packet, the FW log page, the event mbox
 * (mbox_size) and a 32-bit scratch buffer.  On failure the goto
 * ladder at the bottom unwinds whatever was already allocated and
 * ERR_PTR(ret) is returned.
 *
 * NOTE(review): most "if (!...)" checks and error labels are elided
 * in this excerpt; each allocation below is checked in full context.
 */
5745 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5748 struct ieee80211_hw *hw;
/* Link bitmaps assume every AP station fits in the link table. */
5753 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5755 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5757 wl1271_error("could not alloc ieee80211_hw");
5763 memset(wl, 0, sizeof(*wl));
/* Chip-family private data, freed in wlcore_free_hw(). */
5765 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5767 wl1271_error("could not alloc wl priv");
5769 goto err_priv_alloc;
5772 INIT_LIST_HEAD(&wl->wlvif_list);
/* One TX queue per AC for every possible link. */
5776 for (i = 0; i < NUM_TX_QUEUES; i++)
5777 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5778 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5780 skb_queue_head_init(&wl->deferred_rx_queue);
5781 skb_queue_head_init(&wl->deferred_tx_queue);
/* Work items used throughout the driver's lifetime. */
5783 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5784 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5785 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5786 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5787 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5788 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5789 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* Freezable so pending work doesn't race system suspend. */
5791 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5792 if (!wl->freezable_wq) {
/* Default state before firmware boot. */
5797 wl->channel = WL1271_DEFAULT_CHANNEL;
5799 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5800 wl->band = IEEE80211_BAND_2GHZ;
5801 wl->channel_type = NL80211_CHAN_NO_HT;
5803 wl->sg_enabled = true;
5804 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5805 wl->recovery_count = 0;
5808 wl->ap_fw_ps_map = 0;
5810 wl->platform_quirks = 0;
5811 wl->system_hlid = WL12XX_SYSTEM_HLID;
5812 wl->active_sta_count = 0;
5813 wl->active_link_count = 0;
/* Readers of the fwlog sysfs file block on this queue. */
5815 init_waitqueue_head(&wl->fwlog_waitq);
5817 /* The system link is always allocated */
5818 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5820 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5821 for (i = 0; i < wl->num_tx_desc; i++)
5822 wl->tx_frames[i] = NULL;
5824 spin_lock_init(&wl->wl_lock);
5826 wl->state = WLCORE_STATE_OFF;
5827 wl->fw_type = WL12XX_FW_TYPE_NONE;
5828 mutex_init(&wl->mutex);
5829 mutex_init(&wl->flush_mutex);
5830 init_completion(&wl->nvs_loading_complete);
/* Aggregation buffer is page-aligned; remember the order for free. */
5832 order = get_order(aggr_buf_size);
5833 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5834 if (!wl->aggr_buf) {
5838 wl->aggr_buf_size = aggr_buf_size;
5840 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5841 if (!wl->dummy_packet) {
5846 /* Allocate one page for the FW log */
5847 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5850 goto err_dummy_packet;
/* Event mailbox; GFP_DMA since the bus layer may DMA into it. */
5853 wl->mbox_size = mbox_size;
5854 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5860 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5861 if (!wl->buffer_32) {
/* Error unwind: free in reverse order of allocation. */
5872 free_page((unsigned long)wl->fwlog);
5875 dev_kfree_skb(wl->dummy_packet);
5878 free_pages((unsigned long)wl->aggr_buf, order);
5881 destroy_workqueue(wl->freezable_wq);
5884 wl1271_debugfs_exit(wl);
5888 ieee80211_free_hw(hw);
5892 return ERR_PTR(ret);
5894 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * wlcore_free_hw - tear down everything wlcore_alloc_hw() created.
 *
 * Wakes any sysfs fwlog readers (fwlog_size = -1 signals EOF/error),
 * removes the sysfs/bin files, then frees buffers in reverse order of
 * allocation and finally the ieee80211_hw itself.
 */
5896 int wlcore_free_hw(struct wl1271 *wl)
5898 /* Unblock any fwlog readers */
5899 mutex_lock(&wl->mutex);
5900 wl->fwlog_size = -1;
5901 wake_up_interruptible_all(&wl->fwlog_waitq);
5902 mutex_unlock(&wl->mutex);
5904 device_remove_bin_file(wl->dev, &fwlog_attr);
5906 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5908 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5909 kfree(wl->buffer_32);
5911 free_page((unsigned long)wl->fwlog);
5912 dev_kfree_skb(wl->dummy_packet);
5913 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5915 wl1271_debugfs_exit(wl);
5919 wl->fw_type = WL12XX_FW_TYPE_NONE;
5923 kfree(wl->fw_status_1);
5924 kfree(wl->tx_res_if);
5925 destroy_workqueue(wl->freezable_wq);
5928 ieee80211_free_hw(wl->hw);
5932 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * wl12xx_hardirq - top-half interrupt handler.
 *
 * Runs in hard-IRQ context under wl_lock: completes a pending ELP
 * wakeup if one is waiting, and while suspended defers the work by
 * setting PENDING_WORK and disabling the IRQ instead of waking the
 * threaded handler.  Otherwise returns IRQ_WAKE_THREAD so wlcore_irq
 * runs.  NOTE(review): the early-return line of the suspended branch
 * is elided in this excerpt.
 */
5934 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5936 struct wl1271 *wl = cookie;
5937 unsigned long flags;
5939 wl1271_debug(DEBUG_IRQ, "IRQ");
5941 /* complete the ELP completion */
5942 spin_lock_irqsave(&wl->wl_lock, flags);
5943 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5944 if (wl->elp_compl) {
5945 complete(wl->elp_compl);
/* One-shot: clear so a spurious IRQ can't complete it twice. */
5946 wl->elp_compl = NULL;
5949 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5950 /* don't enqueue a work right now. mark it as pending */
5951 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5952 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
/* nosync: we are inside this very IRQ, can't wait for it. */
5953 disable_irq_nosync(wl->irq);
5954 pm_wakeup_event(wl->dev, 0);
5955 spin_unlock_irqrestore(&wl->wl_lock, flags);
5958 spin_unlock_irqrestore(&wl->wl_lock, flags);
5960 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - continuation of probe, called asynchronously when
 * the NVS firmware request completes (fw may be NULL if not found).
 *
 * Copies the NVS blob, runs the chip-specific setup op, requests the
 * threaded IRQ (edge- or level-triggered per platform quirk), sets up
 * wakeup/WoWLAN capabilities, reads the hw info, registers with
 * mac80211 and creates the sysfs files.  Errors unwind through the
 * goto ladder near the bottom; the firmware is always released and
 * nvs_loading_complete is always completed so wlcore_remove() cannot
 * block forever.
 *
 * NOTE(review): many "if (ret < 0) goto ..." lines are elided in this
 * excerpt.
 */
5963 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5965 struct wl1271 *wl = context;
5966 struct platform_device *pdev = wl->pdev;
5967 struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
5968 struct wl12xx_platform_data *pdata = pdev_data->pdata;
5969 unsigned long irqflags;
/* Keep a private copy; the firmware struct is released below. */
5973 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5975 wl1271_error("Could not allocate nvs data");
5978 wl->nvs_len = fw->size;
/* Missing NVS is not fatal — the fuse MAC path covers it. */
5980 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5986 ret = wl->ops->setup(wl);
/* setup() must have set num_tx_desc to something sane. */
5990 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5992 /* adjust some runtime configuration parameters */
5993 wlcore_adjust_conf(wl);
5995 wl->irq = platform_get_irq(pdev, 0);
5996 wl->platform_quirks = pdata->platform_quirks;
5997 wl->if_ops = pdev_data->if_ops;
/* Edge-triggered platforms can't use ONESHOT level handling. */
5999 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6000 irqflags = IRQF_TRIGGER_RISING;
6002 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6004 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
6008 wl1271_error("request_irq() failed: %d", ret);
6013 ret = enable_irq_wake(wl->irq);
6015 wl->irq_wake_enabled = true;
6016 device_init_wakeup(wl->dev, 1);
/* Advertise WoWLAN patterns only if power stays on in suspend. */
6017 if (pdata->pwr_in_suspend) {
6018 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6019 wl->hw->wiphy->wowlan.n_patterns =
6020 WL1271_MAX_RX_FILTERS;
6021 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6022 wl->hw->wiphy->wowlan.pattern_max_len =
6023 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
/* Keep the IRQ masked until the interface is started. */
6027 disable_irq(wl->irq);
6029 ret = wl12xx_get_hw_info(wl);
6031 wl1271_error("couldn't get hw info");
6035 ret = wl->ops->identify_chip(wl);
6039 ret = wl1271_init_ieee80211(wl);
6043 ret = wl1271_register_hw(wl);
6047 /* Create sysfs file to control bt coex state */
6048 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6050 wl1271_error("failed to create sysfs file bt_coex_state");
6054 /* Create sysfs file to get HW PG version */
6055 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6057 wl1271_error("failed to create sysfs file hw_pg_ver");
6058 goto out_bt_coex_state;
6061 /* Create sysfs file for the FW log */
6062 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6064 wl1271_error("failed to create sysfs file fwlog");
/* Checked by wlcore_remove() to decide how much to tear down. */
6068 wl->initialized = true;
/* Error unwind in reverse order of the steps above. */
6072 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6075 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6078 wl1271_unregister_hw(wl);
6081 free_irq(wl->irq, wl);
6087 release_firmware(fw);
6088 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - entry point called by the chip-family drivers
 * (wl12xx/wl18xx) from their platform probe.
 *
 * Validates that the family driver filled in ops/ptable, then kicks
 * off an asynchronous NVS firmware request; the rest of probing
 * continues in wlcore_nvs_cb().  On request failure the completion is
 * signalled so wlcore_remove() won't wait forever.
 */
6091 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6095 if (!wl->ops || !wl->ptable)
6098 wl->dev = &pdev->dev;
6100 platform_set_drvdata(pdev, wl);
6102 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6103 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6106 wl1271_error("request_firmware_nowait failed: %d", ret);
6107 complete_all(&wl->nvs_loading_complete);
6112 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * wlcore_remove - platform removal hook.
 *
 * Waits for the asynchronous probe continuation (wlcore_nvs_cb) to
 * finish first, skips teardown entirely if it never reached the
 * initialized state, then disables IRQ wakeup, unregisters from
 * mac80211 and releases the IRQ.
 */
6114 int wlcore_remove(struct platform_device *pdev)
6116 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Don't race the async NVS callback still running probe steps. */
6118 wait_for_completion(&wl->nvs_loading_complete);
6119 if (!wl->initialized)
6122 if (wl->irq_wake_enabled) {
6123 device_init_wakeup(wl->dev, 0);
6124 disable_irq_wake(wl->irq);
6126 wl1271_unregister_hw(wl);
6127 free_irq(wl->irq, wl);
6132 EXPORT_SYMBOL_GPL(wlcore_remove);
/*
 * Module parameters and metadata.  debug_level, bug_on_recovery and
 * no_recovery are writable at runtime via sysfs (S_IRUSR | S_IWUSR);
 * fwlog can only be set at load time (perm 0).
 */
6134 u32 wl12xx_debug_level = DEBUG_NONE;
6135 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6136 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6137 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6139 module_param_named(fwlog, fwlog_param, charp, 0);
6140 MODULE_PARM_DESC(fwlog,
6141 "FW logger options: continuous, ondemand, dbgpins or disable");
6143 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6144 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6146 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6147 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6149 MODULE_LICENSE("GPL");
6150 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6151 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6152 MODULE_FIRMWARE(WL12XX_NVS_NAME);