2 * Atheros CARL9170 driver
4 * mac80211 interaction code
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
23 * This file incorporates work covered by the following copyright and
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
/* Module parameters; both are read-only (S_IRUGO) once the module is loaded. */
static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

/* NOTE(review): the declaration of modparam_noht is not visible in this
 * excerpt; presumably "int modparam_noht;" — confirm against full file. */
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
/*
 * RATE() builds one ieee80211_rate entry. The hardware rate code lives in
 * the low nibble of hw_value and the tx power index in the next nibble.
 */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
	.bitrate = (_bitrate), \
	.hw_value = (_hw_rate) | (_txpidx) << 4, \

/* Legacy rate table; entries 0-3 are the 2 GHz-only CCK rates. */
struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),

/* 2 GHz uses all 12 rates; 5 GHz skips the 4 leading CCK rates. */
#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) { \
	.center_freq = (_freq), \
	.max_power = 18, /* XXX */ \

/* Channel tables; entries are elided in this excerpt. */
static struct ieee80211_channel carl9170_2ghz_chantable[] = {

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
/* HT capabilities shared by the 2 GHz and 5 GHz band definitions below. */
#define CARL9170_HT_CAP \
	.ht_supported = true, \
	.cap = IEEE80211_HT_CAP_MAX_AMSDU | \
	IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
	IEEE80211_HT_CAP_SGI_40 | \
	IEEE80211_HT_CAP_DSSSCCK40 | \
	IEEE80211_HT_CAP_SM_PS, \
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
	/* MCS 0-15 (two spatial streams) plus MCS 32; 300 Mbit/s peak */ \
	.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
	.rx_highest = cpu_to_le16(300), \
	.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
/* Band descriptors registered with mac80211 at probe time. */
static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels = carl9170_2ghz_chantable,
	.n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates = carl9170_g_ratetable,
	.n_bitrates = carl9170_g_ratetable_size,
	.ht_cap = CARL9170_HT_CAP,

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels = carl9170_5ghz_chantable,
	.n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates = carl9170_a_ratetable,
	.n_bitrates = carl9170_a_ratetable_size,
	.ht_cap = CARL9170_HT_CAP,
/*
 * Garbage-collect A-MPDU TID sessions: move every session in SHUTDOWN
 * state onto a local list (under tx_ampdu_list_lock), then complete any
 * frames still queued on them with a failed tx status.
 */
static void carl9170_ampdu_gc(struct ar9170 *ar)
	struct carl9170_sta_tid *tid_info;

	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			/* mark dead before unlinking so no one re-schedules it */
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

	/* restart the scheduler iterator at a still-valid entry */
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);

	/* flush and free the collected sessions */
	while (!list_empty(&tid_gc)) {

		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
/*
 * Drain the tx queues. When @drop_queued is set, frames still waiting in
 * tx_pending (never uploaded to the device) are completed as failed; then
 * wait up to one second for everything already in flight to finish.
 */
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)

		/*
		 * We can only drop frames which have not been uploaded
		 */
		for (i = 0; i < ar->hw->queues; i++) {

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					/* undo the upload accounting */
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
/*
 * Suspend all active A-MPDU sessions and fail out their queued frames.
 * Frames are collected under the locks and completed afterwards, so
 * carl9170_tx_status() is never called with a spinlock held.
 */
static void carl9170_flush_ba(struct ar9170 *ar)
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;

	__skb_queue_head_init(&free);

	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);

	spin_unlock_bh(&ar->tx_ampdu_list_lock);

	/* report the collected frames outside of the locks */
	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
/*
 * Reset every tx-related data structure to a pristine state: flush BA
 * sessions and pending frames, drop everything still waiting for a tx
 * status, clear per-queue statistics, the firmware memory bitmap, any
 * cached beacons, and all tx accounting counters.
 */
static void carl9170_zap_queues(struct ar9170 *ar)
	struct carl9170_vif_info *cvif;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {

			skb = skb_peek(&ar->tx_status[i]);
			/* hold a reference across the unlocked drop call */
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);

		spin_unlock_bh(&ar->tx_status[i].lock);

	/* compile-time sanity checks on the queue limit constants */
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	/* mark all firmware memory blocks as free */
	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	/* drop cached beacons for every interface */
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);

		spin_unlock_bh(&ar->beacon_lock);

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
/* Initialize one EDCA queue descriptor (AIFS, CWmin/max, TXOP). */
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
	queue.aifs = ai_fs; \
	queue.cw_min = cwmin; \
	queue.cw_max = cwmax; \
	queue.txop = _txop; \
/*
 * mac80211 start() callback: bring the device from IDLE to STARTED.
 * Resets queues and QoS defaults, opens the USB transport, programs the
 * MAC, clears the key cache and kicks off the statistics worker.
 */
static int carl9170_op_start(struct ieee80211_hw *hw)
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	/* -1 forces a re-program on the first A-MPDU setup */
	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */

	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;

	/* Set "disable hw crypto offload" whenever the module parameter
	 * nohwcrypt is true or if the firmware does not support it.
	 * NOTE(review): bitwise '|' on bools works here, but '||' would be
	 * the conventional operator — confirm against upstream.
	 */
	ar->disable_offload = modparam_nohwcrypt |
	    ar->fw.disable_offload_fw;
	ar->rx_software_decryption = ar->disable_offload;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);

	err = carl9170_init_mac(ar);

	err = carl9170_set_qos(ar);

	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);

	/* start the rx DMA engine */
	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);

	mutex_unlock(&ar->mutex);
/* Synchronously cancel all deferred work items; must not be called with
 * ar->mutex held by the workers' own paths (they take it themselves). */
static void carl9170_cancel_worker(struct ar9170 *ar)
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
/*
 * mac80211 stop() callback: transition STARTED -> IDLE, stop the queues,
 * shut down the USB transport and drain all pending work.
 */
static void carl9170_op_stop(struct ieee80211_hw *hw)
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		RCU_INIT_POINTER(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop the rx DMA engine before closing the transport */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	/* must run after the mutex is dropped: workers take ar->mutex */
	carl9170_cancel_worker(ar);
/*
 * Deferred device-restart handler. Tries a firmware/USB restart first;
 * when that fails (or a full reset was requested) falls back to a
 * low-level USB subsystem reset.
 */
static void carl9170_restart_work(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170,

	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	if (!ar->force_usb_reset) {
		err = carl9170_usb_restart(ar);
		if (net_ratelimit()) {

			dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);

			dev_info(&ar->udev->dev, "device restarted successfully.\n");

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	if (!err && !ar->force_usb_reset) {
		ar->restart_counter++;
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);

	/*
	 * The reset was unsuccessful and the device seems to
	 * be dead. But there's still one option: a low-level
	 * usb subsystem reset...
	 */

	carl9170_usb_reset(ar);
/*
 * Request a device restart for reason @r. Coalesces bursts of restart
 * requests into a single deferred restart_work invocation.
 */
void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	/* only record sane restart reasons */
	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))

	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
		ar->force_usb_reset = true;

	ieee80211_queue_work(ar->hw, &ar->restart_work);

	/*
	 * At this point, the device instance might have vanished/disabled.
	 * So, don't put any code which access the ar9170 struct
	 * without proper protection.
	 */
/* Watchdog ping: run an echo command; an error triggers a restart with
 * reason UNRESPONSIVE_DEVICE. */
static void carl9170_ping_work(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);

	mutex_lock(&ar->mutex);
	err = carl9170_echo_test(ar, 0xdeadbeef);

		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
	mutex_unlock(&ar->mutex);
/*
 * Program the device for a (new) main interface: copy its MAC address,
 * decide whether hardware crypto offload stays enabled, and update the
 * operating mode. Caller holds ar->mutex.
 */
static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
	struct ath_common *common = &ar->common;

	WARN_ON_ONCE(IS_STARTED(ar));

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	/* We have to fall back to software crypto, whenever
	 * the user chooses to participate in an IBSS. HW
	 * offload for IBSS RSN is not supported by this driver.
	 *
	 * NOTE: If the previous main interface has already
	 * disabled hw crypto offload, we have to keep this
	 * previous disable_offload setting as it was.
	 * Although ideally, we should notify mac80211 and tell
	 * it to forget about any HW crypto offload for now.
	 */
	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
	    (vif->type != NL80211_IFTYPE_AP));

	/* While the driver supports HW offload in a single
	 * P2P client configuration, it doesn't support HW
	 * offload in the favoured, concurrent P2P GO+CLIENT
	 * configuration. Hence, HW offload will always be
	 */
	ar->disable_offload |= vif->p2p;

	ar->rx_software_decryption = ar->disable_offload;

	err = carl9170_set_operating_mode(ar);
/*
 * mac80211 add_interface() callback. Picks a free vif slot, links the new
 * interface into ar->vif_list (head == main interface, rest are slaves in
 * the hardware ACK table) and programs the MAC accordingly.
 */
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif, *old_main = NULL;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);

	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

	/* Because the AR9170 HW's MAC doesn't provide full support for
	 * multiple, independent interfaces [of different operation modes].
	 * We have to select ONE main interface [main mode of HW], but we
	 * can have multiple slaves [AKA: entry in the ACK-table].
	 *
	 * The first (from HEAD/TOP) interface in the ar->vif_list is
	 * always the main intf. All following intfs in this list
	 * are considered to be slave intfs.
	 */
	main_vif = carl9170_get_main_vif(ar);

	switch (main_vif->type) {
	case NL80211_IFTYPE_STATION:
		if (vif->type == NL80211_IFTYPE_STATION)

		/* P2P GO [master] use-case
		 * Because the P2P GO station is selected dynamically
		 * by all participating peers of a WIFI Direct network,
		 * the driver has to be able to change the main interface
		 * operating mode on the fly.
		 */
		if (main_vif->p2p && vif->p2p &&
		    vif->type == NL80211_IFTYPE_AP) {

	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		if ((vif->type == NL80211_IFTYPE_STATION) ||
		    (vif->type == NL80211_IFTYPE_WDS) ||
		    (vif->type == NL80211_IFTYPE_AP) ||
		    (vif->type == NL80211_IFTYPE_MESH_POINT))

	/* claim a free slot in the firmware's vif table */
	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;

	/* We end up in here, if the main interface is being replaced.
	 * Put the new main interface at the HEAD of the list and the
	 * previous interface will automatically become second in line.
	 */
	list_add_rcu(&vif_priv->list, &ar->vif_list);

	/* Add new interface. If the list is empty, it will become the
	 * main interface, otherwise it will be slave.
	 */
	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);

	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

	main_vif = carl9170_get_main_vif(ar);

	if (main_vif == vif) {
		rcu_assign_pointer(ar->beacon_iter, vif_priv);

		struct carl9170_vif_info *old_main_priv =
			(void *) old_main->drv_priv;
		/* downgrade old main intf to slave intf.
		 * NOTE: We are no longer under rcu_read_lock.
		 * But we are still holding ar->mutex, so the
		 * vif data [id, addr] is safe.
		 */
		err = carl9170_mod_virtual_mac(ar, old_main_priv->id,

	err = carl9170_init_interface(ar, vif);

	/* slave interface: only needs an ACK-table entry */
	err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,

	/* unified error unwind: release the slot and unlink the vif */
	if (err && (vif_id >= 0)) {
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);

		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);

	/* at least one vif is up: keep powersave off */
	ar->ps.off_override |= PS_OFF_VIF;

	mutex_unlock(&ar->mutex);
/*
 * mac80211 remove_interface() callback. Unlinks the vif; when the main
 * interface goes away, the next list entry is promoted and the hardware
 * is re-initialized for it.
 */
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))

	main_vif = carl9170_get_main_vif(ar);

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		/* promote the next interface to main, if any */
		WARN_ON(carl9170_init_interface(ar,
			carl9170_get_main_vif(ar)));

		carl9170_set_operating_mode(ar);

	/* slave: just clear its ACK-table entry */
	WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	/* no interfaces left: allow powersave again */
	ar->ps.off_override &= ~PS_OFF_VIF;

	mutex_unlock(&ar->mutex);
/* Schedule a powersave state re-evaluation (runs carl9170_ps_work). */
void carl9170_ps_check(struct ar9170 *ar)
	ieee80211_queue_work(ar->hw, &ar->ps_work);
/* caller must hold ar->mutex */
/*
 * Reconcile the device powersave state with mac80211's IEEE80211_CONF_PS
 * flag, unless an off_override bit forces powersave off. Tracks the time
 * spent asleep in ps.sleep_ms.
 */
static int carl9170_ps_update(struct ar9170 *ar)

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);

		if (ar->ps.state && !ps) {
			/* waking up: account the time we slept */
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -

			ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
/* Work handler wrapping carl9170_ps_update() under ar->mutex. */
static void carl9170_ps_work(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170,

	mutex_lock(&ar->mutex);

	WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
/*
 * Refresh channel survey data: optionally read the noise floor and, when
 * the firmware keeps hardware counters, collect the channel-busy tallies.
 * @flush resets the accumulated tally.
 */
static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)

	err = carl9170_get_noisefloor(ar);

	if (ar->fw.hw_counters) {
		err = carl9170_collect_tally(ar);

		memset(&ar->tally, 0, sizeof(ar->tally));
/* Periodic statistics worker: update the survey and re-arm itself. */
static void carl9170_stat_work(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);

	mutex_lock(&ar->mutex);
	err = carl9170_update_survey(ar, false, true);
	mutex_unlock(&ar->mutex);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
/*
 * mac80211 config() callback: apply powersave, SMPS, channel and tx power
 * changes. Channel switches bracket the switch with survey updates so
 * per-channel statistics stay attributed to the right channel.
 */
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);

		/* flush old-channel statistics before switching */
		err = carl9170_update_survey(ar, true, false);

		err = carl9170_set_channel(ar, hw->conf.channel,
					   hw->conf.channel_type);

		/* pick up the noise floor of the new channel */
		err = carl9170_update_survey(ar, false, true);

		err = carl9170_set_dyn_sifs_ack(ar);

		err = carl9170_set_rts_cts_rate(ar);

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		err = carl9170_set_mac_tpc(ar, ar->hw->conf.channel);

	mutex_unlock(&ar->mutex);
/*
 * Build the 64-bit multicast hash filter: each address hashes to one bit
 * (top 6 bits of the last address byte). Broadcast is always accepted.
 */
static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
	struct netdev_hw_addr *ha;

	/* always get broadcast frames */
	mchash = 1ULL << (0xff >> 2);

	netdev_hw_addr_list_for_each(ha, mc_list)
		mchash |= 1ULL << (ha->addr[5] >> 2);
/*
 * mac80211 configure_filter() callback: translate FIF_* filter flags into
 * the multicast hash, sniffer mode and (when the firmware supports it)
 * the hardware rx filter bits.
 */
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
		    (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {

		if (!ar->fw.ba_filter)
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;

		WARN_ON(carl9170_rx_filter(ar, rx_filter));

	mutex_unlock(&ar->mutex);
/*
 * mac80211 bss_info_changed() callback: propagate beacon, BSSID,
 * association, ERP and basic-rate changes to the device. Non-main
 * interfaces inherit the main interface's beacon interval/DTIM period.
 */
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;

	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;

		vif_priv->enable_beacon = bss_conf->enable_beacon;
		/* count interfaces that still want beaconing */
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)

		ar->beacon_enabled = i;

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		if (main_vif != vif) {
			/* slaves follow the main interface's timing */
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;

		/*
		 * Therefore a hard limit for the broadcast traffic should
		 * prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {

		err = carl9170_set_beacon_timers(ar);

	if (changed & BSS_CHANGED_HT) {

	if (main_vif != vif)

	/*
	 * The following settings can only be changed by the
	 */
	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);

	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
/* mac80211 get_tsf() callback: read the 64-bit TSF from the firmware. */
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);

	return le64_to_cpu(tsf.tsf_64);
/*
 * mac80211 set_key() callback: program or delete an entry in the hardware
 * key cache. Group keys use slots 64+keyidx, pairwise keys the first free
 * slot in 0..63. Falls back to software crypto when offload is disabled
 * or an unsupported configuration is requested.
 */
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
	struct ar9170 *ar = hw->priv;

	if (ar->disable_offload || !vif)

	/* Fall back to software encryption whenever the driver is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);

	/*
	 * While the hardware supports *catch-all* key, for offloading
	 * group-key en-/de-cryption. The way of how the hardware
	 * decides which keyId maps to which key, remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;

	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;

	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;

	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			/* group key: fixed slot above the pairwise range */
			i = 64 + key->keyidx;

			/* pairwise key: first free slot in 0..63 */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  min_t(u8, 16, key->keylen));

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			/* TKIP needs a second upload for the MIC key */
			err = carl9170_upload_key(ar, i, sta ? sta->addr :

			/*
			 * hardware is not capable of generating MMIC
			 * of fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

		ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);

			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  AR9170_ENC_ALG_NONE,

		err = carl9170_disable_key(ar, key->hw_key_idx);

	mutex_unlock(&ar->mutex);

	/* fall-back path: let mac80211 do the crypto in software */
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);

	mutex_unlock(&ar->mutex);
/*
 * mac80211 sta_add() callback: initialize per-station tx state. HT
 * stations requiring more than 8us A-MPDU density (> 6) are kept at
 * non-HT tx since the hardware cannot honour 16us density.
 */
static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * HW does support 16us AMPDU density.
			 * No HT-Xmit for station.
			 */

		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		/* A-MPDU length limit: 2^(13 + factor) - 1 per 802.11n */
		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
/*
 * mac80211 sta_remove() callback: tear down the station's A-MPDU TID
 * sessions (mark SHUTDOWN) and garbage-collect them synchronously.
 */
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);

			carl9170_ampdu_gc(ar);
/*
 * mac80211 conf_tx() callback: store the new EDCA parameters for the
 * queue (remapped through ar9170_qmap) and push them to the device.
 */
static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
	struct ar9170 *ar = hw->priv;

	mutex_lock(&ar->mutex);
	if (queue < ar->hw->queues) {
		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
		ret = carl9170_set_qos(ar);

	mutex_unlock(&ar->mutex);
/* Deferred A-MPDU session garbage collection (see carl9170_ampdu_gc). */
static void carl9170_ampdu_work(struct work_struct *work)
	struct ar9170 *ar = container_of(work, struct ar9170,

	if (!IS_STARTED(ar))

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
/*
 * mac80211 ampdu_action() callback: manage tx A-MPDU session state. RX
 * aggregation is handled entirely in hardware; tx sessions are tracked
 * with per-TID carl9170_sta_tid structures on ar->tx_ampdu_list.
 */
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size)
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)

		if (rcu_dereference(sta_info->agg[tid])) {
			/* session already exists */

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),

		/* seed the BAW tracking with the starting sequence number */
		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:

		tid_info = rcu_dereference(sta_info->agg[tid]);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
			tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

		RCU_INIT_POINTER(sta_info->agg[tid], NULL);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		/* defer the actual teardown to carl9170_ampdu_work */
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);

	case IEEE80211_AMPDU_TX_OPERATIONAL:

		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
		tid_info->state = CARL9170_TID_STATE_IDLE;

		if (WARN_ON_ONCE(!tid_info))

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
#ifdef CONFIG_CARL9170_WPC
/*
 * Register an input device for the WPS push button (if the hardware has
 * one), reporting KEY_WPS_BUTTON events.
 */
static int carl9170_register_wps_button(struct ar9170 *ar)
	struct input_dev *input;

	if (!(ar->features & CARL9170_WPS_BUTTON))

	input = input_allocate_device();

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);

		/* registration failed: free the unregistered device */
		input_free_device(input);

	ar->wps.pbc = input;

#endif /* CONFIG_CARL9170_WPC */
#ifdef CONFIG_CARL9170_HWRNG
/*
 * Refill the RNG cache by batch-reading AR9170_RAND_REG_NUM via the
 * firmware's multi-register-read command; caller holds ar->mutex.
 */
static int carl9170_rng_get(struct ar9170 *ar)

#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)

	/* request payload: the RNG register address, repeated RW times */
	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	unsigned int i, off = 0, transfer, count;

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)

	count = ARRAY_SIZE(ar->rng.cache);

	err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
				RB, (u8 *) rng_load,

	transfer = min_t(unsigned int, count, RW);
	for (i = 0; i < transfer; i++)
		ar->rng.cache[off + i] = buf[i];

	ar->rng.cache_idx = 0;
/*
 * hwrng ->data_read callback: hand out one cached 32-bit random word.
 *
 * Serves words from ar->rng.cache under ar->mutex; when the cache is
 * exhausted it is refilled synchronously via carl9170_rng_get().
 * On refill failure the mutex is dropped and (presumably) the error
 * is returned — the return path lies outside this extract.
 */
1593 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
/* priv was stashed as (unsigned long)ar in carl9170_register_hwrng */
1595 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1598 mutex_lock(&ar->mutex);
1599 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1600 ret = carl9170_rng_get(ar);
1602 mutex_unlock(&ar->mutex);
/* consume one word and advance the cache cursor */
1607 *data = ar->rng.cache[ar->rng.cache_idx++];
1608 mutex_unlock(&ar->mutex);
/*
 * Tear down the hardware RNG registration, if it was ever made.
 * Idempotent: the initialized flag guards against double unregister.
 */
1613 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1615 if (ar->rng.initialized) {
1616 hwrng_unregister(&ar->rng.rng);
1617 ar->rng.initialized = false;
/*
 * Register the device with the kernel hwrng framework and prime the
 * random-word cache once.  The rng name is "<module>_<wiphy>" so
 * multiple devices stay distinguishable.  If the initial cache fill
 * fails the registration is rolled back via carl9170_unregister_hwrng.
 *
 * NOTE(review): error-return lines between the visible statements are
 * missing from this extract.
 */
1621 static int carl9170_register_hwrng(struct ar9170 *ar)
1625 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1626 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1627 ar->rng.rng.name = ar->rng.name;
1628 ar->rng.rng.data_read = carl9170_rng_read;
/* stash ar for retrieval in the data_read callback */
1629 ar->rng.rng.priv = (unsigned long)ar;
/* registering twice would corrupt hwrng state — warn loudly */
1631 if (WARN_ON(ar->rng.initialized))
1634 err = hwrng_register(&ar->rng.rng);
1636 dev_err(&ar->udev->dev, "Failed to register the random "
1637 "number generator (%d)\n", err);
1641 ar->rng.initialized = true;
/* prime the cache so the first read doesn't block on a refill */
1643 err = carl9170_rng_get(ar);
1645 carl9170_unregister_hwrng(ar);
1651 #endif /* CONFIG_CARL9170_HWRNG */
/*
 * mac80211 ->get_survey: report per-channel survey data for channel
 * index @idx.
 *
 * If @idx refers to the currently tuned channel, fresh counters are
 * pulled from the device first (carl9170_update_survey under
 * ar->mutex).  The channel pointer is resolved by scanning all bands
 * for a matching hw_value; the cached ar->survey[idx] entry is then
 * copied out.  NOISE_DBM is always reported; IN_USE when the channel
 * is active, and the CHANNEL_TIME* set only when the firmware keeps
 * hardware counters (ar->fw.hw_counters).
 *
 * NOTE(review): the "current channel" pre-check at line 1665 reads
 * chan before the band scan visible below assigns it — the assignment
 * presumably happens on lines omitted from this extract; verify
 * against the full file.
 */
1653 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1654 struct survey_info *survey)
1656 struct ar9170 *ar = hw->priv;
1657 struct ieee80211_channel *chan;
1658 struct ieee80211_supported_band *band;
1665 if (idx == chan->hw_value) {
1666 mutex_lock(&ar->mutex);
1667 err = carl9170_update_survey(ar, false, true);
1668 mutex_unlock(&ar->mutex);
/* map the opaque idx back to an ieee80211_channel */
1673 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1674 band = ar->hw->wiphy->bands[b];
1679 for (i = 0; i < band->n_channels; i++) {
1680 if (band->channels[i].hw_value == idx) {
1681 chan = &band->channels[i];
1689 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1691 survey->channel = chan;
1692 survey->filled = SURVEY_INFO_NOISE_DBM;
1694 if (ar->channel == chan)
1695 survey->filled |= SURVEY_INFO_IN_USE;
/* busy/tx channel-time stats exist only with hw counter support */
1697 if (ar->fw.hw_counters) {
1698 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1699 SURVEY_INFO_CHANNEL_TIME_BUSY |
1700 SURVEY_INFO_CHANNEL_TIME_TX;
/*
 * mac80211 ->flush: push out (or drop) all pending frames.
 *
 * Flushes the content-after-beacon (CAB) queue of every active
 * interface, then the main tx path.  @queues is ignored — the
 * hardware is flushed wholesale.  Serialized by ar->mutex.
 */
1706 static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1708 struct ar9170 *ar = hw->priv;
1711 mutex_lock(&ar->mutex);
/* one CAB queue per active vif */
1712 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1713 carl9170_flush_cab(ar, vid);
1715 carl9170_flush(ar, drop);
1716 mutex_unlock(&ar->mutex);
/*
 * mac80211 ->get_stats: report the low-level dot11 counters the
 * driver actually tracks (ACK failures and FCS errors); all other
 * fields stay zero from the memset.
 */
1719 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1720 struct ieee80211_low_level_stats *stats)
1722 struct ar9170 *ar = hw->priv;
1724 memset(stats, 0, sizeof(*stats));
1725 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1726 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
/*
 * mac80211 ->sta_notify: track a station's power-save state.
 *
 * On SLEEP, mark the station as sleeping; if frames for it are
 * already queued in hardware, tell mac80211 to keep buffering new
 * ones (ieee80211_sta_block_awake) until those drain.  On AWAKE the
 * flag is simply cleared — the unblock presumably happens in the tx
 * status path once pending_frames reaches zero (outside this extract).
 */
1730 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1731 struct ieee80211_vif *vif,
1732 enum sta_notify_cmd cmd,
1733 struct ieee80211_sta *sta)
1735 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1738 case STA_NOTIFY_SLEEP:
1739 sta_info->sleeping = true;
1740 if (atomic_read(&sta_info->pending_frames))
1741 ieee80211_sta_block_awake(hw, sta, true);
1744 case STA_NOTIFY_AWAKE:
1745 sta_info->sleeping = false;
/*
 * mac80211 ->tx_frames_pending: true while any frame is still queued
 * anywhere in the driver's tx path (ar->tx_total_queued != 0).
 */
1750 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1752 struct ar9170 *ar = hw->priv;
1754 return !!atomic_read(&ar->tx_total_queued);
/*
 * mac80211 callback table for the carl9170 driver.
 * Only the ops the hardware/firmware supports are wired up; mac80211
 * uses default/NULL behavior for the rest.
 */
1757 static const struct ieee80211_ops carl9170_ops = {
1758 .start = carl9170_op_start,
1759 .stop = carl9170_op_stop,
1760 .tx = carl9170_op_tx,
1761 .flush = carl9170_op_flush,
1762 .add_interface = carl9170_op_add_interface,
1763 .remove_interface = carl9170_op_remove_interface,
1764 .config = carl9170_op_config,
1765 .prepare_multicast = carl9170_op_prepare_multicast,
1766 .configure_filter = carl9170_op_configure_filter,
1767 .conf_tx = carl9170_op_conf_tx,
1768 .bss_info_changed = carl9170_op_bss_info_changed,
1769 .get_tsf = carl9170_op_get_tsf,
1770 .set_key = carl9170_op_set_key,
1771 .sta_add = carl9170_op_sta_add,
1772 .sta_remove = carl9170_op_sta_remove,
1773 .sta_notify = carl9170_op_sta_notify,
1774 .get_survey = carl9170_op_get_survey,
1775 .get_stats = carl9170_op_get_stats,
1776 .ampdu_action = carl9170_op_ampdu_action,
1777 .tx_frames_pending = carl9170_tx_frames_pending,
/*
 * Allocate and minimally initialize the per-device ar9170 state.
 *
 * Allocates the rx-stream failover skb and the ieee80211_hw (with
 * @priv_size of bus-layer private data), then initializes locks,
 * queues, work items and static hw capabilities.  Interface modes are
 * deliberately left empty here; the firmware parser fills them in
 * later.  Returns the new state, or ERR_PTR(-ENOMEM) on allocation
 * failure (the unwinding between the allocations is on lines omitted
 * from this extract).
 */
1780 void *carl9170_alloc(size_t priv_size)
1782 struct ieee80211_hw *hw;
1784 struct sk_buff *skb;
1788 * this buffer is used for rx stream reconstruction.
1789 * Under heavy load this device (or the transport layer?)
1790 * tends to split the streams into separate rx descriptors.
1793 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1797 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1803 ar->rx_failover = skb;
/* start with an empty PLCP reassembly state */
1805 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1806 ar->rx_has_plcp = false;
1809 * Here's a hidden pitfall!
1811 * All 4 AC queues work perfectly well under _legacy_ operation.
1812 * However as soon as aggregation is enabled, the traffic flow
1813 * gets very bumpy. Therefore we have to _switch_ to a
1814 * software AC with a single HW queue.
1816 hw->queues = __AR9170_NUM_TXQ;
1818 mutex_init(&ar->mutex);
1819 spin_lock_init(&ar->beacon_lock);
1820 spin_lock_init(&ar->cmd_lock);
1821 spin_lock_init(&ar->tx_stats_lock);
1822 spin_lock_init(&ar->tx_ampdu_list_lock);
1823 spin_lock_init(&ar->mem_lock);
1824 spin_lock_init(&ar->state_lock);
1825 atomic_set(&ar->pending_restarts, 0);
/* per-queue status/pending skb queues and BAR bookkeeping */
1827 for (i = 0; i < ar->hw->queues; i++) {
1828 skb_queue_head_init(&ar->tx_status[i]);
1829 skb_queue_head_init(&ar->tx_pending[i]);
1831 INIT_LIST_HEAD(&ar->bar_list[i]);
1832 spin_lock_init(&ar->bar_list_lock[i]);
1834 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1835 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1836 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1837 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1838 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1839 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1840 INIT_LIST_HEAD(&ar->tx_ampdu_list);
/* the iterator initially points at the list head itself */
1841 rcu_assign_pointer(ar->tx_ampdu_iter,
1842 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1844 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1845 INIT_LIST_HEAD(&ar->vif_list);
1846 init_completion(&ar->tx_flush);
1848 /* firmware decides which modes we support */
1849 hw->wiphy->interface_modes = 0;
1851 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1852 IEEE80211_HW_MFP_CAPABLE |
1853 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1854 IEEE80211_HW_SUPPORTS_PS |
1855 IEEE80211_HW_PS_NULLFUNC_STACK |
1856 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1857 IEEE80211_HW_SIGNAL_DBM;
1859 if (!modparam_noht) {
1861 * see the comment above, why we allow the user
1862 * to disable HT by a module parameter.
1864 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
/* room for the tx superframe header in front of every frame */
1867 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1868 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1869 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1871 hw->max_rates = CARL9170_TX_MAX_RATES;
1872 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1874 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1875 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1881 return ERR_PTR(-ENOMEM);
/*
 * Read the device EEPROM image into ar->eeprom, RW 32-bit words per
 * register-read command.  The BUILD_BUG_ONs pin the assumptions:
 * the image is word-aligned, one burst fits in a command, and the
 * image size is an exact multiple of the burst size (no tail case).
 */
1884 static int carl9170_read_eeprom(struct ar9170 *ar)
1886 #define RW 8 /* number of words to read at once */
1887 #define RB (sizeof(u32) * RW)
1888 u8 *eeprom = (void *)&ar->eeprom;
1892 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1894 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1896 /* don't want to handle trailing remains */
1897 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1900 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
/* build the burst: RW consecutive EEPROM word addresses */
1901 for (j = 0; j < RW; j++)
1902 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1905 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1906 RB, (u8 *) &offsets,
1907 RB, eeprom + RB * i);
/*
 * Derive runtime capabilities from the EEPROM image read earlier.
 *
 * Validates the image (0xffff length means blank/unreadable),
 * computes HT MCS tx parameters from the rx/tx chain masks, hooks up
 * the 2.4/5 GHz band descriptors the hardware advertises, allocates
 * the per-channel survey array, seeds channel-change-time estimates,
 * takes the regulatory domain code and sets the permanent MAC
 * address.  Error-return lines are omitted from this extract.
 */
1917 static int carl9170_parse_eeprom(struct ar9170 *ar)
1919 struct ath_regulatory *regulatory = &ar->common.regulatory;
1920 unsigned int rx_streams, tx_streams, tx_params = 0;
/* 0xffff length = blank EEPROM; device is unusable */
1924 if (ar->eeprom.length == cpu_to_le16(0xffff))
1927 rx_streams = hweight8(ar->eeprom.rx_mask);
1928 tx_streams = hweight8(ar->eeprom.tx_mask);
1930 if (rx_streams != tx_streams) {
1931 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1933 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1934 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1936 tx_params = (tx_streams - 1) <<
1937 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
/* band descriptors are shared module globals — OR in our params */
1939 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1940 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1943 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1944 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1945 &carl9170_band_2GHz;
1946 chans += carl9170_band_2GHz.n_channels;
1949 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1950 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1951 &carl9170_band_5GHz;
1952 chans += carl9170_band_5GHz.n_channels;
/* one survey_info slot per supported channel */
1959 ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1962 ar->num_channels = chans;
1965 * I measured this, a bandswitch takes roughly
1966 * 135 ms and a frequency switch about 80.
1968 * FIXME: measure these values again once EEPROM settings
1969 * are used, that will influence them!
1972 ar->hw->channel_change_time = 135 * 1000;
1974 ar->hw->channel_change_time = 80 * 1000;
1976 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1978 /* second part of wiphy init */
1979 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
/*
 * cfg80211 regulatory notifier: forward regulatory requests to the
 * shared ath regulatory core, which applies them to our wiphy.
 */
1984 static void carl9170_reg_notifier(struct wiphy *wiphy,
1985 struct regulatory_request *request)
1987 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1988 struct ar9170 *ar = hw->priv;
1990 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
/*
 * Final device bring-up after the firmware has been parsed.
 *
 * Allocates the firmware memory-block bitmap, reads and parses the
 * EEPROM, initializes ath regulatory, registers the ieee80211_hw,
 * then brings up the optional sub-features (debugfs, LEDs, WPS
 * button, hwrng).  On any failure past ieee80211_register_hw the
 * error path unwinds through carl9170_unregister() (visible at the
 * bottom).  Intermediate "if (err) goto ..." lines are omitted from
 * this extract.
 */
1993 int carl9170_register(struct ar9170 *ar)
1995 struct ath_regulatory *regulatory = &ar->common.regulatory;
/* registering twice would leak the old bitmap — refuse */
1998 if (WARN_ON(ar->mem_bitmap))
2001 ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
2002 sizeof(unsigned long), GFP_KERNEL);
2004 if (!ar->mem_bitmap)
2007 /* try to read EEPROM, init MAC addr */
2008 err = carl9170_read_eeprom(ar);
2012 err = carl9170_parse_eeprom(ar);
2016 err = ath_regd_init(regulatory, ar->hw->wiphy,
2017 carl9170_reg_notifier);
/* honor the noht module parameter on the shared band structs */
2021 if (modparam_noht) {
2022 carl9170_band_2GHz.ht_cap.ht_supported = false;
2023 carl9170_band_5GHz.ht_cap.ht_supported = false;
2026 for (i = 0; i < ar->fw.vif_num; i++) {
2027 ar->vif_priv[i].id = i;
2028 ar->vif_priv[i].vif = NULL;
2031 err = ieee80211_register_hw(ar->hw);
2035 /* mac80211 interface is now registered */
2036 ar->registered = true;
2038 if (!ath_is_world_regd(regulatory))
2039 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2041 #ifdef CONFIG_CARL9170_DEBUGFS
2042 carl9170_debugfs_register(ar);
2043 #endif /* CONFIG_CARL9170_DEBUGFS */
2045 err = carl9170_led_init(ar);
2049 #ifdef CONFIG_CARL9170_LEDS
2050 err = carl9170_led_register(ar);
2053 #endif /* CONFIG_CARL9170_LEDS */
2055 #ifdef CONFIG_CARL9170_WPC
2056 err = carl9170_register_wps_button(ar);
2059 #endif /* CONFIG_CARL9170_WPC */
2061 #ifdef CONFIG_CARL9170_HWRNG
2062 err = carl9170_register_hwrng(ar);
2065 #endif /* CONFIG_CARL9170_HWRNG */
2067 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2068 wiphy_name(ar->hw->wiphy));
/* common error path: tear down everything set up so far */
2073 carl9170_unregister(ar);
/*
 * Undo carl9170_register(): unregister the optional sub-features in
 * reverse order, stop the workers, then detach from mac80211.
 * Safe to call more than once — the registered flag makes repeat
 * calls a no-op.
 */
2077 void carl9170_unregister(struct ar9170 *ar)
2079 if (!ar->registered)
2082 ar->registered = false;
2084 #ifdef CONFIG_CARL9170_LEDS
2085 carl9170_led_unregister(ar);
2086 #endif /* CONFIG_CARL9170_LEDS */
2088 #ifdef CONFIG_CARL9170_DEBUGFS
2089 carl9170_debugfs_unregister(ar);
2090 #endif /* CONFIG_CARL9170_DEBUGFS */
2092 #ifdef CONFIG_CARL9170_WPC
2094 input_unregister_device(ar->wps.pbc);
2097 #endif /* CONFIG_CARL9170_WPC */
2099 #ifdef CONFIG_CARL9170_HWRNG
2100 carl9170_unregister_hwrng(ar);
2101 #endif /* CONFIG_CARL9170_HWRNG */
/* no more deferred work may run once mac80211 lets go of us */
2103 carl9170_cancel_worker(ar);
2104 cancel_work_sync(&ar->restart_work);
2106 ieee80211_unregister_hw(ar->hw);
/*
 * Release everything carl9170_alloc() (and friends) allocated.
 * Must only run after unregister and full device shutdown — the
 * WARN_ONs assert both.  ieee80211_free_hw releases ar itself, so
 * it has to be the very last statement.
 */
2109 void carl9170_free(struct ar9170 *ar)
2111 WARN_ON(ar->registered);
2112 WARN_ON(IS_INITIALIZED(ar));
2114 kfree_skb(ar->rx_failover);
2115 ar->rx_failover = NULL;
2117 kfree(ar->mem_bitmap);
2118 ar->mem_bitmap = NULL;
2123 mutex_destroy(&ar->mutex);
/* frees the embedding ieee80211_hw and with it ar — do this last */
2125 ieee80211_free_hw(ar->hw);