2 * Copyright (c) 2008-2009 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/slab.h>
/*
 * Scratch state for ath9k_vif_iter(): a reference hardware MAC address and
 * the BSSID mask being accumulated across all active interface addresses.
 * (Member declarations are not visible in this view.)
 */
21 struct ath9k_vif_iter_data {
/*
 * Interface-iterator callback: clear every BSSID-mask bit in which this
 * vif's MAC address differs from the reference hardware MAC, so that the
 * resulting mask matches all active addresses simultaneously.
 */
26 static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
28 struct ath9k_vif_iter_data *iter_data = data;
/* ~(a ^ b) has a 1-bit exactly where the two addresses agree */
31 for (i = 0; i < ETH_ALEN; i++)
32 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
/*
 * Recompute the hardware BSSID mask so it covers @vif's address plus the
 * addresses of every active interface on the primary and all secondary
 * wiphys, then program it into the hardware.
 */
35 void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
37 struct ath_wiphy *aphy = hw->priv;
38 struct ath_softc *sc = aphy->sc;
39 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
40 struct ath9k_vif_iter_data iter_data;
44 * Use the hardware MAC address as reference, the hardware uses it
45 * together with the BSSID mask when matching addresses.
47 iter_data.hw_macaddr = common->macaddr;
/* Start from an all-ones mask; iteration only ever clears bits */
48 memset(&iter_data.mask, 0xff, ETH_ALEN);
/* Fold in @vif explicitly (it may not be in the active list yet) */
51 ath9k_vif_iter(&iter_data, vif->addr, vif);
53 /* Get list of all active MAC addresses */
/* wiphy_lock guards the sec_wiphy[] array while we walk it */
54 spin_lock_bh(&sc->wiphy_lock);
55 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
57 for (i = 0; i < sc->num_sec_wiphy; i++) {
58 if (sc->sec_wiphy[i] == NULL)
60 ieee80211_iterate_active_interfaces_atomic(
61 sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
63 spin_unlock_bh(&sc->wiphy_lock);
/* Commit the accumulated mask to the hardware registers */
65 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
66 ath_hw_setbssidmask(common);
/*
 * Allocate, initialize, and register a new secondary (virtual) wiphy.
 * Reuses a free slot in sc->sec_wiphy[] or grows the array with krealloc()
 * when none is free.  Returns an int status (return paths are not visible
 * in this view).
 */
69 int ath9k_wiphy_add(struct ath_softc *sc)
72 struct ath_wiphy *aphy;
73 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
74 struct ieee80211_hw *hw;
/* Per-wiphy driver private area is a struct ath_wiphy */
77 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
/* wiphy_lock protects sec_wiphy[] and num_sec_wiphy */
81 spin_lock_bh(&sc->wiphy_lock);
82 for (i = 0; i < sc->num_sec_wiphy; i++) {
83 if (sc->sec_wiphy[i] == NULL)
87 if (i == sc->num_sec_wiphy) {
88 /* No empty slot available; increase array length */
90 n = krealloc(sc->sec_wiphy,
91 (sc->num_sec_wiphy + 1) *
92 sizeof(struct ath_wiphy *),
/* On allocation failure: drop the lock and free the unused hw */
95 spin_unlock_bh(&sc->wiphy_lock);
96 ieee80211_free_hw(hw);
104 SET_IEEE80211_DEV(hw, sc->dev);
/* Publish the new wiphy in its slot before dropping the lock */
109 sc->sec_wiphy[i] = aphy;
110 spin_unlock_bh(&sc->wiphy_lock);
/* Derive a locally-administered MAC unique to this virtual wiphy */
112 memcpy(addr, common->macaddr, ETH_ALEN);
113 addr[0] |= 0x02; /* Locally managed address */
115 * XOR virtual wiphy index into the least significant bits to generate
116 * a different MAC address for each virtual wiphy.
119 addr[4] ^= (i & 0xff00) >> 8;
120 addr[3] ^= (i & 0xff0000) >> 16;
122 SET_IEEE80211_PERM_ADDR(hw, addr);
124 ath9k_set_hw_capab(sc, hw);
126 error = ieee80211_register_hw(hw);
129 /* Make sure wiphy scheduler is started (if enabled) */
130 ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
/*
 * Remove a secondary wiphy: locate it in sc->sec_wiphy[], clear the slot
 * under wiphy_lock, then unregister and free its mac80211 hw.  The lock is
 * dropped before ieee80211_unregister_hw() because that call can sleep.
 * Return values are not visible in this view.
 */
136 int ath9k_wiphy_del(struct ath_wiphy *aphy)
138 struct ath_softc *sc = aphy->sc;
141 spin_lock_bh(&sc->wiphy_lock);
142 for (i = 0; i < sc->num_sec_wiphy; i++) {
143 if (aphy == sc->sec_wiphy[i]) {
/* Clear the slot first so no one else can find this wiphy */
144 sc->sec_wiphy[i] = NULL;
145 spin_unlock_bh(&sc->wiphy_lock);
146 ieee80211_unregister_hw(aphy->hw);
147 ieee80211_free_hw(aphy->hw);
/* Not found in the array */
151 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Build and transmit a 24-byte nullfunc data frame to @bssid on behalf of
 * @vif.  Used by the wiphy pause/unpause machinery: the frame type is
 * tagged ATH9K_IFT_PAUSE or ATH9K_IFT_UNPAUSE so that TX completion
 * (ath9k_tx_status) can advance the pause state machine.  Returns an int
 * status (the success-return path is not visible in this view).
 */
155 static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
156 struct ieee80211_vif *vif, const u8 *bssid,
159 struct ath_softc *sc = aphy->sc;
160 struct ath_tx_control txctl;
162 struct ieee80211_hdr *hdr;
164 struct ieee80211_tx_info *info;
/* 24 bytes = bare 802.11 header; nullfunc carries no payload */
166 skb = dev_alloc_skb(24);
169 hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
171 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
172 IEEE80211_FCTL_TODS);
/* NOTE(review): the condition guarding the PM bit is not visible here;
 * presumably it is set only when ps != 0 — confirm against full source */
174 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
175 hdr->frame_control = fc;
176 memcpy(hdr->addr1, bssid, ETH_ALEN);
177 memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
178 memcpy(hdr->addr3, bssid, ETH_ALEN);
180 info = IEEE80211_SKB_CB(skb);
181 memset(info, 0, sizeof(*info));
/* Request TX status so completion can drive the pause state machine */
182 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
183 info->control.vif = vif;
/* Lowest rate index, up to 4 attempts; -1 terminates the rate table */
184 info->control.rates[0].idx = 0;
185 info->control.rates[0].count = 4;
186 info->control.rates[1].idx = -1;
188 memset(&txctl, 0, sizeof(struct ath_tx_control));
/* Send on the voice (highest-priority) hardware queue */
189 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]];
190 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
192 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
/* TX submission failed: drop the frame ourselves */
197 dev_kfree_skb_any(skb);
/*
 * Return whether any wiphy (primary or secondary) is still in the
 * ATH_WIPHY_PAUSING state.  Lockless variant; the ath9k_wiphy_pausing()
 * wrapper below takes wiphy_lock around this.
 */
201 static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
204 if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
206 for (i = 0; i < sc->num_sec_wiphy; i++) {
207 if (sc->sec_wiphy[i] &&
208 sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
/* Locked wrapper: test for any wiphy in PAUSING state under wiphy_lock. */
214 static bool ath9k_wiphy_pausing(struct ath_softc *sc)
217 spin_lock_bh(&sc->wiphy_lock);
218 ret = __ath9k_wiphy_pausing(sc);
219 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Return whether any wiphy (primary or secondary) is in the
 * ATH_WIPHY_SCAN state.  Lockless; callers use the wrapper below.
 */
223 static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
226 if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
228 for (i = 0; i < sc->num_sec_wiphy; i++) {
229 if (sc->sec_wiphy[i] &&
230 sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
/* Locked wrapper: test for any wiphy in SCAN state under wiphy_lock. */
236 bool ath9k_wiphy_scanning(struct ath_softc *sc)
239 spin_lock_bh(&sc->wiphy_lock);
240 ret = __ath9k_wiphy_scanning(sc);
241 spin_unlock_bh(&sc->wiphy_lock);
245 static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
247 /* caller must hold wiphy_lock */
/* Unpause @aphy only if it is tuned to the currently selected channel. */
248 static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
252 if (aphy->chan_idx != aphy->sc->chan_idx)
253 return; /* wiphy not on the selected channel */
254 __ath9k_wiphy_unpause(aphy);
/*
 * After a channel change: unpause the primary and every secondary wiphy
 * that is on the newly selected channel, under wiphy_lock.
 */
257 static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
260 spin_lock_bh(&sc->wiphy_lock);
261 __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
262 for (i = 0; i < sc->num_sec_wiphy; i++)
263 __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i])
264 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Workqueue handler that performs the actual channel switch once every
 * affected wiphy has been paused (queued from ath9k_tx_status() /
 * ath9k_wiphy_select()).  Runs in process context so it can take sc->mutex.
 */
267 void ath9k_wiphy_chan_work(struct work_struct *work)
269 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
270 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
271 struct ath_wiphy *aphy = sc->next_wiphy;
277 * All pending interfaces paused; ready to change
281 /* Change channels */
282 mutex_lock(&sc->mutex);
283 /* XXX: remove me eventually */
284 ath9k_update_ichannel(sc, aphy->hw,
285 &sc->sc_ah->channels[sc->chan_idx]);
287 /* sync hw configuration for hw code */
288 common->hw = aphy->hw;
290 ath_update_chainmask(sc, sc->chan_is_ht);
291 if (ath_set_channel(sc, aphy->hw,
292 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
293 printk(KERN_DEBUG "ath9k: Failed to set channel for new "
295 mutex_unlock(&sc->mutex);
298 mutex_unlock(&sc->mutex);
/* Wake the wiphys that live on the new channel */
300 ath9k_wiphy_unpause_channel(sc);
304 * ath9k version of ieee80211_tx_status() for TX frames that are generated
305 * internally in the driver.
/*
 * When the completed frame is a pause nullfunc and this wiphy is still in
 * PAUSING, mark it PAUSED; once no wiphy is pausing any more, queue
 * chan_work to perform the pending channel change.
 */
307 void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
309 struct ath_wiphy *aphy = hw->priv;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
/* pad[0] carries the driver-internal frame-type tag set at TX time */
312 if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
313 aphy->state == ATH_WIPHY_PAUSING) {
314 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
315 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
316 "frame\n", wiphy_name(hw->wiphy));
318 * The AP did not reply; ignore this to allow us to
322 aphy->state = ATH_WIPHY_PAUSED;
323 if (!ath9k_wiphy_pausing(aphy->sc)) {
325 * Drop from tasklet to work to allow mutex for channel
328 ieee80211_queue_work(aphy->sc->hw,
329 &aphy->sc->chan_work);
/*
 * Mark @aphy as paused; if it was the last wiphy still pausing, queue the
 * channel-change work.
 */
336 static void ath9k_mark_paused(struct ath_wiphy *aphy)
338 struct ath_softc *sc = aphy->sc;
339 aphy->state = ATH_WIPHY_PAUSED;
340 if (!__ath9k_wiphy_pausing(sc))
341 ieee80211_queue_work(sc->hw, &sc->chan_work);
/*
 * Per-vif pause callback.  STATION: if not associated, pause immediately;
 * otherwise send a PS nullfunc to the AP so it buffers our frames (the
 * actual PAUSED transition then happens on TX completion).  AP: pausing
 * the beacon is handled by the aphy->state change, so mark paused now.
 */
344 static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
346 struct ath_wiphy *aphy = data;
347 struct ath_vif *avp = (void *) vif->drv_priv;
350 case NL80211_IFTYPE_STATION:
351 if (!vif->bss_conf.assoc) {
352 ath9k_mark_paused(aphy);
355 /* TODO: could avoid this if already in PS mode */
356 if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
357 printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
/* Could not notify the AP — pause anyway rather than stall */
359 ath9k_mark_paused(aphy);
362 case NL80211_IFTYPE_AP:
363 /* Beacon transmission is paused by aphy->state change */
364 ath9k_mark_paused(aphy);
371 /* caller must hold wiphy_lock */
/*
 * Begin pausing @aphy: stop its TX queues, enter PAUSING, and let the
 * per-vif iterator drive each interface toward the PAUSED state.
 */
372 static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
374 ieee80211_stop_queues(aphy->hw);
375 aphy->state = ATH_WIPHY_PAUSING;
377 * TODO: handle PAUSING->PAUSED for the case where there are multiple
378 * active vifs (now we do it on the first vif getting ready; should be
381 ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
/* Locked wrapper: pause @aphy under its softc's wiphy_lock. */
386 int ath9k_wiphy_pause(struct ath_wiphy *aphy)
389 spin_lock_bh(&aphy->sc->wiphy_lock);
390 ret = __ath9k_wiphy_pause(aphy);
391 spin_unlock_bh(&aphy->sc->wiphy_lock);
/*
 * Per-vif unpause callback.  STATION: if associated, send a nullfunc with
 * PS cleared so the AP resumes delivering our buffered frames.  AP: beacon
 * transmission resumes via the aphy->state change, nothing to send.
 */
395 static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
397 struct ath_wiphy *aphy = data;
398 struct ath_vif *avp = (void *) vif->drv_priv;
401 case NL80211_IFTYPE_STATION:
402 if (!vif->bss_conf.assoc)
404 ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
406 case NL80211_IFTYPE_AP:
407 /* Beacon transmission is re-enabled by aphy->state change */
414 /* caller must hold wiphy_lock */
/* Unpause @aphy: notify each vif, go ACTIVE, and restart its TX queues. */
415 static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
417 ieee80211_iterate_active_interfaces_atomic(aphy->hw,
418 ath9k_unpause_iter, aphy);
419 aphy->state = ATH_WIPHY_ACTIVE;
420 ieee80211_wake_queues(aphy->hw);
/* Locked wrapper: unpause @aphy under its softc's wiphy_lock. */
424 int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
427 spin_lock_bh(&aphy->sc->wiphy_lock);
428 ret = __ath9k_wiphy_unpause(aphy);
429 spin_unlock_bh(&aphy->sc->wiphy_lock);
/*
 * Force every non-INACTIVE wiphy straight to PAUSED without sending any
 * frames.  Used by the stuck-pause recovery path in ath9k_wiphy_select().
 * Caller must hold wiphy_lock.
 */
433 static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
436 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
437 sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
438 for (i = 0; i < sc->num_sec_wiphy; i++) {
439 if (sc->sec_wiphy[i] &&
440 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
441 sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
445 /* caller must hold wiphy_lock */
/* Initiate the pause sequence on every currently ACTIVE wiphy. */
446 static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
449 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
450 __ath9k_wiphy_pause(sc->pri_wiphy);
451 for (i = 0; i < sc->num_sec_wiphy; i++) {
452 if (sc->sec_wiphy[i] &&
453 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
454 __ath9k_wiphy_pause(sc->sec_wiphy[i]);
/*
 * Select @aphy as the wiphy that owns the shared radio: record its channel
 * as the new target, pause all other active wiphys, and either queue the
 * channel-change work immediately (if nothing needs time to pause) or let
 * ath9k_tx_status() queue it once the last pause completes.  Refuses to
 * run during a mac80211 software scan, and recovers from a previous select
 * that appears stuck by force-marking everything paused and bouncing the
 * radio.  Returns -EBUSY while a previous select is still in progress
 * (other return paths are not visible in this view).
 */
458 int ath9k_wiphy_select(struct ath_wiphy *aphy)
460 struct ath_softc *sc = aphy->sc;
463 spin_lock_bh(&sc->wiphy_lock);
464 if (__ath9k_wiphy_scanning(sc)) {
466 * For now, we are using mac80211 sw scan and it expects to
467 * have full control over channel changes, so avoid wiphy
468 * scheduling during a scan. This could be optimized if the
469 * scanning control were moved into the driver.
471 spin_unlock_bh(&sc->wiphy_lock);
474 if (__ath9k_wiphy_pausing(sc)) {
/* Track how long the previous select has been pending */
475 if (sc->wiphy_select_failures == 0)
476 sc->wiphy_select_first_fail = jiffies;
477 sc->wiphy_select_failures++;
/* Stuck for more than half a second: assume hw wedged, recover */
478 if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
480 printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
481 "out; disable/enable hw to recover\n");
482 __ath9k_wiphy_mark_all_paused(sc);
484 * TODO: this workaround to fix hardware is unlikely to
485 * be specific to virtual wiphy changes. It can happen
486 * on normal channel change, too, and as such, this
487 * should really be made more generic. For example,
488 * tricker radio disable/enable on GTT interrupt burst
489 * (say, 10 GTT interrupts received without any TX
490 * frame being completed)
492 spin_unlock_bh(&sc->wiphy_lock);
493 ath_radio_disable(sc, aphy->hw);
494 ath_radio_enable(sc, aphy->hw);
495 /* Only the primary wiphy hw is used for queuing work */
496 ieee80211_queue_work(aphy->sc->hw,
497 &aphy->sc->chan_work);
498 return -EBUSY; /* previous select still in progress */
500 spin_unlock_bh(&sc->wiphy_lock);
501 return -EBUSY; /* previous select still in progress */
503 sc->wiphy_select_failures = 0;
505 /* Store the new channel */
506 sc->chan_idx = aphy->chan_idx;
507 sc->chan_is_ht = aphy->chan_is_ht;
508 sc->next_wiphy = aphy;
510 __ath9k_wiphy_pause_all(sc);
/* True when every wiphy reached PAUSED synchronously */
511 now = !__ath9k_wiphy_pausing(aphy->sc);
512 spin_unlock_bh(&sc->wiphy_lock);
515 /* Ready to request channel change immediately */
516 ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
520 * wiphys will be unpaused in ath9k_tx_status() once channel has been
521 * changed if any wiphy needs time to become paused.
/*
 * Report whether any wiphy (primary or secondary) has been started, i.e.
 * is in a state other than ATH_WIPHY_INACTIVE.  Checked under wiphy_lock;
 * the return statements themselves are not visible in this view.
 */
527 bool ath9k_wiphy_started(struct ath_softc *sc)
530 spin_lock_bh(&sc->wiphy_lock);
531 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
532 spin_unlock_bh(&sc->wiphy_lock);
535 for (i = 0; i < sc->num_sec_wiphy; i++) {
536 if (sc->sec_wiphy[i] &&
537 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
538 spin_unlock_bh(&sc->wiphy_lock);
542 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Forcibly pause @aphy relative to @selected: during a scan everything but
 * the scanning wiphy is paused regardless of channel; otherwise a wiphy
 * already on the selected channel is presumably left running (the early
 * return after the else-if is not visible in this view — confirm against
 * full source).  No nullfunc frames are sent on this path.
 */
546 static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
547 struct ath_wiphy *selected)
549 if (selected->state == ATH_WIPHY_SCAN) {
550 if (aphy == selected)
553 * Pause all other wiphys for the duration of the scan even if
554 * they are on the current channel now.
556 } else if (aphy->chan_idx == selected->chan_idx)
558 aphy->state = ATH_WIPHY_PAUSED;
559 ieee80211_stop_queues(aphy->hw);
/*
 * Force-pause every ACTIVE wiphy that conflicts with @selected (used e.g.
 * when a scan starts), under wiphy_lock.
 */
562 void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
563 struct ath_wiphy *selected)
566 spin_lock_bh(&sc->wiphy_lock);
567 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
568 ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
569 for (i = 0; i < sc->num_sec_wiphy; i++) {
570 if (sc->sec_wiphy[i] &&
571 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
572 ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
574 spin_unlock_bh(&sc->wiphy_lock);
/*
 * Periodic wiphy scheduler work: round-robin over the secondary wiphys
 * (wiphy_scheduler_index is 1-based into sec_wiphy[]), falling back to the
 * primary wiphy, select the chosen wiphy for the radio, and re-arm the
 * delayed work with the configured interval.  A zero interval means the
 * scheduler is disabled.
 */
577 void ath9k_wiphy_work(struct work_struct *work)
579 struct ath_softc *sc = container_of(work, struct ath_softc,
581 struct ath_wiphy *aphy = NULL;
584 spin_lock_bh(&sc->wiphy_lock);
586 if (sc->wiphy_scheduler_int == 0) {
587 /* wiphy scheduler is disabled */
588 spin_unlock_bh(&sc->wiphy_lock);
/* Advance to the next schedulable (non-INACTIVE) secondary wiphy */
593 sc->wiphy_scheduler_index++;
594 while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
595 aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
596 if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
599 sc->wiphy_scheduler_index++;
/* Wrapped around: try the primary wiphy next */
603 sc->wiphy_scheduler_index = 0;
604 if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
609 /* No wiphy is ready to be scheduled */
611 aphy = sc->pri_wiphy;
614 spin_unlock_bh(&sc->wiphy_lock);
617 aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
618 ath9k_wiphy_select(aphy)) {
619 printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
/* Re-arm the scheduler tick */
623 ieee80211_queue_delayed_work(sc->hw,
625 sc->wiphy_scheduler_int);
/*
 * Set the wiphy scheduler interval to @msec_int milliseconds (0 disables
 * it).  Any pending tick is cancelled synchronously before re-arming.
 */
628 void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
630 cancel_delayed_work_sync(&sc->wiphy_work);
631 sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
632 if (sc->wiphy_scheduler_int)
633 ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
634 sc->wiphy_scheduler_int);
637 /* caller must hold wiphy_lock */
/* Return whether the primary and every existing secondary wiphy is idle. */
638 bool ath9k_all_wiphys_idle(struct ath_softc *sc)
641 if (!sc->pri_wiphy->idle)
643 for (i = 0; i < sc->num_sec_wiphy; i++) {
644 struct ath_wiphy *aphy = sc->sec_wiphy[i];
653 /* caller must hold wiphy_lock */
/*
 * Record @aphy's idle flag and emit a debug trace.  (The assignment of
 * aphy->idle is not visible in this view — presumably on a missing line.)
 */
654 void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
656 struct ath_softc *sc = aphy->sc;
659 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
660 "Marking %s as %s\n",
661 wiphy_name(aphy->hw->wiphy),
662 idle ? "idle" : "not-idle");
664 /* Only bother starting a queue on an active virtual wiphy */
/*
 * Wake mac80211 queue @skb_queue on every ACTIVE wiphy (primary first,
 * then the secondaries).  Runs under wiphy_lock; the txq_started result
 * and the hw reassignment for secondary wiphys are on lines not visible
 * in this view.
 */
665 bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
667 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
669 bool txq_started = false;
671 spin_lock_bh(&sc->wiphy_lock);
673 /* Start the primary wiphy */
674 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
675 ieee80211_wake_queue(hw, skb_queue);
680 /* Now start the secondary wiphy queues */
681 for (i = 0; i < sc->num_sec_wiphy; i++) {
682 struct ath_wiphy *aphy = sc->sec_wiphy[i];
685 if (aphy->state != ATH_WIPHY_ACTIVE)
689 ieee80211_wake_queue(hw, skb_queue);
695 spin_unlock_bh(&sc->wiphy_lock);
699 /* Go ahead and propagate information to all virtual wiphys, it won't hurt */
/*
 * Stop mac80211 queue @skb_queue on the primary wiphy and on every
 * existing secondary wiphy, under wiphy_lock.  Unlike the start path, no
 * state check is needed — stopping an already-stopped queue is harmless.
 */
700 void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
702 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
705 spin_lock_bh(&sc->wiphy_lock);
707 /* Stop the primary wiphy */
708 ieee80211_stop_queue(hw, skb_queue);
710 /* Now stop the secondary wiphy queues */
711 for (i = 0; i < sc->num_sec_wiphy; i++) {
712 struct ath_wiphy *aphy = sc->sec_wiphy[i];
716 ieee80211_stop_queue(hw, skb_queue);
718 spin_unlock_bh(&sc->wiphy_lock);