/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
/* return bus cachesize in 4B word units */
static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
	*csz = (int)u8tmp;

	/* This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set. */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* Use the default size */
}
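/*
 * Worked example (illustrative, not driver code): a PCI config read of
 * 0x10 means a 16-dword (64-byte) cache line, so *csz ends up as 16;
 * ath_init() below converts it back to bytes with "csz << 2".
 */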
static u8 parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction, 1 for 1/4 us, 2 for 1/2 us, 3 for 1 us,
	 *   4 for 2 us, 5 for 4 us, 6 for 8 us, 7 for 16 us
	 */
	switch (mpdudensity) {
	/* Our lower layer calculations limit our precision to
	   1 microsecond */
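/*
 * Illustrative mapping, assuming the switch above follows the 802.11n
 * table with sub-microsecond spacings rounded up to the 1 us precision
 * noted in the comment: densities 1..3 all yield 1, 5 yields 4, and
 * 7 yields 16.
 */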
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		if (ix == 0xff)
			continue;

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}

	sc->sc_currates = rt;
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
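/*
 * Illustrative lookup using the two maps built above (a sketch, not
 * driver code): for a hardware rate code rc reported in a descriptor,
 * sc->sc_rixmap[rc] recovers the rate-table index, and
 * sc->sc_hwmap[rc].rateKbps gives the corresponding bitrate in Kbps.
 */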
/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		sband->n_bitrates++;
		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags. If no valid set of flags
 * exist, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	if (chan->chanmode == CHANNEL_A)
		return ATH9K_MODE_11A;
	else if (chan->chanmode == CHANNEL_G)
		return ATH9K_MODE_11G;
	else if (chan->chanmode == CHANNEL_B)
		return ATH9K_MODE_11B;
	else if (chan->chanmode == CHANNEL_A_HT20)
		return ATH9K_MODE_11NA_HT20;
	else if (chan->chanmode == CHANNEL_G_HT20)
		return ATH9K_MODE_11NG_HT20;
	else if (chan->chanmode == CHANNEL_A_HT40PLUS)
		return ATH9K_MODE_11NA_HT40PLUS;
	else if (chan->chanmode == CHANNEL_A_HT40MINUS)
		return ATH9K_MODE_11NA_HT40MINUS;
	else if (chan->chanmode == CHANNEL_G_HT40PLUS)
		return ATH9K_MODE_11NG_HT40PLUS;
	else if (chan->chanmode == CHANNEL_G_HT40MINUS)
		return ATH9K_MODE_11NG_HT40MINUS;

	WARN_ON(1);	/* should not get here */

	return ATH9K_MODE_11B;
}
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *   stop output from above
	 *   clear transmit machinery
	 *   clear receive machinery
	 *   reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart things, a la ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID)	/* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%u MHz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n", __func__);
			return -EIO;
		}
		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */
		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return 0;
}
/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	/* ath_chainmask_sel_period is in ms; mod_timer() wants jiffies */
	mod_timer(&cm->timer,
		  jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memset(cm, 0, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching in one of the following if conditions.
	 * sc_chainmask_auto_sel is used for internal global auto-switching
	 * enabled/disabled setting
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
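/*
 * Worked example (illustrative threshold values): with a down threshold
 * of 50 and an up threshold of 35, a 3x3-capable node transmitting on
 * all chains falls back to the configured mask once the averaged RSSI
 * reaches 50 or more, and returns to ATH_CHAINMASK_SEL_3X3 once it
 * drops to 35 or less; the timer restarted after each switch suppresses
 * flapping in between.
 */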
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1; for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
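/*
 * Illustrative call site (a sketch; "is_ht" stands for the peer's HT
 * capability, however the caller obtains it):
 *
 *	ath_update_chainmask(sc, is_ht);
 *
 * 11n peers then get the full configured masks, legacy peers drop to 1x1.
 */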
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * Don't calibrate when we're scanning.
	 * We are most likely not on our home channel.
	 */
	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
			__func__, jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
				__func__, jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	    ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"%s: calibrate chan %u/%x nf: %d\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"%s: calibrate chan %u/%x failed\n",
					__func__,
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_ANI_POLLINTERVAL;
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
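/*
 * Worked example (illustrative, assuming the conventional values of
 * 100 ms for ATH_ANI_POLLINTERVAL and 1000 ms for ATH_SHORT_CALINTERVAL):
 * while calibration is unfinished the timer rearms every
 * min(100, 1000) = 100 ms, so the ANI poll rate also bounds how quickly
 * short calibration gets another chance to run.
 */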
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously set up. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		return error;
	}
	spin_unlock_bh(&sc->sc_resetlock);
	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		return error;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;

	return error;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}
int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupt before setting the SC_OP_INVALID flag */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);

	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */
irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 * RXE bit is written, but it doesn't work
				 * at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
/* Deferred interrupt processing */
static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {
		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	u32 csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
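	/*
	 * Illustrative layout for the reservations above (assuming
	 * splitmic, as the comment notes): WEP key 0 pins slots 0, 32,
	 * 64 and 96, so a TKIP key occupying slot i keeps its MIC
	 * material at the fixed offsets i + 32, i + 64 and i + 32 + 64.
	 */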
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels. The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);
	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;
	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}

/*******************/
/* Node Management */
/*******************/
void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_init(sc, an);
	if (sc->sc_flags & SC_OP_RXAGGR)
		ath_rx_node_init(sc, an);

	an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
			     sta->ht_cap.ampdu_factor);
	an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
}
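/*
 * Worked example (illustrative, assuming the usual
 * IEEE80211_HTCAP_MAXRXAMPDU_FACTOR of 13): a peer advertising
 * ampdu_factor 3 gives maxampdu = 1 << (13 + 3) = 65536 bytes, and an
 * ampdu_density of 5 parses to a 4 us minimum MPDU start spacing.
 */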
void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_cleanup(sc, an);
	if (sc->sc_flags & SC_OP_RXAGGR)
		ath_rx_node_cleanup(sc, an);
}

/*
 * Set up driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
}
void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
					     keyix, hk, mac, false);

	return status != false;
}
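/*
 * Illustrative call (a sketch, not from this file): since ath_keyset()
 * returns nonzero on success, installing a pairwise key would look like
 *
 *	if (!ath_keyset(sc, keyix, &hk, sta->addr))
 *		return -EIO;
 */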
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}
/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	u32 MaxVal = *val++;
	u8 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > MaxVal) {
			MaxVal = *val;
			*max_index = cur_index;
		}
		val++;
	}
	return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {
			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
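	/*
	 * Worked example for the macros above (illustrative): 0xFFF - 0xF7F
	 * is 0x80, i.e. 128 bytes or one 32-dword descriptor fetch, so
	 * ATH_DESC_4KB_BOUND_CHECK flags any address whose descriptor would
	 * reach into the next 4 KB page; a 40960-byte pool makes
	 * ATH_DESC_4KB_BOUND_NUM_SKIPPED return 10, one skip per page.
	 */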
	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memset(bf, 0, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fff) | rstamp;
}
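/*
 * Worked example (illustrative): if the full TSF reads 0x10000100 and the
 * descriptor stamp is 0x7F00, then (tsf & 0x7fff) = 0x0100 < 0x7F00, so
 * the low 15 bits wrapped after the frame arrived; backing tsf off by
 * 0x8000 before splicing in rstamp yields 0x0FFFFF00, the correct
 * arrival time.
 */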
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}