/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"

static void ar9002_hw_rx_enable(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
{
	((struct ath_desc *) ds)->ds_link = ds_link;
}

static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
{
	*ds_link = &((struct ath_desc *)ds)->ds_link;
}

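/*
 * Decode the pending interrupt causes for AR9002-family MACs: the
 * primary cause is sampled from AR_ISR and then consumed through the
 * read-and-clear AR_ISR_RAC mirror, secondary beacon/timer causes come
 * from AR_ISR_S2, and on PCI parts fatal and timeout conditions are
 * taken from AR_INTR_SYNC_CAUSE.
 */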
static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;
	bool fatal_int = false;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!AR_SREV_9100(ah)) {
		if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
			if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
			    == AR_RTC_STATUS_ON)
				isr = REG_READ(ah, AR_ISR);
		}

		sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
			AR_INTR_SYNC_DEFAULT;

		*masked = 0;

		if (!isr && !sync_cause)
			return false;
	} else {
		*masked = 0;
		isr = REG_READ(ah, AR_ISR);
	}

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);
			if (isr2 & AR_ISR_S2_TIM)
				mask2 |= ATH9K_INT_TIM;
			if (isr2 & AR_ISR_S2_DTIM)
				mask2 |= ATH9K_INT_DTIM;
			if (isr2 & AR_ISR_S2_DTIMSYNC)
				mask2 |= ATH9K_INT_DTIMSYNC;
			if (isr2 & AR_ISR_S2_CABEND)
				mask2 |= ATH9K_INT_CABEND;
			if (isr2 & AR_ISR_S2_GTT)
				mask2 |= ATH9K_INT_GTT;
			if (isr2 & AR_ISR_S2_CST)
				mask2 |= ATH9K_INT_CST;
			if (isr2 & AR_ISR_S2_TSFOOR)
				mask2 |= ATH9K_INT_TSFOOR;
		}

		isr = REG_READ(ah, AR_ISR_RAC);
		if (isr == 0xffffffff) {
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation) {
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RX;
		}

		if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RX;
		if (isr &
		    (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
		     AR_ISR_TXEOL)) {
			u32 s0_s, s1_s;

			*masked |= ATH9K_INT_TX;

			s0_s = REG_READ(ah, AR_ISR_S0_S);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);

			s1_s = REG_READ(ah, AR_ISR_S1_S);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
		}

		if (isr & AR_ISR_RXORN) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "receive FIFO overrun interrupt\n");
		}

		if (!AR_SREV_9100(ah)) {
			if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
				u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
				if (isr5 & AR_ISR_S5_TIM_TIMER)
					*masked |= ATH9K_INT_TIM_TIMER;
			}
		}

		*masked |= mask2;
	}

	if (AR_SREV_9100(ah))
		return true;

	if (isr & AR_ISR_GENTMR) {
		u32 s5_s;

		s5_s = REG_READ(ah, AR_ISR_S5_S);
		ah->intr_gen_timer_trigger =
			MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);

		ah->intr_gen_timer_thresh =
			MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);

		if (ah->intr_gen_timer_trigger)
			*masked |= ATH9K_INT_GENTIMER;
	}

	if (sync_cause) {
		fatal_int =
			(sync_cause &
			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
			? true : false;

		if (fatal_int) {
			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
				ath_print(common, ATH_DBG_ANY,
					  "received PCI FATAL interrupt\n");
			}
			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
				ath_print(common, ATH_DBG_ANY,
					  "received PCI PERR interrupt\n");
			}
			*masked |= ATH9K_INT_FATAL;
		}
		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}
		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
		}

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
	}

	return true;
}

static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
				  bool is_firstseg, bool is_lastseg,
				  const void *ds0, dma_addr_t buf_addr,
				  u32 qcu)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_data = buf_addr;

	if (is_firstseg) {
		ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
	} else if (is_lastseg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = seglen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		/* intermediate descriptor of a multi-segment frame */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = seglen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

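/*
 * Parse a completed TX descriptor into ath_tx_status. Returns
 * -EINPROGRESS until the hardware has set AR_TxDone in ds_txstatus9,
 * so callers simply retry the head descriptor later.
 */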
static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
				 struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ts->ts_tstamp = ads->AR_SendTimestamp;
	ts->ts_status = 0;
	ts->ts_flags = 0;

	if (ads->ds_txstatus1 & AR_FrmXmitOK)
		ts->ts_status |= ATH9K_TX_ACKED;
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->AR_BaBitmapLow;
		ts->ba_high = ads->AR_BaBitmapHigh;
	}

	ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ts->ts_rateindex) {
	case 0:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ts->evm0 = ads->AR_TxEVM0;
	ts->evm1 = ads->AR_TxEVM1;
	ts->evm2 = ads->AR_TxEVM2;
	ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);

	return 0;
}

static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
				    u32 pktLen, enum ath9k_pkt_type type,
				    u32 txPower, u32 keyIx,
				    enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}

static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
					  void *lastds,
					  u32 durUpdateEn, u32 rtsctsRate,
					  u32 rtsctsDuration,
					  struct ath9k_11n_rate_series series[],
					  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

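/*
 * The aggregation helpers below mark a descriptor chain for an A-MPDU:
 * every subframe carries AR_IsAggr, AR_MoreAggr is cleared only on the
 * final subframe, and the delimiter padding (AR_PadDelim) is set on
 * intermediate subframes and cleared again on the last one.
 */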
static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
					u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
					 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~(AR_IsAggr | AR_MoreAggr);
}

static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
					   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
					     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	ops->rx_enable = ar9002_hw_rx_enable;
	ops->set_desc_link = ar9002_hw_set_desc_link;
	ops->get_desc_link = ar9002_hw_get_desc_link;
	ops->get_isr = ar9002_hw_get_isr;
	ops->fill_txdesc = ar9002_hw_fill_txdesc;
	ops->proc_txdesc = ar9002_hw_proc_txdesc;
	ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
	ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
	ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
	ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
	ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
	ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
	ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
	ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
}

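/*
 * Illustrative sketch (not part of the driver): once the table above is
 * attached, higher layers reach these routines through the shared ops
 * interface rather than by name, e.g.
 *
 *	ath9k_hw_ops(ah)->rx_enable(ah);	// -> ar9002_hw_rx_enable()
 *
 * which is what lets the AR9002 and newer MAC families coexist behind
 * one hardware abstraction.
 */
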
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, with a 512-byte FIFO and a DMA request
 * size of 128 bytes, the trigger level cannot exceed 6 * 64 = 384 bytes:
 * the FIFO must keep room for one full transfer, so it stops filling at
 * 512 - 128 = 384 bytes, and a threshold beyond 6 would hang transmit.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);

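/*
 * A worked reading of the field helpers used above (illustrative): the
 * trigger level lives in the AR_FTRIG field of AR_TXCFG, so
 *
 *	curLevel = MS(txcfg, AR_FTRIG);		// extract the field
 *	txcfg = (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG); // re-insert it
 *
 * MS()/SM() are the driver's mask-and-shift accessors for packed
 * register fields.
 */
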
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		/* Kill the last frame via a short quiet-time window */
		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to stop TX DMA in 100 "
					  "msec after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

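/*
 * Note on the contention-window handling below: requested
 * tqi_cwmin/tqi_cwmax values are rounded up to the next (2^n - 1),
 * which is the only shape of window the backoff hardware accepts.
 */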
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

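/*
 * Usage sketch (assumed caller code, not part of this file): a typical
 * data-queue bring-up pairs ath9k_hw_setuptxqueue() below with a later
 * ath9k_hw_resettxqueue() to push the settings into the hardware:
 *
 *	struct ath9k_tx_queue_info qi;
 *	int q;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
 *	q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *	if (q != -1)
 *		ath9k_hw_resettxqueue(ah, q);
 */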
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		/* Round up to the nearest (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

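/*
 * ath9k_hw_rxprocdesc() below returns -EINPROGRESS until the hardware
 * sets AR_RxDone, and parses from a local snapshot of the descriptor so
 * the status words cannot change underneath it while they are decoded.
 */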
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
}
EXPORT_SYMBOL(ath9k_hw_setuprxdesc);

/*
 * This can stop or re-enable RX.
 *
 * If "set" is true, kill any frame currently being transferred between
 * the MAC and baseband, and prevent new frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
				  reg);
			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);

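/*
 * The usual RX teardown order (as implied by the helpers here, not
 * spelled out anywhere) is to quiesce the PCU first via
 * ath9k_hw_stoppcurecv() and only then stop the DMA engine below, so
 * no new frame can start while AR_CR_RXD drains.
 */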
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "DMA failed to stop in %d ms "
			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			  AH_RX_STOP_DMA_TIMEOUT / 1000,
			  REG_READ(ah, AR_CR),
			  REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

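/*
 * ath9k_hw_set_interrupts() below swaps in a new mask with the global
 * enable (AR_IER) dropped first and re-asserted last; each disable is
 * read back so the write posts before the mask registers are touched.
 */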
enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
				       enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	if (omask & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
		(void) REG_READ(ah, AR_IER);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
		}
	}

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		if (ah->txok_interrupt_mask)
			mask |= AR_IMR_TXOK;
		if (ah->txdesc_interrupt_mask)
			mask |= AR_IMR_TXDESC;
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	if (ints & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
				  AR_INTR_MAC_IRQ);
			REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
				  AR_INTR_SYNC_DEFAULT);
			REG_WRITE(ah, AR_INTR_SYNC_MASK,
				  AR_INTR_SYNC_DEFAULT);
		}
		ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
			  REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
	}

	return omask;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);

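/*
 * Usage sketch (illustrative): the save/restore idiom used by
 * ath9k_hw_updatetxtriglevel() above builds directly on the returned
 * old mask:
 *
 *	enum ath9k_int omask;
 *
 *	omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
 *	// ...touch state that must not race the interrupt handler...
 *	ath9k_hw_set_interrupts(ah, omask);
 */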