/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME              16
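/*
 * Worked example (illustrative, not from the original source): with the
 * half guard interval a symbol lasts 3.6 us, so SYMBOL_TIME_HALFGI(10)
 * = (10 * 18 + 4) / 5 = 36 us; the "+ 4" rounds the integer division up
 * to the next microsecond.
 */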
static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};
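/*
 * Reading the table (added illustrative note): MCS 7 on a 40 MHz channel
 * carries bits_per_symbol[7][1] = 540 data bits per 4 us symbol, i.e.
 * 135 Mbps with the long guard interval, or 150 Mbps with the 3.6 us
 * short-GI symbol.
 */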
#define IS_HT_RATE(_rate)     ((_rate) & 0x80)
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */

static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	list_splice_tail_init(head, &txq->axq_q);
	txq->axq_depth++;
	txq->axq_totalqueued++;
	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: txq depth = %d\n", __func__, txq->axq_depth);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: TXDP[%u] = %llx (%p)\n",
			__func__, txq->axq_qnum,
			ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
			__func__,
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);
}
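/*
 * Design note (summary added by the editor): on the first enqueue the
 * hardware TXDP register is pointed at the chain via ath9k_hw_puttxbuf();
 * on later enqueues only the ds_link word of the previously last
 * descriptor is patched, so the DMA engine keeps walking the chain
 * without a restart. axq_link always caches the address of that link
 * word, and ath9k_hw_txstart() (re)arms the queue.
 */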
/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *tid;
	tid = ATH_AN_2_TID(an, tidno);

	if (tid->state & AGGR_ADDBA_COMPLETE ||
	    tid->state & AGGR_ADDBA_PROGRESS)
		return 1;
	else
		return 0;
}
/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static bool is_pae(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (ieee80211_is_nullfunc(fc) ||
		    /* Port Access Entity (IEEE 802.1X) */
		    (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			return true;
		}
	}

	return false;
}
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}
static void setup_rate_retries(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rates = tx_info->control.rates;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_has_morefrags(fc) ||
	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
		rates[1].count = rates[2].count = rates[3].count = 0;
		rates[1].idx = rates[2].idx = rates[3].idx = 0;
		/* reset tries but keep rate index */
		rates[0].count = ATH_TXMAXTRY;
	}
}
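/*
 * Rationale (added note): all fragments of an MSDU must be sent at one
 * fixed rate, since each fragment's duration field reserves the medium
 * for the next fragment based on that rate; hence the fallback entries
 * are zeroed above and only rates[0] is retried.
 */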
/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	/* Get tidno */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/* Get seqno */

	if (ieee80211_is_data(fc) && !is_pae(skb)) {
		/* For HT capable stations, we save tidno for later use.
		 * We also override seqno set by upper layer with the one
		 * in tx aggregation state.
		 *
		 * If fragmentation is on, the sequence number is
		 * not overridden, since it has been
		 * incremented by the fragmentation routine.
		 *
		 * FIXME: check if the fragmentation threshold exceeds
		 * the IEEE 802.11 max.
		 */
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
		hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
					    IEEE80211_SEQ_SEQ_SHIFT);
		bf->bf_seqno = tid->seq_next;
		INCR(tid->seq_next, IEEE80211_SEQ_MAX);
	}
}
static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
			  struct ath_txq *txq)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;
	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
		flags |= ATH9K_TXDESC_RTSENA;

	return flags;
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->sc_txbuflock);

	if (unlikely(list_empty(&sc->sc_txbuf))) {
		spin_unlock_bh(&sc->sc_txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->sc_txbuflock);

	return bf;
}
/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
				struct ath_buf *bf,
				struct list_head *bf_q,
				int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ath_xmit_status tx_status;

	/*
	 * Set retry information.
	 * NB: Don't use the information in the descriptor, because the frame
	 * could be software retried.
	 */
	tx_status.retries = bf->bf_retries;
	tx_status.flags = 0;

	if (sendbar)
		tx_status.flags = ATH_TX_BAR;

	if (!txok) {
		tx_status.flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_status.flags |= ATH_TX_XRETRY;
	}

	/* Unmap this frame */
	pci_unmap_single(sc->pdev,
			 bf->bf_dmacontext,
			 skb->len,
			 PCI_DMA_TODEVICE);
	/* complete this frame */
	ath_tx_complete(sc, skb, &tx_status);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_bh(&sc->sc_txbuflock);
	list_splice_tail_init(bf_q, &sc->sc_txbuf);
	spin_unlock_bh(&sc->sc_txbuflock);
}
/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	/*
	 * if tid is paused, hold off
	 */
	if (tid->paused)
		return;

	/*
	 * add tid to ac at most once
	 */
	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	/*
	 * add node ac to txq at most once
	 */
	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}
/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);

	tid->paused++;

	spin_unlock_bh(&txq->axq_lock);
}
/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	/*
	 * Add this TID to scheduler and try to send out aggregates
	 */
	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}
/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      int txok)
{
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ATH_DS_BA_SEQ(ds);
		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
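/*
 * Example of the sliding window above (illustrative): with
 * seq_start = 100 and subframes 100..102 outstanding, completing 101
 * and 102 first only NULLs their slots; the window advances past all
 * three only once subframe 100 completes and the while loop drains the
 * leading empty slots.
 */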
/*
 * ath_pkt_dur - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - use the 3.6 us (instead of 4 us) symbol time
 */

static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	u32 nbits, nsymbits, duration, nsymbols;
	u8 rc;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
	rc = rt->info[rix].rateCode;

	/*
	 * for legacy rates, use old function to compute packet duration
	 */
	if (!IS_HT_RATE(rc))
		return ath9k_hw_computetxtime(sc->sc_ah, rt, pktlen, rix,
					      shortPreamble);
	/*
	 * find number of symbols: PLCP + data
	 */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/*
	 * add up duration for legacy/ht training and signal fields
	 */
	streams = HT_RC_2_STREAMS(rc);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
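/*
 * Worked example (illustrative): a 1500-byte MPDU at MCS 0, 20 MHz,
 * long GI needs nsymbols = ceil((1500 * 8 + 22) / 26) = 463 symbols,
 * i.e. SYMBOL_TIME(463) = 1852 us of payload, plus
 * L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) = 36 us of
 * preamble, 1888 us in total.
 */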
/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt;
	struct ath_desc *ds = bf->bf_desc;
	struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
	struct ath9k_11n_rate_series series[4];
	int i, flags, rtsctsena = 0;
	u32 ctsduration = 0;
	u8 rix = 0, cix, ctsrate = 0;
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->rate_driver_data[0];

	if (tx_info->control.sta)
		an = (struct ath_node *)tx_info->control.sta->drv_priv;

	/*
	 * get the cix for the lowest valid rix.
	 */
	rt = sc->sc_currates;
	for (i = 3; i >= 0; i--) {
		if (rates[i].count) {
			rix = rates[i].idx;
			break;
		}
	}

	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
	cix = rt->info[rix].controlRate;

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS. Note that this is only
	 * done for OFDM/HT unicast frames.
	 */
	if (sc->sc_protmode != PROT_M_NONE &&
	    (rt->info[rix].phy == PHY_OFDM ||
	     rt->info[rix].phy == PHY_HT) &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (sc->sc_protmode == PROT_M_RTSCTS)
			flags = ATH9K_TXDESC_RTSENA;
		else if (sc->sc_protmode == PROT_M_CTSONLY)
			flags = ATH9K_TXDESC_CTSENA;

		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/* For 11n, the default behavior is to enable RTS for
	 * hw retried frames. We enable the global flag here and
	 * let rate series flags determine which rates will actually
	 * use RTS.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
		/*
		 * 802.11g protection not needed, use our default behavior
		 */
		if (!rtsctsena)
			flags = ATH9K_TXDESC_RTSENA;
	}

	/*
	 * Set protection if aggregate protection on
	 */
	if (sc->sc_config.ath_aggr_prot &&
	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
		flags = ATH9K_TXDESC_RTSENA;
		cix = rt->info[sc->sc_protrix].controlRate;
		rtsctsena = 1;
	}

	/*
	 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
	 */
	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit)) {
		/*
		 * Ensure that in the case of SM Dynamic power save
		 * while we are bursting the second aggregate the
		 * RTS is cleared.
		 */
		flags &= ~(ATH9K_TXDESC_RTSENA);
	}
	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table. We must also factor
	 * in whether or not a short preamble is to be used.
	 * NB: cix is set above where RTS/CTS is enabled
	 */
	ctsrate = rt->info[cix].rateCode |
		(bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);

	/*
	 * Setup HAL rate series
	 */
	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	for (i = 0; i < 4; i++) {
		if (!rates[i].count)
			continue;

		rix = rates[i].idx;

		series[i].Rate = rt->info[rix].rateCode |
			(bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);

		series[i].Tries = rates[i].count;

		series[i].RateFlags = (
			(rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
				ATH9K_RATESERIES_RTS_CTS : 0) |
			((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
				ATH9K_RATESERIES_2040 : 0) |
			((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
				ATH9K_RATESERIES_HALFGI : 0);

		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
			 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
			 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
			 bf_isshpreamble(bf));

		if (bf_isht(bf) && an)
			series[i].ChSel = ath_chainmask_sel_logic(sc, an);
		else
			series[i].ChSel = sc->sc_tx_chainmask;

		if (rtsctsena)
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
	}

	/*
	 * set dur_update_en for l-sig computation except for PS-Poll frames
	 */
	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
				     !bf_ispspoll(bf),
				     ctsrate,
				     ctsduration,
				     series, 4, flags);

	if (sc->sc_config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
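/*
 * Usage note (added): the four series[] entries drive the hardware's
 * multi-rate retry - the chip transmits at series[0].Rate up to
 * series[0].Tries times, then falls back to series[1] and so on, with
 * RTS/CTS, 20/40 MHz and half-GI applied per series as set above.
 */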
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */

static int ath_tx_send_normal(struct ath_softc *sc,
			      struct ath_txq *txq,
			      struct ath_atx_tid *tid,
			      struct list_head *bf_head)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);

	return 0;
}
/* flush tid's software queue and send frames as non-AMPDUs */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	ASSERT(tid->paused > 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		ASSERT(!bf_isretried(bf));
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}
/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
				      struct ath_txq *txq,
				      struct ath_buf *bf,
				      struct list_head *bf_q,
				      int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_last = bf->bf_lastbf;
	struct ath_desc *ds = bf_last->bf_desc;
	struct ath_buf *bf_next, *bf_lastq = NULL;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);
	}

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		if (txok) {
			if (ATH_DS_TX_BA(ds)) {
				/*
				 * extract starting sequence and
				 * block-ack bitmap
				 */
				seq_st = ATH_DS_BA_SEQ(ds);
				memcpy(ba,
				       ATH_DS_BA_BITMAP(ds),
				       WME_BA_BMP_SIZE >> 3);
			} else {
				memset(ba, 0, WME_BA_BMP_SIZE >> 3);

				/*
				 * AR5416 can become deaf/mute when BA
				 * issue happens. Chip needs to be reset.
				 * But AP code may have synchronization issues
				 * when performing an internal reset in this
				 * routine. Only enable reset in STA mode
				 * for now.
				 */
				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
					needreset = 1;
			}
		} else {
			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
		} else if (!isaggr && txok) {
			/* transmit completion */
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}
		/*
		 * Remove ath_buf's of this sub-frame from aggregate queue.
		 */
		if (bf_next == NULL) { /* last subframe in the aggregate */
			ASSERT(bf->bf_lastfrm == bf_last);

			/*
			 * The last descriptor of the last sub frame could be
			 * a holding descriptor for h/w. If that's the case,
			 * bf->bf_lastfrm won't be in the bf_q.
			 * Make sure we handle bf_q properly here.
			 */
			if (!list_empty(bf_q)) {
				bf_lastq = list_entry(bf_q->prev,
					struct ath_buf, list);
				list_cut_position(&bf_head,
					bf_q, &bf_lastq->list);
			} else {
				/*
				 * XXX: if the last subframe only has one
				 * descriptor, which is also being used as
				 * the holding descriptor, then the ath_buf
				 * is not in the bf_q at all.
				 */
				INIT_LIST_HEAD(&bf_head);
			}
		} else {
			ASSERT(!list_empty(bf_q));
			list_cut_position(&bf_head,
				bf_q, &bf->bf_lastfrm->list);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			/* complete this sub-frame */
			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
		} else {
			/*
			 * retry the un-acked ones
			 */
			/*
			 * XXX: if the last descriptor is a holding descriptor,
			 * in order to requeue the frame to software queue, we
			 * need to allocate a new descriptor and
			 * copy the content of holding descriptor to it.
			 */
			if (bf->bf_next == NULL &&
			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
				struct ath_buf *tbf;

				/* allocate new descriptor */
				spin_lock_bh(&sc->sc_txbuflock);
				ASSERT(!list_empty((&sc->sc_txbuf)));
				tbf = list_first_entry(&sc->sc_txbuf,
						struct ath_buf, list);
				list_del(&tbf->list);
				spin_unlock_bh(&sc->sc_txbuflock);

				ATH_TXBUF_RESET(tbf);

				/* copy descriptor content */
				tbf->bf_mpdu = bf_last->bf_mpdu;
				tbf->bf_buf_addr = bf_last->bf_buf_addr;
				*(tbf->bf_desc) = *(bf_last->bf_desc);

				/* link it to the frame */
				if (bf_lastq) {
					bf_lastq->bf_desc->ds_link =
						tbf->bf_daddr;
					bf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						bf->bf_lastfrm->bf_desc);
				} else {
					tbf->bf_state = bf_last->bf_state;
					tbf->bf_lastfrm = tbf;
					ath9k_hw_cleartxdesc(sc->sc_ah,
						tbf->bf_lastfrm->bf_desc);

					/* copy the DMA context */
					tbf->bf_dmacontext =
						bf_last->bf_dmacontext;
				}
				list_add_tail(&tbf->list, &bf_head);
			} else {
				/*
				 * Clear descriptor status words for
				 * software retry
				 */
				ath9k_hw_cleartxdesc(sc->sc_ah,
					bf->bf_lastfrm->bf_desc);
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}
	if (tid->state & AGGR_CLEANUP) {
		/* check to see if we're done with cleaning the h/w queue */
		spin_lock_bh(&txq->axq_lock);

		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->addba_exchangeattempts = 0;
			spin_unlock_bh(&txq->axq_lock);

			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		} else
			spin_unlock_bh(&txq->axq_lock);

		return;
	}

	/*
	 * prepend un-acked frames to the beginning of the pending frame queue
	 */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		/* Note: we _prepend_, we _do_not_ add to
		 * the end of the queue ! */
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	if (needreset)
		ath_reset(sc, false);
}
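/*
 * Summary of the completion paths above (added note): each subframe is
 * (a) covered by the block-ack bitmap and completed as a success,
 * (b) software-retried and prepended back to the TID queue while it has
 * fewer than ATH_MAX_SW_RETRIES attempts, or (c) marked BUF_XRETRY and
 * failed, with sendbar requesting a BAR to move the peer's receive
 * window forward.
 */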
/* Process completed xmit descriptors from the specified queue */

static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds, *tmp_ds;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ath_tx_info_priv *tx_info_priv;
	int nacked, txok, nbad = 0, isrifs = 0;
	int status;

	DPRINTF(sc, ATH_DBG_QUEUE,
		"%s: tx queue %d (%x), link %p\n", __func__,
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	nacked = 0;
	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw reloads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/*
				 * The holding descriptor is the last
				 * descriptor in queue. It's safe to remove
				 * the last holding descriptor in BH context.
				 */
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				/* Lets work with the next buffer now */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}
		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc; /* NB: last descriptor */

		status = ath9k_hw_txprocdesc(ah, ds);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		if (bf->bf_desc == txq->axq_lastdsWithCTS)
			txq->axq_lastdsWithCTS = NULL;
		if (ds == txq->axq_gatingds)
			txq->axq_gatingds = NULL;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_status |= ATH_BUFSTATUS_STALE;
		INIT_LIST_HEAD(&bf_head);

		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;

		if (bf_isaggr(bf))
			txq->axq_aggr_depth--;

		txok = (ds->ds_txstat.ts_status == 0);

		spin_unlock_bh(&txq->axq_lock);

		if (bf_held) {
			list_del(&bf_held->list);
			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf_held->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
		}

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		} else {
			nbad = ath_tx_num_badfrms(sc, bf, txok);
		}
		skb = bf->bf_mpdu;
		tx_info = IEEE80211_SKB_CB(skb);
		tx_info_priv = (struct ath_tx_info_priv *)
			tx_info->rate_driver_data[0];
		if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
			tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
		    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
			if (ds->ds_txstat.ts_status == 0)
				nacked++;

			if (bf_isdata(bf)) {
				if (isrifs)
					tmp_ds = bf->bf_rifslast->bf_desc;
				else
					tmp_ds = ds;
				memcpy(&tx_info_priv->tx,
				       &tmp_ds->ds_txstat,
				       sizeof(tx_info_priv->tx));
				tx_info_priv->n_frames = bf->bf_nframes;
				tx_info_priv->n_bad_frames = nbad;
			}
		}

		/*
		 * Complete this transmit unit
		 */
		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

		/* Wake up mac80211 queue */

		spin_lock_bh(&txq->axq_lock);
		if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
				(ATH_TXBUF - 20)) {
			int qnum;
			qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
			if (qnum != -1) {
				ieee80211_wake_queue(sc->hw, qnum);
				txq->stopped = 0;
			}
		}

		/*
		 * schedule any pending packets if aggregation is enabled
		 */
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
	return nacked;
}
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
	DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
		__func__, txq->axq_qnum,
		ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}
/* Drain only the data queues */

static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ath_tx_stopdma(sc, &sc->sc_txq[i]);
				/* The TxDMA may not really be stopped.
				 * Double check the hal tx pending count */
				npend += ath9k_hw_numtxpending(ah,
					sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		DPRINTF(sc, ATH_DBG_XMIT,
			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah,
				    sc->sc_ah->ah_curchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing, true, &status)) {

			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset hardware; hal status %u\n",
				__func__,
				status);
		}
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
	}
}
/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	ASSERT(tid->tx_buf[cindex] == NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		(ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
			     struct ath_atx_tid *tid,
			     struct list_head *bf_head,
			     struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	BUG_ON(list_empty(bf_head));

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_splice_tail_init(bf_head, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return 0;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

	return 0;
}
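/*
 * Note on the fast path above (added): a lone frame whose sequence
 * number lies inside the BAW, on an idle enough hardware queue, skips
 * the software queue entirely; anything else waits in tid->buf_q so
 * ath_tx_sched_aggr() can later merge it into an A-MPDU.
 */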
/*
 * looks up the rate
 * returns aggr limit based on lowest of the rates
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
			   struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
	const struct ath9k_rate_table *rt = sc->sc_currates;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	struct ath_tx_info_priv *tx_info_priv;
	u32 max_4ms_framelen, frame_length;
	u16 aggr_limit, legacy = 0, maxampdu;
	int i;

	skb = (struct sk_buff *)bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	tx_info_priv =
		(struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			if (rt->info[rates[i].idx].phy != PHY_HT) {
				legacy = 1;
				break;
			}

			frame_length =
				rate_table->info[rates[i].idx].max_4ms_framelen;
			max_4ms_framelen = min(max_4ms_framelen, frame_length);
		}
	}

	/*
	 * limit aggregate size by the minimum rate if rate selected is
	 * not a probe rate; if the selected rate is a probe rate, then
	 * avoid aggregation of this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	aggr_limit = min(max_4ms_framelen,
		(u32)ATH_AMPDU_LIMIT_DEFAULT);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	maxampdu = tid->an->maxampdu;
	if (maxampdu)
		aggr_limit = min(aggr_limit, maxampdu);

	return aggr_limit;
}
/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * caller should make sure that the rate is an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
				  struct ath_atx_tid *tid,
				  struct ath_buf *bf,
				  u16 frmlen)
{
	const struct ath9k_rate_table *rt = sc->sc_currates;
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols, mpdudensity;
	u16 minlen;
	u8 rc, flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 * The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40Mhz and half or full GI.
	 */
	mpdudensity = tid->an->mpdudensity;

	/*
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */
	if (mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	rc = rt->info[rix].rateCode;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	/* Is frame shorter than required minimum length? */
	if (frmlen < minlen) {
		/* Get the minimum number of delimiters required. */
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
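/*
 * Worked example (illustrative): for an MPDU density of 8 us at MCS 15,
 * 40 MHz, long GI: nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and
 * nsymbits = 1080, so minlen = 2160 / 8 = 270 bytes; a 100-byte
 * subframe would then need (270 - 100) / ATH_AGGR_DELIM_SZ extra
 * delimiters on top of the standard count.
 */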
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q,
					     struct ath_buf **bf_last,
					     struct aggr_rifs_param *param,
					     int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
	struct list_head bf_head;
	int rl = 0, nframes = 0, ndelim;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
	int prev_al = 0;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(list_empty(&tid->buf_q));

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/*
		 * do not step over block-ack window
		 */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/*
		 * do not exceed aggregation limit
		 */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes && (aggr_limit <
			(al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/*
		 * do not exceed subframe limit
		 */
		if ((nframes + *prev_frames) >=
		    min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		/*
		 * add padding for previous frame to aggregation length
		 */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		bf->bf_lastfrm->bf_desc->ds_link = 0;

		/*
		 * this packet is part of an aggregate
		 * - remove all descriptors belonging to this frame from
		 *   software queue
		 * - add it to block ack window
		 * - set up descriptors for aggregation
		 */
		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_addto_baw(sc, tid, bf);

		list_for_each_entry(tbf, &bf_head, list) {
			ath9k_hw_set11n_aggr_middle(sc->sc_ah,
						    tbf->bf_desc, ndelim);
		}

		/*
		 * link buffers of this frame to the aggregate
		 */
		list_splice_tail_init(&bf_head, bf_q);
		nframes++;

		if (bf_prev) {
			bf_prev->bf_next = bf;
			bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
		}
		bf_prev = bf;

#ifdef AGGR_NOSHORT
		/*
		 * terminate aggregation on a small packet boundary
		 */
		if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
			status = ATH_AGGR_SHORTPKT;
			break;
		}
#endif
	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;
	*bf_last = bf_prev;

	return status;
#undef PADBYTES
}
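/*
 * Added note: the loop above terminates for one of three reasons - the
 * next subframe would fall outside the block-ack window
 * (ATH_AGGR_BAW_CLOSED), adding it would exceed the byte limit from
 * ath_lookup_rate() or the h_baw-derived subframe limit
 * (ATH_AGGR_LIMITED), or the TID queue ran dry (ATH_AGGR_DONE).
 */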
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */

static void ath_tx_sched_aggr(struct ath_softc *sc,
	struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;
	struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
	int prev_frames = 0;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
					  &prev_frames);

		/*
		 * no frames picked up to be aggregated; block-ack
		 * window is not open
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf_last = list_entry(bf_q.prev, struct ath_buf, list);
		bf->bf_lastbf = bf_last;

		/*
		 * if only one frame, send as non-aggregate
		 */
		if (bf->bf_nframes == 1) {
			ASSERT(bf->bf_lastfrm == bf_last);

			bf->bf_state.bf_type &= ~BUF_AGGR;
			/*
			 * clear aggr bits for every descriptor
			 * XXX TODO: is there a way to optimize it?
			 */
			list_for_each_entry(tbf, &bf_q, list) {
				ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
			}

			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}
		/*
		 * setup first desc with rate and aggr info
		 */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/*
		 * anchor last frame of aggregate correctly
		 */
		ASSERT(bf_lastaggr);
		ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
		tbf = bf_lastaggr;
		ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

		/* XXX: We don't enter into this loop, consider removing this */
		while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
			tbf = list_entry(tbf->list.next, struct ath_buf, list);
			ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
		}

		txq->axq_aggr_depth++;

		/*
		 * Normal aggregate, queue to hardware
		 */
		ath_tx_txqaddbuf(sc, txq, &bf_q);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}
/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
			  struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

		/* update baw for software retried frame */
		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		/*
		 * do not indicate packets while holding txq spinlock.
		 * unlock is intentional here
		 */
		spin_unlock(&txq->axq_lock);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

		spin_lock(&txq->axq_lock);
	}

	/*
	 * TODO: For frame(s) that are in the retry state, we will reuse the
	 * sequence number(s) without setting the retry bit. The
	 * alternative is to give up on these and BAR the receiver's window
	 * forward.
	 */
	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}
/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}
static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
				struct sk_buff *skb, struct scatterlist *sg,
				struct ath_tx_control *txctl)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_tx_info_priv *tx_info_priv;
	int hdrlen;
	__le16 fc;

	/* NB: the tx path can run in softirq context, so a sleeping
	 * GFP_KERNEL allocation would be unsafe here */
	tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
	tx_info->rate_driver_data[0] = tx_info_priv;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	/* Frame type */

	bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

	ieee80211_is_data(fc) ?
		(bf->bf_state.bf_type |= BUF_DATA) :
		(bf->bf_state.bf_type &= ~BUF_DATA);
	ieee80211_is_back_req(fc) ?
		(bf->bf_state.bf_type |= BUF_BAR) :
		(bf->bf_state.bf_type &= ~BUF_BAR);
	ieee80211_is_pspoll(fc) ?
		(bf->bf_state.bf_type |= BUF_PSPOLL) :
		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
	(sc->hw->conf.ht.enabled && !is_pae(skb) &&
	 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
		(bf->bf_state.bf_type |= BUF_HT) :
		(bf->bf_state.bf_type &= ~BUF_HT);
	bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

	/* Crypto */

	bf->bf_keytype = get_hw_crypto_keytype(skb);

	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	/* Rate series */

	setup_rate_retries(sc, skb);

	/* Assign seqno, tidno */

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	/* DMA setup */

	bf->bf_mpdu = skb;
	bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE);
	bf->bf_buf_addr = bf->bf_dmacontext;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct scatterlist *sg, u32 n_sg,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hal *ah = sc->sc_ah;
	int frm_type;

	frm_type = get_hw_packet_type(skb);

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	/* setup descriptor */

	ds = bf->bf_desc;
	ds->ds_link = 0;
	ds->ds_data = bf->bf_buf_addr;

	/* Formulate first tx descriptor with tx controls */

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    sg_dma_len(sg),		/* segment length */
			    true,			/* first segment */
			    (n_sg == 1) ? true : false,	/* last segment */
			    ds);			/* first descriptor */

	bf->bf_lastfrm = bf;
	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (ath_aggr_query(sc, an, bf->bf_tidno)) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_normal(sc, txctl->txq,
					   tid, &bf_head);
		}
	} else {
		bf->bf_lastbf = bf;
		bf->bf_nframes = 1;

		ath_buf_set_rate(sc, bf);
		ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
	}

	spin_unlock_bh(&txctl->txq->axq_lock);
}
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_buf *bf;
	struct scatterlist sg;

	/* Check if a tx buffer is available */

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
			__func__);
		return -1;
	}

	ath_tx_setup_buffer(sc, bf, skb, &sg, txctl);

	memset(&sg, 0, sizeof(struct scatterlist));
	sg_dma_address(&sg) = bf->bf_dmacontext;
	sg_dma_len(&sg) = skb->len;

	ath_tx_start_dma(sc, bf, &sg, 1, txctl);

	return 0;
}
/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	do {
		spin_lock_init(&sc->sc_txbuflock);

		/* Setup tx descriptors */
		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
					  "tx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate tx descriptors: %d\n",
				__func__, error);
			break;
		}

		/* XXX allocate beacon state together with vap */
		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
					  "beacon", ATH_BCBUF, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: failed to allocate "
				"beacon descriptors: %d\n",
				__func__, error);
			break;
		}

	} while (0);

	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
	/* cleanup beacon descriptors */
	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

	/* cleanup tx descriptors */
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

	return 0;
}
/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back
	 * up as well due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: hal qnum %u out of range, max %u!\n",
			__func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		txq->axq_linkbuf = NULL;
		sc->sc_txqsetup |= 1<<qnum;
	}
	return &sc->sc_txq[qnum];
}
/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}
/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues, in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: HAL AC %u out of range, max %zu!\n",
			__func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_haltype2q[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: HAL AC %u out of range, max %zu!\n",
				__func__,
				haltype, ARRAY_SIZE(sc->sc_haltype2q));
			return -1;
		}
		qnum = sc->sc_haltype2q[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->sc_bhalq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->sc_cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return qnum;
}
/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	int qnum;

	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
	txq = &sc->sc_txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	/* Try to avoid running out of descriptors */
	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: TX queue: %d is full, depth: %d\n",
			__func__, qnum, txq->axq_depth);
		ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}
/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_bhalq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return 0;
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to update hardware queue %u!\n",
			__func__, qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->sc_cabq->axq_qnum;
	struct ath_beacon_config conf;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
	qi.tqi_readyTime =
		(conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
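/*
 * Example (illustrative): with a 100 TU beacon interval and
 * cabqReadytime clamped to 10 (percent), tqi_readyTime becomes
 * (100 * 10) / 100 = 10, i.e. the CAB queue may transmit for roughly
 * 10% of each beacon interval after the beacon goes out.
 */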
/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->sc_txq[i]);
	}
}
void ath_tx_draintxq(struct ath_softc *sc,
		     struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;

	INIT_LIST_HEAD(&bf_head);

	/*
	 * NB: this assumes output has been stopped and
	 *     we do not need to block ath_tx_tasklet
	 */
	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			txq->axq_linkbuf = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		if (bf->bf_status & ATH_BUFSTATUS_STALE) {
			list_del(&bf->list);
			spin_unlock_bh(&txq->axq_lock);

			spin_lock_bh(&sc->sc_txbuflock);
			list_add_tail(&bf->list, &sc->sc_txbuf);
			spin_unlock_bh(&sc->sc_txbuflock);
			continue;
		}
		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_desc->ds_txstat.ts_flags =
				ATH9K_TX_SW_ABORTED;

		/* remove ath_buf's of the same mpdu from txq */
		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}
}
/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
	/* stop beacon queue. The beacon will be freed when
	 * we go to INIT state */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
		DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
	}

	ath_drain_txdataq(sc, retry_tx);
}
u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
	return sc->sc_txq[qnum].axq_aggr_depth;
}
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
			txtid->addba_exchangeattempts++;
			return true;
		}
	}

	return false;
}
/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		      u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->state |= AGGR_ADDBA_PROGRESS;
		ath_tx_pause_tid(sc, txtid);
	}

	return 0;
}
/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	ath_tx_aggr_teardown(sc, an, tid);

	return 0;
}
/* Resume tx aggregation */

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);

	if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->addba_exchangeattempts = 0;
		return;
	}

	/* TID must be paused first */
	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on software retried frames
			 * always staying at the head of the software
			 * queue.
			 */
			break;
		}
		list_cut_position(&bf_head,
			&txtid->buf_q, &bf->bf_lastfrm->list);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);

		/* complete this sub-frame */
		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}

	if (txtid->baw_head != txtid->baw_tail) {
		spin_unlock_bh(&txq->axq_lock);
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		txtid->addba_exchangeattempts = 0;
		spin_unlock_bh(&txq->axq_lock);
		ath_tx_flush_tid(sc, txtid);
	}
}
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	/* nothing to schedule */
	if (list_empty(&txq->axq_acq))
		return;
	/*
	 * get the first node/ac pair on the queue
	 */
	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	/*
	 * process a single tid per destination
	 */
	do {
		/* nothing to schedule */
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused) /* check next tid to keep h/w busy */
			continue;

		if ((txq->axq_depth % 2) == 0)
			ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		/* only schedule one TID at a time */
		break;
	} while (!list_empty(&ac->tid_q));

	/*
	 * schedule AC if more TIDs need processing
	 */
	if (!list_empty(&ac->tid_q)) {
		/*
		 * add dest ac to txq if not already added
		 */
		if (!ac->sched) {
			ac->sched = true;
			list_add_tail(&ac->list, &txq->axq_acq);
		}
	}
}
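/*
 * Scheduling note (editor's reading of the code above): one TID of one
 * AC is serviced per call, and a still-busy AC is re-queued at the tail
 * of axq_acq, yielding a simple round-robin across destinations. The
 * (txq->axq_depth % 2) == 0 test appears intended to limit how many
 * aggregates are pending in hardware at once.
 */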
/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	/*
	 * Init per tid tx state
	 */
	for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);

		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->an_aggr.tx.ac[acno];

		/* ADDBA state */
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

	/*
	 * Init per ac tx state
	 */
	for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}
/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			spin_lock(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock(&txq->axq_lock);
		}
	}
}
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
	int hdrlen, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
				"failed\n", __func__);
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	txctl.txq = sc->sc_cabq;

	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
		__func__,
		skb);

	if (ath_tx_start(sc, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}