/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)

#define OFDM_SIFS_TIME          16
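
/*
 * Illustrative sketch (not part of the driver): the macros above encode
 * the two 802.11n OFDM symbol durations, 4 us with a long guard interval
 * and 3.6 us with a short guard interval. The half-GI variants use the
 * 18/5 = 3.6 ratio with integer rounding so that converting a duration
 * to symbols and back never overestimates the airtime.
 */
#if 0
static void symbol_time_demo(void)
{
	/* 10 symbols at long GI: 10 << 2 = 40 us */
	u32 t_lgi = SYMBOL_TIME(10);			/* 40 */
	/* 10 symbols at short GI: (10 * 18 + 4) / 5 = 36 us (10 * 3.6) */
	u32 t_sgi = SYMBOL_TIME_HALFGI(10);		/* 36 */
	/* 8 us of airtime holds 2 long-GI symbols ... */
	u32 n_lgi = NUM_SYMBOLS_PER_USEC(8);		/* 2 */
	/* ... and (8*5 - 4)/18 = 2 short-GI symbols */
	u32 n_sgi = NUM_SYMBOLS_PER_USEC_HALFGI(8);	/* 2 */

	(void)t_lgi; (void)t_sgi; (void)n_lgi; (void)n_sgi;
}
#endif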

static u32 bits_per_symbol[][2] = {
	/* 20MHz 40MHz */
	{    26,   54 },	/*  0: BPSK */
	{    52,  108 },	/*  1: QPSK 1/2 */
	{    78,  162 },	/*  2: QPSK 3/4 */
	{   104,  216 },	/*  3: 16-QAM 1/2 */
	{   156,  324 },	/*  4: 16-QAM 3/4 */
	{   208,  432 },	/*  5: 64-QAM 2/3 */
	{   234,  486 },	/*  6: 64-QAM 3/4 */
	{   260,  540 },	/*  7: 64-QAM 5/6 */
	{    52,  108 },	/*  8: BPSK */
	{   104,  216 },	/*  9: QPSK 1/2 */
	{   156,  324 },	/* 10: QPSK 3/4 */
	{   208,  432 },	/* 11: 16-QAM 1/2 */
	{   312,  648 },	/* 12: 16-QAM 3/4 */
	{   416,  864 },	/* 13: 64-QAM 2/3 */
	{   468,  972 },	/* 14: 64-QAM 3/4 */
	{   520, 1080 },	/* 15: 64-QAM 5/6 */
};
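
/*
 * Illustrative sketch (not part of the driver): each table entry is the
 * number of data bits carried by one 4 us OFDM symbol, so the nominal
 * long-GI PHY rate in kbps is entry * 1000 / 4. For example, MCS 7 at
 * 40 MHz carries 540 bits per symbol, i.e. 135 Mbps.
 */
#if 0
static u32 mcs_rate_kbps(u8 mcs, int is_40mhz)
{
	if (mcs > 15)
		return 0;
	/* bits per 4 us symbol -> kbps: multiply by 1000/4 = 250 */
	return bits_per_symbol[mcs][is_40mhz ? 1 : 0] * 250;
}
/* mcs_rate_kbps(7, 1) == 540 * 250 == 135000 kbps */
#endif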

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok);
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc);

static int ath_max_4ms_framelen[4][32] = {
	[MCS_HT20] = {
		3212,  6432,  9648, 12864, 19300, 25736, 28952, 32172,
		6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
		9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
		12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
	},
	[MCS_HT20_SGI] = {
		3572,  7144, 10720, 14296, 21444, 28596, 32172, 35744,
		7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
		10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
		14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40] = {
		6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
		13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
		20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
		26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
	},
	[MCS_HT40_SGI] = {
		7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
		14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
		22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
		29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
	},
};
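
/*
 * Illustrative sketch (not part of the driver): ath_lookup_rate() below
 * indexes this table with a row per PHY mode (HT20, HT20 short GI, HT40,
 * HT40 short GI) and a column per MCS, yielding the largest frame that
 * still fits in a 4 ms transmit duration at that rate; 65532 marks rates
 * where the 16-bit hardware aggregate limit is reached first.
 */
#if 0
static int max_4ms_len(u8 mcs, int is_40mhz, int is_sgi)
{
	int modeidx = is_40mhz ? MCS_HT40 : MCS_HT20;

	if (is_sgi)
		modeidx++;	/* the short-GI row follows its long-GI row */

	return ath_max_4ms_framelen[modeidx][mcs];
}
/* max_4ms_len(7, 0, 0) == 32172 bytes, roughly 65 Mbps * 4 ms / 8 bits */
#endif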

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->paused)
		return;

	if (tid->sched)
		return;

	tid->sched = true;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = true;
	list_add_tail(&ac->list, &txq->axq_acq);
}

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	spin_lock_bh(&txq->axq_lock);
	tid->paused++;
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0)
		goto unlock;

	if (list_empty(&tid->buf_q))
		goto unlock;

	ath_tx_queue_tid(txq, tid);
	ath_txq_schedule(sc, txq);
unlock:
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
	struct ath_buf *bf;
	struct list_head bf_head;
	INIT_LIST_HEAD(&bf_head);

	BUG_ON(tid->paused <= 0);
	spin_lock_bh(&txq->axq_lock);

	tid->paused--;

	if (tid->paused > 0) {
		spin_unlock_bh(&txq->axq_lock);
		return;
	}

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		BUG_ON(bf_isretried(bf));
		list_move_tail(&bf->list, &bf_head);
		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
	}

	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			      int seqno)
{
	int index, cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	tid->tx_buf[cindex] = NULL;

	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}

static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
			     struct ath_buf *bf)
{
	int index, cindex;

	if (bf_isretried(bf))
		return;

	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	BUG_ON(tid->tx_buf[cindex] != NULL);
	tid->tx_buf[cindex] = bf;

	if (index >= ((tid->baw_tail - tid->baw_head) &
		      (ATH_TID_MAX_BUFS - 1))) {
		tid->baw_tail = cindex;
		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
	}
}
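
/*
 * Illustrative sketch (not part of the driver): the two functions above
 * maintain the block-ack window as a circular buffer. ATH_BA_INDEX()
 * gives the offset of a sequence number from the window start, and
 * cindex wraps that offset onto tx_buf[]. A toy version of the
 * "slide the window past completed frames" loop:
 */
#if 0
#define DEMO_BAW_SIZE	64	/* stand-in for ATH_TID_MAX_BUFS */
#define DEMO_SEQ_MAX	4096	/* stand-in for IEEE80211_SEQ_MAX */

struct demo_baw {
	void *slot[DEMO_BAW_SIZE];	/* outstanding frames, NULL = done */
	u16 seq_start;			/* lowest un-acked sequence number */
	int head, tail;			/* circular indices of the window */
};

static void demo_baw_complete(struct demo_baw *w, u16 seqno)
{
	int index  = (seqno - w->seq_start) & (DEMO_SEQ_MAX - 1);
	int cindex = (w->head + index) % DEMO_BAW_SIZE;

	w->slot[cindex] = NULL;

	/* slide the window past the leading run of completed frames */
	while (w->head != w->tail && !w->slot[w->head]) {
		w->seq_start = (w->seq_start + 1) % DEMO_SEQ_MAX;
		w->head = (w->head + 1) % DEMO_BAW_SIZE;
	}
}
#endif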

/*
 * TODO: For frame(s) that are in the retry state, we will reuse the
 * sequence number(s) without setting the retry bit. The
 * alternative is to give up on these and BAR the receiver's window
 * forward.
 */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
			  struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&tid->buf_q))
			break;

		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
		list_move_tail(&bf->list, &bf_head);

		if (bf_isretried(bf))
			ath_tx_update_baw(sc, tid, bf->bf_seqno);

		spin_unlock(&txq->axq_lock);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
		spin_lock(&txq->axq_lock);
	}

	tid->seq_next = tid->seq_start;
	tid->baw_tail = tid->baw_head;
}

static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
			     struct ath_buf *bf)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	bf->bf_state.bf_type |= BUF_RETRY;
	bf->bf_retries++;
	TX_STAT_INC(txq->axq_qnum, a_retries);

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}

static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	spin_lock_bh(&sc->tx.txbuflock);

	if (unlikely(list_empty(&sc->tx.txbuf))) {
		spin_unlock_bh(&sc->tx.txbuflock);
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	spin_unlock_bh(&sc->tx.txbuflock);

	return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	spin_lock_bh(&sc->tx.txbuflock);
	list_add_tail(&bf->list, &sc->tx.txbuf);
	spin_unlock_bh(&sc->tx.txbuflock);
}

static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_tx_get_buffer(sc);
	if (WARN_ON(!tbf))
		return NULL;

	ATH_TXBUF_RESET(tbf);

	tbf->aphy = bf->aphy;
	tbf->bf_mpdu = bf->bf_mpdu;
	tbf->bf_buf_addr = bf->bf_buf_addr;
	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
	tbf->bf_state = bf->bf_state;
	tbf->bf_dmacontext = bf->bf_dmacontext;

	return tbf;
}

static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
				 struct ath_buf *bf, struct list_head *bf_q,
				 struct ath_tx_status *ts, int txok)
{
	struct ath_node *an = NULL;
	struct sk_buff *skb;
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct ath_atx_tid *tid = NULL;
	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
	struct list_head bf_head, bf_pending;
	u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
	bool rc_update = true;

	skb = bf->bf_mpdu;
	hdr = (struct ieee80211_hdr *)skb->data;

	tx_info = IEEE80211_SKB_CB(skb);
	hw = bf->aphy->hw;

	rcu_read_lock();

	/* XXX: use ieee80211_find_sta! */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	an = (struct ath_node *)sta->drv_priv;
	tid = ATH_AN_2_TID(an, bf->bf_tidno);

	isaggr = bf_isaggr(bf);
	memset(ba, 0, WME_BA_BMP_SIZE >> 3);

	if (isaggr && txok) {
		if (ts->ts_flags & ATH9K_TX_BA) {
			seq_st = ts->ts_seqnum;
			memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
		} else {
			/*
			 * AR5416 can become deaf/mute when a BA
			 * issue happens. The chip needs to be reset.
			 * But AP code may have synchronization issues
			 * when performing an internal reset in this routine.
			 * Only enable reset in STA mode for now.
			 */
			if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
				needreset = 1;
		}
	}

	INIT_LIST_HEAD(&bf_pending);
	INIT_LIST_HEAD(&bf_head);

	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
	while (bf) {
		txfail = txpending = 0;
		bf_next = bf->bf_next;

		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
			/* transmit completion, subframe is
			 * acked by block ack */
			acked_cnt++;
		} else if (!isaggr && txok) {
			/* transmit completion */
			acked_cnt++;
		} else {
			if (!(tid->state & AGGR_CLEANUP) &&
			    !bf_last->bf_tx_aborted) {
				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
					ath_tx_set_retry(sc, txq, bf);
					txpending = 1;
				} else {
					bf->bf_state.bf_type |= BUF_XRETRY;
					txfail = 1;
					sendbar = 1;
					txfail_cnt++;
				}
			} else {
				/*
				 * cleanup in progress, just fail
				 * the un-acked sub-frames
				 */
				txfail = 1;
			}
		}

		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    bf_next == NULL) {
			/*
			 * Make sure the last desc is reclaimed if it
			 * is not a holding desc.
			 */
			if (!bf_last->bf_stale)
				list_move_tail(&bf->list, &bf_head);
			else
				INIT_LIST_HEAD(&bf_head);
		} else {
			BUG_ON(list_empty(bf_q));
			list_move_tail(&bf->list, &bf_head);
		}

		if (!txpending) {
			/*
			 * complete the acked-ones/xretried ones; update
			 * block-ack window
			 */
			spin_lock_bh(&txq->axq_lock);
			ath_tx_update_baw(sc, tid, bf->bf_seqno);
			spin_unlock_bh(&txq->axq_lock);

			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
				ath_tx_rc_status(bf, ts, nbad, txok, true);
				rc_update = false;
			} else {
				ath_tx_rc_status(bf, ts, nbad, txok, false);
			}

			ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
				!txfail, sendbar);
		} else {
			/* retry the un-acked ones */
			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
				if (bf->bf_next == NULL && bf_last->bf_stale) {
					struct ath_buf *tbf;

					tbf = ath_clone_txbuf(sc, bf_last);
					/*
					 * Update tx baw and complete the
					 * frame with failed status if we
					 * run out of tx buf.
					 */
					if (!tbf) {
						spin_lock_bh(&txq->axq_lock);
						ath_tx_update_baw(sc, tid,
								bf->bf_seqno);
						spin_unlock_bh(&txq->axq_lock);

						bf->bf_state.bf_type |=
							BUF_XRETRY;
						ath_tx_rc_status(bf, ts, nbad,
								0, false);
						ath_tx_complete_buf(sc, bf, txq,
								    &bf_head,
								    ts, 0, 0);
						break;
					}

					ath9k_hw_cleartxdesc(sc->sc_ah,
							     tbf->bf_desc);
					list_add_tail(&tbf->list, &bf_head);
				} else {
					/*
					 * Clear descriptor status words for
					 * software retry
					 */
					ath9k_hw_cleartxdesc(sc->sc_ah,
							     bf->bf_desc);
				}
			}

			/*
			 * Put this buffer to the temporary pending
			 * queue to retain ordering
			 */
			list_splice_tail_init(&bf_head, &bf_pending);
		}

		bf = bf_next;
	}

	if (tid->state & AGGR_CLEANUP) {
		if (tid->baw_head == tid->baw_tail) {
			tid->state &= ~AGGR_ADDBA_COMPLETE;
			tid->state &= ~AGGR_CLEANUP;

			/* send buffered frames as singles */
			ath_tx_flush_tid(sc, tid);
		}
		rcu_read_unlock();
		return;
	}

	/* prepend un-acked frames to the beginning of the pending frame queue */
	if (!list_empty(&bf_pending)) {
		spin_lock_bh(&txq->axq_lock);
		list_splice(&bf_pending, &tid->buf_q);
		ath_tx_queue_tid(txq, tid);
		spin_unlock_bh(&txq->axq_lock);
	}

	rcu_read_unlock();

	if (needreset)
		ath_reset(sc, false);
}

static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
			   struct ath_atx_tid *tid)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	u32 max_4ms_framelen, frmlen;
	u16 aggr_limit, legacy = 0;
	int i;

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;

	/*
	 * Find the lowest frame length among the rate series that will have a
	 * 4ms transmit duration.
	 * TODO - TXOP limit needs to be considered.
	 */
	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

	for (i = 0; i < 4; i++) {
		if (rates[i].count) {
			int modeidx;
			if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
				legacy = 1;
				break;
			}

			if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
				modeidx = MCS_HT40;
			else
				modeidx = MCS_HT20;

			if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
				modeidx++;

			frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
			max_4ms_framelen = min(max_4ms_framelen, frmlen);
		}
	}

	/*
	 * Limit aggregate size by the minimum rate if the selected rate is
	 * not a probe rate; if the selected rate is a probe rate, avoid
	 * aggregating this packet.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
		return 0;

	if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
		aggr_limit = min((max_4ms_framelen * 3) / 8,
				 (u32)ATH_AMPDU_LIMIT_MAX);
	else
		aggr_limit = min(max_4ms_framelen,
				 (u32)ATH_AMPDU_LIMIT_MAX);

	/*
	 * h/w can accept aggregates up to 16 bit lengths (65535).
	 * The IE, however, can hold up to 65536, which shows up here
	 * as zero. Ignore 65536 since we are constrained by hw.
	 */
	if (tid->an->maxampdu)
		aggr_limit = min(aggr_limit, tid->an->maxampdu);

	return aggr_limit;
}
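
/*
 * Illustrative sketch (not part of the driver): with Bluetooth
 * coexistence active, the aggregate budget above is scaled to 3/8 of the
 * 4 ms limit, keeping A-MPDUs short enough to leave air time for BT.
 * For example, a 32172-byte 4 ms budget becomes 12064 bytes.
 */
#if 0
static u32 demo_aggr_limit(u32 max_4ms_framelen, bool bt_priority)
{
	u32 limit = bt_priority ? (max_4ms_framelen * 3) / 8
				: max_4ms_framelen;

	/* never exceed what the hardware can aggregate */
	return min(limit, (u32)ATH_AMPDU_LIMIT_MAX);
}
#endif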

/*
 * Returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 */
static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
				  struct ath_buf *bf, u16 frmlen)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	u32 nsymbits, nsymbols;
	u16 minlen;
	u8 flags, rix;
	int width, half_gi, ndelim, mindelim;

	/* Select standard number of delimiters based on frame length alone */
	ndelim = ATH_AGGR_GET_NDELIM(frmlen);

	/*
	 * If encryption is enabled, hardware requires some more padding
	 * between subframes.
	 * TODO - this could be improved to be dependent on the rate.
	 *      The hardware can keep up at lower rates, but not higher rates.
	 */
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
		ndelim += ATH_AGGR_ENCRYPTDELIM;

	/*
	 * Convert desired mpdu density from microseconds to bytes based
	 * on highest rate in rate series (i.e. first rate) to determine
	 * required minimum length for subframe. Take into account
	 * whether high rate is 20 or 40 MHz and half or full GI.
	 *
	 * If there is no mpdu density restriction, no further calculation
	 * is needed.
	 */

	if (tid->an->mpdudensity == 0)
		return ndelim;

	rix = tx_info->control.rates[0].idx;
	flags = tx_info->control.rates[0].flags;
	width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

	if (half_gi)
		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
	else
		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);

	if (nsymbols == 0)
		nsymbols = 1;

	nsymbits = bits_per_symbol[rix][width];
	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

	if (frmlen < minlen) {
		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
		ndelim = max(mindelim, ndelim);
	}

	return ndelim;
}
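
/*
 * Illustrative sketch (not part of the driver): a worked example of the
 * calculation above. With an MPDU density of 8 us at MCS 15, 40 MHz,
 * long GI: 8 us is 2 symbols, and 2 * 1080 bits = 270 bytes of minimum
 * spacing. A 100-byte subframe then needs (270 - 100) / 4 = 42 extra
 * 4-byte delimiters.
 */
#if 0
static int demo_min_delims(u16 frmlen)
{
	u32 nsymbols = NUM_SYMBOLS_PER_USEC(8);			/* 2 */
	u32 nsymbits = bits_per_symbol[15][1];			/* 1080 */
	u16 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;	/* 270 */

	if (frmlen >= minlen)
		return 0;

	return (minlen - frmlen) / ATH_AGGR_DELIM_SZ;	/* 42 for 100 B */
}
#endif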

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
					     struct ath_txq *txq,
					     struct ath_atx_tid *tid,
					     struct list_head *bf_q)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
	int rl = 0, nframes = 0, ndelim, prev_al = 0;
	u16 aggr_limit = 0, al = 0, bpad = 0,
	    al_delta, h_baw = tid->baw_size / 2;
	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;

	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

	do {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* do not step over block-ack window */
		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
			status = ATH_AGGR_BAW_CLOSED;
			break;
		}

		if (!rl) {
			aggr_limit = ath_lookup_rate(sc, bf, tid);
			rl = 1;
		}

		/* do not exceed aggregation limit */
		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

		if (nframes &&
		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
			status = ATH_AGGR_LIMITED;
			break;
		}

		/* do not exceed subframe limit */
		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
			status = ATH_AGGR_LIMITED;
			break;
		}
		nframes++;

		/* add padding for previous frame to aggregation length */
		al += bpad + al_delta;

		/*
		 * Get the delimiters needed to meet the MPDU
		 * density for this node.
		 */
		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
		bpad = PADBYTES(al_delta) + (ndelim << 2);

		bf->bf_next = NULL;
		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);

		/* link buffers of this frame to the aggregate */
		ath_tx_addto_baw(sc, tid, bf);
		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
		list_move_tail(&bf->list, bf_q);
		if (bf_prev) {
			bf_prev->bf_next = bf;
			ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
					       bf->bf_daddr);
		}
		bf_prev = bf;

	} while (!list_empty(&tid->buf_q));

	bf_first->bf_al = al;
	bf_first->bf_nframes = nframes;

	return status;
#undef PADBYTES
}
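
/*
 * Illustrative sketch (not part of the driver): every subframe in the
 * aggregate above is padded out to a 4-byte boundary; PADBYTES() is the
 * usual "round up, modulo" idiom and is 0 for lengths that are already
 * aligned.
 */
#if 0
static int demo_padbytes(int len)
{
	return (4 - (len % 4)) % 4;	/* 97 -> 3, 98 -> 2, 100 -> 0 */
}
#endif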

static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	enum ATH_AGGR_STATUS status;
	struct list_head bf_q;

	do {
		if (list_empty(&tid->buf_q))
			return;

		INIT_LIST_HEAD(&bf_q);

		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);

		/*
		 * no frames picked up to be aggregated;
		 * block-ack window is not open.
		 */
		if (list_empty(&bf_q))
			break;

		bf = list_first_entry(&bf_q, struct ath_buf, list);
		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);

		/* if only one frame, send as non-aggregate */
		if (bf->bf_nframes == 1) {
			bf->bf_state.bf_type &= ~BUF_AGGR;
			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
			ath_buf_set_rate(sc, bf);
			ath_tx_txqaddbuf(sc, txq, &bf_q);
			continue;
		}

		/* setup first desc of aggregate */
		bf->bf_state.bf_type |= BUF_AGGR;
		ath_buf_set_rate(sc, bf);
		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

		/* anchor last desc of aggregate */
		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);

		ath_tx_txqaddbuf(sc, txq, &bf_q);
		TX_STAT_INC(txq->axq_qnum, a_aggr);

	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
		 status != ATH_AGGR_BAW_CLOSED);
}

void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
		       u16 tid, u16 *ssn)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;
	txtid = ATH_AN_2_TID(an, tid);
	txtid->state |= AGGR_ADDBA_PROGRESS;
	ath_tx_pause_tid(sc, txtid);
	*ssn = txtid->seq_start;
}

void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
	struct ath_tx_status ts;
	struct ath_buf *bf;
	struct list_head bf_head;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	if (txtid->state & AGGR_CLEANUP)
		return;

	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		return;
	}

	ath_tx_pause_tid(sc, txtid);

	/* drop all software retried frames and mark this TID */
	spin_lock_bh(&txq->axq_lock);
	while (!list_empty(&txtid->buf_q)) {
		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
		if (!bf_isretried(bf)) {
			/*
			 * NB: this relies on the assumption that a
			 * software-retried frame always stays at the
			 * head of the software queue.
			 */
			break;
		}
		list_move_tail(&bf->list, &bf_head);
		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}
	spin_unlock_bh(&txq->axq_lock);

	if (txtid->baw_head != txtid->baw_tail) {
		txtid->state |= AGGR_CLEANUP;
	} else {
		txtid->state &= ~AGGR_ADDBA_COMPLETE;
		ath_tx_flush_tid(sc, txtid);
	}
}

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
	struct ath_atx_tid *txtid;
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		txtid = ATH_AN_2_TID(an, tid);
		txtid->baw_size =
			IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
		txtid->state |= AGGR_ADDBA_COMPLETE;
		txtid->state &= ~AGGR_ADDBA_PROGRESS;
		ath_tx_resume_tid(sc, txtid);
	}
}

bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
	struct ath_atx_tid *txtid;

	if (!(sc->sc_flags & SC_OP_TXAGGR))
		return false;

	txtid = ATH_AN_2_TID(an, tidno);

	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
		return true;
	return false;
}

/********************/
/* Queue Management */
/********************/

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
					  struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		list_del(&ac->list);
		ac->sched = false;
		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
			list_del(&tid->list);
			tid->sched = false;
			ath_tid_drain(sc, txq, tid);
		}
	}
}

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
				TXQ_FLAG_TXERRINT_ENABLE;
	} else {
		if (qtype == ATH9K_TX_QUEUE_UAPSD)
			qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
		else
			qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
					TXQ_FLAG_TXDESCINT_ENABLE;
	}
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->tx.txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		spin_lock_init(&txq->axq_lock);
		txq->axq_depth = 0;
		txq->axq_tx_inprogress = false;
		sc->tx.txqsetup |= 1<<qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[qnum];
}

int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
				  "HAL AC %u out of range, max %zu!\n",
				  haltype, ARRAY_SIZE(sc->tx.hwq_map));
			return -1;
		}
		qnum = sc->tx.hwq_map[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->beacon.beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->beacon.cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}

	return qnum;
}

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_txq *txq = NULL;
	u16 skb_queue = skb_get_queue_mapping(skb);
	int qnum;

	qnum = ath_get_hal_qnum(skb_queue, sc);
	txq = &sc->tx.txq[qnum];

	spin_lock_bh(&txq->axq_lock);

	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
			  "TX queue: %d is full, depth: %d\n",
			  qnum, txq->axq_depth);
		ath_mac80211_stop_queue(sc, skb_queue);
		txq->stopped = 1;
		spin_unlock_bh(&txq->axq_lock);
		return NULL;
	}

	spin_unlock_bh(&txq->axq_lock);

	return txq;
}

int ath_txq_update(struct ath_softc *sc, int qnum,
		   struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->beacon.beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by ath_beaconq_config when
		 * it's necessary.
		 */
		sc->beacon.beacon_qi = *qinfo;
		return 0;
	}

	BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);

	ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

int ath_cabq_update(struct ath_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->beacon.cabq->axq_qnum;

	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	qi.tqi_readyTime = (sc->beacon_interval *
			    sc->config.cabqReadytime) / 100;
	ath_txq_update(sc, qnum, &qi);

	return 0;
}
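
/*
 * Illustrative sketch (not part of the driver): tqi_readyTime above is
 * just a percentage of the beacon interval. For example, a 100 TU beacon
 * interval with cabqReadytime = 10 gives the CAB queue 10 TU after each
 * beacon to drain buffered multicast frames.
 */
#if 0
static u32 demo_cabq_readytime(u32 beacon_interval, u32 readytime_pct)
{
	return (beacon_interval * readytime_pct) / 100;	/* (100, 10) -> 10 */
}
#endif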

/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
				txq->txq_headidx = txq->txq_tailidx = 0;
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
						      struct ath_buf, list);
			}
		} else {
			if (list_empty(&txq->axq_q)) {
				txq->axq_link = NULL;
				spin_unlock_bh(&txq->axq_lock);
				break;
			}
			bf = list_first_entry(&txq->axq_q, struct ath_buf,
					      list);

			if (bf->bf_stale) {
				list_del(&bf->list);
				spin_unlock_bh(&txq->axq_lock);

				ath_tx_return_buffer(sc, bf);
				continue;
			}
		}

		lastbf = bf->bf_lastbf;
		if (!retry_tx)
			lastbf->bf_tx_aborted = true;

		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
			list_cut_position(&bf_head,
					  &txq->txq_fifo[txq->txq_tailidx],
					  &lastbf->list);
			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		} else {
			/* remove ath_buf's of the same mpdu from txq */
			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
		}

		txq->axq_depth--;

		spin_unlock_bh(&txq->axq_lock);

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	spin_lock_bh(&txq->axq_lock);
	txq->axq_tx_inprogress = false;
	spin_unlock_bh(&txq->axq_lock);

	/* flush any pending frames if aggregation is enabled */
	if (sc->sc_flags & SC_OP_TXAGGR) {
		if (!retry_tx) {
			spin_lock_bh(&txq->axq_lock);
			ath_txq_drain_pending_buffers(sc, txq);
			spin_unlock_bh(&txq->axq_lock);
		}
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		spin_lock_bh(&txq->axq_lock);
		while (!list_empty(&txq->txq_fifo_pending)) {
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head,
					  &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			spin_unlock_bh(&txq->axq_lock);

			if (bf_isampdu(bf))
				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
						     &ts, 0);
			else
				ath_tx_complete_buf(sc, bf, txq, &bf_head,
						    &ts, 0, 0);
			spin_lock_bh(&txq->axq_lock);
		}
		spin_unlock_bh(&txq->axq_lock);
	}
}

void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return;

	/* Stop beacon queue */
	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);

	/* Stop data queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
		}
	}

	if (npend) {
		int r;

		ath_print(common, ATH_DBG_FATAL,
			  "Unable to stop TxDMA. Reset HAL!\n");

		spin_lock_bh(&sc->sc_resetlock);
		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
		if (r)
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to reset hardware; reset status %d\n",
				  r);
		spin_unlock_bh(&sc->sc_resetlock);
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i))
			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
	}
}

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_atx_ac *ac;
	struct ath_atx_tid *tid;

	if (list_empty(&txq->axq_acq))
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	list_del(&ac->list);
	ac->sched = false;

	do {
		if (list_empty(&ac->tid_q))
			return;

		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
		list_del(&tid->list);
		tid->sched = false;

		if (tid->paused)
			continue;

		ath_tx_sched_aggr(sc, txq, tid);

		/*
		 * add tid to round-robin queue if more frames
		 * are pending for the tid
		 */
		if (!list_empty(&tid->buf_q))
			ath_tx_queue_tid(txq, tid);

		break;
	} while (!list_empty(&ac->tid_q));

	if (!list_empty(&ac->tid_q)) {
		ac->sched = true;
		list_add_tail(&ac->list, &txq->axq_acq);
	}
}

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "HAL AC %u out of range, max %zu!\n",
			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
		return 0;
	}
	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->tx.hwq_map[haltype] = txq->axq_qnum;
		return 1;
	} else
		return 0;
}

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	ath_print(common, ATH_DBG_QUEUE,
		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
			list_splice_tail_init(head, &txq->txq_fifo_pending);
			return;
		}
		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
			ath_print(common, ATH_DBG_XMIT,
				  "Initializing tx fifo %d which "
				  "is non-empty\n",
				  txq->txq_headidx);
		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		ath_print(common, ATH_DBG_XMIT,
			  "TXDP[%u] = %llx (%p)\n",
			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
	} else {
		list_splice_tail_init(head, &txq->axq_q);

		if (txq->axq_link == NULL) {
			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			ath_print(common, ATH_DBG_XMIT,
				  "TXDP[%u] = %llx (%p)\n",
				  txq->axq_qnum, ito64(bf->bf_daddr),
				  bf->bf_desc);
		} else {
			*txq->axq_link = bf->bf_daddr;
			ath_print(common, ATH_DBG_XMIT,
				  "link[%u] (%p)=%llx (%p)\n",
				  txq->axq_qnum, txq->axq_link,
				  ito64(bf->bf_daddr), bf->bf_desc);
		}
		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
				       &txq->axq_link);
		ath9k_hw_txstart(ah, txq->axq_qnum);
	}
	txq->axq_depth++;
}

static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
			      struct list_head *bf_head,
			      struct ath_tx_control *txctl)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type |= BUF_AMPDU;
	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);

	/*
	 * Do not queue to h/w when any of the following conditions is true:
	 * - there are pending frames in software queue
	 * - the TID is currently paused for ADDBA/BAR request
	 * - seqno is not within block-ack window
	 * - h/w queue depth exceeds low water mark
	 */
	if (!list_empty(&tid->buf_q) || tid->paused ||
	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
		/*
		 * Add this frame to software queue for scheduling later
		 * for aggregation.
		 */
		list_move_tail(&bf->list, &tid->buf_q);
		ath_tx_queue_tid(txctl->txq, tid);
		return;
	}

	/* Add sub-frame to BAW */
	ath_tx_addto_baw(sc, tid, bf);

	/* Queue to h/w without aggregation */
	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
}

static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
				  struct ath_atx_tid *tid,
				  struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);

	bf->bf_lastbf = bf;
	bf->bf_nframes = 1;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath9k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = ATH9K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = ATH9K_PKT_TYPE_PSPOLL;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}

static int get_hw_crypto_keytype(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (tx_info->control.hw_key) {
		if (tx_info->control.hw_key->alg == ALG_WEP)
			return ATH9K_KEY_TYPE_WEP;
		else if (tx_info->control.hw_key->alg == ALG_TKIP)
			return ATH9K_KEY_TYPE_TKIP;
		else if (tx_info->control.hw_key->alg == ALG_CCMP)
			return ATH9K_KEY_TYPE_AES;
	}

	return ATH9K_KEY_TYPE_CLEAR;
}

static void assign_aggr_tid_seqno(struct sk_buff *skb,
				  struct ath_buf *bf)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	struct ath_node *an;
	struct ath_atx_tid *tid;
	__le16 fc;
	u8 *qc;

	if (!tx_info->control.sta)
		return;

	an = (struct ath_node *)tx_info->control.sta->drv_priv;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		bf->bf_tidno = qc[0] & 0xf;
	}

	/*
	 * For HT capable stations, we save tidno for later use.
	 * We also override seqno set by upper layer with the one
	 * in tx aggregation state.
	 */
	tid = ATH_AN_2_TID(an, bf->bf_tidno);
	hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
	bf->bf_seqno = tid->seq_next;
	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	int flags = 0;

	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
	flags |= ATH9K_TXDESC_INTREQ;

	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= ATH9K_TXDESC_NOACK;

	if (use_ldpc)
		flags |= ATH9K_TXDESC_LDPC;

	return flags;
}

/*
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - 1 to use the 3.6 us (short GI) symbol time instead of 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
			    int width, int half_gi, bool shortPreamble)
{
	u32 nbits, nsymbits, duration, nsymbols;
	int streams, pktlen;

	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;

	/* find number of symbols: PLCP + data */
	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	nsymbits = bits_per_symbol[rix][width];
	nsymbols = (nbits + nsymbits - 1) / nsymbits;

	if (!half_gi)
		duration = SYMBOL_TIME(nsymbols);
	else
		duration = SYMBOL_TIME_HALFGI(nsymbols);

	/* add up duration for legacy/ht training and signal fields */
	streams = HT_RC_2_STREAMS(rix);
	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

	return duration;
}
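
/*
 * Illustrative sketch (not part of the driver): for a 1500-byte frame at
 * MCS 7, 20 MHz, long GI: nbits = 1500*8 + 22 = 12022 and 260 bits per
 * symbol give ceil(12022/260) = 47 symbols, i.e. 188 us of data, on top
 * of the fixed legacy and HT preamble fields added above.
 */
#if 0
static u32 demo_data_symbols(int pktlen, u8 rix, int width)
{
	u32 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
	u32 nsymbits = bits_per_symbol[rix][width];

	return (nbits + nsymbits - 1) / nsymbits;	/* (1500,7,0) -> 47 */
}
#endif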

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_tx_rate *rates;
	const struct ieee80211_rate *rate;
	struct ieee80211_hdr *hdr;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	bool is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	skb = bf->bf_mpdu;
	tx_info = IEEE80211_SKB_CB(skb);
	rates = tx_info->control.rates;
	hdr = (struct ieee80211_hdr *)skb->data;
	is_pspoll = ieee80211_is_pspoll(hdr->frame_control);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		bool is_40, is_sgi, is_sp;
		int phy;

		if (!rates[i].count || (rates[i].idx < 0))
			continue;

		rix = rates[i].idx;
		series[i].Tries = rates[i].count;
		series[i].ChSel = common->tx_chainmask;

		if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
		    (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_RTSENA;
		} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			series[i].RateFlags |= ATH9K_RATESERIES_2040;
		if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
			series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;

		is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI);
		is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH);
		is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		if (rates[i].flags & IEEE80211_TX_RC_MCS) {
			/* MCS rates */
			series[i].Rate = rix | 0x80;
			series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
				 is_40, is_sgi, is_sp);
			continue;
		}

		/* legacy rates */
		if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short) {
			if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = false;
		}

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);

	if (sc->config.ath_aggr_prot && flags)
		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
}

static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
			       struct sk_buff *skb,
			       struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int hdrlen;
	__le16 fc;
	int padpos, padsize;
	bool use_ldpc = false;

	tx_info->pad[0] = 0;
	switch (txctl->frame_type) {
	case ATH9K_IFT_NOT_INTERNAL:
		break;
	case ATH9K_IFT_PAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
		/* fall through */
	case ATH9K_IFT_UNPAUSE:
		tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
		break;
	}
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;

	ATH_TXBUF_RESET(bf);

	bf->aphy = aphy;
	bf->bf_frmlen = skb->len + FCS_LEN;
	/* Remove the padding size from bf_frmlen, if any */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		bf->bf_frmlen -= padsize;
	}

	if (conf_is_ht(&hw->conf)) {
		bf->bf_state.bf_type |= BUF_HT;
		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
			use_ldpc = true;
	}

	bf->bf_flags = setup_tx_flags(skb, use_ldpc);

	bf->bf_keytype = get_hw_crypto_keytype(skb);
	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
		bf->bf_frmlen += tx_info->control.hw_key->icv_len;
		bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
	} else {
		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
	}

	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
	    (sc->sc_flags & SC_OP_TXAGGR))
		assign_aggr_tid_seqno(skb, bf);

	bf->bf_mpdu = skb;

	bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
					   skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
		bf->bf_mpdu = NULL;
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
			  "dma_mapping_error() on TX\n");
		return -ENOMEM;
	}

	bf->bf_buf_addr = bf->bf_dmacontext;

	/* tag if this is a nullfunc frame to enable PS when AP acks it */
	if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
		bf->bf_isnullfunc = true;
		sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
	} else
		bf->bf_isnullfunc = false;

	return 0;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ath_node *an = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_atx_tid *tid;
	struct ath_hw *ah = sc->sc_ah;
	int frm_type;
	__le16 fc;

	frm_type = get_hw_packet_type(skb);
	fc = hdr->frame_control;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
			       bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    skb->len,	/* segment length */
			    true,	/* first segment */
			    true,	/* last segment */
			    ds,		/* first descriptor */
			    bf->bf_buf_addr,
			    txctl->txq->axq_qnum);

	spin_lock_bh(&txctl->txq->axq_lock);

	if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
	    tx_info->control.sta) {
		an = (struct ath_node *)tx_info->control.sta->drv_priv;
		tid = ATH_AN_2_TID(an, bf->bf_tidno);

		if (!ieee80211_is_data_qos(fc)) {
			ath_tx_send_normal(sc, txctl->txq, &bf_head);
			goto tx_done;
		}

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			/*
			 * Try aggregation if it's a unicast data frame
			 * and the destination is HT capable.
			 */
			ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
		} else {
			/*
			 * Send this frame as regular when ADDBA
			 * exchange is neither complete nor pending.
			 */
			ath_tx_send_ht_normal(sc, txctl->txq,
					      tid, &bf_head);
		}
	} else {
		ath_tx_send_normal(sc, txctl->txq, &bf_head);
	}

tx_done:
	spin_unlock_bh(&txctl->txq->axq_lock);
}

/* Upon failure caller should free skb */
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ath_tx_control *txctl)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	int r;

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
		return -1;
	}

	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
	if (unlikely(r)) {
		struct ath_txq *txq = txctl->txq;

		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");

		/* upon ath_tx_processq() this TX queue will be resumed; we
		 * guarantee this will happen by knowing beforehand that
		 * we will at least have to run TX completion on one buffer
		 * on the queue */
		spin_lock_bh(&txq->axq_lock);
		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
			txq->stopped = 1;
		}
		spin_unlock_bh(&txq->axq_lock);

		ath_tx_return_buffer(sc, bf);

		return r;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}

void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int padpos, padsize;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath_tx_control txctl;

	memset(&txctl, 0, sizeof(struct ath_tx_control));

	/*
	 * As a temporary workaround, assign seq# here; this will likely need
	 * to be cleaned up to work better with Beacon transmission and virtual
	 * BSSes.
	 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize) {
			ath_print(common, ATH_DBG_XMIT,
				  "TX CABQ padding failed\n");
			dev_kfree_skb_any(skb);
			return;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
	}

	txctl.txq = sc->beacon.cabq;

	ath_print(common, ATH_DBG_XMIT,
		  "transmitting CABQ packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
		goto exit;
	}

	return;
exit:
	dev_kfree_skb_any(skb);
}
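
/*
 * Illustrative sketch (not part of the driver): ath9k_cmn_padpos()
 * returns the header length that must stay 4-byte aligned. The driver
 * pads between header and payload so the payload starts on a word
 * boundary (as in ath_tx_cabq() above) and strips the same pad before
 * handing frames back in ath_tx_complete() below. A QoS data header is
 * 26 bytes, so padsize = 26 & 3 = 2.
 */
#if 0
static void demo_pad_header(struct sk_buff *skb, int padpos)
{
	int padsize = padpos & 3;	/* bytes needed to realign payload */

	if (padsize && skb->len > padpos) {
		skb_push(skb, padsize);			/* grow at the front */
		memmove(skb->data, skb->data + padsize, padpos);
	}
}
#endif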

static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
			    struct ath_wiphy *aphy, int tx_flags)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	int padpos, padsize;

	ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);

	if (aphy)
		hw = aphy->hw;

	if (tx_flags & ATH_TX_BAR)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

	if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
		/* Frame was ACKed */
		tx_info->flags |= IEEE80211_TX_STAT_ACK;
	}

	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos + padsize) {
		/*
		 * Remove MAC header padding before giving the frame back to
		 * mac80211.
		 */
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
		sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having "
			  "received TX status (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}

	if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
		ath9k_tx_status(hw, skb);
	else
		ieee80211_tx_status(hw, skb);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct sk_buff *skb = bf->bf_mpdu;
	unsigned long flags;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
	ath_debug_stat_tx(sc, txq, bf, ts);

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	spin_lock_irqsave(&sc->tx.txbuflock, flags);
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
			      struct ath_tx_status *ts, int txok)
{
	u16 seq_st = 0;
	u32 ba[WME_BA_BMP_SIZE >> 5];
	int ba_index;
	int nbad = 0;
	int isaggr = 0;

	if (bf->bf_tx_aborted)
		return 0;

	isaggr = bf_isaggr(bf);
	if (isaggr) {
		seq_st = ts->ts_seqnum;
		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
			nbad++;

		bf = bf->bf_next;
	}

	return nbad;
}
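
/*
 * Illustrative sketch (not part of the driver): the block-ack bitmap is
 * a 64-bit field stored as two u32 words; a subframe counts as acked
 * when the bit at its distance from the reported starting sequence
 * number is set, which is what ATH_BA_ISSET() checks above.
 */
#if 0
static int demo_ba_isset(u32 *ba, u16 seq_st, u16 seqno)
{
	int idx = (seqno - seq_st) & (IEEE80211_SEQ_MAX - 1);

	if (idx >= WME_BA_BMP_SIZE)
		return 0;	/* outside the 64-frame window */

	return (ba[idx >> 5] >> (idx & 31)) & 1;
}
#endif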

static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
			     int nbad, int txok, bool update_rc)
{
	struct sk_buff *skb = bf->bf_mpdu;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = bf->aphy->hw;
	u8 i, tx_rateindex;

	if (txok)
		tx_info->status.ack_signal = ts->ts_rssi;

	tx_rateindex = ts->ts_rateindex;
	WARN_ON(tx_rateindex >= hw->max_rates);

	if (ts->ts_status & ATH9K_TXERR_FILT)
		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;

	if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
		if (ieee80211_is_data(hdr->frame_control)) {
			if (ts->ts_flags &
			    (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
				tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
			if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
			    (ts->ts_status & ATH9K_TXERR_FIFO))
				tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
			tx_info->status.ampdu_len = bf->bf_nframes;
			tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
		}
	}

	for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
		tx_info->status.rates[i].count = 0;
		tx_info->status.rates[i].idx = -1;
	}

	tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
}

static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
{
	int qnum;

	spin_lock_bh(&txq->axq_lock);
	if (txq->stopped &&
	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
		if (qnum != -1) {
			ath_mac80211_start_queue(sc, qnum);
			txq->stopped = 0;
		}
	}
	spin_unlock_bh(&txq->axq_lock);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		  txq->axq_link);

	for (;;) {
		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			spin_unlock_bh(&txq->axq_lock);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-loads the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				spin_unlock_bh(&txq->axq_lock);
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			spin_unlock_bh(&txq->axq_lock);
			break;
		}

		/*
		 * We now know the nullfunc frame has been ACKed so we
		 * can disable RX.
		 */
		if (bf->bf_isnullfunc &&
		    (ts.ts_status & ATH9K_TX_ACKED)) {
			if ((sc->ps_flags & PS_ENABLED))
				ath9k_enable_ps(sc);
			else
				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = true;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = false;
		if (bf_held)
			list_del(&bf_held->list);
		spin_unlock_bh(&txq->axq_lock);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ts.ts_longretry;
			if (ts.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &ts, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		ath_wake_mac80211_queue(sc, txq);

		spin_lock_bh(&txq->axq_lock);
		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
			tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			spin_lock_bh(&txq->axq_lock);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					spin_unlock_bh(&txq->axq_lock);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			spin_unlock_bh(&txq->axq_lock);
		}

	if (needreset) {
		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
			  "tx hung, resetting the chip\n");
		ath9k_ps_wakeup(sc);
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

void ath_tx_edma_tasklet(struct ath_softc *sc)
{
	struct ath_tx_status txs;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	struct ath_buf *bf, *lastbf;
	struct list_head bf_head;
	int status;
	int txok;

	for (;;) {
		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
		if (status == -EINPROGRESS)
			break;
		if (status == -EIO) {
			ath_print(common, ATH_DBG_XMIT,
				  "Error processing tx status\n");
			break;
		}

		/* Skip beacon completions */
		if (txs.qid == sc->beacon.beaconq)
			continue;

		txq = &sc->tx.txq[txs.qid];

		spin_lock_bh(&txq->axq_lock);
		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
			spin_unlock_bh(&txq->axq_lock);
			return;
		}

		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
				      struct ath_buf, list);
		lastbf = bf->bf_lastbf;

		INIT_LIST_HEAD(&bf_head);
		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
				  &lastbf->list);
		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
		txq->axq_depth--;
		txq->axq_tx_inprogress = false;
		spin_unlock_bh(&txq->axq_lock);

		txok = !(txs.ts_status & ATH9K_TXERR_MASK);

		if (!bf_isampdu(bf)) {
			bf->bf_retries = txs.ts_longretry;
			if (txs.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			ath_tx_rc_status(bf, &txs, 0, txok, true);
		}

		if (bf_isampdu(bf))
			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
		else
			ath_tx_complete_buf(sc, bf, txq, &bf_head,
					    &txs, txok, 0);

		spin_lock_bh(&txq->axq_lock);
		if (!list_empty(&txq->txq_fifo_pending)) {
			INIT_LIST_HEAD(&bf_head);
			bf = list_first_entry(&txq->txq_fifo_pending,
					      struct ath_buf, list);
			list_cut_position(&bf_head, &txq->txq_fifo_pending,
					  &bf->bf_lastbf->list);
			ath_tx_txqaddbuf(sc, txq, &bf_head);
		} else if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
		spin_unlock_bh(&txq->axq_lock);
	}
}

static int ath_txstatus_setup(struct ath_softc *sc, int size)
{
	struct ath_descdma *dd = &sc->txsdma;
	u8 txs_len = sc->sc_ah->caps.txs_len;

	dd->dd_desc_len = size * txs_len;
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	return 0;
}

static int ath_tx_edma_init(struct ath_softc *sc)
{
	int err;

	err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
	if (!err)
		ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
					  sc->txsdma.dd_desc_paddr,
					  ATH_TXSTATUS_RING_SIZE);

	return err;
}

static void ath_tx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_descdma *dd = &sc->txsdma;

	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
}

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int error = 0;

	spin_lock_init(&sc->tx.txbuflock);

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
				  "beacon", ATH_BCBUF, 1, 1);
	if (error != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Failed to allocate beacon descriptors: %d\n", error);
		goto err;
	}

	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		error = ath_tx_edma_init(sc);
		if (error)
			goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}

void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->beacon.bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_tx_edma_cleanup(sc);
}

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno];
	     tidno < WME_NUM_TID;
	     tidno++, tid++) {
		tid->an        = an;
		tid->tidno     = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched     = false;
		tid->paused    = false;
		tid->state &= ~AGGR_CLEANUP;
		INIT_LIST_HEAD(&tid->buf_q);
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
	}

	for (acno = 0, ac = &an->ac[acno];
	     acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = false;
		INIT_LIST_HEAD(&ac->tid_q);

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = ath_tx_get_qnum(sc,
				   ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];

			spin_lock_bh(&txq->axq_lock);

			list_for_each_entry_safe(ac,
					ac_tmp, &txq->axq_acq, list) {
				tid = list_first_entry(&ac->tid_q,
						struct ath_atx_tid, list);
				if (tid && tid->an != an)
					continue;
				list_del(&ac->list);
				ac->sched = false;

				list_for_each_entry_safe(tid,
						tid_tmp, &ac->tid_q, list) {
					list_del(&tid->list);
					tid->sched = false;
					ath_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			spin_unlock_bh(&txq->axq_lock);
		}
	}
}