2 * Copyright (c) 2008 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * Implementation of receive path.
24 * Setup and link descriptors.
26 * 11N: we can no longer afford to self link the last descriptor.
27 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked.
31 * NOTE: Caller should hold the rxbuf lock.
/*
 * Chain a receive buffer's descriptor onto the hardware rx list.
 * Caller must hold the rxbuf lock (see the note above this function).
 * NOTE(review): interior lines of this function are elided in this
 * view; only the visible statements are documented here.
 */
34 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
36 struct ath_hal *ah = sc->sc_ah;
/* Terminate the link field instead of self-linking the descriptor;
 * see the 11N/BA-status note above this function for why. */
43 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr;
47 * virtual addr of the beginning of the buffer. */
50 ds->ds_vdata = skb->data;
52 /* setup rx descriptors */
53 ath9k_hw_setuprxdesc(ah,
55 skb_tailroom(skb), /* buffer size */
/* First buffer in the chain: hand its physical address straight to
 * the hardware; otherwise link the previous descriptor to this one. */
58 if (sc->sc_rxlink == NULL)
59 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
61 *sc->sc_rxlink = bf->bf_daddr;
63 sc->sc_rxlink = &ds->ds_link; /* chain point for the next buffer */
/*
 * Allocate an rx sk_buff whose data pointer is cache-line aligned.
 * Over-allocates by (sc_cachelsz - 1) bytes, then skb_reserve()s the
 * remainder so skb->data lands on a cache-line boundary.  Logs a
 * fatal debug message when the allocation fails.
 * NOTE(review): the tail of this function (return path) is elided in
 * this view.
 */
67 static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
74 * Cache-line-align. This is important (for the
75 * 5210 at least) as not doing so causes bogus data
79 skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
81 off = ((unsigned long) skb->data) % sc->sc_cachelsz;
83 skb_reserve(skb, sc->sc_cachelsz - off);
85 DPRINTF(sc, ATH_DBG_FATAL,
86 "%s: skbuff alloc of size %u failed\n",
/*
 * Return an skb's buffer to the hardware rx queue.  If the buffer is
 * still held by hardware (STALE), only mark it FREE so it gets
 * re-queued later; otherwise put it back on sc_rxbuf and link it into
 * the descriptor chain immediately.  Takes sc_rxbuflock internally.
 */
94 static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb)
96 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
100 spin_lock_bh(&sc->sc_rxbuflock);
101 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
103 * This buffer is still held for hw access.
104 * Mark it as free to be re-queued later.
106 bf->bf_status |= ATH_BUFSTATUS_FREE;
108 /* XXX: we probably never enter here, remove after
110 list_add_tail(&bf->list, &sc->sc_rxbuf);
111 ath_rx_buf_link(sc, bf);
113 spin_unlock_bh(&sc->sc_rxbuflock);
117 * The skb indicated to upper stack won't be returned to us.
118 * So we have to allocate a new one and queue it by ourselves.
/*
 * Hand a completed frame up to the stack (which consumes/frees the
 * skb), then allocate a replacement skb, DMA-map it, attach it to the
 * same ath_buf and requeue it to the hardware.
 * Returns the frame type reported by _ath_rx_indicate().
 */
120 static int ath_rx_indicate(struct ath_softc *sc,
122 struct ath_recv_status *status,
125 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf;
126 struct sk_buff *nskb;
129 /* indicate frame to the stack, which will free the old skb. */
130 type = _ath_rx_indicate(sc, skb, status, keyix);
132 /* allocate a new skb and queue it to for H/W processing */
133 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
/* Map the whole usable skb area for DMA; the device writes the frame
 * directly into this buffer. */
136 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
137 skb_end_pointer(nskb) - nskb->head,
139 bf->bf_dmacontext = bf->bf_buf_addr;
140 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
142 /* queue the new wbuf to H/W */
143 ath_rx_requeue(sc, nskb);
/*
 * Program operating-mode dependent hardware state: rx filter, BSSID
 * mask (if the hardware supports it), opmode, MAC address, and the
 * multicast filter (set to accept-all here).
 */
149 static void ath_opmode_init(struct ath_softc *sc)
151 struct ath_hal *ah = sc->sc_ah;
154 /* configure rx filter */
155 rfilt = ath_calcrxfilter(sc);
156 ath9k_hw_setrxfilter(ah, rfilt);
158 /* configure bssid mask */
159 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
160 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
162 /* configure operational mode */
163 ath9k_hw_setopmode(ah);
165 /* Handle any link-level address change. */
166 ath9k_hw_setmac(ah, sc->sc_myaddr);
168 /* calculate and install multicast filter */
169 mfilt[0] = mfilt[1] = ~0; /* accept all multicast frames */
171 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
172 DPRINTF(sc, ATH_DBG_CONFIG ,
173 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
174 __func__, rfilt, mfilt[0], mfilt[1]);
/*
 * One-time rx path setup: initialize locks, compute the rx buffer
 * size, allocate the rx descriptor DMA area, and pre-allocate and
 * DMA-map one skb per rx buffer.  Returns 0 on success or a negative
 * errno from ath_descdma_setup()/allocation failure paths (error
 * return lines are elided in this view).
 */
177 int ath_rx_init(struct ath_softc *sc, int nbufs)
184 spin_lock_init(&sc->sc_rxflushlock);
185 sc->sc_flags &= ~SC_OP_RXFLUSH;
186 spin_lock_init(&sc->sc_rxbuflock);
189 * Cisco's VPN software requires that drivers be able to
190 * receive encapsulated frames that are larger than the MTU.
191 * Since we can't be sure how large a frame we'll get, setup
192 * to handle the largest frame possible.
194 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
198 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n",
199 __func__, sc->sc_cachelsz, sc->sc_rxbufsize);
201 /* Initialize rx descriptors */
203 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
206 DPRINTF(sc, ATH_DBG_FATAL,
207 "%s: failed to allocate rx descriptors: %d\n",
212 /* Pre-allocate a wbuf for each rx buffer */
214 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
215 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
/* Map the full skb data area for DMA and tie skb and ath_buf
 * together via the rx context. */
222 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
223 skb_end_pointer(skb) - skb->head,
225 bf->bf_dmacontext = bf->bf_buf_addr;
226 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
228 sc->sc_rxlink = NULL; /* no descriptors chained yet */
238 /* Reclaim all rx queue resources */
/*
 * Undo ath_rx_init(): walk the rx buffer list freeing per-buffer
 * resources (loop body elided in this view), then release the rx
 * descriptor DMA area if it was allocated.
 */
240 void ath_rx_cleanup(struct ath_softc *sc)
245 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
251 /* cleanup rx descriptors */
253 if (sc->sc_rxdma.dd_desc_len != 0)
254 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
258 * Calculate the receive filter according to the
259 * operating mode and state:
261 * o always accept unicast, broadcast, and multicast traffic
262 * o maintain current state of phy error reception (the hal
263 * may enable phy error frames for noise immunity work)
264 * o probe request frames are accepted only when operating in
265 * hostap, adhoc, or monitor modes
266 * o enable promiscuous mode according to the interface state
268 * - when operating in adhoc mode so the 802.11 layer creates
269 * node table entries for peers,
270 * - when operating in station mode for collecting rssi data when
271 * the station is otherwise quiet, or
272 * - when operating as a repeater so we see repeater-sta beacons
/*
 * Compute the hardware rx filter word for the current operating mode
 * and interface state; see the policy comment above this function.
 * Preserves any PHY-error/radar bits already set in hardware.
 */
276 u32 ath_calcrxfilter(struct ath_softc *sc)
278 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
/* Always accept unicast/broadcast/multicast; keep PHY error bits as
 * currently programmed (used for noise immunity / radar detection). */
282 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
283 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
284 | ATH9K_RX_FILTER_MCAST;
286 /* If not a STA, enable processing of Probe Requests */
287 if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
288 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
290 /* Can't set HOSTAP into promiscuous mode */
291 if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
292 (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
293 (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
294 rfilt |= ATH9K_RX_FILTER_PROM;
295 /* ??? To prevent from sending ACK */
296 rfilt &= ~ATH9K_RX_FILTER_UCAST;
/* STA and IBSS need beacons (e.g. for scanning/synchronization). */
299 if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
300 sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
301 rfilt |= ATH9K_RX_FILTER_BEACON;
303 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
305 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
306 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
309 #undef RX_FILTER_PRESERVE
312 /* Enable the receive h/w following a reset. */
/*
 * (Re)start receive after a reset: rebuild the descriptor chain from
 * sc_rxbuf, point the hardware at the first descriptor, enable rx,
 * reprogram filters via ath_opmode_init(), and re-enable the PCU.
 * STALE buffers are un-staled (hardware is being restarted), and
 * buffers still owned by upper layers (not FREE) are taken off the
 * h/w queue until they are returned (removal lines elided here).
 */
314 int ath_startrecv(struct ath_softc *sc)
316 struct ath_hal *ah = sc->sc_ah;
317 struct ath_buf *bf, *tbf;
319 spin_lock_bh(&sc->sc_rxbuflock);
320 if (list_empty(&sc->sc_rxbuf))
323 sc->sc_rxlink = NULL; /* rebuild the chain from scratch */
324 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
325 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
326 /* restarting h/w, no need for holding descriptors */
327 bf->bf_status &= ~ATH_BUFSTATUS_STALE;
329 * Upper layer may not be done with the frame yet so
330 * we can't just re-queue it to hardware. Remove it
331 * from h/w queue. It'll be re-queued when upper layer
332 * returns the frame and ath_rx_requeue_mpdu is called.
334 if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
339 /* chain descriptors */
340 ath_rx_buf_link(sc, bf);
343 /* We could have deleted elements so the list may be empty now */
344 if (list_empty(&sc->sc_rxbuf))
347 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
348 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
349 ath9k_hw_rxena(ah); /* enable recv descriptors */
352 spin_unlock_bh(&sc->sc_rxbuflock);
353 ath_opmode_init(sc); /* set filters, etc. */
354 ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */
358 /* Disable the receive h/w in preparation for a reset. */
/*
 * Disable receive before a reset: stop the PCU, clear the rx filter,
 * stop the rx DMA engine, then wait 3ms for any in-flight frame to
 * land.  Returns whether DMA actually stopped (return statement is
 * elided in this view; 'stopped' holds the ath9k_hw_stopdmarecv()
 * result).
 */
360 bool ath_stoprecv(struct ath_softc *sc)
362 struct ath_hal *ah = sc->sc_ah;
366 ath9k_hw_stoppcurecv(ah); /* disable PCU */
367 ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */
368 stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */
369 mdelay(3); /* 3ms is long enough for 1 frame */
370 tsf = ath9k_hw_gettsf64(ah);
371 sc->sc_rxlink = NULL; /* just in case */
375 /* Flush receive queue */
/*
 * Drain the receive queue: run the rx tasklet in flush mode (flush=1)
 * under sc_rxflushlock, with SC_OP_RXFLUSH set so a concurrently
 * scheduled ath_rx_tasklet() bails out instead of racing us.
 */
377 void ath_flushrecv(struct ath_softc *sc)
380 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
381 * queue at the same time. Use a lock to serialize the access of rx
383 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
384 * Instead, do not claim the spinlock but check for a flush in
385 * progress (see references to sc_rxflush)
387 spin_lock_bh(&sc->sc_rxflushlock);
388 sc->sc_flags |= SC_OP_RXFLUSH;
390 ath_rx_tasklet(sc, 1);
392 sc->sc_flags &= ~SC_OP_RXFLUSH;
393 spin_unlock_bh(&sc->sc_rxflushlock);
396 /* Process receive queue, as well as LED, etc. */
398 int ath_rx_tasklet(struct ath_softc *sc, int flush)
400 #define PA2DESC(_sc, _pa) \
401 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
402 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
404 struct ath_buf *bf, *bf_held = NULL;
406 struct ieee80211_hdr *hdr;
407 struct sk_buff *skb = NULL;
408 struct ath_recv_status rx_status;
409 struct ath_hal *ah = sc->sc_ah;
410 int type, rx_processed = 0;
417 /* If handling rx interrupt and flush is in progress => exit */
418 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
421 spin_lock_bh(&sc->sc_rxbuflock);
422 if (list_empty(&sc->sc_rxbuf)) {
423 sc->sc_rxlink = NULL;
424 spin_unlock_bh(&sc->sc_rxbuflock);
428 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
431 * There is a race condition that BH gets scheduled after sw
432 * writes RxE and before hw re-load the last descriptor to get
433 * the newly chained one. Software must keep the last DONE
434 * descriptor as a holding descriptor - software does so by
435 * marking it with the STALE flag.
437 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
439 if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
441 * The holding descriptor is the last
442 * descriptor in queue. It's safe to
443 * remove the last holding descriptor
446 list_del(&bf_held->list);
447 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
448 sc->sc_rxlink = NULL;
450 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
451 list_add_tail(&bf_held->list,
453 ath_rx_buf_link(sc, bf_held);
455 spin_unlock_bh(&sc->sc_rxbuflock);
458 bf = list_entry(bf->list.next, struct ath_buf, list);
465 * Must provide the virtual address of the current
466 * descriptor, the physical address, and the virtual
467 * address of the next descriptor in the h/w chain.
468 * This allows the HAL to look ahead to see if the
469 * hardware is done with a descriptor by checking the
470 * done bit in the following descriptor and the address
471 * of the current descriptor the DMA engine is working
472 * on. All this is necessary because of our use of
473 * a self-linked list to avoid rx overruns.
475 retval = ath9k_hw_rxprocdesc(ah,
478 PA2DESC(sc, ds->ds_link),
480 if (retval == -EINPROGRESS) {
482 struct ath_desc *tds;
484 if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
485 spin_unlock_bh(&sc->sc_rxbuflock);
489 tbf = list_entry(bf->list.next, struct ath_buf, list);
492 * On some hardware the descriptor status words could
493 * get corrupted, including the done bit. Because of
494 * this, check if the next descriptor's done bit is
497 * If the next descriptor's done bit is set, the current
498 * descriptor has been corrupted. Force s/w to discard
499 * this descriptor and continue...
503 retval = ath9k_hw_rxprocdesc(ah,
505 PA2DESC(sc, tds->ds_link), 0);
506 if (retval == -EINPROGRESS) {
507 spin_unlock_bh(&sc->sc_rxbuflock);
512 /* XXX: we do not support frames spanning
513 * multiple descriptors */
514 bf->bf_status |= ATH_BUFSTATUS_DONE;
517 if (skb == NULL) { /* XXX ??? can this happen */
518 spin_unlock_bh(&sc->sc_rxbuflock);
522 * Now we know it's a completed frame, we can indicate the
523 * frame. Remove the previous holding descriptor and leave
524 * this one in the queue as the new holding descriptor.
527 list_del(&bf_held->list);
528 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
529 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
530 list_add_tail(&bf_held->list, &sc->sc_rxbuf);
531 /* try to requeue this descriptor */
532 ath_rx_buf_link(sc, bf_held);
536 bf->bf_status |= ATH_BUFSTATUS_STALE;
539 * Release the lock here in case ieee80211_input() return
540 * the frame immediately by calling ath_rx_mpdu_requeue().
542 spin_unlock_bh(&sc->sc_rxbuflock);
546 * If we're asked to flush receive queue, directly
547 * chain it back at the queue without processing it.
552 hdr = (struct ieee80211_hdr *)skb->data;
553 fc = hdr->frame_control;
554 memset(&rx_status, 0, sizeof(struct ath_recv_status));
556 if (ds->ds_rxstat.rs_more) {
558 * Frame spans multiple descriptors; this
559 * cannot happen yet as we don't support
560 * jumbograms. If not in monitor mode,
565 * Enable this if you want to see
566 * error frames in Monitor mode.
568 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
571 /* fall thru for monitor mode handling... */
572 } else if (ds->ds_rxstat.rs_status != 0) {
573 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
574 rx_status.flags |= ATH_RX_FCS_ERROR;
575 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
576 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
580 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
582 * Decrypt error. We only mark packet status
583 * here and always push up the frame up to let
584 * mac80211 handle the actual error case, be
585 * it no decryption key or real decryption
586 * error. This let us keep statistics there.
588 rx_status.flags |= ATH_RX_DECRYPT_ERROR;
589 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
591 * Demic error. We only mark frame status here
592 * and always push up the frame up to let
593 * mac80211 handle the actual error case. This
594 * let us keep statistics there. Hardware may
595 * post a false-positive MIC error.
597 if (ieee80211_is_ctl(fc))
599 * Sometimes, we get invalid
600 * MIC failures on valid control frames.
601 * Remove these mic errors.
603 ds->ds_rxstat.rs_status &=
606 rx_status.flags |= ATH_RX_MIC_ERROR;
609 * Reject error frames with the exception of
610 * decryption and MIC failures. For monitor mode,
611 * we also ignore the CRC error.
613 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
614 if (ds->ds_rxstat.rs_status &
615 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
619 if (ds->ds_rxstat.rs_status &
620 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
626 * The status portion of the descriptor could get corrupted.
628 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
631 * Sync and unmap the frame. At this point we're
632 * committed to passing the sk_buff somewhere so
633 * clear buf_skb; this means a new sk_buff must be
634 * allocated when the rx descriptor is setup again
635 * to receive another frame.
637 skb_put(skb, ds->ds_rxstat.rs_datalen);
638 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
639 rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
641 sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
643 sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
644 rx_status.ratecode = ds->ds_rxstat.rs_rate;
647 if (rx_status.ratecode & 0x80) {
648 /* TODO - add table to avoid division */
649 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
650 rx_status.flags |= ATH_RX_40MHZ;
652 (rx_status.rateKbps * 27) / 13;
654 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
656 (rx_status.rateKbps * 10) / 9;
658 rx_status.flags |= ATH_RX_SHORT_GI;
661 /* sc_noise_floor is only available when the station
662 attaches to an AP, so we use a default value
663 if we are not yet attached. */
665 ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;
667 pci_dma_sync_single_for_cpu(sc->pdev,
671 pci_unmap_single(sc->pdev,
676 /* XXX: Ah! make me more readable, use a helper */
677 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
678 if (ds->ds_rxstat.rs_moreaggr == 0) {
679 rx_status.rssictl[0] =
680 ds->ds_rxstat.rs_rssi_ctl0;
681 rx_status.rssictl[1] =
682 ds->ds_rxstat.rs_rssi_ctl1;
683 rx_status.rssictl[2] =
684 ds->ds_rxstat.rs_rssi_ctl2;
685 rx_status.rssi = ds->ds_rxstat.rs_rssi;
686 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
687 rx_status.rssiextn[0] =
688 ds->ds_rxstat.rs_rssi_ext0;
689 rx_status.rssiextn[1] =
690 ds->ds_rxstat.rs_rssi_ext1;
691 rx_status.rssiextn[2] =
692 ds->ds_rxstat.rs_rssi_ext2;
694 ATH_RX_RSSI_EXTN_VALID;
696 rx_status.flags |= ATH_RX_RSSI_VALID |
697 ATH_RX_CHAIN_RSSI_VALID;
701 * Need to insert the "combined" rssi into the
702 * status structure for upper layer processing
704 rx_status.rssi = ds->ds_rxstat.rs_rssi;
705 rx_status.flags |= ATH_RX_RSSI_VALID;
708 /* Pass frames up to the stack. */
710 type = ath_rx_indicate(sc, skb,
711 &rx_status, ds->ds_rxstat.rs_keyix);
714 * change the default rx antenna if rx diversity chooses the
715 * other antenna 3 times in a row.
717 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
718 if (++sc->sc_rxotherant >= 3)
719 ath_setdefantenna(sc,
720 ds->ds_rxstat.rs_antenna);
722 sc->sc_rxotherant = 0;
725 #ifdef CONFIG_SLOW_ANT_DIV
726 if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
727 ieee80211_is_beacon(fc)) {
728 ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
732 * For frames successfully indicated, the buffer will be
733 * returned to us by upper layers by calling
734 * ath_rx_mpdu_requeue, either synchronusly or asynchronously.
735 * So we don't want to do it here in this loop.
740 bf->bf_status |= ATH_BUFSTATUS_FREE;
744 DPRINTF(sc, ATH_DBG_CONFIG,
745 "%s: Reset rx chain mask. "
746 "Do internal reset\n", __func__);
748 ath_reset(sc, false);