/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "debug.h"
#include "htc-ops.h"

/*
 * tid - tid_mux0..tid_mux3
 * aid - tid_mux4..tid_mux7
 */
#define ATH6KL_TID_MASK 0xf
#define ATH6KL_AID_SHIFT 4
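/*
 * Example (illustrative): a tid_mux value of 0x25 decodes through the
 * helpers below to aid = 0x25 >> 4 = 2 and tid = 0x25 & 0xf = 5, i.e.
 * TID 5 of the station with association ID 2.
 */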
static inline u8 ath6kl_get_tid(u8 tid_mux)
{
	return tid_mux & ATH6KL_TID_MASK;
}

static inline u8 ath6kl_get_aid(u8 tid_mux)
{
	return tid_mux >> ATH6KL_AID_SHIFT;
}
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
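/*
 * Note on the mapping above: multicast traffic always rides on
 * ENDPOINT_2, a unicast flow takes the first endpoint in
 * ENDPOINT_2..ENDPOINT_5 with no tx pending, and once all four are
 * busy new flows are assigned round-robin via ar->next_ep_id.
 */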
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger, determine the
		 * more-data and EOSP bits. Set EOSP if the queue is empty
		 * or sufficient frames were delivered for this trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format  */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
					(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA
	 */
	if (is_apsdq_empty) {
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);
	}
	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
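/*
 * The low four bits of conn->apsd_info form a per-traffic-class
 * delivery-enabled bitmap, which is what the (1 << traffic_class)
 * test above consults; e.g. apsd_info & 0xf == 0x9 (illustrative)
 * would APSD-queue only traffic classes 0 and 3.
 */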
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this
	 * STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);
	return true;
}
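/*
 * The PVB updated above is the partial virtual bitmap of the beacon's
 * TIM element; setting the bit for conn->aid makes the target
 * advertise buffered traffic so the sleeping station knows to poll.
 */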
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a DTIM expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
							  vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
							       vif, skb,
							       flags);
		}
	}

	return ps_queued;
}
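/*
 * Note: a true return from ath6kl_powersave_ap() means the skb has
 * been consumed (queued for later delivery or dropped); the caller
 * must not transmit or free it.
 */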
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
		dev_kfree_skb(skb);
		return -EACCES;
	}

	if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
			 eid >= ENDPOINT_MAX)) {
		status = -EINVAL;
		goto fail_ctrl_tx;
	}

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);
	cookie->htc_pkt.skb = skb;

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
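/*
 * Ownership note: once ath6kl_htc_tx() is called the skb belongs to
 * HTC and is reclaimed in ath6kl_tx_complete(); on any earlier
 * failure it is freed right here via fail_ctrl_tx.
 */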
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags))
		goto fail_tx;

	if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
		goto fail_tx;

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}
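		/*
		 * The offsets stashed above are biased by the size of the
		 * 8-byte LLC/SNAP header because ath6kl_wmi_dix_2_dot3()
		 * below inserts that header ahead of the IP payload, so
		 * csum_start/csum_dest line up with the converted 802.3
		 * frame handed to the target.
		 */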
		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);

		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move the buffer data to align it. Since the
		 * skb buffer is cloned and not only the header is changed, we
		 * have to copy it to allow the changes. Since we are copying
		 * the data here, we may as well align it by reserving suitable
		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);
	cookie->htc_pkt.skb = skb;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
			ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI operation, if this endpoint is getting
		 * full then something is running rampant: the host should
		 * not be exhausting the WMI queue with too many commands.
		 * The only exception to this is during endpoint-ping
		 * testing.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <=
			target->endpoint[endpoint].tx_drop_packet_threshold)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}
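/*
 * HTC_SEND_FULL_DROP above tells HTC to discard the overflowing
 * packet; in every other case the net queue of the affected vif is
 * stopped here and woken again from ath6kl_tx_complete().
 */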
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
void ath6kl_tx_complete(struct htc_target *target,
			struct list_head *packet_queue)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
				 packet->endpoint >= ENDPOINT_MAX))
			continue;

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (WARN_ON_ONCE(!ath6kl_cookie))
			continue;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (WARN_ON_ONCE(!skb || !skb->data)) {
			dev_kfree_skb(skb);
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		__skb_queue_tail(&skb_queue, skb);

		if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed  */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	/* FIXME: Locking */
	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);
}
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}
static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
				     AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);

	return skb;
}
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		packet->skb = skb;
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
			size_t len = skb_headlen(skb);
			skb->data = PTR_ALIGN(skb->data - 4, 4);
			skb_set_tail_pointer(skb, len);
		}
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		packet->skb = skb;

		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate ? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	spin_lock_bh(&rxtid->lock);
	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An important point: if win_sz is 7, for a seq_no space of 4095,
	 * there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, TID_WINDOW_SZ = 2 x win_sz:
	 * win_sz of 2, 4, 8, 16 works fine.
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
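	/*
	 * Worked example (illustrative): with win_sz = 8, hold_q_sz =
	 * TID_WINDOW_SZ(8) = 16 and AGGR_WIN_IDX() reduces to seq % 16,
	 * so seq_next = 4090 lands on idx 10 and the 4096-entry sequence
	 * space wraps cleanly. With win_sz = 7 (hold_q_sz = 14), seq 4095
	 * lands on idx 7 but seq 0 on idx 0, producing the holes the
	 * comment above warns about.
	 */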
	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
						 node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
}
static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_conn->rx_tid[tid];
	stats = &agg_conn->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_conn, tid, 0, 0);
			spin_lock_bh(&rxtid->lock);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
			spin_unlock_bh(&rxtid->lock);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_conn, tid, st, 0);
		}

		stats->num_oow++;
	}
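	/*
	 * Worked example (illustrative): with hold_q_sz = 16 and
	 * seq_next = 100 the window spans 100..115 and the extended
	 * window reaches 130. A frame with seq_no = 200 falls outside
	 * both, so all pending frames are flushed and seq_next jumps to
	 * 200 - 15 = 185.
	 */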
	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 *  2a. is the frame_seq_no preceding current tid_seq_no?
	 *      -> drop the frame. perhaps sender did not get our ACK.
	 *         this is taken care of above.
	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *      -> Taken care of it above, by moving window forward.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_conn, tid, 0, 1);

	if (agg_conn->timer_scheduled)
		return is_queued;

	spin_lock_bh(&rxtid->lock);
	for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
		if (rxtid->hold_q[idx].skb) {
			/*
			 * There is a frame in the queue and no
			 * timer so start a timer to ensure that
			 * the frame doesn't remain stuck
			 * forever.
			 */
			agg_conn->timer_scheduled = true;
			mod_timer(&agg_conn->timer,
				  (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
			rxtid->timer_mon = true;
			break;
		}
	}
	spin_unlock_bh(&rxtid->lock);

	return is_queued;
}
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;

	/*
	 * Number of frames to send in a service period is
	 * indicated by the station
	 * in the QOS_INFO of the association request
	 * If it is zero, send all frames
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	u8 pad_before_data_start;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	struct aggr_info_conn *aggr_conn;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);
	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	if (ept == ar->ctrl_ep) {
		if (test_bit(WMI_ENABLED, &ar->flag)) {
			ath6kl_check_wow_status(ar);
			ath6kl_wmi_control_rx(ar->wmi, skb);
			return;
		}
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);
	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;
				struct ath6kl_mgmt_buff *mgmt;
				u8 idx;

				spin_lock_bh(&conn->psq_lock);
				while (conn->mgmt_psq_len > 0) {
					mgmt = list_first_entry(
							&conn->mgmt_psq,
							struct ath6kl_mgmt_buff,
							list);
					list_del(&mgmt->list);
					conn->mgmt_psq_len--;
					spin_unlock_bh(&conn->psq_lock);
					idx = vif->fw_vif_idx;

					ath6kl_wmi_send_mgmt_cmd(ar->wmi,
								 idx,
								 mgmt->id,
								 mgmt->freq,
								 mgmt->wait,
								 mgmt->buf,
								 mgmt->len,
								 mgmt->no_cck);

					kfree(mgmt);
					spin_lock_bh(&conn->psq_lock);
				}
				conn->mgmt_psq_len = 0;
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	pad_before_data_start =
		(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
			& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;

	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	skb_pull(skb, pad_before_data_start);

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest)) {
		if (vif->nw_type == AP_NETWORK) {
			conn = ath6kl_find_sta(vif, datap->h_source);
			if (!conn)
				return;
			aggr_conn = conn->aggr_conn;
		} else
			aggr_conn = vif->aggr_cntxt->aggr_conn;

		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
					  is_amsdu, skb)) {
			/* aggregation code will handle the skb */
			return;
		}
	} else if (!is_broadcast_ether_addr(datap->h_dest))
		vif->net_stats.multicast++;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		stats = &aggr_conn->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(aggr_conn, i, 0, 0);
	}

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			spin_lock_bh(&rxtid->lock);
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					aggr_conn->timer_scheduled = true;
					rxtid->timer_mon = true;
					break;
				}
			}
			spin_unlock_bh(&rxtid->lock);

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (aggr_conn->timer_scheduled)
		mod_timer(&aggr_conn->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
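/*
 * The timer is re-armed above only while at least one hold_q slot
 * still has a frame parked in it; once every TID drains, timer_mon
 * and timer_scheduled stay false until a new out-of-order frame
 * arrives in aggr_process_recv_frm().
 */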
static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!aggr_conn || tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(aggr_conn, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
			     u8 win_sz)
{
	struct ath6kl_sta *sta;
	struct aggr_info_conn *aggr_conn = NULL;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];
	stats = &aggr_conn->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
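/*
 * Sizing example (illustrative): an ADDBA with win_sz = 8 allocates
 * TID_WINDOW_SZ(8) = 16 skb_hold_q slots, i.e. twice the negotiated
 * window, so duplicates and slightly-ahead sequence numbers can be
 * parked without immediately shifting the window.
 */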
void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
		    struct aggr_info_conn *aggr_conn)
{
	struct rxtid *rxtid;
	u8 i;

	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
	aggr_conn->dev = vif->ndev;
	init_timer(&aggr_conn->timer);
	aggr_conn->timer.function = aggr_timeout;
	aggr_conn->timer.data = (unsigned long) aggr_conn;
	aggr_conn->aggr_info = aggr_info;

	aggr_conn->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_conn->rx_tid[i];
		rxtid->aggr = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}
}
struct aggr_info *aggr_init(struct ath6kl_vif *vif)
{
	struct aggr_info *p_aggr = NULL;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
	if (!p_aggr->aggr_conn) {
		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
		kfree(p_aggr);
		return NULL;
	}

	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);

	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);

	return p_aggr;
}
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
{
	struct ath6kl_sta *sta;
	struct rxtid *rxtid;
	struct aggr_info_conn *aggr_conn = NULL;
	u8 tid, aid;

	if (vif->nw_type == AP_NETWORK) {
		aid = ath6kl_get_aid(tid_mux);
		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
		if (sta)
			aggr_conn = sta->aggr_conn;
	} else
		aggr_conn = vif->aggr_cntxt->aggr_conn;

	if (!aggr_conn)
		return;

	tid = ath6kl_get_tid(tid_mux);
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &aggr_conn->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(aggr_conn, tid);
}
void aggr_reset_state(struct aggr_info_conn *aggr_conn)
{
	u8 tid;

	if (!aggr_conn)
		return;

	if (aggr_conn->timer_scheduled) {
		del_timer(&aggr_conn->timer);
		aggr_conn->timer_scheduled = false;
	}

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_conn, tid);
}
/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	if (!aggr_info)
		return;

	aggr_reset_state(aggr_info->aggr_conn);
	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
	kfree(aggr_info->aggr_conn);
	kfree(aggr_info);
}