/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the in-use endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
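/*
 * Note on the mapping above (illustrative, based on the visible logic):
 * data endpoints ENDPOINT_2..ENDPOINT_5 are handed out first-free-first.
 * Once all four carry pending traffic, new IBSS peers are assigned
 * round-robin via ar->next_ep_id, which wraps from ENDPOINT_5 back to
 * ENDPOINT_2, so endpoints are shared rather than refusing the new peer.
 * map_no stores index + 1 so that zero can mean "no mapping".
 */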
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
				  struct ath6kl_vif *vif,
				  struct sk_buff *skb,
				  u32 *flags)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty = false;
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	u8 up = 0, traffic_class, *ip_hdr;
	u16 ether_type;
	struct ath6kl_llc_snap_hdr *llc_hdr;

	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
		/*
		 * This tx is because of a uAPSD trigger; determine the
		 * More and EOSP bits. Set EOSP if the queue is empty
		 * or sufficient frames have been delivered for this
		 * trigger.
		 */
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->apsdq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		else if (conn->sta_flags & STA_PS_APSD_EOSP)
			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	} else if (!conn->apsd_info)
		return false;

	if (test_bit(WMM_ENABLED, &vif->flags)) {
		ether_type = be16_to_cpu(datap->h_proto);
		if (is_ethertype(ether_type)) {
			/* packet is in DIX format */
			ip_hdr = (u8 *)(datap + 1);
		} else {
			/* packet is in 802.3 format */
			llc_hdr = (struct ath6kl_llc_snap_hdr *)
					(datap + 1);
			ether_type = be16_to_cpu(llc_hdr->eth_type);
			ip_hdr = (u8 *)(llc_hdr + 1);
		}

		if (ether_type == IP_ETHERTYPE)
			up = ath6kl_wmi_determine_user_priority(
							ip_hdr, 0);
	}

	traffic_class = ath6kl_wmi_get_traffic_class(up);

	if ((conn->apsd_info & (1 << traffic_class)) == 0)
		return false;

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	skb_queue_tail(&conn->apsdq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_apsdq_empty)
		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 1, 0);

	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;

	return true;
}
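/*
 * Summary of the uAPSD path above: a trigger-initiated tx only stamps the
 * MORE/EOSP/UAPSD flags and is sent immediately; traffic on an AC not
 * enabled in conn->apsd_info falls back to the legacy psq path; everything
 * else is parked on conn->apsdq, and only the first frame queued causes a
 * WMI update, since the target only needs the empty->non-empty edge.
 */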
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
			       struct ath6kl_vif *vif,
			       struct sk_buff *skb,
			       u32 *flags)
{
	bool is_psq_empty = false;
	struct ath6kl *ar = vif->ar;

	if (conn->sta_flags & STA_PS_POLLED) {
		spin_lock_bh(&conn->psq_lock);
		if (!skb_queue_empty(&conn->psq))
			*flags |= WMI_DATA_HDR_FLAGS_MORE;
		spin_unlock_bh(&conn->psq_lock);
		return false;
	}

	/* Queue the frames if the STA is sleeping */
	spin_lock_bh(&conn->psq_lock);
	is_psq_empty = skb_queue_empty(&conn->psq);
	skb_queue_tail(&conn->psq, skb);
	spin_unlock_bh(&conn->psq_lock);

	/*
	 * If this is the first pkt getting queued
	 * for this STA, update the PVB for this STA.
	 */
	if (is_psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi,
				       vif->fw_vif_idx,
				       conn->aid, 1);

	return true;
}
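/*
 * As with the APSD queue, the PVB (partial virtual bitmap, which feeds the
 * TIM IE in beacons so a dozing STA learns it has buffered traffic) is
 * updated only on the empty->non-empty transition of conn->psq; frames
 * queued behind the first need no further WMI command.
 */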
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
				u32 *flags)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false;
	struct ath6kl *ar = vif->ar;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim expiry,
			 * queue it.
			 */
			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       vif->fw_vif_idx,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*flags |= WMI_DATA_HDR_FLAGS_MORE;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		conn = ath6kl_find_sta(vif, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			ps_queued = ath6kl_process_uapsdq(conn,
						vif, skb, flags);
			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
				ps_queued = ath6kl_process_psq(conn,
						vif, skb, flags);
		}
	}

	return ps_queued;
}
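/*
 * Return contract of ath6kl_powersave_ap(): true means the skb was consumed
 * here (queued on a PS queue, or dropped because no peer was found) and the
 * caller must not transmit it; false means the frame should continue down
 * the normal tx path with *flags updated.
 */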
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous; if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
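/*
 * Cookie lifecycle (as visible in this file): a cookie is allocated under
 * ar->lock together with the tx_pending[] bump, rides through HTC as the
 * packet context, and is released in ath6kl_tx_complete() where the same
 * counters are decremented - including on error, since the asynchronous HTC
 * interface reports failures via that same completion callback.
 */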
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	struct ath6kl_vif *vif = netdev_priv(dev);
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false;
	int ret;
	struct wmi_tx_meta_v2 meta_v2;
	void *meta;
	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
	u8 meta_ver = 0;
	u32 flags = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &vif->flags)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (vif->nw_type == AP_NETWORK) {
		if (ath6kl_powersave_ap(vif, skb, &flags))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			csum_start = skb->csum_start -
					(skb_network_header(skb) - skb->head) +
					sizeof(struct ath6kl_llc_snap_hdr);
			csum_dest = skb->csum_offset + csum_start;
		}

		if (skb_headroom(skb) < dev->needed_headroom) {
			struct sk_buff *tmp_skb = skb;

			skb = skb_realloc_headroom(skb, dev->needed_headroom);
			kfree_skb(tmp_skb);
			if (skb == NULL) {
				vif->net_stats.tx_dropped++;
				return 0;
			}
		}

		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if ((dev->features & NETIF_F_IP_CSUM) &&
		    (csum == CHECKSUM_PARTIAL)) {
			meta_v2.csum_start = csum_start;
			meta_v2.csum_dest = csum_dest;

			/* instruct target to calculate checksum */
			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
			meta_ver = WMI_META_VERSION_2;
			meta = &meta_v2;
		} else {
			meta_ver = 0;
			meta = NULL;
		}

		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
					      DATA_MSGTYPE, flags, 0,
					      meta_ver,
					      meta, vif->fw_vif_idx);
		if (ret) {
			ath6kl_warn("failed to add wmi data header:%d\n", ret);
			goto fail_tx;
		}

		if ((vif->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
				    vif->fw_vif_idx, skb,
				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);
	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
	    skb_cloned(skb)) {
		/*
		 * We will touch (move) the buffer data to align it. Since
		 * the skb buffer is cloned and not only the header is
		 * changed, we have to copy it to allow the changes. Since we
		 * are copying the data here, we may as well align it by
		 * reserving suitable headroom to avoid the memmove in
		 * ath6kl_htc_tx_buf_align().
		 */
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
		if (nskb == NULL)
			goto fail_tx;
		kfree_skb(skb);
		skb = nskb;
	}

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
			skb->data, skb->len);

	/*
	 * HTC interface is asynchronous; if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	vif->net_stats.tx_dropped++;
	vif->net_stats.tx_aborted_errors++;

	return 0;
}
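/*
 * Rationale for the copy above: HTC prepends an HTC_HDR_LENGTH header in
 * front of skb->data, so what must be 4-byte aligned is
 * (data - HTC_HDR_LENGTH). A cloned skb shares its data area and may not be
 * modified in place; skb_copy_expand() makes the buffer private and
 * reserves the headroom in the same pass, so the later memmove in
 * ath6kl_htc_tx_buf_align() can be avoided.
 */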
/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];
	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
		    ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive;
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}
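/*
 * ar->hiac_stream_active_pri is a cached maximum over all active ACs:
 * activation can only raise it (a single compare), while deactivation of
 * the current maximum forces the O(WMM_NUM_AC) rescan above. With only four
 * ACs the rescan is trivial, but keeping the cache lets the send-full path
 * (see ath6kl_tx_queue_full) compare priorities in O(1).
 */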
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct ath6kl_vif *vif;
	enum htc_endpoint_id endpoint = packet->endpoint;
	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI traffic the control endpoint should never
		 * fill up; if it does, something is running rampant, as the
		 * host should not be exhausting the WMI queue with too many
		 * commands. The only exception is during testing with
		 * endpointping.
		 */
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		ath6kl_err("wmi ctrl ep is full\n");
		return action;
	}

	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return action;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		action = HTC_SEND_FULL_DROP;

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (vif->nw_type == ADHOC_NETWORK ||
		    action != HTC_SEND_FULL_DROP) {
			spin_unlock_bh(&ar->list_lock);

			set_bit(NETQ_STOPPED, &vif->flags);
			netif_stop_queue(vif->ndev);

			return action;
		}
	}
	spin_unlock_bh(&ar->list_lock);

	return action;
}
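/*
 * Worked example of the cookie reservation above: once the shared pool has
 * drained to the last MAX_HI_COOKIE_NUM cookies, an endpoint whose AC
 * priority is below hiac_stream_active_pri gets HTC_SEND_FULL_DROP for its
 * overflow, so the remaining cookies are effectively reserved for the
 * highest-priority active stream. The vif loop then stops the netif queue
 * when the packet is being kept (back-pressure), and also for ADHOC
 * interfaces even on drop, since IBSS has no per-STA flow control to fall
 * back on.
 */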
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
				     enum htc_endpoint_id eid, u32 map_no)
{
	struct ath6kl *ar = vif->ar;
	u32 i;

	if (vif->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
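/*
 * Compaction example (map_no here is the 1-based value stored by
 * ath6kl_ibss_map_epid): with node_map pending counts [A:0, B:0, C:1] and
 * node_num = 3, completion of C's last frame makes its tx_pend 0 at the
 * highest index, so the loop walks backwards and frees C, B and A,
 * shrinking node_num to 0. A zero-tx_pend entry below a busy one is left
 * alone until the entries above it drain.
 */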
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing[ATH6KL_VIF_MAX] = {false};
	u8 if_idx;
	struct ath6kl_vif *vif;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {
		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (eid == ar->ctrl_ep) {
			if_idx = wmi_cmd_hdr_get_if_idx(
				(struct wmi_cmd_hdr *) packet->buf);
		} else {
			if_idx = wmi_data_hdr_get_if_idx(
				(struct wmi_data_hdr *) packet->buf);
		}

		vif = ath6kl_get_vif_by_index(ar, if_idx);
		if (!vif) {
			ath6kl_free_cookie(ar, ath6kl_cookie);
			continue;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing[if_idx] = true;

			vif->net_stats.tx_errors++;

			if (status != -ENOSPC && status != -ECANCELED)
				ath6kl_warn("tx complete error: %d\n", status);

			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			flushing[if_idx] = false;
			vif->net_stats.tx_packets++;
			vif->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(vif, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &vif->flags))
			clear_bit(NETQ_STOPPED, &vif->flags);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (test_bit(CONNECTED, &vif->flags) &&
		    !flushing[vif->fw_vif_idx]) {
			spin_unlock_bh(&ar->list_lock);
			netif_wake_queue(vif->ndev);
			spin_lock_bh(&ar->list_lock);
		}
	}
	spin_unlock_bh(&ar->list_lock);

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}
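/*
 * The per-vif flushing[] flag records whether the last completion seen for
 * that interface was a flush (-ECANCELED). Queues are only woken above for
 * connected vifs that were not flushing, so a vif in the middle of teardown
 * does not have its netif queue restarted by stale completions.
 */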
void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
	int i;

	/* flush all the data (non-control) streams */
	for (i = 0; i < WMM_NUM_AC; i++)
		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
				      ATH6KL_DATA_PKT_TAG);
}
static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
					      struct sk_buff *skb)
{
	if (!skb)
		return;

	skb->dev = dev;

	if (!(skb->dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);

	netif_rx_ni(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
	struct sk_buff *skb;

	while (num) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb) {
			ath6kl_err("netbuf allocation failed\n");
			return;
		}
		skb_queue_tail(q, skb);
		num--;
	}
}
static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
		ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	skb = skb_dequeue(&p_aggr->free_q);

	return skb;
}
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb;
	int rx_buf;
	int n_buf_refill;
	struct htc_packet *packet;
	struct list_head queue;

	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

	if (n_buf_refill <= 0)
		return;

	INIT_LIST_HEAD(&queue);

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: providing htc with %d buffers at eid=%d\n",
		   __func__, n_buf_refill, endpoint);

	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
		if (!skb)
			break;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_BUFFER_SIZE, endpoint);
		list_add_tail(&packet->list, &queue);
	}

	if (!list_empty(&queue))
		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}
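/*
 * Alignment example for the PTR_ALIGN() trick above: if skb->data ends in
 * ...0x06 (2 bytes past a word boundary), then data - 4 ends in ...0x02 and
 * PTR_ALIGN(data - 4, 4) rounds up to ...0x04, i.e. the start is pulled
 * back by 2 bytes into headroom. The buffer handed to HTC therefore always
 * begins word-aligned, at the cost of at most 3 bytes of headroom.
 */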
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	while (count) {
		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
		if (!skb)
			return;

		packet = (struct htc_packet *) skb->head;
		if (!IS_ALIGNED((unsigned long) skb->data, 4))
			skb->data = PTR_ALIGN(skb->data - 4, 4);
		set_htc_rxpkt_info(packet, skb, skb->data,
				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
		spin_lock_bh(&ar->lock);
		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
		spin_unlock_bh(&ar->lock);
		count--;
	}
}
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
			     struct rxtid *rxtid, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	struct ethhdr *hdr;
	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
	u8 *framep;

	mac_hdr_len = sizeof(struct ethhdr);
	framep = skb->data + mac_hdr_len;
	amsdu_len = skb->len - mac_hdr_len;

	while (amsdu_len > mac_hdr_len) {
		hdr = (struct ethhdr *) framep;
		payload_8023_len = ntohs(hdr->h_proto);

		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
				   payload_8023_len);
			break;
		}

		frame_8023_len = payload_8023_len + mac_hdr_len;
		new_skb = aggr_get_free_skb(p_aggr);
		if (!new_skb) {
			ath6kl_err("no buffer available\n");
			break;
		}

		memcpy(new_skb->data, framep, frame_8023_len);
		skb_put(new_skb, frame_8023_len);
		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
			ath6kl_err("dot3_2_dix error\n");
			dev_kfree_skb(new_skb);
			break;
		}

		skb_queue_tail(&rxtid->q, new_skb);

		/* Is this the last subframe within this aggregate? */
		if ((amsdu_len - frame_8023_len) == 0)
			break;

		/* Add the length of A-MSDU subframe padding bytes -
		 * Round to nearest word.
		 */
		frame_8023_len = ALIGN(frame_8023_len, 4);

		framep += frame_8023_len;
		amsdu_len -= frame_8023_len;
	}

	dev_kfree_skb(skb);
}
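/*
 * Subframe walk example: a subframe with a 39-byte payload gives
 * frame_8023_len = 39 + 14 = 53; ALIGN(53, 4) = 56, so framep advances 56
 * bytes (3 bytes of inter-subframe padding skipped) although only 53 bytes
 * were copied out. The final subframe takes the break above before the
 * ALIGN, since an A-MSDU carries no trailing pad.
 */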
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to the given seq_no when a BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: the last seq no in the current window will occupy the
	 * same index position as the index just previous to the start.
	 * An important point: if win_sz is 7, for a seq_no space of
	 * 4096, there will be holes when the sequence number wraps
	 * around. The target should judiciously choose win_sz based on
	 * this condition (TID_WINDOW_SZ = 2 x win_sz; win_sz of 2, 4, 8
	 * or 16 works fine).
	 * We must dequeue from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}
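/*
 * Index arithmetic example (assuming AGGR_WIN_IDX(x, y) is x % y, which the
 * wrap-around note above implies): hold_q_sz = 2 * win_sz. For win_sz = 8,
 * hold_q_sz = 16 divides the 4096-wide sequence space, so seq 4095 maps to
 * slot 15 and seq 0 to slot 0 - no discontinuity. For win_sz = 7,
 * hold_q_sz = 14: seq 4095 maps to slot 7 but seq 0 again to slot 0, so
 * slots 8..13 are skipped on every wrap - these are the "holes" the comment
 * warns about.
 */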
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
					(rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame a duplicate or something beyond our window
	 * (hold_q -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop the incoming frame.
	 * 2. Not falling in the current sliding window.
	 *  2a. Is the frame_seq_no preceding the current tid_seq_no?
	 *      -> drop the frame. Perhaps the sender did not get our ACK.
	 *         This is taken care of above.
	 *  2b. Is the frame_seq_no beyond window(st, TID_WINDOW_SZ)?
	 *      -> Taken care of above, by moving the window forward.
	 */
	dev_kfree_skb(node->skb);

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer, so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
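/*
 * Window-shift example for the out-of-window branch above, with
 * hold_q_sz = 16: st = 100 gives window [100..115] and an extended window
 * up to 130. A frame with cur = 300 is beyond both, so all held frames are
 * flushed and seq_next becomes 300 - 15 = 285: the new window [285..300]
 * ends exactly at the received frame. A cur inside only the extended range
 * instead slides the window just far enough, dequeuing only the frames that
 * fall outside it.
 */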
static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
					  struct ath6kl_sta *conn)
{
	struct ath6kl *ar = vif->ar;
	bool is_apsdq_empty, is_apsdq_empty_at_start;
	u32 num_frames_to_deliver, flags;
	struct sk_buff *skb = NULL;

	/*
	 * If the APSD q for this STA is not empty, dequeue and
	 * send a pkt from the head of the q. Also update the
	 * More data bit in the WMI_DATA_HDR if there are
	 * more pkts for this STA in the APSD q.
	 * If there are no more pkts for this STA,
	 * update the APSD bitmap for this STA.
	 */

	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
						    ATH6KL_APSD_FRAME_MASK;
	/*
	 * The number of frames to send in a service period is
	 * indicated by the station in the QOS_INFO of the
	 * association request. If it is zero, send all frames.
	 */
	if (!num_frames_to_deliver)
		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

	spin_lock_bh(&conn->psq_lock);
	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
	spin_unlock_bh(&conn->psq_lock);
	is_apsdq_empty_at_start = is_apsdq_empty;

	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
		spin_lock_bh(&conn->psq_lock);
		skb = skb_dequeue(&conn->apsdq);
		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
		spin_unlock_bh(&conn->psq_lock);

		/*
		 * Set the STA flag to Trigger delivery,
		 * so that the frame will go out.
		 */
		conn->sta_flags |= STA_PS_APSD_TRIGGER;
		num_frames_to_deliver--;

		/* Last frame in the service period, set EOSP or queue empty */
		if ((is_apsdq_empty) || (!num_frames_to_deliver))
			conn->sta_flags |= STA_PS_APSD_EOSP;

		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
	}

	if (is_apsdq_empty) {
		if (is_apsdq_empty_at_start)
			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
		else
			flags = 0;

		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
					      vif->fw_vif_idx,
					      conn->aid, 0, flags);
	}
}
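/*
 * Example of the SP sizing above: the max-SP-length bits the STA advertised
 * in the QoS info of its association request are kept in the upper bits of
 * conn->apsd_info; a non-zero value caps how many frames one trigger may
 * release, while zero means "drain the whole apsdq". The TRIGGER/EOSP flags
 * are toggled around each ath6kl_data_tx() call so that
 * ath6kl_process_uapsdq() stamps the outgoing frame instead of re-queueing
 * it.
 */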
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	bool trig_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	struct ath6kl_vif *vif;
	u16 seq_no, offset;
	u8 tid, if_idx;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		dev_kfree_skb(skb);
		return;
	}

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	if (ept == ar->ctrl_ep) {
		if_idx =
		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
	} else {
		if_idx =
		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
	}

	vif = ath6kl_get_vif_by_index(ar, if_idx);
	if (!vif) {
		dev_kfree_skb(skb);
		return;
	}
	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&vif->if_lock);

	vif->net_stats.rx_packets++;
	vif->net_stats.rx_bytes += packet->act_len;

	spin_unlock_bh(&vif->if_lock);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
			skb->data, skb->len);

	skb->dev = vif->ndev;

	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
		return;
	}

	ath6kl_check_wow_status(ar);
	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
		      sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have an LLC hdr. They are 16 bytes in size.
	 * Allow these frames in AP mode.
	 */
	if (vif->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		vif->net_stats.rx_errors++;
		vif->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (vif->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);
		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);

		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(vif, datap->h_source);

		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA and
		 *    clear the PVB for the STA.
		 * 2. If Awake-->Sleep, start queueing frames for the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		/* Accept the trigger only when the station is in sleep */
		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
			ath6kl_uapsd_trigger_frame_rx(vif, conn);

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;
				bool is_apsdq_empty;

				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}

				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found, send the
			 * frame to it on the air; else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		if (skb == NULL) {
			/* nothing to deliver up the stack */
			return;
		}
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
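/*
 * AP-mode forwarding decision in the block above:
 *   - multicast/broadcast: copy, send one frame on the air and one up the
 *     stack;
 *   - unicast to an associated STA, intra_bss allowed: on the air only;
 *   - unicast to an associated STA, intra_bss disabled: drop;
 *   - unicast to an unknown destination: up the stack only.
 */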
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
			     u8 win_sz)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}
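/*
 * Sizing example: per the dequeue comment earlier, TID_WINDOW_SZ is twice
 * the negotiated win_sz, so an ADDBA with win_sz = 8 allocates a 16-entry
 * hold_q. The doubled window is what lets the receive path park frames up
 * to one full window beyond seq_next before it must slide the window.
 */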
struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr = NULL;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}
void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}
/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}