ath6kl: fix uninitialized warning in ath6kl_process_uapsdq()
drivers/net/wireless/ath/ath6kl/txrx.c
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "debug.h"

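/*
 * Summary comment (derived from the code below): map an IBSS destination
 * MAC address to an HTC data endpoint (ENDPOINT_2..ENDPOINT_5). Multicast
 * frames always go out on ENDPOINT_2; unicast destinations are tracked in
 * ar->node_map, and *map_no returns the 1-based index of the chosen slot
 * (0 means unmapped). When no endpoint is free, traffic is redistributed
 * round-robin via ar->next_ep_id.
 */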
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
                               u32 *map_no)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ethhdr *eth_hdr;
        u32 i, ep_map = -1;
        u8 *datap;

        *map_no = 0;
        datap = skb->data;
        eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

        if (is_multicast_ether_addr(eth_hdr->h_dest))
                return ENDPOINT_2;

        for (i = 0; i < ar->node_num; i++) {
                if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
                           ETH_ALEN) == 0) {
                        *map_no = i + 1;
                        ar->node_map[i].tx_pend++;
                        return ar->node_map[i].ep_id;
                }

                if ((ep_map == -1) && !ar->node_map[i].tx_pend)
                        ep_map = i;
        }

        if (ep_map == -1) {
                ep_map = ar->node_num;
                ar->node_num++;
                if (ar->node_num > MAX_NODE_NUM)
                        return ENDPOINT_UNUSED;
        }

        memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

        for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
                if (!ar->tx_pending[i]) {
                        ar->node_map[ep_map].ep_id = i;
                        break;
                }

                /*
                 * No free endpoint is available, start redistribution on
                 * the in-use endpoints.
                 */
                if (i == ENDPOINT_5) {
                        ar->node_map[ep_map].ep_id = ar->next_ep_id;
                        ar->next_ep_id++;
                        if (ar->next_ep_id > ENDPOINT_5)
                                ar->next_ep_id = ENDPOINT_2;
                }
        }

        *map_no = ep_map + 1;
        ar->node_map[ep_map].tx_pend++;

        return ar->node_map[ep_map].ep_id;
}

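/*
 * Summary comment (derived from the code below): decide whether a tx frame
 * for a station in power save must go on the per-STA uAPSD queue. Returns
 * true if the skb was queued (the caller must not transmit it now), false
 * if it can be sent immediately. For trigger-initiated delivery only the
 * MORE/EOSP/UAPSD flag bits in *flags are updated. Note that "up" is
 * initialized to 0 at declaration so non-WMM or non-IP traffic falls back
 * to best-effort user priority; that initialization is presumably the
 * uninitialized-variable warning fix referred to in this commit's subject.
 */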
static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                                struct ath6kl_vif *vif,
                                struct sk_buff *skb,
                                u32 *flags)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty = false;
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        u8 up = 0, traffic_class, *ip_hdr;
        u16 ether_type;
        struct ath6kl_llc_snap_hdr *llc_hdr;

        if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
                /*
                 * This tx is because of a uAPSD trigger, determine
                 * more and EOSP bit. Set EOSP if queue is empty
                 * or sufficient frames are delivered for this trigger.
                 */
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->apsdq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                else if (conn->sta_flags & STA_PS_APSD_EOSP)
                        *flags |= WMI_DATA_HDR_FLAGS_EOSP;
                *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        } else if (!conn->apsd_info)
                return false;

        if (test_bit(WMM_ENABLED, &vif->flags)) {
                ether_type = be16_to_cpu(datap->h_proto);
                if (is_ethertype(ether_type)) {
                        /* packet is in DIX format */
                        ip_hdr = (u8 *)(datap + 1);
                } else {
                        /* packet is in 802.3 format */
                        llc_hdr = (struct ath6kl_llc_snap_hdr *)
                                                        (datap + 1);
                        ether_type = be16_to_cpu(llc_hdr->eth_type);
                        ip_hdr = (u8 *)(llc_hdr + 1);
                }

                if (ether_type == IP_ETHERTYPE)
                        up = ath6kl_wmi_determine_user_priority(
                                                        ip_hdr, 0);
        }

        traffic_class = ath6kl_wmi_get_traffic_class(up);

        if ((conn->apsd_info & (1 << traffic_class)) == 0)
                return false;

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        skb_queue_tail(&conn->apsdq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this STA
         */
        if (is_apsdq_empty) {
                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                vif->fw_vif_idx,
                                conn->aid, 1, 0);
        }
        *flags |= WMI_DATA_HDR_FLAGS_UAPSD;

        return true;
}

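/*
 * Summary comment (derived from the code below): legacy (non-uAPSD)
 * power-save queueing. If the STA is being PS-polled, only set the MORE
 * bit when further frames are pending; otherwise queue the skb on
 * conn->psq and, for the first queued frame, set the PVB so the TIM IE
 * advertises buffered traffic. Returns true if the skb was queued.
 */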
static bool ath6kl_process_psq(struct ath6kl_sta *conn,
                                struct ath6kl_vif *vif,
                                struct sk_buff *skb,
                                u32 *flags)
{
        bool is_psq_empty = false;
        struct ath6kl *ar = vif->ar;

        if (conn->sta_flags & STA_PS_POLLED) {
                spin_lock_bh(&conn->psq_lock);
                if (!skb_queue_empty(&conn->psq))
                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                spin_unlock_bh(&conn->psq_lock);
                return false;
        }

        /* Queue the frames if the STA is sleeping */
        spin_lock_bh(&conn->psq_lock);
        is_psq_empty = skb_queue_empty(&conn->psq);
        skb_queue_tail(&conn->psq, skb);
        spin_unlock_bh(&conn->psq_lock);

        /*
         * If this is the first pkt getting queued
         * for this STA, update the PVB for this
         * STA.
         */
        if (is_psq_empty)
                ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                       vif->fw_vif_idx,
                                       conn->aid, 1);
        return true;
}

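/*
 * Summary comment (derived from the code below): AP-mode power-save gate
 * for the tx path. Multicast frames are buffered on ar->mcastpsq while any
 * STA sleeps (unless this tx is the DTIM-expiry flush itself); unicast
 * frames for a sleeping STA go through the uAPSD or legacy psq helpers
 * above. Returns true when the skb was consumed (queued or dropped), so
 * the caller must not send it.
 */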
static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
                                u32 *flags)
{
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        struct ath6kl_sta *conn = NULL;
        bool ps_queued = false;
        struct ath6kl *ar = vif->ar;

        if (is_multicast_ether_addr(datap->h_dest)) {
                u8 ctr = 0;
                bool q_mcast = false;

                for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
                        if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
                                q_mcast = true;
                                break;
                        }
                }

                if (q_mcast) {
                        /*
                         * If this transmit is not because of a DTIM expiry,
                         * queue it.
                         */
                        if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
                                bool is_mcastq_empty = false;

                                spin_lock_bh(&ar->mcastpsq_lock);
                                is_mcastq_empty =
                                        skb_queue_empty(&ar->mcastpsq);
                                skb_queue_tail(&ar->mcastpsq, skb);
                                spin_unlock_bh(&ar->mcastpsq_lock);

                                /*
                                 * If this is the first mcast pkt getting
                                 * queued, indicate to the target to set the
                                 * BitmapControl LSB of the TIM IE.
                                 */
                                if (is_mcastq_empty)
                                        ath6kl_wmi_set_pvb_cmd(ar->wmi,
                                                               vif->fw_vif_idx,
                                                               MCAST_AID, 1);

                                ps_queued = true;
                        } else {
                                /*
                                 * This transmit is because of DTIM expiry.
                                 * Determine if MoreData bit has to be set.
                                 */
                                spin_lock_bh(&ar->mcastpsq_lock);
                                if (!skb_queue_empty(&ar->mcastpsq))
                                        *flags |= WMI_DATA_HDR_FLAGS_MORE;
                                spin_unlock_bh(&ar->mcastpsq_lock);
                        }
                }
        } else {
                conn = ath6kl_find_sta(vif, datap->h_dest);
                if (!conn) {
                        dev_kfree_skb(skb);

                        /* Inform the caller that the skb is consumed */
                        return true;
                }

                if (conn->sta_flags & STA_PS_SLEEP) {
                        ps_queued = ath6kl_process_uapsdq(conn,
                                                vif, skb, flags);
                        if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
                                ps_queued = ath6kl_process_psq(conn,
                                                vif, skb, flags);
                }
        }
        return ps_queued;
}

/* Tx functions */

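/*
 * Summary comment (derived from the code below): send a WMI control packet
 * on the given HTC endpoint. Allocates a cookie to track completion,
 * except when the control endpoint is already flagged full, in which case
 * the packet is dropped with -ENOMEM. Completion and cleanup happen
 * asynchronously in the HTC tx-complete callback.
 */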
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                      enum htc_endpoint_id eid)
{
        struct ath6kl *ar = devt;
        int status = 0;
        struct ath6kl_cookie *cookie = NULL;

        spin_lock_bh(&ar->lock);

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
                   skb, skb->len, eid);

        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
                /*
                 * Control endpoint is full, don't allocate resources, we
                 * are just going to drop this packet.
                 */
                cookie = NULL;
                ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
                           skb, skb->len);
        } else
                cookie = ath6kl_alloc_cookie(ar);

        if (cookie == NULL) {
                spin_unlock_bh(&ar->lock);
                status = -ENOMEM;
                goto fail_ctrl_tx;
        }

        ar->tx_pending[eid]++;

        if (eid != ar->ctrl_ep)
                ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        cookie->skb = skb;
        cookie->map_no = 0;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, ATH6KL_CONTROL_PKT_TAG);

        /*
         * This interface is asynchronous, if there is an error, cleanup
         * will happen in the TX completion callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_ctrl_tx:
        dev_kfree_skb(skb);
        return status;
}

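/*
 * Summary comment (derived from the code below): the data-path transmit
 * handler. It prepends the LLC/SNAP and WMI data headers (optionally with
 * a meta v2 header for checksum offload), picks the HTC endpoint from the
 * AC mapping (or from the IBSS node map when IBSS power save is active),
 * allocates a cookie and hands the frame to HTC. On failure the skb is
 * freed and counted as dropped.
 */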
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_cookie *cookie = NULL;
        enum htc_endpoint_id eid = ENDPOINT_UNUSED;
        struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
        u8 ac = 99; /* initialize to unmapped ac */
        bool chk_adhoc_ps_mapping = false;
        int ret;
        struct wmi_tx_meta_v2 meta_v2;
        void *meta;
        u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
        u8 meta_ver = 0;
        u32 flags = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
                   skb, skb->data, skb->len);

        /* If target is not associated */
        if (!test_bit(CONNECTED, &vif->flags)) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (!test_bit(WMI_READY, &ar->flag))
                goto fail_tx;

        /* AP mode power saving processing */
        if (vif->nw_type == AP_NETWORK) {
                if (ath6kl_powersave_ap(vif, skb, &flags))
                        return 0;
        }

        if (test_bit(WMI_ENABLED, &ar->flag)) {
                if ((dev->features & NETIF_F_IP_CSUM) &&
                                (csum == CHECKSUM_PARTIAL)) {
                        csum_start = skb->csum_start -
                                        (skb_network_header(skb) - skb->head) +
                                        sizeof(struct ath6kl_llc_snap_hdr);
                        csum_dest = skb->csum_offset + csum_start;
                }

                if (skb_headroom(skb) < dev->needed_headroom) {
                        struct sk_buff *tmp_skb = skb;

                        skb = skb_realloc_headroom(skb, dev->needed_headroom);
                        kfree_skb(tmp_skb);
                        if (skb == NULL) {
                                vif->net_stats.tx_dropped++;
                                return 0;
                        }
                }

                if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
                        ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
                        goto fail_tx;
                }

                if ((dev->features & NETIF_F_IP_CSUM) &&
                                (csum == CHECKSUM_PARTIAL)) {
                        meta_v2.csum_start = csum_start;
                        meta_v2.csum_dest = csum_dest;

                        /* instruct target to calculate checksum */
                        meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
                        meta_ver = WMI_META_VERSION_2;
                        meta = &meta_v2;
                } else {
                        meta_ver = 0;
                        meta = NULL;
                }

                ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
                                DATA_MSGTYPE, flags, 0,
                                meta_ver,
                                meta, vif->fw_vif_idx);

                if (ret) {
                        ath6kl_warn("failed to add wmi data header:%d\n",
                                    ret);
                        goto fail_tx;
                }

                if ((vif->nw_type == ADHOC_NETWORK) &&
                     ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
                        chk_adhoc_ps_mapping = true;
                else {
                        /* get the stream mapping */
                        ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
                                    vif->fw_vif_idx, skb,
                                    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
                        if (ret)
                                goto fail_tx;
                }
        } else
                goto fail_tx;

        spin_lock_bh(&ar->lock);

        if (chk_adhoc_ps_mapping)
                eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
        else
                eid = ar->ac2ep_map[ac];

        if (eid == 0 || eid == ENDPOINT_UNUSED) {
                ath6kl_err("eid %d is not mapped!\n", eid);
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* allocate resource for this packet */
        cookie = ath6kl_alloc_cookie(ar);

        if (!cookie) {
                spin_unlock_bh(&ar->lock);
                goto fail_tx;
        }

        /* update counts while the lock is held */
        ar->tx_pending[eid]++;
        ar->total_tx_data_pend++;

        spin_unlock_bh(&ar->lock);

        if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
            skb_cloned(skb)) {
                /*
                 * We will touch (move) the buffer data to align it. Since
                 * the skb buffer is cloned and not only the header is
                 * changed, we have to copy it to allow the changes. Since we
                 * are copying the data here, we may as well align it by
                 * reserving suitable headroom to avoid the memmove in
                 * ath6kl_htc_tx_buf_align().
                 */
                struct sk_buff *nskb;

                nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
                if (nskb == NULL)
                        goto fail_tx;
                kfree_skb(skb);
                skb = nskb;
        }

        cookie->skb = skb;
        cookie->map_no = map_no;
        set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
                         eid, htc_tag);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
                        skb->data, skb->len);

        /*
         * HTC interface is asynchronous, if this fails, cleanup will
         * happen in the ath6kl_tx_complete callback.
         */
        ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);

        return 0;

fail_tx:
        dev_kfree_skb(skb);

        vif->net_stats.tx_dropped++;
        vif->net_stats.tx_aborted_errors++;

        return 0;
}

/* indicate tx activity or inactivity on a WMI stream */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
        struct ath6kl *ar = devt;
        enum htc_endpoint_id eid;
        int i;

        eid = ar->ac2ep_map[traffic_class];

        if (!test_bit(WMI_ENABLED, &ar->flag))
                goto notify_htc;

        spin_lock_bh(&ar->lock);

        ar->ac_stream_active[traffic_class] = active;

        if (active) {
                /*
                 * Keep track of the active stream with the highest
                 * priority.
                 */
                if (ar->ac_stream_pri_map[traffic_class] >
                    ar->hiac_stream_active_pri)
                        /* set the new highest active priority */
                        ar->hiac_stream_active_pri =
                                        ar->ac_stream_pri_map[traffic_class];

        } else {
                /*
                 * We may have to search for the next active stream
                 * that is the highest priority.
                 */
                if (ar->hiac_stream_active_pri ==
                        ar->ac_stream_pri_map[traffic_class]) {
                        /*
                         * The highest priority stream just went inactive;
                         * reset and search for the next highest active
                         * priority stream.
                         */
                        ar->hiac_stream_active_pri = 0;

                        for (i = 0; i < WMM_NUM_AC; i++) {
                                if (ar->ac_stream_active[i] &&
                                    (ar->ac_stream_pri_map[i] >
                                     ar->hiac_stream_active_pri))
                                        /*
                                         * Set the new highest active
                                         * priority.
                                         */
                                        ar->hiac_stream_active_pri =
                                                ar->ac_stream_pri_map[i];
                        }
                }
        }

        spin_unlock_bh(&ar->lock);

notify_htc:
        /* notify HTC, this may cause credit distribution changes */
        ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
}

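/*
 * Summary comment (derived from the code below): HTC callback invoked when
 * an endpoint's tx queue overflows. Control-endpoint overflow only sets
 * WMI_CTRL_EP_FULL; for data endpoints the overflowing packet is dropped
 * when it belongs to a stream of lower priority than the highest active
 * one and the cookie pool is running low, and otherwise the netif queue is
 * stopped to throttle the host.
 */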
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                                               struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct ath6kl_vif *vif;
        enum htc_endpoint_id endpoint = packet->endpoint;
        enum htc_send_full_action action = HTC_SEND_FULL_KEEP;

        if (endpoint == ar->ctrl_ep) {
                /*
                 * Under normal WMI, if this is getting full then something
                 * is running rampant; the host should not be exhausting the
                 * WMI queue with too many commands. The only exception to
                 * this is during testing using endpointping.
                 */
                set_bit(WMI_CTRL_EP_FULL, &ar->flag);
                ath6kl_err("wmi ctrl ep is full\n");
                return action;
        }

        if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
                return action;

        /*
         * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
         * the highest active stream.
         */
        if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
            ar->hiac_stream_active_pri &&
            ar->cookie_count <= MAX_HI_COOKIE_NUM)
                /*
                 * Give preference to the highest priority stream by
                 * dropping the packets which overflowed.
                 */
                action = HTC_SEND_FULL_DROP;

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (vif->nw_type == ADHOC_NETWORK ||
                    action != HTC_SEND_FULL_DROP) {
                        spin_unlock_bh(&ar->list_lock);

                        set_bit(NETQ_STOPPED, &vif->flags);
                        netif_stop_queue(vif->ndev);

                        return action;
                }
        }
        spin_unlock_bh(&ar->list_lock);

        return action;
}

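/*
 * Summary comment (derived from the code below): undo the accounting done
 * in ath6kl_ibss_map_epid(). Decrement the pending-tx count for the
 * node-map slot referenced by map_no and, when the tail of the map has
 * gone idle, shrink ar->node_num by clearing trailing entries.
 */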
/* TODO this needs to be looked at */
static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
                                     enum htc_endpoint_id eid, u32 map_no)
{
        struct ath6kl *ar = vif->ar;
        u32 i;

        if (vif->nw_type != ADHOC_NETWORK)
                return;

        if (!ar->ibss_ps_enable)
                return;

        if (eid == ar->ctrl_ep)
                return;

        if (map_no == 0)
                return;

        map_no--;
        ar->node_map[map_no].tx_pend--;

        if (ar->node_map[map_no].tx_pend)
                return;

        if (map_no != (ar->node_num - 1))
                return;

        for (i = ar->node_num; i > 0; i--) {
                if (ar->node_map[i - 1].tx_pend)
                        break;

                memset(&ar->node_map[i - 1], 0,
                       sizeof(struct ath6kl_node_mapping));
                ar->node_num--;
        }
}

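/*
 * Summary comment (derived from the code below): HTC tx-completion
 * handler. Reaps completed packets, updates per-endpoint pending counts
 * and per-vif statistics, releases cookies and finally wakes the netif
 * queues, except for vifs whose last completion was a flush (-ECANCELED).
 */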
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
        struct ath6kl *ar = context;
        struct sk_buff_head skb_queue;
        struct htc_packet *packet;
        struct sk_buff *skb;
        struct ath6kl_cookie *ath6kl_cookie;
        u32 map_no = 0;
        int status;
        enum htc_endpoint_id eid;
        bool wake_event = false;
        bool flushing[ATH6KL_VIF_MAX] = {false};
        u8 if_idx;
        struct ath6kl_vif *vif;

        skb_queue_head_init(&skb_queue);

        /* lock the driver as we update internal state */
        spin_lock_bh(&ar->lock);

        /* reap completed packets */
        while (!list_empty(packet_queue)) {

                packet = list_first_entry(packet_queue, struct htc_packet,
                                          list);
                list_del(&packet->list);

                ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
                if (!ath6kl_cookie)
                        goto fatal;

                status = packet->status;
                skb = ath6kl_cookie->skb;
                eid = packet->endpoint;
                map_no = ath6kl_cookie->map_no;

                if (!skb || !skb->data)
                        goto fatal;

                __skb_queue_tail(&skb_queue, skb);

                if (!status && (packet->act_len != skb->len))
                        goto fatal;

                ar->tx_pending[eid]--;

                if (eid != ar->ctrl_ep)
                        ar->total_tx_data_pend--;

                if (eid == ar->ctrl_ep) {
                        if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
                                clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

                        if (ar->tx_pending[eid] == 0)
                                wake_event = true;
                }

                if (eid == ar->ctrl_ep) {
                        if_idx = wmi_cmd_hdr_get_if_idx(
                                (struct wmi_cmd_hdr *) packet->buf);
                } else {
                        if_idx = wmi_data_hdr_get_if_idx(
                                (struct wmi_data_hdr *) packet->buf);
                }

                vif = ath6kl_get_vif_by_index(ar, if_idx);
                if (!vif) {
                        ath6kl_free_cookie(ar, ath6kl_cookie);
                        continue;
                }

                if (status) {
                        if (status == -ECANCELED)
                                /* a packet was flushed */
                                flushing[if_idx] = true;

                        vif->net_stats.tx_errors++;

                        if (status != -ENOSPC && status != -ECANCELED)
                                ath6kl_warn("tx complete error: %d\n", status);

                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "error!");
                } else {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "OK");

                        flushing[if_idx] = false;
                        vif->net_stats.tx_packets++;
                        vif->net_stats.tx_bytes += skb->len;
                }

                ath6kl_tx_clear_node_map(vif, eid, map_no);

                ath6kl_free_cookie(ar, ath6kl_cookie);

                if (test_bit(NETQ_STOPPED, &vif->flags))
                        clear_bit(NETQ_STOPPED, &vif->flags);
        }

        spin_unlock_bh(&ar->lock);

        __skb_queue_purge(&skb_queue);

        /* FIXME: Locking */
        spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (test_bit(CONNECTED, &vif->flags) &&
                    !flushing[vif->fw_vif_idx]) {
                        spin_unlock_bh(&ar->list_lock);
                        netif_wake_queue(vif->ndev);
                        spin_lock_bh(&ar->list_lock);
                }
        }
        spin_unlock_bh(&ar->list_lock);

        if (wake_event)
                wake_up(&ar->event_wq);

        return;

fatal:
        WARN_ON(1);
        spin_unlock_bh(&ar->lock);
        return;
}

void ath6kl_tx_data_cleanup(struct ath6kl *ar)
{
        int i;

        /* flush all the data (non-control) streams */
        for (i = 0; i < WMM_NUM_AC; i++)
                ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
                                      ATH6KL_DATA_PKT_TAG);
}

/* Rx functions */

static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
                                              struct sk_buff *skb)
{
        if (!skb)
                return;

        skb->dev = dev;

        if (!(skb->dev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }

        skb->protocol = eth_type_trans(skb, skb->dev);

        netif_rx_ni(skb);
}

static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
{
        struct sk_buff *skb;

        while (num) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb) {
                        ath6kl_err("netbuf allocation failed\n");
                        return;
                }
                skb_queue_tail(q, skb);
                num--;
        }
}

static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
{
        struct sk_buff *skb = NULL;

        if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
                ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

        skb = skb_dequeue(&p_aggr->free_q);

        return skb;
}

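/*
 * Summary comment (derived from the code below): top up the rx buffer pool
 * for an endpoint back to ATH6KL_MAX_RX_BUFFERS. The struct htc_packet
 * bookkeeping lives in each skb's headroom (skb->head), and skb->data is
 * aligned to a 4-byte boundary before the buffer is handed to HTC.
 */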
void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb;
        int rx_buf;
        int n_buf_refill;
        struct htc_packet *packet;
        struct list_head queue;

        n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
                          ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);

        if (n_buf_refill <= 0)
                return;

        INIT_LIST_HEAD(&queue);

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: providing htc with %d buffers at eid=%d\n",
                   __func__, n_buf_refill, endpoint);

        for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
                skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
                if (!skb)
                        break;

                packet = (struct htc_packet *) skb->head;
                if (!IS_ALIGNED((unsigned long) skb->data, 4))
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                set_htc_rxpkt_info(packet, skb, skb->data,
                                ATH6KL_BUFFER_SIZE, endpoint);
                list_add_tail(&packet->list, &queue);
        }

        if (!list_empty(&queue))
                ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
}

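/*
 * Summary comment (derived from the code below): pre-allocate "count"
 * AMSDU-sized rx buffers onto the shared ar->amsdu_rx_buffer_queue;
 * ath6kl_alloc_amsdu_rxbuf() below hands them out to HTC on demand.
 */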
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
{
        struct htc_packet *packet;
        struct sk_buff *skb;

        while (count) {
                skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
                if (!skb)
                        return;

                packet = (struct htc_packet *) skb->head;
                if (!IS_ALIGNED((unsigned long) skb->data, 4))
                        skb->data = PTR_ALIGN(skb->data - 4, 4);
                set_htc_rxpkt_info(packet, skb, skb->data,
                                   ATH6KL_AMSDU_BUFFER_SIZE, 0);
                spin_lock_bh(&ar->lock);
                list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
                spin_unlock_bh(&ar->lock);
                count--;
        }
}

/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
                                            enum htc_endpoint_id endpoint,
                                            int len)
{
        struct ath6kl *ar = target->dev->ar;
        struct htc_packet *packet = NULL;
        struct list_head *pkt_pos;
        int refill_cnt = 0, depth = 0;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
                   __func__, endpoint, len);

        if ((len <= ATH6KL_BUFFER_SIZE) ||
            (len > ATH6KL_AMSDU_BUFFER_SIZE))
                return NULL;

        spin_lock_bh(&ar->lock);

        if (list_empty(&ar->amsdu_rx_buffer_queue)) {
                spin_unlock_bh(&ar->lock);
                refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
                goto refill_buf;
        }

        packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
                                  struct htc_packet, list);
        list_del(&packet->list);
        list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
                depth++;

        refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
        spin_unlock_bh(&ar->lock);

        /* set actual endpoint ID */
        packet->endpoint = endpoint;

refill_buf:
        if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
                ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

        return packet;
}

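/*
 * Summary comment (derived from the code below): split a received A-MSDU
 * into its 802.3 subframes. Each subframe is length-checked, copied into a
 * fresh skb from the aggregation free list, converted back to DIX format
 * and queued on the TID's reorder queue. Subframes are padded to 4-byte
 * boundaries within the aggregate.
 */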
static void aggr_slice_amsdu(struct aggr_info *p_aggr,
                             struct rxtid *rxtid, struct sk_buff *skb)
{
        struct sk_buff *new_skb;
        struct ethhdr *hdr;
        u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
        u8 *framep;

        mac_hdr_len = sizeof(struct ethhdr);
        framep = skb->data + mac_hdr_len;
        amsdu_len = skb->len - mac_hdr_len;

        while (amsdu_len > mac_hdr_len) {
                hdr = (struct ethhdr *) framep;
                payload_8023_len = ntohs(hdr->h_proto);

                if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
                    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
                        ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
                                   payload_8023_len);
                        break;
                }

                frame_8023_len = payload_8023_len + mac_hdr_len;
                new_skb = aggr_get_free_skb(p_aggr);
                if (!new_skb) {
                        ath6kl_err("no buffer available\n");
                        break;
                }

                memcpy(new_skb->data, framep, frame_8023_len);
                skb_put(new_skb, frame_8023_len);
                if (ath6kl_wmi_dot3_2_dix(new_skb)) {
                        ath6kl_err("dot3_2_dix error\n");
                        dev_kfree_skb(new_skb);
                        break;
                }

                skb_queue_tail(&rxtid->q, new_skb);

                /* Is this the last subframe within this aggregate? */
                if ((amsdu_len - frame_8023_len) == 0)
                        break;

                /*
                 * Add the length of the A-MSDU subframe padding bytes -
                 * round to nearest word.
                 */
                frame_8023_len = ALIGN(frame_8023_len, 4);

                framep += frame_8023_len;
                amsdu_len -= frame_8023_len;
        }

        dev_kfree_skb(skb);
}

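/*
 * Summary comment (derived from the code below): release in-order frames
 * from the TID's reorder hold queue to the network stack, starting at
 * seq_next and stopping at seq_no (or, when seq_no is 0, at the end of the
 * current window). With order == 1 the walk stops at the first hole
 * instead of counting it.
 */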
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
                            u16 seq_no, u8 order)
{
        struct sk_buff *skb;
        struct rxtid *rxtid;
        struct skb_hold_q *node;
        u16 idx, idx_end, seq_end;
        struct rxtid_stats *stats;

        if (!p_aggr)
                return;

        rxtid = &p_aggr->rx_tid[tid];
        stats = &p_aggr->stat[tid];

        idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

        /*
         * idx_end is typically the last possible frame in the window,
         * but changes to the given seq_no when a BAR comes. If seq_no
         * is non-zero, we will go up to that and stop.
         * Note: the last seq no in the current window will occupy the
         * same index position as the index just previous to the start.
         * An important point: if win_sz is 7, for a seq_no space of
         * 4095, there would be holes when sequence wrap-around occurs.
         * The target should judiciously choose win_sz based on this
         * condition. For 4095, TID_WINDOW_SZ = 2 x win_sz, and a win_sz
         * of 2, 4, 8 or 16 works fine.
         * We must dequeue from "idx" to "idx_end", including both.
         */
        seq_end = seq_no ? seq_no : rxtid->seq_next;
        idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

        spin_lock_bh(&rxtid->lock);

        do {
                node = &rxtid->hold_q[idx];
                if ((order == 1) && (!node->skb))
                        break;

                if (node->skb) {
                        if (node->is_amsdu)
                                aggr_slice_amsdu(p_aggr, rxtid, node->skb);
                        else
                                skb_queue_tail(&rxtid->q, node->skb);
                        node->skb = NULL;
                } else
                        stats->num_hole++;

                rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
                idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
        } while (idx != idx_end);

        spin_unlock_bh(&rxtid->lock);

        stats->num_delivered += skb_queue_len(&rxtid->q);

        while ((skb = skb_dequeue(&rxtid->q)))
                ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}

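/*
 * Summary comment (derived from the code below): core rx reorder logic for
 * one TID. In-window frames are parked in hold_q at
 * AGGR_WIN_IDX(seq_no, hold_q_sz); an out-of-window frame first forces the
 * window forward. A hedged worked example, assuming AGGR_WIN_IDX is a
 * plain modulo (as its use here suggests): with hold_q_sz = 8 and
 * seq_next = 4090, the window spans sequence numbers 4090..1 (mod 4096),
 * so a frame with seq_no 4093 is parked at index 4093 % 8 = 5. Returns
 * true if the frame was consumed by the aggregation code.
 */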
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
                                  u16 seq_no,
                                  bool is_amsdu, struct sk_buff *frame)
{
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        struct sk_buff *skb;
        struct skb_hold_q *node;
        u16 idx, st, cur, end;
        bool is_queued = false;
        u16 extended_end;

        rxtid = &agg_info->rx_tid[tid];
        stats = &agg_info->stat[tid];

        stats->num_into_aggr++;

        if (!rxtid->aggr) {
                if (is_amsdu) {
                        aggr_slice_amsdu(agg_info, rxtid, frame);
                        is_queued = true;
                        stats->num_amsdu++;
                        while ((skb = skb_dequeue(&rxtid->q)))
                                ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
                                                                  skb);
                }
                return is_queued;
        }

        /* Check the incoming sequence no, if it's in the window */
        st = rxtid->seq_next;
        cur = seq_no;
        end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

        if (((st < end) && (cur < st || cur > end)) ||
            ((st > end) && (cur > end) && (cur < st))) {
                extended_end = (end + rxtid->hold_q_sz - 1) &
                        ATH6KL_MAX_SEQ_NO;

                if (((end < extended_end) &&
                     (cur < end || cur > extended_end)) ||
                    ((end > extended_end) && (cur > extended_end) &&
                     (cur < end))) {
                        aggr_deque_frms(agg_info, tid, 0, 0);
                        if (cur >= rxtid->hold_q_sz - 1)
                                rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
                        else
                                rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
                                                  (rxtid->hold_q_sz - 2 - cur);
                } else {
                        /*
                         * Dequeue only those frames that are outside the
                         * new shifted window.
                         */
                        if (cur >= rxtid->hold_q_sz - 1)
                                st = cur - (rxtid->hold_q_sz - 1);
                        else
                                st = ATH6KL_MAX_SEQ_NO -
                                        (rxtid->hold_q_sz - 2 - cur);

                        aggr_deque_frms(agg_info, tid, st, 0);
                }

                stats->num_oow++;
        }

        idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

        node = &rxtid->hold_q[idx];

        spin_lock_bh(&rxtid->lock);

        /*
         * Is the current frame a duplicate or something beyond our window
         * (hold_q, which is already 2x the window size)?
         *
         * 1. Duplicate is easy - drop the incoming frame.
         * 2. Not falling in the current sliding window:
         *  2a. Is the frame_seq_no preceding the current tid_seq_no?
         *      -> drop the frame; perhaps the sender did not get our ACK.
         *         This is taken care of above.
         *  2b. Is the frame_seq_no beyond the window (st, TID_WINDOW_SZ)?
         *      -> Taken care of above, by moving the window forward.
         */
        dev_kfree_skb(node->skb);
        stats->num_dups++;

        node->skb = frame;
        is_queued = true;
        node->is_amsdu = is_amsdu;
        node->seq_no = seq_no;

        if (node->is_amsdu)
                stats->num_amsdu++;
        else
                stats->num_mpdu++;

        spin_unlock_bh(&rxtid->lock);

        aggr_deque_frms(agg_info, tid, 0, 1);

        if (agg_info->timer_scheduled)
                rxtid->progress = true;
        else
                for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
                        if (rxtid->hold_q[idx].skb) {
                                /*
                                 * There is a frame in the queue and no
                                 * timer so start a timer to ensure that
                                 * the frame doesn't remain stuck
                                 * forever.
                                 */
                                agg_info->timer_scheduled = true;
                                mod_timer(&agg_info->timer,
                                          (jiffies +
                                           HZ * (AGGR_RX_TIMEOUT) / 1000));
                                rxtid->progress = false;
                                rxtid->timer_mon = true;
                                break;
                        }
                }

        return is_queued;
}

static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
                                                 struct ath6kl_sta *conn)
{
        struct ath6kl *ar = vif->ar;
        bool is_apsdq_empty, is_apsdq_empty_at_start;
        u32 num_frames_to_deliver, flags;
        struct sk_buff *skb = NULL;

        /*
         * If the APSD q for this STA is not empty, dequeue and
         * send a pkt from the head of the q. Also update the
         * More data bit in the WMI_DATA_HDR if there are
         * more pkts for this STA in the APSD q.
         * If there are no more pkts for this STA,
         * update the APSD bitmap for this STA.
         */

        num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
                                                    ATH6KL_APSD_FRAME_MASK;
        /*
         * The number of frames to send in a service period is
         * indicated by the station in the QOS_INFO of the
         * association request. If it is zero, send all frames.
         */
        if (!num_frames_to_deliver)
                num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;

        spin_lock_bh(&conn->psq_lock);
        is_apsdq_empty = skb_queue_empty(&conn->apsdq);
        spin_unlock_bh(&conn->psq_lock);
        is_apsdq_empty_at_start = is_apsdq_empty;

        while ((!is_apsdq_empty) && (num_frames_to_deliver)) {

                spin_lock_bh(&conn->psq_lock);
                skb = skb_dequeue(&conn->apsdq);
                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
                spin_unlock_bh(&conn->psq_lock);

                /*
                 * Set the STA flag to trigger delivery,
                 * so that the frame will go out.
                 */
                conn->sta_flags |= STA_PS_APSD_TRIGGER;
                num_frames_to_deliver--;

                /*
                 * Set EOSP on the last frame in the service period or
                 * when the queue has been emptied.
                 */
                if ((is_apsdq_empty) || (!num_frames_to_deliver))
                        conn->sta_flags |= STA_PS_APSD_EOSP;

                ath6kl_data_tx(skb, vif->ndev);
                conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
                conn->sta_flags &= ~(STA_PS_APSD_EOSP);
        }

        if (is_apsdq_empty) {
                if (is_apsdq_empty_at_start)
                        flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
                else
                        flags = 0;

                ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
                                vif->fw_vif_idx,
                                conn->aid, 0, flags);
        }
}

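/*
 * Summary comment (derived from the code below): main HTC rx-complete
 * handler. It strips the HTC and WMI headers, routes control-endpoint
 * traffic to the WMI layer, tracks AP-mode station power-save and uAPSD
 * trigger state, converts 802.3/802.11 headers back to DIX, handles
 * intra-BSS forwarding, and finally pushes the frame into the
 * reorder/aggregation path or straight up the stack.
 */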
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
        struct ath6kl *ar = target->dev->ar;
        struct sk_buff *skb = packet->pkt_cntxt;
        struct wmi_rx_meta_v2 *meta;
        struct wmi_data_hdr *dhdr;
        int min_hdr_len;
        u8 meta_type, dot11_hdr = 0;
        int status = packet->status;
        enum htc_endpoint_id ept = packet->endpoint;
        bool is_amsdu, prev_ps, ps_state = false;
        bool trig_state = false;
        struct ath6kl_sta *conn = NULL;
        struct sk_buff *skb1 = NULL;
        struct ethhdr *datap = NULL;
        struct ath6kl_vif *vif;
        u16 seq_no, offset;
        u8 tid, if_idx;

        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
                   __func__, ar, ept, skb, packet->buf,
                   packet->act_len, status);

        if (status || !(skb->data + HTC_HDR_LENGTH)) {
                dev_kfree_skb(skb);
                return;
        }

        skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
        skb_pull(skb, HTC_HDR_LENGTH);

        if (ept == ar->ctrl_ep) {
                if_idx =
                wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
        } else {
                if_idx =
                wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
        }

        vif = ath6kl_get_vif_by_index(ar, if_idx);
        if (!vif) {
                dev_kfree_skb(skb);
                return;
        }

        /*
         * Take lock to protect buffer counts and adaptive power throughput
         * state.
         */
        spin_lock_bh(&vif->if_lock);

        vif->net_stats.rx_packets++;
        vif->net_stats.rx_bytes += packet->act_len;

        spin_unlock_bh(&vif->if_lock);

        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
                        skb->data, skb->len);

        skb->dev = vif->ndev;

        if (!test_bit(WMI_ENABLED, &ar->flag)) {
                if (EPPING_ALIGNMENT_PAD > 0)
                        skb_pull(skb, EPPING_ALIGNMENT_PAD);
                ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
                return;
        }

        ath6kl_check_wow_status(ar);

        if (ept == ar->ctrl_ep) {
                ath6kl_wmi_control_rx(ar->wmi, skb);
                return;
        }

        min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
                      sizeof(struct ath6kl_llc_snap_hdr);

        dhdr = (struct wmi_data_hdr *) skb->data;

        /*
         * In the case of AP mode we may receive NULL data frames
         * that do not have an LLC hdr. They are 16 bytes in size.
         * Allow these frames in the AP mode.
         */
        if (vif->nw_type != AP_NETWORK &&
            ((packet->act_len < min_hdr_len) ||
             (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
                ath6kl_info("frame len is too short or too long\n");
                vif->net_stats.rx_errors++;
                vif->net_stats.rx_length_errors++;
                dev_kfree_skb(skb);
                return;
        }

1346         /* Get the Power save state of the STA */
1347         if (vif->nw_type == AP_NETWORK) {
1348                 meta_type = wmi_data_hdr_get_meta(dhdr);
1349
1350                 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1351                               WMI_DATA_HDR_PS_MASK);
1352
1353                 offset = sizeof(struct wmi_data_hdr);
1354                 trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
1355
1356                 switch (meta_type) {
1357                 case 0:
1358                         break;
1359                 case WMI_META_VERSION_1:
1360                         offset += sizeof(struct wmi_rx_meta_v1);
1361                         break;
1362                 case WMI_META_VERSION_2:
1363                         offset += sizeof(struct wmi_rx_meta_v2);
1364                         break;
1365                 default:
1366                         break;
1367                 }
1368
1369                 datap = (struct ethhdr *) (skb->data + offset);
1370                 conn = ath6kl_find_sta(vif, datap->h_source);
1371
1372                 if (!conn) {
1373                         dev_kfree_skb(skb);
1374                         return;
1375                 }
1376
1377                 /*
1378                  * If there is a change in PS state of the STA,
1379                  * take appropriate steps:
1380                  *
1381                  * 1. If Sleep-->Awake, flush the psq for the STA
1382                  *    Clear the PVB for the STA.
1383                  * 2. If Awake-->Sleep, Starting queueing frames
1384                  *    the STA.
1385                  */
1386                 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1387
1388                 if (ps_state)
1389                         conn->sta_flags |= STA_PS_SLEEP;
1390                 else
1391                         conn->sta_flags &= ~STA_PS_SLEEP;
1392
1393                 /* Accept trigger only when the station is in sleep */
1394                 if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1395                         ath6kl_uapsd_trigger_frame_rx(vif, conn);
1396
1397                 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1398                         if (!(conn->sta_flags & STA_PS_SLEEP)) {
1399                                 struct sk_buff *skbuff = NULL;
1400                                 bool is_apsdq_empty;
1401
1402                                 spin_lock_bh(&conn->psq_lock);
1403                                 while ((skbuff = skb_dequeue(&conn->psq))) {
1404                                         spin_unlock_bh(&conn->psq_lock);
1405                                         ath6kl_data_tx(skbuff, vif->ndev);
1406                                         spin_lock_bh(&conn->psq_lock);
1407                                 }
1408
				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
				while ((skbuff = skb_dequeue(&conn->apsdq))) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, vif->ndev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);

				if (!is_apsdq_empty)
					ath6kl_wmi_set_apsd_bfrd_traf(
							ar->wmi,
							vif->fw_vif_idx,
							conn->aid, 0, 0);

				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       conn->aid, 0);
			}
		}

		/*
		 * Drop NULL data frames and anything larger than the
		 * maximum A-MSDU RX frame length here.
		 */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
	skb_pull(skb, sizeof(struct wmi_data_hdr));

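	/*
	 * Strip the optional RX meta-data that follows the WMI data
	 * header; version 2 meta-data carries checksum offload results
	 * which are propagated to the skb.
	 */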
	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(vif->ndev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (vif->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest)) {
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		} else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			conn = ath6kl_find_sta(vif, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, vif->ndev);

		/* nothing to deliver up the stack */
		if (!skb)
			return;
	}

	datap = (struct ethhdr *) skb->data;

	if (is_unicast_ether_addr(datap->h_dest) &&
	    aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
				  is_amsdu, skb))
		/* aggregation code will handle the skb */
		return;

	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}

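/*
 * Periodic flush for the RX reorder queues: release any TID whose
 * window made no progress since the last run, then re-arm the timer
 * only while frames are still being held for reordering.
 */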
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_dbg(ATH6KL_DBG_AGGR,
			   "aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz - 1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}

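/*
 * Tear down the reorder state of one TID: deliver whatever is still
 * queued in order, free the hold queue and zero the statistics.
 */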
static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (rxtid->aggr)
		aggr_deque_frms(p_aggr, tid, 0, 0);

	rxtid->aggr = false;
	rxtid->progress = false;
	rxtid->timer_mon = false;
	rxtid->win_sz = 0;
	rxtid->seq_next = 0;
	rxtid->hold_q_sz = 0;

	kfree(rxtid->hold_q);
	rxtid->hold_q = NULL;

	memset(stats, 0, sizeof(struct rxtid_stats));
}

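/*
 * An ADDBA request from the peer (re)establishes the RX reorder
 * window for a TID: any previous state is torn down, a hold queue
 * sized to the negotiated window is allocated, and reordering starts
 * from the sequence number carried in the request.
 */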
void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
			     u8 win_sz)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	u16 hold_q_size;

	if (!p_aggr)
		return;

	/* tid comes from the firmware; never index past rx_tid[] */
	if (tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
			   __func__, win_sz, tid);

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);

	rxtid->seq_next = seq_no;
	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
	if (!rxtid->hold_q)
		return;

	rxtid->win_sz = win_sz;
	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
	if (!skb_queue_empty(&rxtid->q))
		return;

	rxtid->aggr = true;
}

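/*
 * Allocate and initialise the per-vif aggregation context: the free
 * netbuf pool, the per-TID reorder queues and the flush timer.
 */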
struct aggr_info *aggr_init(struct net_device *dev)
{
	struct aggr_info *p_aggr;
	struct rxtid *rxtid;
	u8 i;

	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
	if (!p_aggr) {
		ath6kl_err("failed to alloc memory for aggr_node\n");
		return NULL;
	}

	p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
	p_aggr->dev = dev;
	init_timer(&p_aggr->timer);
	p_aggr->timer.function = aggr_timeout;
	p_aggr->timer.data = (unsigned long) p_aggr;

	p_aggr->timer_scheduled = false;
	skb_queue_head_init(&p_aggr->free_q);

	ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		rxtid->aggr = false;
		rxtid->progress = false;
		rxtid->timer_mon = false;
		skb_queue_head_init(&rxtid->q);
		spin_lock_init(&rxtid->lock);
	}

	return p_aggr;
}

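/* A DELBA request from the peer simply tears down the TID state */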
void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
	struct aggr_info *p_aggr = vif->aggr_cntxt;
	struct rxtid *rxtid;

	/* tid comes from the firmware; never index past rx_tid[] */
	if (!p_aggr || tid >= NUM_OF_TIDS)
		return;

	rxtid = &p_aggr->rx_tid[tid];

	if (rxtid->aggr)
		aggr_delete_tid_state(p_aggr, tid);
}

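/* Flush and reset the reorder state of every TID, e.g. on disconnect */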
void aggr_reset_state(struct aggr_info *aggr_info)
{
	u8 tid;

	for (tid = 0; tid < NUM_OF_TIDS; tid++)
		aggr_delete_tid_state(aggr_info, tid);
}

/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

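	/*
	 * Each packet is unlinked before the lock is dropped, so the
	 * list stays consistent while the skb is freed outside of it.
	 */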
	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}

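/*
 * Free the aggregation context: stop the flush timer, release every
 * skb still held for reordering and drop the free netbuf pool.
 */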
void aggr_module_destroy(struct aggr_info *aggr_info)
{
	struct rxtid *rxtid;
	u8 i, k;

	if (!aggr_info)
		return;

	if (aggr_info->timer_scheduled) {
		del_timer(&aggr_info->timer);
		aggr_info->timer_scheduled = false;
	}

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &aggr_info->rx_tid[i];
		if (rxtid->hold_q) {
			for (k = 0; k < rxtid->hold_q_sz; k++)
				dev_kfree_skb(rxtid->hold_q[k].skb);
			kfree(rxtid->hold_q);
		}

		skb_queue_purge(&rxtid->q);
	}

	skb_queue_purge(&aggr_info->free_q);
	kfree(aggr_info);
}