/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX   512

#define WMM_QUEUED_PACKET_LOWER_LIMIT   180

#define WMM_QUEUED_PACKET_UPPER_LIMIT   200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
	0x00, 0x50, 0xf2, 0x02,
	0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
					  WMM_AC_BK,
					  WMM_AC_VI,
					  WMM_AC_VO
};
static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
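/*
 * Illustrative sketch (not part of the driver build): how a packet's
 * 802.1d priority walks through the tables above. Standalone user-space
 * C, assuming the default tos_to_tid contents shown here; the driver
 * rewrites tos_to_tid from the AP's WMM parameters at runtime.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static const uint8_t demo_tos_to_tid[8] = {
	0x01, 0x02, 0x00, 0x03, 0x04, 0x05, 0x06, 0x07
};

int main(void)
{
	unsigned int prio;

	/* skb->priority (0..7) selects the TID used for queuing */
	for (prio = 0; prio < 8; prio++)
		printf("priority %u -> TID %u\n", prio, demo_tos_to_tid[prio]);
	return 0;
}
#endif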
/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}
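/*
 * Illustrative sketch (not part of the driver build): extracting the
 * ACI/ACM/AIFSN and ECWmin/ECWmax fields the debug print above decodes.
 * The DEMO_* mask values are assumptions matching the layout implied by
 * the shifts (ACI in bits 5-6, ACM in bit 4, AIFSN in bits 0-3; ECWmin
 * in the low nibble, ECWmax in the high nibble).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define DEMO_AIFSN   0x0f	/* bits 0-3 */
#define DEMO_ACM     0x10	/* bit 4 */
#define DEMO_ACI     0x60	/* bits 5-6 */
#define DEMO_ECW_MIN 0x0f	/* low nibble */
#define DEMO_ECW_MAX 0xf0	/* high nibble */

int main(void)
{
	uint8_t aci_aifsn = 0x42;	/* sample: ACI=2, ACM=0, AIFSN=2 */
	uint8_t ecw = 0xa4;		/* sample: ECWmax=10, ECWmin=4 */

	printf("ACI=%u ACM=%u AIFSN=%u ECWmin=%u ECWmax=%u\n",
	       (aci_aifsn & DEMO_ACI) >> 5,
	       (aci_aifsn & DEMO_ACM) >> 4,
	       aci_aifsn & DEMO_AIFSN,
	       ecw & DEMO_ECW_MIN,
	       (ecw & DEMO_ECW_MAX) >> 4);
	return 0;
}
#endif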
/*
 * This function allocates a route address list.
 *
 * The function also initializes the list with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}
/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u32 sec, usec;
	struct timeval ba_tstamp;
	u8 ba_threshold;

	/* setup ba_packet_threshold here random number between
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
	 */
	do_gettimeofday(&ba_tstamp);
	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
						      + BA_SETUP_PACKET_OFFSET;

	return ba_threshold;
}
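/*
 * Illustrative sketch (not part of the driver build): the timestamp
 * folding used above, in standalone form. The DEMO_* offset/modulus
 * values are assumptions; the driver takes them from
 * BA_SETUP_PACKET_OFFSET and BA_SETUP_MAX_PACKET_THRESHOLD.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define DEMO_BA_OFFSET        16	/* assumed lower bound */
#define DEMO_BA_MAX_THRESHOLD 16	/* assumed range width */

int main(void)
{
	struct timeval tv;
	uint32_t sec, usec, thr;

	gettimeofday(&tv, NULL);
	/* fold the 32-bit fields so all timestamp bits affect the result */
	sec = (tv.tv_sec & 0xFFFF) + (tv.tv_sec >> 16);
	usec = (tv.tv_usec & 0xFFFF) + (tv.tv_usec >> 16);
	thr = (((sec << 16) + usec) % DEMO_BA_MAX_THRESHOLD) + DEMO_BA_OFFSET;

	printf("ba_packet_thr = %u\n", thr);	/* in [16, 31] here */
	return 0;
}
#endif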
/*
 * This function allocates and adds a RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
	node = mwifiex_get_sta_entry(priv, ra);
	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_get_tdls_link_status(priv, ra) ==
			    TDLS_SETUP_COMPLETE) {
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			ra_list->is_11n_enabled =
				      mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
		}

		dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
			ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}
/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}
/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		dev_dbg(priv->adapter->dev,
			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort by average back-off; on ties, keep the higher AC first */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}
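/*
 * Illustrative sketch (not part of the driver build): the contention
 * window / average back-off arithmetic used in the loop above.
 * CWmin = 2^ECWmin - 1 and the sort key is CWmin/2 + AIFSN.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int ecw_min = 4, aifsn = 2;		/* sample values */
	unsigned int cw_min = (1u << ecw_min) - 1;	/* 15 */
	unsigned int avg_back_off = (cw_min >> 1) + aifsn;	/* 9 */

	printf("CWmin=%u avg_back_off=%u\n", cw_min, avg_back_off);
	return 0;
}
#endif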
/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * In case the AC is not enabled, the highest AC that is enabled and
 * does not require admission control is returned.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Setup a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 * Find the highest AC that is enabled and does not require
	 * admission control. The spec disallows downgrading to an AC,
	 * which is enabled due to a completed admission control.
	 * Unadmitted traffic is not to be sent on an AC with admitted
	 * traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}
/*
 * This function downgrades WMM priority queue.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities: "
		"BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			dev_dbg(priv->adapter->dev,
				"info: WMM: AC PRIO %d maps to %d\n",
				ac_val,
				priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}
/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
						    WMM_AC_BK,
						    WMM_AC_BK,
						    WMM_AC_BE,
						    WMM_AC_VI,
						    WMM_AC_VI,
						    WMM_AC_VO,
						    WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}
/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * corresponding AC is disabled (due to the call admission control ACM
 * bit). Mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index to tid array; picking from the array will be
	 * taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}
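/*
 * Illustrative sketch (not part of the driver build): the AC-downgrade
 * table lookup above with a sample downgrade map (VO downgraded to BE,
 * everything else untouched). The table contents are assumptions.
 */
#if 0
#include <stdio.h>

enum demo_ac { DEMO_AC_BK, DEMO_AC_BE, DEMO_AC_VI, DEMO_AC_VO };

static const unsigned char demo_ac_to_tid[4][2] = {
	{1, 2}, {0, 3}, {4, 5}, {6, 7}
};

int main(void)
{
	/* sample: admission control forced VO down to BE */
	enum demo_ac down_graded[4] = {
		DEMO_AC_BK, DEMO_AC_BE, DEMO_AC_VI, DEMO_AC_BE
	};
	unsigned int tid = 6;			/* a VO TID */
	enum demo_ac ac = DEMO_AC_VO;		/* the AC TID 6 maps to */
	unsigned int new_tid = demo_ac_to_tid[down_graded[ac]][tid % 2];

	printf("TID %u downgraded to TID %u\n", tid, new_tid);	/* 6 -> 0 */
	return 0;
}
#endif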
/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}
/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}
/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callback handlers are called with
 * failure status after the packets are dequeued, to ensure proper
 * cleanup. The RA list node itself is freed at the end.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
		mwifiex_write_data_complete(adapter, skb, 0, -1);
}
/*
 * This function deletes all packets in an RA list.
 *
 * Each node in the RA list is emptied individually first, and then
 * the RA list itself is freed.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}
/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
					       ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
/*
 * This function deletes all route addresses from all RA lists.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		dev_dbg(priv->adapter->dev,
			"info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}
/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !priv->adapter->surprise_removed)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}
/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}
/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}
/*
 * This function adds a packet to the WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	unsigned long flags;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			dev_dbg(adapter->dev,
				"TDLS setup packet for %pM. Don't block\n", ra);
		else
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In case of infra we have already created the list during
	   association, so there is no need to call get_queue_raptr;
	   there will be only one raptr per TID in case of infra */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       flags);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			if (!list_empty(&list_head))
				ra_list = list_first_entry(
					&list_head, struct mwifiex_ra_list_tbl,
					list);
			else
				ra_list = NULL;
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			memset(ra, 0xff, ETH_ALEN);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
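/*
 * Illustrative sketch (not part of the driver build): the "raise-only"
 * priority update above. Under the ra_list_spinlock the read-compare-set
 * pair cannot race, so highest_queued_prio only moves up on enqueue and
 * is lowered again by the dequeue path.
 */
#if 0
#include <stdio.h>

static int highest_queued_prio;

/* caller is assumed to hold the queue lock, as in the driver */
static void demo_raise_prio(int new_prio)
{
	if (highest_queued_prio < new_prio)
		highest_queued_prio = new_prio;
}

int main(void)
{
	demo_raise_prio(3);
	demo_raise_prio(1);	/* no effect: lower than current */
	printf("%d\n", highest_queued_prio);	/* 3 */
	return 0;
}
#endif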
/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" QSTATUS TLV: %d, %d, %d\n",
				tlv_wmm_qstatus->queue_index,
				tlv_wmm_qstatus->flow_required,
				tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and setup the IEEE IE type and length byte fields
			 */
			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
							WLAN_EID_VENDOR_SPECIFIC;

			dev_dbg(priv->adapter->dev,
				"info: CMD_RESP: WMM_GET_STATUS:"
				" WMM Parameter Set Count: %d\n",
				wmm_param_ie->qos_info_bitmap &
				IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}
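/*
 * Illustrative sketch (not part of the driver build): the bounded
 * type-length-value walk the parser above performs. The header layout
 * (u16 type, u16 len) mirrors the driver's TLV header; the buffer
 * contents are made up, and a little-endian host is assumed in place
 * of le16_to_cpu().
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_tlv_hdr {
	uint16_t type;
	uint16_t len;
};

int main(void)
{
	/* two TLVs: type 1 len 2, then type 2 len 1 */
	uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb, 2, 0, 1, 0, 0xcc };
	uint8_t *curr = buf;
	size_t resp_len = sizeof(buf);

	while (resp_len >= sizeof(struct demo_tlv_hdr)) {
		struct demo_tlv_hdr hdr;

		memcpy(&hdr, curr, sizeof(hdr));
		if (resp_len < hdr.len + sizeof(hdr))
			break;	/* truncated TLV, stop parsing */

		printf("TLV type=%u len=%u\n", hdr.type, hdr.len);
		curr += hdr.len + sizeof(hdr);
		resp_len -= hdr.len + sizeof(hdr);
	}
	return 0;
}
#endif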
/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	dev_dbg(priv->adapter->dev,
		"info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}
/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u8 ret_val;
	struct timeval out_tstamp, in_tstamp;
	u32 queue_delay;

	do_gettimeofday(&out_tstamp);
	in_tstamp = ktime_to_timeval(skb->tstamp);

	queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
	queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
		" %d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}
/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_bss, flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
				  flags_bss);

		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}

		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
				       flags_bss);
	}

	return NULL;

found:
	/* holds bss_prio_lock / ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
			       flags_bss);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}
/* This function rotates the RA and BSS lists so packets are picked in
 * round robin order.
 *
 * After a packet is successfully transmitted, rotate the RA list so that
 * the RA next to the one just served comes first in the list. This way
 * the RAs are picked in a round robin fashion. The same applies to BSS
 * nodes of equal priority.
 *
 * Function also increments wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
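/*
 * Illustrative sketch (not part of the driver build): the "move the
 * head" rotation trick above, on a plain circular doubly-linked list.
 * Unlinking the head and reinserting it after the current node makes
 * the node following 'cur' the new logical front without touching any
 * element.
 */
#if 0
#include <stdio.h>

struct demo_node {
	int val;			/* -1 marks the list head */
	struct demo_node *prev, *next;
};

static void demo_list_move(struct demo_node *node, struct demo_node *after)
{
	/* unlink */
	node->prev->next = node->next;
	node->next->prev = node->prev;
	/* relink right after 'after' */
	node->next = after->next;
	node->prev = after;
	after->next->prev = node;
	after->next = node;
}

int main(void)
{
	struct demo_node head = { -1 }, a = { 1 }, b = { 2 }, c = { 3 };
	struct demo_node *n;

	/* build head <-> a <-> b <-> c (circular) */
	head.next = &a; a.prev = &head;
	a.next = &b; b.prev = &a;
	b.next = &c; c.prev = &b;
	c.next = &head; head.prev = &c;

	demo_list_move(&head, &b);	/* 'b' just transmitted */

	for (n = head.next; n != &head; n = n->next)
		printf("%d ", n->val);	/* prints: 3 1 2 */
	printf("\n");
	return 0;
}
#endif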
/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}
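/*
 * Illustrative sketch (not part of the driver build): the queue scan
 * above as a standalone check. Aggregation is worthwhile once at least
 * min_pkts packets fit before the accumulated size reaches the A-MSDU
 * limit; the sample numbers are made up.
 */
#if 0
#include <stdio.h>

static int demo_amsdu_possible(const int *pkt_len, int n,
			       int max_amsdu, int min_pkts)
{
	int i, count = 0, total = 0;

	for (i = 0; i < n; i++) {
		total += pkt_len[i];
		if (total >= max_amsdu)
			break;
		if (++count >= min_pkts)
			return 1;
	}
	return 0;
}

int main(void)
{
	int lens[] = { 400, 500, 300 };

	printf("%d\n", demo_amsdu_possible(lens, 3, 4096, 2));	/* 1 */
	printf("%d\n", demo_amsdu_possible(lens, 3, 700, 2));	/* 0 */
	return 0;
}
#endif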
/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				 sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Transmission failed: queue the packet back */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}
/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}
/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}
/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	dev_dbg(adapter->dev, "data: tid=%d\n", tid);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
		    mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}

	return 0;
}
/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		/* Check if busy */
		if (adapter->data_sent || adapter->tx_lock_flag)
			break;

		if (mwifiex_dequeue_tx_packet(adapter))
			break;
	} while (!mwifiex_wmm_lists_empty(adapter));
}