/******************************************************************************
 *
 * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>

#include "iwl-trans.h"
#include "iwl-agn-hw.h"
/* dvm driver-local headers (iwl_priv, REPLY_TX definitions) */
#include "dev.h"
#include "agn.h"
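
/*
 * Standard 802.11 TID (0-7) to access-category mapping, used below to pick
 * the AC queue/FIFO for a given traffic identifier.
 */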
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
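
/*
 * Fill the rate and retry-limit fields of the TX command: pick RTS/data
 * retry limits, let the uCode station table choose the rate for data
 * frames, and otherwise derive a legacy PLCP rate and antenna mask
 * (used mainly for management frames).
 */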
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     struct ieee80211_sta *sta,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
		if (priv->tm_fixed_rate) {
			/*
			 * Rate override requested via testmode: besides
			 * sending the LQ command to change the rate, also
			 * enforce it on a per-data-packet basis.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(
				&priv->nvm_data->bands[info->band], sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->nvm_data->valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
					priv, priv->mgmt_tx_ant,
					priv->nvm_data->valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
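
/*
 * Copy the hardware-crypto parameters (cipher type and key material) from
 * the mac80211 key configuration into the TX command's security fields.
 */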
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * Send this frame after DTIM -- there's a special queue
		 * reserved for this for contexts that support AP mode.
		 */
		txq_id = ctx->mcast_queue;

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		iwl_scan_offchannel_skb(priv);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}
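
/*
 * Aggregation queues are taken from the agg_q_alloc bitmap starting at
 * IWLAGN_FIRST_AMPDU_QUEUE; queue_to_mac80211[] records which mac80211
 * queue each allocated hardware queue serves.
 */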
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < priv->cfg->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_mac80211[q] = mq;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}
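
/*
 * Tear down the aggregation session for <sta, tid>. Depending on the
 * current aggregation state this either turns the session off right away
 * or marks it as emptying and waits for the hardware queue to drain.
 */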
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive the ADDBA response.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);

turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it that we want to stop it.
		 * This can happen when we don't get the addBA response on
		 * time, or we didn't have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
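
/*
 * Start an aggregation session for <sta, tid>: reserve a hardware queue,
 * record the starting sequence number, and either report the session as
 * starting immediately or wait until the pending frames for this TID have
 * been reclaimed.
 */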
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "No free aggregation queue for %pM/%d\n",
				    sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_bh(&priv->sta_lock);

	return ret;
}
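
/*
 * Called once the ADDBA handshake has completed: enable the aggregation
 * queue in the transport, clamp the reorder-buffer size to what the uCode
 * allows per station, and push the updated link-quality command to the
 * device.
 */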
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
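
/*
 * Check whether a pending ADDBA/DELBA transition for <sta, tid> can make
 * progress now that more frames have been reclaimed: once the hardware
 * queue is drained, advance the aggregation state machine and notify
 * mac80211.
 */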
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d",
				tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
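
/*
 * Non-aggregated frame completion: drop the station's pending-frames
 * counter and, once it reaches zero, tell mac80211 the station is no
 * longer blocked (it may have been blocked while frames for a sleeping
 * client were still in flight).
 */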
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}
/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
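
/*
 * The scheduler SSN is appended by the uCode right after the per-frame
 * status array, so read the 32-bit word just past the last entry.
 */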
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}
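
/*
 * Handle the aggregation-queue part of a TX response: remember the rate
 * that was used, decide whether a block-ack is still expected, and for
 * multi-frame responses log the per-frame aggregation status.
 */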
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
				   "got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
			   agg->txq_id,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);
		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
			IWL_DEBUG_TX_REPLY(priv,
					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
					   i,
					   iwl_get_agg_tx_fail_reason(fstatus),
					   fstatus & AGG_TX_STATUS_MSK,
					   retry_cnt);
	}
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}
static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}
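
/*
 * Handler for REPLY_TX: reclaims the transmitted frames from the transport
 * layer, translates the uCode status into mac80211 TX status and hands the
 * skbs back to the stack.
 */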
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
	bool is_offchannel_skb;

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	is_offchannel_skb = false;

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

		iwlagn_check_ratid_empty(priv, sta_id, tid);

		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
						    "passive channel");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			is_offchannel_skb =
				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		/*
		 * An offchannel frame can be sent only on the AUX queue,
		 * where there is no aggregation (and reordering), so only a
		 * single skb is expected to be processed.
		 */
		if (is_offchannel_skb && freed != 1)
			IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
	}

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
			   iwl_get_tx_fail_reason(status), status);

	IWL_DEBUG_TX_REPLY(priv,
			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame,
			   SEQ_TO_INDEX(sequence), ssn,
			   le16_to_cpu(tx_resp->seq_ctl));

	iwl_check_abort_status(priv, tx_resp->frame_count, status);

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status_ni(priv->hw, skb);
	}

	if (is_offchannel_skb)
		iwl_scan_offchannel_skb_status(priv);

	return 0;
}
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed,
		 * log the information and return for now.
		 * Since it can possibly happen very often and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
				   "bogus sent(%d) and ack(%d) count\n",
				   ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed = txed_2_done,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);

	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status_ni(priv->hw, skb);
	}

	return 0;
}