/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
void wl1271_free_tx_id(struct wl1271 *wl, int id)
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
EXPORT_SYMBOL(wl1271_free_tx_id);
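/*
 * Editor's note (not in the original source): wl->tx_frames_map is a
 * bitmap with one bit per hardware Tx descriptor (wl->num_tx_desc).
 * wl1271_alloc_tx_id() claims a free slot and records the skb in
 * wl->tx_frames[id]; wl1271_free_tx_id() releases the slot when the
 * frame completes or the queues are reset.  Clearing
 * WL1271_FLAG_FW_TX_BUSY when the map was completely full lets the Tx
 * work path resume as soon as a descriptor frees up.
 */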
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
	struct ieee80211_hdr *hdr;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response, so that the FW doesn't de-auth it
	 * when we transmit to it too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
	bool fw_ps, single_link;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_link = (wl->active_link_count == 1);

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 */
	if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
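/*
 * Editor's note (not in the original source): a link is regulated only
 * when all three conditions hold - the station is in FW power-save, at
 * least WL1271_PS_STA_MAX_PACKETS of its packets are already allocated
 * in FW memory, and it is not the only active link.  Moving such a link
 * into high-level PS frees FW memory that would otherwise be held by a
 * sleeping peer.
 */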
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
	return wl->dummy_packet == skb;
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
	struct wl1271_station *wl_sta;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;

	return wlvif->ap.global_hlid;
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;

	return wlvif->sta.hlid;
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);

	return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
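/*
 * Editor's note (worked example, not in the original source; the
 * constant values are assumptions): if WL1271_TX_ALIGN_TO is 4, a
 * 123-byte packet is padded to ALIGN(123, 4) == 124 bytes.  On chips
 * that use block-size alignment (WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN set
 * and not padding only the last frame), the same packet is padded to a
 * full bus block instead, e.g. ALIGN(123, 512) == 512 if
 * WL12XX_BUS_BLOCK_SIZE is 512.
 */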
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	int id, ret = -EBUSY, ac;

	if (buf_offset + total_len > wl->aggr_buf_size)

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);

		wl1271_free_tx_id(wl, id);
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
	struct wl1271_tx_hw_descr *desc;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);

	/* configure packet life time */
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;

		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	if (is_dummy || !wlvif)
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets, send them with the AP
		 * rate policies (EAPOLs are an exception); otherwise use
		 * the default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;

		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * AP broadcast rate policy
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
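/*
 * Editor's note (layout sketch, not in the original source): the skb
 * handed to wl1271_tx_allocate() gets "extra" reserved bytes pushed in
 * front of the frame together with the HW descriptor;
 * wl1271_tx_fill_hdr() then memmove()s the 802.11 header up so the gap
 * ends up between the header and the payload:
 *
 *   [ wl1271_tx_hw_descr ][ 802.11 header ][ extra (security hdr space) ][ payload ]
 *
 * "extra" is non-zero only for ciphers such as TKIP
 * (WL1271_EXTRA_SPACE_TKIP) that need room for a firmware-inserted
 * security header.
 */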
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
	struct ieee80211_tx_info *info;

		wl1271_error("discarding null skb");

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);

			wlvif->default_key = idx;

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (e.g. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
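	/*
	 * Editor's note (not in the original source): the aggregation
	 * buffer therefore holds back-to-back, alignment-padded frames:
	 *
	 *   aggr_buf: [ descr+frame (padded) ][ descr+frame (padded) ] ...
	 *              ^buf_offset for pkt 0   ^buf_offset for pkt 1
	 *
	 * The caller advances buf_offset by the padded length of each
	 * packet and eventually writes the whole buffer to the device in
	 * a single bus transaction (see wlcore_tx_work_locked()).
	 */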
	/* Revert side effects in the dummy packet skb, so it can be reused */
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
			enabled_rates |= band->bitrates[bit].hw_value;

	/* MCS rates are indicated on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);

	return enabled_rates;
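/*
 * Editor's note (not in the original source): rate_set is a bitmap in
 * mac80211 order - legacy rates first (one bit per entry in
 * band->bitrates, translated through hw_value), followed by HT MCS
 * rates starting at bit HW_HT_RATES_OFFSET.  The shift above drops the
 * legacy part so that MCS0..MCS15 can be mapped onto
 * CONF_HW_BIT_RATE_MCS_0 and up in the returned hardware rate mask.
 */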
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
static int wlcore_select_ac(struct wl1271 *wl)
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the fewest allocated packets
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
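	/*
	 * Editor's note (not in the original source): because the
	 * comparison is strict ("< min_pkts"), ties are resolved in
	 * favour of the AC examined first; assuming wl1271_tx_get_queue()
	 * walks the ACs in priority order, that is what produces the
	 * VO>VI>BE>BK behaviour described above.
	 */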
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
	skb = skb_dequeue(&lnk->tx_queue[q]);
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

	return wlcore_lnk_dequeue(wl, lnk, ac);
static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,

			wlvif->last_tx_hlid = h;

		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);

	/* continue from last wlvif (round robin) */
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,

				wl->last_wlvif = wlvif;

	/* dequeue from the system HLID before restarting the wlvif list */
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,

				wl->last_wlvif = wlvif;

			if (wlvif == wl->last_wlvif)

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);

		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
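/*
 * Editor's note (not in the original source): rewinding last_tx_hlid to
 * the link *before* hlid (a decrement modulo WL12XX_MAX_LINKS) means
 * the round-robin in wlcore_vif_dequeue_high_prio() starts from hlid
 * again on the next pass, so a frame pushed back to the head of its
 * queue is also the next one to be dequeued.
 */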
static bool wl1271_tx_is_data_present(struct sk_buff *skb)
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
	struct wl12xx_vif *wlvif;

	if (!wl->conf.rx_streaming.interval)

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will
 * be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate, so don't propagate them. -EINVAL will emit
 * a WARNING within the prepare_tx_frame code, but there's nothing we can
 * do about those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
	struct wl12xx_vif *wlvif;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};

	if (unlikely(wl->state != WLCORE_STATE_ON))

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset, true);

		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects the dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
		buf_offset += last_len;
		wl->tx_packets_count++;

		desc = (struct wl1271_tx_hw_descr *) skb->data;
		__set_bit(desc->hlid, active_hlids);

	buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
	bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,

	/*
	 * Interrupt the firmware with the new packets. This is only
	 * required for older hardware revisions
	 */
	if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
		bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					 wl->tx_packets_count);

	wl1271_handle_tx_low_watermark(wl);

	wl12xx_rearm_rx_streaming(wl, active_hlids);
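/*
 * Editor's note (summary, not in the original source): the Tx path
 * above works in aggregates - frames are dequeued, converted in place
 * into [descriptor + frame] blobs and packed into wl->aggr_buf until
 * either the queues are empty, the aggregation buffer fills up
 * (-EAGAIN: flush and continue) or the firmware runs out of memory
 * blocks (-EBUSY: re-queue the frame and stop).  Whatever was packed is
 * then written to the device in a single wlcore_write_data() transaction.
 */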
void wl1271_tx_work(struct work_struct *work)
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);

	ret = wlcore_tx_work_locked(wl);
		wl12xx_queue_recovery_work(wl);

	wl1271_ps_elp_sleep(wl);
	mutex_unlock(&wl->mutex);
static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	/* check that the id is valid */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);
	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
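		/*
		 * Editor's note (worked example, not in the original
		 * source): the firmware reports only the low 8 bits of
		 * the security sequence number, so the driver extends it
		 * by accumulating deltas.  E.g. if the last seen LSB was
		 * 0xfe and the firmware now reports 0x03, then
		 * (0x03 - 0xfe) & 0xff == 0x05, and tx_security_seq
		 * advances by 5 even though the 8-bit value wrapped.
		 */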
	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
EXPORT_SYMBOL(wlcore_tx_complete);
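/*
 * Editor's note (not in the original source): tx_result_fw_counter and
 * wl->tx_results_count are free-running counters of completed frames,
 * so their difference is the number of result descriptors not yet
 * processed by the host.  The result ring holds TX_HW_RESULT_QUEUE_LEN
 * entries (a power of two), which is why the ring index can be derived
 * by masking the host counter with TX_HW_RESULT_QUEUE_LEN_MASK.
 */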
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
	struct sk_buff *skb;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
			lnk->wlvif->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);

			wl12xx_free_link(wl, wlvif, &hlid);

	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	/*
	 * Make sure the driver is in a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
	unsigned long timeout, start_time;

	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);

		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);

	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);

	mutex_unlock(&wl->flush_mutex);
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
	if (WARN_ON(!rate_set))

	return BIT(__ffs(rate_set));
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
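/*
 * Editor's note (example, not in the original source): __ffs() returns
 * the index of the lowest set bit, so BIT(__ffs(rate_set)) isolates the
 * slowest enabled rate.  E.g. for rate_set == 0b10110 the result is
 * 0b00010 - the minimum rate in the set, typically used for management
 * and broadcast traffic.
 */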
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not already be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	ieee80211_stop_queue(wl->hw, hwq);
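/*
 * Editor's note (not in the original source): queue_stop_reasons[] keeps
 * one bitmask per mac80211 hw queue, with one bit per
 * wlcore_queue_stop_reason (watermark, flush, ...).  The queue is only
 * stopped in mac80211 when the first reason bit is set and only woken
 * again once the last reason bit is cleared, so independent callers can
 * stop and wake the same queue without stepping on each other.
 */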
void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* the stop reason should not already be clear for this queue */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])

	ieee80211_wake_queue(wl->hw, hwq);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know about are stopped as well
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know about are woken up as well
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
	spin_unlock_irqrestore(&wl->wl_lock, flags);
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif, u8 queue,
					      enum wlcore_queue_stop_reason reason)
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	WARN_ON_ONCE(!spin_is_locked(&wl->wl_lock));
	return !!wl->queue_stop_reasons[hwq];