1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
25 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING.
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *****************************************************************************/
66 #include <linux/etherdevice.h>
67 #include <net/mac80211.h>
70 #include "iwl-eeprom-parse.h"
71 #include "fw-api-scan.h"
73 #define IWL_PLCP_QUIET_THRESH 1
74 #define IWL_ACTIVE_QUIET_TIME 10
75 #define IWL_DENSE_EBS_SCAN_RATIO 5
76 #define IWL_SPARSE_EBS_SCAN_RATIO 1
/*
 * Per-request scan tuning computed by iwl_mvm_scan_calc_params().
 * NOTE(review): several fields (timing values and the inner dwell
 * struct members) are on elided lines in this chunk.
 */
struct iwl_mvm_scan_params {
	/* true when the passive part of the scan runs as fragments */
	bool passive_fragmented;
	/* per-band dwell times, indexed by ieee80211_band */
	} dwell[IEEE80211_NUM_BANDS];
/* Bitmask classifying UMAC scan UIDs by the kind of scan they belong to. */
enum iwl_umac_scan_uid_type {
	IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),	/* regular one-shot scan */
	IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),	/* scheduled (offloaded) scan */
	IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
				IWL_UMAC_SCAN_UID_SCHED_SCAN,
/* Forward declaration: stop UMAC scans of the given type (defined later). */
static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
			      enum iwl_umac_scan_uid_type type, bool notify);
99 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
101 if (mvm->scan_rx_ant != ANT_NONE)
102 return mvm->scan_rx_ant;
103 return iwl_mvm_get_valid_rx_ant(mvm);
106 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
111 rx_ant = iwl_mvm_scan_rx_ant(mvm);
112 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
113 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
114 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
115 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
116 return cpu_to_le16(rx_chain);
119 static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
121 if (band == IEEE80211_BAND_2GHZ)
122 return cpu_to_le32(PHY_BAND_24);
124 return cpu_to_le32(PHY_BAND_5);
/*
 * Choose the TX rate/antenna for scan probe requests: rotate through the
 * valid TX antennas so consecutive scans don't always use the same chain,
 * then pick 1 Mbps CCK on 2.4 GHz (unless CCK is forbidden) or 6 Mbps OFDM.
 * NOTE(review): the return-type line and part of the 2.4 GHz return
 * statement are on elided lines in this chunk.
 */
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
	/* advance to the next valid TX antenna for this scan */
	mvm->scan_last_antenna_idx =
		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
				     mvm->scan_last_antenna_idx);
	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;

	/* CCK 1M on 2.4 GHz unless the caller forbade CCK rates */
	if (band == IEEE80211_BAND_2GHZ && !no_cck)
		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
	/* OFDM 6M on 5 GHz (or when CCK is not allowed) */
	return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
146 * We insert the SSIDs in an inverted order, because the FW will
147 * invert it back. The most prioritized SSID, which is first in the
148 * request list, is not copied here, but inserted directly to the probe
/*
 * Fill the firmware direct-scan SSID array from the request, in reverse
 * order (per the comment above, the firmware inverts it back so request
 * priority is kept).  SSIDs before index @first are skipped — they are
 * carried in the probe template instead.
 * NOTE(review): the source-pointer argument line of the memcpy() below
 * is elided in this chunk.
 */
static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
				    struct cfg80211_ssid *ssids,
				    int n_ssids, int first)
	for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
	     req_idx--, fw_idx++) {
		cmd_ssid[fw_idx].id = WLAN_EID_SSID;
		cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
		memcpy(cmd_ssid[fw_idx].ssid,
		       ssids[req_idx].ssid_len);
168 * If req->n_ssids > 0, it means we should do an active scan.
169 * In case of active scan w/o directed scan, we receive a zero-length SSID
170 * just to notify that this scan is active and not passive.
171 * In order to notify the FW of the number of SSIDs we wish to scan (including
172 * the zero-length one), we need to set the corresponding bits in chan->type,
* one for each SSID, and set the active bit (first). The first SSID is
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
/*
 * Active-channel dwell time (ms): longer on 2.4 GHz, growing with the
 * number of SSIDs probed (+1 accounts for the broadcast probe).
 * NOTE(review): the value returned when IWL_UCODE_TLV_API_BASIC_DWELL
 * is set is on an elided line in this chunk.
 */
static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
				    enum ieee80211_band band, int n_ssids)
	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
	if (band == IEEE80211_BAND_2GHZ)
		return 20 + 3 * (n_ssids + 1);
	return 10 + 2 * (n_ssids + 1);
/*
 * Passive-channel dwell time (ms): 120 on 2.4 GHz, 110 on 5 GHz.
 * NOTE(review): the value returned when IWL_UCODE_TLV_API_BASIC_DWELL
 * is set is on an elided line in this chunk.
 */
static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
				     enum ieee80211_band band)
	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
/*
 * Interface iterator: counts interfaces bound to a PHY context (i.e.
 * active), ignoring the P2P device interface.  @data points to the
 * counter.  NOTE(review): the increment statement itself is on an
 * elided line in this chunk.
 */
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int *global_cnt = data;

	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
	    mvmvif->phy_ctxt->id < MAX_PHYS)
/*
 * Compute the timing parameters for a scan request based on how many
 * interfaces are active, low-latency mode, and firmware support for
 * fragmented scan.  Results (suspend_time/max_out_time and the per-band
 * dwell values) are written into @params.
 */
static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     int n_ssids, u32 flags,
				     struct iwl_mvm_scan_params *params)
	enum ieee80211_band band;
	u8 frag_passive_dwell = 0;

	/* count bound (active) interfaces; the result drives timing below */
	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_scan_condition_iterator,
	params->suspend_time = 30;
	params->max_out_time = 120;

	/* tighten off-channel time when low-latency traffic is present */
	if (iwl_mvm_low_latency(mvm)) {
		if (mvm->fw->ucode_capa.api[0] &
		    IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
			params->suspend_time = 105;
			/*
			 * If there is more than one active interface make
			 * passive scan more fragmented.
			 */
			frag_passive_dwell = 40;
			params->max_out_time = frag_passive_dwell;
			params->suspend_time = 120;
			params->max_out_time = 120;

	if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
				   IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
		/*
		 * P2P device scan should not be fragmented to avoid negative
		 * impact on P2P device discovery. Configure max_out_time to be
		 * equal to dwell time on passive channel. Take a longest
		 * possible value, one that corresponds to 2GHz band
		 */
		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
				iwl_mvm_get_passive_dwell(mvm,
							  IEEE80211_BAND_2GHZ);
			params->max_out_time = passive_dwell;
			params->passive_fragmented = true;

	/* low-priority scans may stay off-channel longer */
	if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
		params->max_out_time = 200;

	/* fill the per-band dwell times */
	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		if (params->passive_fragmented)
			params->dwell[band].fragmented = frag_passive_dwell;
		params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
		params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
276 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
278 /* require rrm scan whenever the fw supports it */
279 return mvm->fw->ucode_capa.capa[0] &
280 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
/*
 * Room left in the firmware probe-request buffer for caller-supplied IEs,
 * after subtracting the 802.11 header, the SSID element and (when RRM is
 * needed) the DS Parameter Set element.
 * NOTE(review): the remaining parameter(s) and the RRM subtraction line
 * are elided in this chunk.
 */
static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;

	/* we create the 802.11 header and SSID element */
	max_probe_len -= 24 + 2;

	/* DS parameter set element is added on 2.4GHZ band if required */
	if (iwl_mvm_rrm_scan_needed(mvm))
	return max_probe_len;
/*
 * Report to mac80211 the maximum probe-request IE length we accept.
 * The returned value is knowingly optimistic — see the TODO below.
 */
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);

	/* TODO: [BUG] This function should return the maximum allowed size of
	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
	 * in the same command. So the correct implementation of this function
	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
	 * command has only 512 bytes and it would leave us with about 240
	 * bytes for scan IEs, which is clearly not enough. So meanwhile
	 * we will report an incorrect value. This may result in a failure to
	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
	 * functions with -ENOBUFS, if a large enough probe will be provided.
/* Rx handler: log a scan-iteration-complete notification from firmware. */
int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
						struct iwl_rx_cmd_buffer *rxb,
						struct iwl_device_cmd *cmd)
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scan_complete_notif *notif = (void *)pkt->data;

		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
		       notif->status, notif->scanned_channels);
/* Rx handler: forward scheduled-scan results availability to mac80211. */
int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb,
				    struct iwl_device_cmd *cmd)
	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
	ieee80211_sched_scan_results(mvm->hw);
/*
 * Rx handler: the firmware finished (or aborted) an offloaded scan.
 * Clears mvm->scan_status, notifies mac80211 of the scan/sched-scan end
 * and records a failed EBS status so later scans avoid EBS.
 */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
					   struct iwl_rx_cmd_buffer *rxb,
					   struct iwl_device_cmd *cmd)
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_periodic_scan_complete *scan_notif;

	scan_notif = (void *)pkt->data;

	/* scan status must be locked for proper checking */
	lockdep_assert_held(&mvm->mutex);

		       "%s completed, status %s, EBS status %s\n",
		       mvm->scan_status == IWL_MVM_SCAN_SCHED ?
				"Scheduled scan" : "Scan",
		       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
				"completed" : "aborted",
		       scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
				"success" : "failed");

	/* only call mac80211 completion if the stop was initiated by FW */
	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_sched_scan_stopped(mvm->hw);
	} else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
		mvm->scan_status = IWL_MVM_SCAN_NONE;
		ieee80211_scan_completed(mvm->hw,
				scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);

	/* any non-success EBS status disables EBS for subsequent scans */
	if (scan_notif->ebs_status)
		mvm->last_ebs_successful = false;
379 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
383 for (i = 0; i < PROBE_OPTION_MAX; i++) {
384 if (!ssid_list[i].len)
386 if (ssid_list[i].len == ssid_len &&
387 !memcmp(ssid_list->ssid, ssid, ssid_len))
/*
 * Build the direct-scan SSID list for a scheduled scan: first the SSIDs
 * from the match sets (their order matters — the profile config refers
 * to them by index), then any scan-request SSIDs not already present.
 * @ssid_bitmap gets one bit per SSID to probe actively (bit 0 belongs
 * to the SSID carried in the probe template).
 */
static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
					struct iwl_ssid_ie *direct_scan,
					u32 *ssid_bitmap, bool basic_ssid)
	/*
	 * copy SSIDs from match list.
	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
	 */
	for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
		/* skip empty SSID matchsets */
		if (!req->match_sets[i].ssid.ssid_len)
		direct_scan[i].id = WLAN_EID_SSID;
		direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
		memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,

	/* add SSIDs from scan SSID list */
	for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
		/* reuse an existing entry when the SSID is already listed */
		index = iwl_ssid_exist(req->ssids[j].ssid,
				       req->ssids[j].ssid_len,
		if (!req->ssids[j].ssid_len && basic_ssid)
		direct_scan[i].id = WLAN_EID_SSID;
		direct_scan[i].len = req->ssids[j].ssid_len;
		memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
		*ssid_bitmap |= BIT(i + 1);
		*ssid_bitmap |= BIT(index + 1);
/*
 * Send the scheduled-scan profile configuration to the firmware: one
 * profile per cfg80211 match set, plus a zeroed (empty) blacklist.
 */
int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
				       struct cfg80211_sched_scan_request *req)
	struct iwl_scan_offload_profile *profile;
	struct iwl_scan_offload_profile_cfg *profile_cfg;
	struct iwl_scan_offload_blacklist *blacklist;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
		.len[1] = sizeof(*profile_cfg),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,

	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))

	/* blacklist length depends on a firmware TLV flag */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;

	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);

	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);

	cmd.data[0] = blacklist;
	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
	cmd.data[1] = profile_cfg;

	/* No blacklist configuration */

	profile_cfg->num_profiles = req->n_match_sets;
	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
	/* with no (or an empty) match set, notify on any beacon */
	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;

	for (i = 0; i < req->n_match_sets; i++) {
		profile = &profile_cfg->profiles[i];
		profile->ssid_index = i;
		/* Support any cipher and auth algorithm */
		profile->unicast_cipher = 0xff;
		profile->auth_alg = 0xff;
		profile->network_type = IWL_NETWORK_TYPE_ANY;
		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
/*
 * Decide whether the firmware should pass all beacons up unfiltered:
 * only filter when the request carries a non-empty match set.
 */
static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
				  struct cfg80211_sched_scan_request *req)
	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
			       "Sending scheduled scan with filtering, n_match_sets %d\n",

	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
/*
 * Start a scheduled scan, dispatching to the UMAC or unified-LMAC
 * implementation depending on firmware capabilities.  Both paths
 * configure the match profiles before issuing the scan.
 */
int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif,
			       struct cfg80211_sched_scan_request *req,
			       struct ieee80211_scan_ies *ies)
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
		ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
		mvm->scan_status = IWL_MVM_SCAN_SCHED;
		ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
		ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
/*
 * Send SCAN_OFFLOAD_ABORT_CMD and check the status returned by the
 * firmware; anything other than CAN_ABORT_STATUS means it had no scan
 * to abort.
 */
static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_ABORT_CMD,

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * scheduled scan currently */
	if (mvm->scan_status == IWL_MVM_SCAN_NONE)

	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);

	if (status != CAN_ABORT_STATUS) {
		/*
		 * The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is completed.
		 */
		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
/*
 * Stop an in-flight (sched) scan and wait up to one second for the
 * firmware completion notification.  Clears mvm->scan_status and, when
 * @notify is set, reports the end of the scan to mac80211.
 */
int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
	struct iwl_notification_wait wait_scan_done;
	static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
	bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;

	lockdep_assert_held(&mvm->mutex);

	/* UMAC firmware has its own stop flow */
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)

	if (iwl_mvm_is_radio_killed(mvm)) {

	/* register for the completion notification before aborting */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   ARRAY_SIZE(scan_done_notif),

	ret = iwl_mvm_send_scan_offload_abort(mvm);
		IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
			       sched ? "offloaded " : "", ret);
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);

	IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
		       sched ? "offloaded " : "");

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);

	/*
	 * Clear the scan status so the next scan requests will succeed. This
	 * also ensures the Rx handler doesn't do anything, as the scan was
	 * stopped from above. Since the rx handler won't do anything now,
	 * we have to release the scan reference here.
	 */
	if (mvm->scan_status == IWL_MVM_SCAN_OS)
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);

	mvm->scan_status = IWL_MVM_SCAN_NONE;

		ieee80211_sched_scan_stopped(mvm->hw);
		ieee80211_scan_completed(mvm->hw, true);
/*
 * Fill the per-band TX commands used for scan probes: index 0 is
 * 2.4 GHz, index 1 is 5 GHz; both transmit from the auxiliary station.
 * NOTE(review): the remaining flag/rate arguments are on elided lines.
 */
static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
					     struct iwl_scan_req_tx_cmd *tx_cmd,
	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;

	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
/*
 * Fill the channel array at the tail of an LMAC scan command: one
 * iteration per channel, with the active-SSID bitmap folded into the
 * per-channel flags.
 */
iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
			       struct ieee80211_channel **channels,
			       int n_channels, u32 ssid_bitmap,
			       struct iwl_scan_req_unified_lmac *cmd)
	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;

	for (i = 0; i < n_channels; i++) {
		channel_cfg[i].channel_num =
			cpu_to_le16(channels[i]->hw_value);
		channel_cfg[i].iter_count = cpu_to_le16(1);
		channel_cfg[i].iter_interval = 0;
		channel_cfg[i].flags =
			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
/*
 * Copy probe-request IEs into the template, inserting a DS Parameter Set
 * placeholder at the standards-mandated position when RRM scan is
 * needed; presumably the firmware fills in the channel number per
 * channel — TODO confirm.  Advances and (per the signature) returns the
 * write pointer; the return statement is on an elided line.
 */
static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
					   size_t len, u8 *const pos)
	/* element IDs that must precede the DS element in a probe request */
	static const u8 before_ds_params[] = {
		WLAN_EID_EXT_SUPP_RATES,

	/* no RRM: plain copy, nothing to insert */
	if (!iwl_mvm_rrm_scan_needed(mvm)) {
		memcpy(newpos, ies, len);

	/* find the split point for the DS element */
	offs = ieee80211_ie_split(ies, len,
				  ARRAY_SIZE(before_ds_params),

	memcpy(newpos, ies, offs);

	/* Add a placeholder for DS Parameter Set element */
	*newpos++ = WLAN_EID_DS_PARAMS;

	/* copy the remaining IEs after the placeholder */
	memcpy(newpos, ies + offs, len - offs);
	newpos += len - offs;
/*
 * Build the probe-request template inside @preq: 802.11 header (with an
 * optionally randomised source address), SSID element, then the 2.4 GHz
 * IEs (with DS-parameter placeholder), 5 GHz IEs, and common IEs.  The
 * offset/length of each part is recorded for the firmware.
 */
iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				 struct ieee80211_scan_ies *ies,
				 struct iwl_scan_probe_req *preq,
				 const u8 *mac_addr, const u8 *mac_addr_mask)
	struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;

	/*
	 * Unfortunately, right now the offload scan doesn't support randomising
	 * within the firmware, so until the firmware API is ready we implement
	 * it in the driver. This means that the scan iterations won't really be
	 * random, only when it's restarted, but at least that helps a bit.
	 */
		get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
		memcpy(frame->sa, vif->addr, ETH_ALEN);

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	eth_broadcast_addr(frame->bssid);

	/* SSID element starts the variable part of the probe request */
	pos = frame->u.probe_req.variable;
	*pos++ = WLAN_EID_SSID;

	/* 24-byte header plus the 2-byte SSID element header */
	preq->mac_header.offset = 0;
	preq->mac_header.len = cpu_to_le16(24 + 2);

	/* Insert ds parameter set element on 2.4 GHz band */
	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
						 ies->ies[IEEE80211_BAND_2GHZ],
						 ies->len[IEEE80211_BAND_2GHZ],
	preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
	preq->band_data[0].len = cpu_to_le16(newpos - pos);

	/* 5 GHz IEs are copied verbatim */
	memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
	       ies->len[IEEE80211_BAND_5GHZ]);
	preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
	preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
	pos += ies->len[IEEE80211_BAND_5GHZ];

	/* finally the IEs common to both bands */
	memcpy(pos, ies->common_ies, ies->common_ie_len);
	preq->common_data.offset = cpu_to_le16(pos - preq->buf);
	preq->common_data.len = cpu_to_le16(ies->common_ie_len);
/*
 * Fill the parts of a unified LMAC scan command shared by regular and
 * scheduled scans: dwell times, RX chain, timing and priority.  The
 * command buffer is zeroed first.
 */
iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
				       struct iwl_scan_req_unified_lmac *cmd,
				       struct iwl_mvm_scan_params *params)
	memset(cmd, 0, ksize(cmd));
	/* dwell values are identical per band; use the 2.4 GHz entry */
	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
	if (params->passive_fragmented)
		cmd->fragmented_dwell =
			params->dwell[IEEE80211_BAND_2GHZ].fragmented;
	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
	cmd->max_out_time = cpu_to_le32(params->max_out_time);
	cmd->suspend_time = cpu_to_le32(params->suspend_time);
	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
	cmd->iter_num = cpu_to_le32(1);

	if (iwl_mvm_rrm_scan_needed(mvm))
			cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
/*
 * Issue a regular (one-shot) scan through the unified LMAC scan API:
 * validate the request against firmware limits, fill the scan command
 * (flags, per-band TX commands, direct SSIDs, schedule, optional EBS
 * channel options, channel list, probe template) and send it.  On
 * failure, mvm->scan_status is rolled back to IWL_MVM_SCAN_NONE.
 */
int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
			      struct ieee80211_vif *vif,
			      struct ieee80211_scan_request *req)
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req *preq;
	struct iwl_mvm_scan_params params = {};

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))

	/* reject requests that would not fit in the firmware command */
	if (req->req.n_ssids > PROBE_OPTION_MAX ||
	    req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
	    req->ies.len[NL80211_BAND_5GHZ] >
		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
	    req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)

	mvm->scan_status = IWL_MVM_SCAN_OS;

	iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->req.n_channels;

	flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;

	if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	/* no SSIDs at all means a purely passive scan */
	if (req->req.n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	cmd->scan_flags |= cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
	iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
				req->req.n_ssids, 0);

	/* one-shot scan: a single iteration and no follow-up schedule */
	cmd->schedule[0].delay = 0;
	cmd->schedule[0].iterations = 1;
	cmd->schedule[0].full_scan_mul = 0;
	cmd->schedule[1].delay = 0;
	cmd->schedule[1].iterations = 0;
	cmd->schedule[1].full_scan_mul = 0;

	/* enable EBS only if supported and the last EBS attempt succeeded */
	if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
	    mvm->last_ebs_successful) {
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[0].non_ebs_ratio =
			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].non_ebs_ratio =
			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);

	/* one bit per requested SSID; bit 0 is the probe-template SSID */
	for (i = 1; i <= req->req.n_ssids; i++)
		ssid_bitmap |= BIT(i);

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
				       req->req.n_channels, ssid_bitmap,

	/* the probe template follows the channel array in the command */
	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);

	iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
		req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->req.mac_addr : NULL,
		req->req.mac_addr_mask);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");

		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
/*
 * Issue a scheduled scan through the unified LMAC scan API.  Same
 * structure as iwl_mvm_unified_scan_lmac(), but with match/pass-all
 * filtering flags and a repeating two-stage schedule driven by the
 * requested interval.  On failure mvm->scan_status is rolled back.
 */
int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    struct cfg80211_sched_scan_request *req,
				    struct ieee80211_scan_ies *ies)
	struct iwl_host_cmd hcmd = {
		.id = SCAN_OFFLOAD_REQUEST_CMD,
		.len = { sizeof(struct iwl_scan_req_unified_lmac) +
			 sizeof(struct iwl_scan_channel_cfg_lmac) *
				mvm->fw->ucode_capa.n_scan_channels +
			 sizeof(struct iwl_scan_probe_req), },
		.data = { mvm->scan_cmd, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req *preq;
	struct iwl_mvm_scan_params params = {};
	u32 flags = 0, ssid_bitmap = 0;

	lockdep_assert_held(&mvm->mutex);

	/* we should have failed registration if scan_cmd was NULL */
	if (WARN_ON(mvm->scan_cmd == NULL))

	/* reject requests that would not fit in the firmware command */
	if (req->n_ssids > PROBE_OPTION_MAX ||
	    ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
	    ies->len[NL80211_BAND_5GHZ] >
		iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
	    req->n_channels > mvm->fw->ucode_capa.n_scan_channels)

	iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);

	iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);

	cmd->n_channels = (u8)req->n_channels;

	/* pass all beacons or let the firmware match-filter them */
	if (iwl_mvm_scan_pass_all(mvm, req))
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;

	if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (params.passive_fragmented)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	if (req->n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs knob: ask for per-iteration complete notifications */
	if (mvm->scan_iter_notif_enabled)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;

	cmd->scan_flags |= cpu_to_le32(flags);

	cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
	iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);

	/* stage 0: a few fast iterations at the requested interval... */
	cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
	cmd->schedule[0].full_scan_mul = 1;

	/* ...then stage 1: iterate indefinitely, full scans less often */
	cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
	cmd->schedule[1].iterations = 0xff;
	cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;

	/* enable EBS only if supported and the last EBS attempt succeeded */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
	    mvm->last_ebs_successful) {
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[0].non_ebs_ratio =
			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].non_ebs_ratio =
			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);

	iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,

	/* the probe template follows the channel array in the command */
	preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			mvm->fw->ucode_capa.n_scan_channels);

	iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
		req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
			req->mac_addr : NULL,

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
			       "Sched scan request was sent successfully\n");

		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
		mvm->scan_status = IWL_MVM_SCAN_NONE;
/*
 * Cancel a regular (non-scheduled) scan.  UMAC firmware uses its own
 * stop flow; under rfkill just complete the scan locally towards
 * mac80211 without talking to the device.
 */
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
	if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
		return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,

	if (mvm->scan_status == IWL_MVM_SCAN_NONE)

	if (iwl_mvm_is_radio_killed(mvm)) {
		ieee80211_scan_completed(mvm->hw, true);
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		mvm->scan_status = IWL_MVM_SCAN_NONE;

	return iwl_mvm_scan_offload_stop(mvm, true);
/* Context handed to the UMAC scan-done notification-wait machinery. */
struct iwl_umac_scan_done {
	struct iwl_mvm *mvm;
	enum iwl_umac_scan_uid_type type;	/* which scan kind completed */
/* Map a legacy rate index to the corresponding SCAN_CONFIG rate flag. */
static int rate_to_scan_rate_flag(unsigned int rate)
	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
		[IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
		[IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
		[IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
		[IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
		[IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
		[IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
		[IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
		[IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
		[IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
		[IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
		[IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
		[IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,

	return rate_to_scan_rate[rate];
/*
 * Collect the legacy rates supported on both bands into one firmware
 * rate bitmap, marking every rate as both basic and supported.
 */
static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
	struct ieee80211_supported_band *band;
	unsigned int rates = 0;

	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	for (i = 0; i < band->n_bitrates; i++)
		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_bitrates; i++)
		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);

	/* Set both basic rates and supported rates */
	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);

	return cpu_to_le32(rates);
/*
 * Send the one-time UMAC scan configuration command: TX/RX chains,
 * legacy rates, default dwell/timing values, MAC address, EBS channel
 * flags, and the channel list of both bands.
 */
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
	struct iwl_scan_config *scan_config;
	struct ieee80211_supported_band *band;
		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
	int ret, i, j = 0, cmd_size, data_size;
	struct iwl_host_cmd cmd = {

	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))

	/* one byte per firmware-supported scan channel follows the struct */
	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;

	scan_config = kzalloc(cmd_size, GFP_KERNEL);

	/* the UMAC header size field excludes the header itself */
	data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
	scan_config->hdr.size = cpu_to_le16(data_size);
	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
					 SCAN_CONFIG_N_CHANNELS(num_channels));
	scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
	scan_config->out_of_channel_time = cpu_to_le32(170);
	scan_config->suspend_time = cpu_to_le32(30);
	scan_config->dwell_active = 20;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 20;

	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);

	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
				     IWL_CHANNEL_FLAG_EBS_ADD |
				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* append channel numbers of both bands, 2.4 GHz first */
	band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].hw_value;
	band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++, j++)
		scan_config->channel_array[j] = band->channels[i].hw_value;

	cmd.data[0] = scan_config;
	cmd.len[0] = cmd_size;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;

	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
/*
 * Find the slot in mvm->scan_uid[] that holds the given UID.
 *
 * Callers compare the result against IWL_MVM_MAX_SIMULTANEOUS_SCANS to
 * detect "not found"; presumably the (not visible) tail returns the
 * matching index or the array size when nothing matches - TODO confirm
 * against the full file.
 */
1152 static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
1156 for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
1157 if (mvm->scan_uid[i] == uid)
/*
 * Find a free scan slot. A free slot stores UID 0, so this is simply a
 * lookup for UID 0; same "index or array size" return convention as
 * iwl_mvm_find_scan_uid().
 */
1163 static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
1165 return iwl_mvm_find_scan_uid(mvm, 0);
/*
 * Check whether any active scan slot carries a UID of the given type.
 * UIDs embed the type as a bit flag (see iwl_generate_scan_uid), so a
 * bitwise AND per slot suffices. Returns true if a match exists
 * (return statements are outside this excerpt).
 */
1168 static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
1169 enum iwl_umac_scan_uid_type type)
1173 for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
1174 if (mvm->scan_uid[i] & type)
/*
 * Generate a unique scan UID of the given type.
 *
 * The UID packs the type bit in the low bits and a monotonically
 * increasing sequence number shifted by IWL_UMAC_SCAN_UID_SEQ_OFFSET.
 * The do/while loop (opening brace not visible here) retries until the
 * candidate UID is not already present in mvm->scan_uid[], guarding
 * against sequence-number wraparound while a long scan is in flight.
 */
1180 static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
1181 enum iwl_umac_scan_uid_type type)
1185 /* make sure exactly one bit is on in scan type */
1186 WARN_ON(hweight8(type) != 1);
1189 * Make sure scan uids are unique. If one scan lasts long time while
1190 * others are completing frequently, the seq number will wrap up and
1191 * we may have more than one scan with the same uid.
1194 uid = type | (mvm->scan_seq_num <<
1195 IWL_UMAC_SCAN_UID_SEQ_OFFSET);
1196 mvm->scan_seq_num++;
/* Loop while the candidate UID is already occupied (found index < max). */
1197 } while (iwl_mvm_find_scan_uid(mvm, uid) <
1198 IWL_MVM_MAX_SIMULTANEOUS_SCANS);
1200 IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
/*
 * Fill the fields common to all UMAC scan request commands.
 *
 * Zeroes the whole command buffer (ksize() gives the true allocated
 * size, which may exceed the struct), sets the UMAC header size to the
 * full scan command size minus the header, and copies dwell/timing
 * parameters from the precomputed scan params. Only the 2.4 GHz dwell
 * entry is consulted - presumably both bands share dwell values in this
 * API; TODO confirm.
 */
1206 iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
1207 struct iwl_scan_req_umac *cmd,
1208 struct iwl_mvm_scan_params *params)
1210 memset(cmd, 0, ksize(cmd));
1211 cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
1212 sizeof(struct iwl_mvm_umac_cmd_hdr));
1213 cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
1214 cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
/* Fragmented dwell only applies when the scan uses fragmented passive scans. */
1215 if (params->passive_fragmented)
1216 cmd->fragmented_dwell =
1217 params->dwell[IEEE80211_BAND_2GHZ].fragmented;
1218 cmd->max_out_time = cpu_to_le32(params->max_out_time);
1219 cmd->suspend_time = cpu_to_le32(params->suspend_time);
1220 cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
/*
 * Populate the per-channel configuration array at the start of the UMAC
 * scan command's variable data section.
 *
 * Every channel gets the same SSID bitmap (which direct SSIDs to probe),
 * a single iteration and no inter-iteration interval.
 */
1224 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1225 struct ieee80211_channel **channels,
1226 int n_channels, u32 ssid_bitmap,
1227 struct iwl_scan_req_umac *cmd)
/* The channel config entries live at the head of cmd->data. */
1229 struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1232 for (i = 0; i < n_channels; i++) {
1233 channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1234 channel_cfg[i].channel_num = channels[i]->hw_value;
1235 channel_cfg[i].iter_count = 1;
1236 channel_cfg[i].iter_interval = 0;
/*
 * Compute the general flags shared by regular and scheduled UMAC scans.
 *
 * Starts passive; marks a single-SSID scan as pre-connect (directed),
 * adds the fragmented flag (condition line not visible here - presumably
 * gated on the fragmented parameter), and enables RRM if radio
 * measurement scanning is needed. The computed flags are returned via a
 * line outside this excerpt.
 */
1240 static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
1241 struct cfg80211_ssid *ssids,
1247 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1249 if (n_ssids == 1 && ssids[0].ssid_len != 0)
1250 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1253 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1255 if (iwl_mvm_rrm_scan_needed(mvm))
1256 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
/*
 * Issue a regular (one-shot) UMAC scan request from mac80211.
 *
 * Layout of mvm->scan_cmd: fixed iwl_scan_req_umac header, then
 * n_scan_channels channel-config entries, then the "tail" section
 * (schedules, delay, probe request template, direct SSIDs). The
 * function reserves a UID slot, validates request sizes against
 * firmware limits, fills the command and sends it NOCOPY.
 *
 * NOTE(review): "¶ms" on the calc/build call lines is extraction
 * garbling of "&params"; error-return lines and some braces are missing
 * from this excerpt.
 */
1261 int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1262 struct ieee80211_scan_request *req)
1264 struct iwl_host_cmd hcmd = {
1265 .id = SCAN_REQ_UMAC,
1266 .len = { iwl_mvm_scan_size(mvm), },
1267 .data = { mvm->scan_cmd, },
1268 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1270 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
/* Tail section starts right after the channel-config array. */
1271 struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1272 sizeof(struct iwl_scan_channel_cfg_umac) *
1273 mvm->fw->ucode_capa.n_scan_channels;
1274 struct iwl_mvm_scan_params params = {};
1276 u32 ssid_bitmap = 0;
1277 int ret, i, uid_idx;
1279 lockdep_assert_held(&mvm->mutex);
/* Claim a free UID slot; bail if all simultaneous-scan slots are busy. */
1281 uid_idx = iwl_mvm_find_free_scan_uid(mvm);
1282 if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
1285 /* we should have failed registration if scan_cmd was NULL */
1286 if (WARN_ON(mvm->scan_cmd == NULL))
/* Reject requests that exceed firmware probe/SSID/channel limits.
 * The "+ 24 + 2" accounts for the 802.11 management header (24) and
 * the SSID IE header (2) in the probe request template. */
1289 if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
1290 req->ies.common_ie_len +
1291 req->ies.len[NL80211_BAND_2GHZ] +
1292 req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
1293 SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
1294 mvm->fw->ucode_capa.n_scan_channels))
1297 iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
/* NOTE(review): "¶ms" below is garbled "&params". */
1300 iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, ¶ms);
1302 uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
1303 mvm->scan_uid[uid_idx] = uid;
1304 cmd->uid = cpu_to_le32(uid);
1306 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
1308 flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
1310 params.passive_fragmented);
1312 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1314 cmd->general_flags = cpu_to_le32(flags);
/* Enable EBS channel flags only when supported and previously successful. */
1316 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
1317 mvm->last_ebs_successful)
1318 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1319 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1320 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1322 cmd->n_channels = req->req.n_channels;
/* One bitmap bit per requested direct SSID. */
1324 for (i = 0; i < req->req.n_ssids; i++)
1325 ssid_bitmap |= BIT(i);
1327 iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
1328 req->req.n_channels, ssid_bitmap, cmd);
/* One-shot scan: a single schedule iteration, no delay. */
1330 sec_part->schedule[0].iter_count = 1;
1331 sec_part->delay = 0;
/* Randomize the source MAC only when userspace asked for it. */
1333 iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
1334 req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
1335 req->req.mac_addr : NULL,
1336 req->req.mac_addr_mask);
1338 iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
1339 req->req.n_ssids, 0);
1341 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1344 "Scan request was sent successfully\n");
1347 * If the scan failed, it usually means that the FW was unable
1348 * to allocate the time events. Warn on it, but maybe we
1349 * should try to send the command again with different params.
1351 IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
/*
 * Issue a scheduled (periodic) UMAC scan request.
 *
 * Mirrors iwl_mvm_scan_umac() but uses the SCHED_SCAN UID type, the
 * PREEMPTIVE flag, low out-of-channel priority, a repeating schedule
 * (iter_count 0xff at req->interval), and match-sets semantics
 * (PASS_ALL vs MATCH flags).
 *
 * NOTE(review): "¶ms" on the build call line is extraction garbling of
 * "&params"; error-return lines and some braces are missing from this
 * excerpt.
 */
1356 int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1357 struct cfg80211_sched_scan_request *req,
1358 struct ieee80211_scan_ies *ies)
1361 struct iwl_host_cmd hcmd = {
1362 .id = SCAN_REQ_UMAC,
1363 .len = { iwl_mvm_scan_size(mvm), },
1364 .data = { mvm->scan_cmd, },
1365 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
1367 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
/* Tail section starts right after the channel-config array. */
1368 struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1369 sizeof(struct iwl_scan_channel_cfg_umac) *
1370 mvm->fw->ucode_capa.n_scan_channels;
1371 struct iwl_mvm_scan_params params = {};
1373 u32 ssid_bitmap = 0;
1376 lockdep_assert_held(&mvm->mutex);
1378 uid_idx = iwl_mvm_find_free_scan_uid(mvm);
1379 if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
1382 /* we should have failed registration if scan_cmd was NULL */
1383 if (WARN_ON(mvm->scan_cmd == NULL))
/* Same size validation as the regular scan: 24-byte mgmt header + 2-byte
 * SSID IE header on top of the IEs must fit the probe template. */
1386 if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
1387 ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
1388 ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
1389 SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
1390 mvm->fw->ucode_capa.n_scan_channels))
1393 iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
/* NOTE(review): "¶ms" below is garbled "&params". */
1396 iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, ¶ms);
/* Scheduled scans may be preempted by regular scans. */
1398 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1400 uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
1401 mvm->scan_uid[uid_idx] = uid;
1402 cmd->uid = cpu_to_le32(uid);
1404 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
1406 flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
1407 params.passive_fragmented);
1409 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
/* Either report everything, or only matches against the match sets. */
1411 if (iwl_mvm_scan_pass_all(mvm, req))
1412 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1414 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1416 cmd->general_flags = cpu_to_le32(flags);
/* Note: gated on the generic EBS_SUPPORT flag here, unlike the regular
 * scan path which checks the SINGLE_SCAN_EBS API bit. */
1418 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
1419 mvm->last_ebs_successful)
1420 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1421 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1422 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1424 cmd->n_channels = req->n_channels;
1426 iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
1429 /* This API uses bits 0-19 instead of 1-20. */
1430 ssid_bitmap = ssid_bitmap >> 1;
1432 iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
/* Repeating schedule: interval in seconds, 0xff = effectively infinite
 * iterations - TODO confirm 0xff semantics against the firmware API. */
1435 sec_part->schedule[0].interval =
1436 cpu_to_le16(req->interval / MSEC_PER_SEC);
1437 sec_part->schedule[0].iter_count = 0xff;
1439 sec_part->delay = 0;
1441 iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
1442 req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
1443 req->mac_addr : NULL,
1444 req->mac_addr_mask);
1446 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1449 "Sched scan request was sent successfully\n");
1452 * If the scan failed, it usually means that the FW was unable
1453 * to allocate the time events. Warn on it, but maybe we
1454 * should try to send the command again with different params.
1456 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
/*
 * RX handler for the UMAC SCAN_COMPLETE notification.
 *
 * Looks up the notified UID in the local slot table; an unknown UID
 * (e.g. one cleared by an abort from above) is ignored. Records EBS
 * failure, frees the UID slot, and signals completion to mac80211:
 * ieee80211_scan_completed() for regular scans (under a !sched
 * condition not fully visible here), or ieee80211_sched_scan_stopped()
 * once no other scheduled scan remains active.
 */
1461 int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1462 struct iwl_rx_cmd_buffer *rxb,
1463 struct iwl_device_cmd *cmd)
1465 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1466 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1467 u32 uid = __le32_to_cpu(notif->uid);
/* The UID's type bit tells regular vs scheduled scan apart. */
1468 bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
1469 int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
1472 * Scan uid may be set to zero in case of scan abort request from above.
1474 if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
1478 "Scan completed, uid %u type %s, status %s, EBS status %s\n",
1479 uid, sched ? "sched" : "regular",
1480 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1481 "completed" : "aborted",
1482 notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
1483 "success" : "failed");
/* Any non-zero EBS status disables EBS for subsequent scans. */
1485 if (notif->ebs_status)
1486 mvm->last_ebs_successful = false;
/* Release the UID slot so a new scan can claim it. */
1488 mvm->scan_uid[uid_idx] = 0;
/* Report abort-vs-complete to mac80211 (surrounding condition lines are
 * outside this excerpt - presumably gated on !sched; TODO confirm). */
1491 ieee80211_scan_completed(mvm->hw,
1493 IWL_SCAN_OFFLOAD_ABORTED);
1494 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1495 } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
1496 ieee80211_sched_scan_stopped(mvm->hw);
1498 IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
/*
 * Notification-wait predicate used while stopping UMAC scans.
 *
 * Invoked for each SCAN_COMPLETE_UMAC packet: clears the completed
 * scan's UID slot (so the normal RX handler then ignores the same
 * notification), records EBS failure, and returns true - i.e. "wait is
 * done" - once no scan of the awaited type remains.
 */
1504 static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
1505 struct iwl_rx_packet *pkt, void *data)
1507 struct iwl_umac_scan_done *scan_done = data;
1508 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1509 u32 uid = __le32_to_cpu(notif->uid);
1510 int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
/* Only SCAN_COMPLETE_UMAC should be routed here. */
1512 if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
/* Unknown UID: not one of ours (already cleared), keep waiting. */
1515 if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
1519 * Clear scan uid of scans that was aborted from above and completed
1520 * in FW so the RX handler does nothing. Set last_ebs_successful here if
1523 scan_done->mvm->scan_uid[uid_idx] = 0;
1525 if (notif->ebs_status)
1526 scan_done->mvm->last_ebs_successful = false;
/* Done waiting when no scan of the requested type is left. */
1528 return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
/*
 * Send a SCAN_ABORT_UMAC command for one scan UID.
 *
 * The UMAC command header's size field excludes the header itself.
 * Returns the result of iwl_mvm_send_cmd_pdu(). Caller must hold
 * mvm->mutex.
 */
1531 static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
1533 struct iwl_umac_scan_abort cmd = {
1534 .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
1535 sizeof(struct iwl_mvm_umac_cmd_hdr)),
1536 .uid = cpu_to_le32(uid),
1539 lockdep_assert_held(&mvm->mutex);
1541 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
1543 return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
/*
 * Stop all UMAC scans of the given type(s).
 *
 * Registers a notification-wait for SCAN_COMPLETE_UMAC (with
 * iwl_scan_umac_done_check as the predicate), aborts every matching UID,
 * then waits up to 1 second for completions. If the radio is killed,
 * regular scans are completed locally instead of aborted via firmware.
 * When 'notify' is requested (condition lines not fully visible here),
 * mac80211 is told the scans stopped.
 */
1546 static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
1547 enum iwl_umac_scan_uid_type type, bool notify)
1549 struct iwl_notification_wait wait_scan_done;
1550 static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
1551 struct iwl_umac_scan_done scan_done = {
/* Register the waiter BEFORE sending aborts, so no completion is missed. */
1557 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
1559 ARRAY_SIZE(scan_done_notif),
1560 iwl_scan_umac_done_check, &scan_done);
1562 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
1564 for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
1565 if (mvm->scan_uid[i] & type) {
/* Radio already off: firmware can't answer, finish the regular
 * scan locally and release the scan reference. */
1568 if (iwl_mvm_is_radio_killed(mvm) &&
1569 (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
1570 ieee80211_scan_completed(mvm->hw, true);
1571 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1575 err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
/* Abort send failed: drop the waiter instead of timing out. */
1582 IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
1583 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
1587 ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
/* Notify mac80211 per type (enclosing 'notify' condition is outside
 * this excerpt - TODO confirm). */
1592 if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
1593 ieee80211_sched_scan_stopped(mvm->hw);
1594 if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
1595 ieee80211_scan_completed(mvm->hw, true);
1596 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
/*
 * Return the size of the scan request command buffer to allocate.
 *
 * UMAC-capable firmware: fixed request header + one channel-config
 * entry per firmware-supported scan channel + the tail section.
 * Otherwise the unified LMAC layout is used: header + per-channel LMAC
 * configs + the probe request template.
 */
1603 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
1605 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
1606 return sizeof(struct iwl_scan_req_umac) +
1607 sizeof(struct iwl_scan_channel_cfg_umac) *
1608 mvm->fw->ucode_capa.n_scan_channels +
1609 sizeof(struct iwl_scan_req_umac_tail);
1611 return sizeof(struct iwl_scan_req_unified_lmac) +
1612 sizeof(struct iwl_scan_channel_cfg_lmac) *
1613 mvm->fw->ucode_capa.n_scan_channels +
1614 sizeof(struct iwl_scan_probe_req);