1 /******************************************************************************
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/lockdep.h>
36 #include <linux/init.h>
37 #include <linux/pci.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/delay.h>
40 #include <linux/skbuff.h>
41 #include <net/mac80211.h>
46 _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
48 const int interval = 10; /* microseconds */
52 if ((_il_rd(il, addr) & mask) == (bits & mask))
56 } while (t < timeout);
60 EXPORT_SYMBOL(_il_poll_bit);
63 il_set_bit(struct il_priv *p, u32 r, u32 m)
65 unsigned long reg_flags;
67 spin_lock_irqsave(&p->reg_lock, reg_flags);
69 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
71 EXPORT_SYMBOL(il_set_bit);
74 il_clear_bit(struct il_priv *p, u32 r, u32 m)
76 unsigned long reg_flags;
78 spin_lock_irqsave(&p->reg_lock, reg_flags);
79 _il_clear_bit(p, r, m);
80 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
82 EXPORT_SYMBOL(il_clear_bit);
85 _il_grab_nic_access(struct il_priv *il)
90 /* this bit wakes up the NIC */
91 _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
94 * These bits say the device is running, and should keep running for
95 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
96 * but they do not indicate that embedded SRAM is restored yet;
97 * 3945 and 4965 have volatile SRAM, and must save/restore contents
98 * to/from host DRAM when sleeping/waking for power-saving.
99 * Each direction takes approximately 1/4 millisecond; with this
100 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
101 * series of register accesses is expected (e.g. reading the Event Log),
102 * to keep the device from sleeping.
104 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
105 * SRAM is okay/restored. We don't check that here because this call
106 * is just for hardware register access; but GP1 MAC_SLEEP check is a
107 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
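 *
 * Usage sketch (assumption: the caller already holds il->reg_lock, as the
 * il_rd_prph()/il_wr_prph() wrappers further below do):
 *
 *	if (!_il_grab_nic_access(il)) {
 *		... a burst of _il_rd()/_il_wr() register accesses ...
 *		_il_release_nic_access(il);
 *	}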
111 _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
112 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
113 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
115 val = _il_rd(il, CSR_GP_CNTRL);
116 IL_ERR("MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
117 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
123 EXPORT_SYMBOL_GPL(_il_grab_nic_access);
126 il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
128 const int interval = 10; /* microseconds */
132 if ((il_rd(il, addr) & mask) == mask)
136 } while (t < timeout);
140 EXPORT_SYMBOL(il_poll_bit);
143 il_rd_prph(struct il_priv *il, u32 reg)
145 unsigned long reg_flags;
148 spin_lock_irqsave(&il->reg_lock, reg_flags);
149 _il_grab_nic_access(il);
150 val = _il_rd_prph(il, reg);
151 _il_release_nic_access(il);
152 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
155 EXPORT_SYMBOL(il_rd_prph);
158 il_wr_prph(struct il_priv *il, u32 addr, u32 val)
160 unsigned long reg_flags;
162 spin_lock_irqsave(&il->reg_lock, reg_flags);
163 if (!_il_grab_nic_access(il)) {
164 _il_wr_prph(il, addr, val);
165 _il_release_nic_access(il);
167 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
169 EXPORT_SYMBOL(il_wr_prph);
172 il_read_targ_mem(struct il_priv *il, u32 addr)
174 unsigned long reg_flags;
177 spin_lock_irqsave(&il->reg_lock, reg_flags);
178 _il_grab_nic_access(il);
180 _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
182 value = _il_rd(il, HBUS_TARG_MEM_RDAT);
184 _il_release_nic_access(il);
185 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
188 EXPORT_SYMBOL(il_read_targ_mem);
191 il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
193 unsigned long reg_flags;
195 spin_lock_irqsave(&il->reg_lock, reg_flags);
196 if (!_il_grab_nic_access(il)) {
197 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
199 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
200 _il_release_nic_access(il);
202 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
204 EXPORT_SYMBOL(il_write_targ_mem);
207 il_get_cmd_string(u8 cmd)
213 IL_CMD(C_RXON_ASSOC);
215 IL_CMD(C_RXON_TIMING);
221 IL_CMD(C_RATE_SCALE);
223 IL_CMD(C_TX_LINK_QUALITY_CMD);
224 IL_CMD(C_CHANNEL_SWITCH);
225 IL_CMD(N_CHANNEL_SWITCH);
226 IL_CMD(C_SPECTRUM_MEASUREMENT);
227 IL_CMD(N_SPECTRUM_MEASUREMENT);
230 IL_CMD(N_PM_DEBUG_STATS);
232 IL_CMD(C_SCAN_ABORT);
233 IL_CMD(N_SCAN_START);
234 IL_CMD(N_SCAN_RESULTS);
235 IL_CMD(N_SCAN_COMPLETE);
238 IL_CMD(C_TX_PWR_TBL);
242 IL_CMD(N_CARD_STATE);
243 IL_CMD(N_MISSED_BEACONS);
244 IL_CMD(C_CT_KILL_CONFIG);
245 IL_CMD(C_SENSITIVITY);
246 IL_CMD(C_PHY_CALIBRATION);
250 IL_CMD(N_COMPRESSED_BA);
256 EXPORT_SYMBOL(il_get_cmd_string);
258 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
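/*
 * Informational summary of the host-command constraints enforced by the
 * BUG_ON()s in il_send_cmd_async()/il_send_cmd_sync() below:
 *
 *  - CMD_ASYNC:   may not use CMD_WANT_SKB; il_generic_cmd_callback() is
 *                 assigned when the caller does not supply a callback.
 *  - synchronous: caller must hold il->mutex, may not set a callback, and
 *                 waits up to HOST_COMPLETE_TIMEOUT for S_HCMD_ACTIVE to
 *                 clear.
 */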
261 il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
262 struct il_rx_pkt *pkt)
264 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
265 IL_ERR("Bad return from %s (0x%08X)\n",
266 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
269 #ifdef CONFIG_IWLEGACY_DEBUG
270 switch (cmd->hdr.cmd) {
271 case C_TX_LINK_QUALITY_CMD:
273 D_HC_DUMP("back from %s (0x%08X)\n",
274 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
277 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
284 il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
288 BUG_ON(!(cmd->flags & CMD_ASYNC));
290 /* An asynchronous command can not expect an SKB to be set. */
291 BUG_ON(cmd->flags & CMD_WANT_SKB);
293 /* Assign a generic callback if one is not provided */
295 cmd->callback = il_generic_cmd_callback;
297 if (test_bit(S_EXIT_PENDING, &il->status))
300 ret = il_enqueue_hcmd(il, cmd);
302 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
303 il_get_cmd_string(cmd->id), ret);
310 il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
315 lockdep_assert_held(&il->mutex);
317 BUG_ON(cmd->flags & CMD_ASYNC);
319 /* A synchronous command can not have a callback set. */
320 BUG_ON(cmd->callback);
322 D_INFO("Attempting to send sync command %s\n",
323 il_get_cmd_string(cmd->id));
325 set_bit(S_HCMD_ACTIVE, &il->status);
326 D_INFO("Setting HCMD_ACTIVE for command %s\n",
327 il_get_cmd_string(cmd->id));
329 cmd_idx = il_enqueue_hcmd(il, cmd);
332 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
333 il_get_cmd_string(cmd->id), ret);
337 ret = wait_event_timeout(il->wait_command_queue,
338 !test_bit(S_HCMD_ACTIVE, &il->status),
339 HOST_COMPLETE_TIMEOUT);
341 if (test_bit(S_HCMD_ACTIVE, &il->status)) {
342 IL_ERR("Error sending %s: time out after %dms.\n",
343 il_get_cmd_string(cmd->id),
344 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
346 clear_bit(S_HCMD_ACTIVE, &il->status);
347 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
348 il_get_cmd_string(cmd->id));
354 if (test_bit(S_RF_KILL_HW, &il->status)) {
355 IL_ERR("Command %s aborted: RF KILL Switch\n",
356 il_get_cmd_string(cmd->id));
360 if (test_bit(S_FW_ERROR, &il->status)) {
361 IL_ERR("Command %s failed: FW Error\n",
362 il_get_cmd_string(cmd->id));
366 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
367 IL_ERR("Error: Response NULL in '%s'\n",
368 il_get_cmd_string(cmd->id));
377 if (cmd->flags & CMD_WANT_SKB) {
379 * Cancel the CMD_WANT_SKB flag for the cmd in the
380 * TX cmd queue. Otherwise in case the cmd comes
381 * in later, it will possibly set an invalid
382 * address (cmd->meta.source).
384 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
387 if (cmd->reply_page) {
388 il_free_pages(il, cmd->reply_page);
394 EXPORT_SYMBOL(il_send_cmd_sync);
397 il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
399 if (cmd->flags & CMD_ASYNC)
400 return il_send_cmd_async(il, cmd);
402 return il_send_cmd_sync(il, cmd);
404 EXPORT_SYMBOL(il_send_cmd);
407 il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
409 struct il_host_cmd cmd = {
415 return il_send_cmd_sync(il, &cmd);
417 EXPORT_SYMBOL(il_send_cmd_pdu);
420 il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
421 void (*callback) (struct il_priv *il,
422 struct il_device_cmd *cmd,
423 struct il_rx_pkt *pkt))
425 struct il_host_cmd cmd = {
431 cmd.flags |= CMD_ASYNC;
432 cmd.callback = callback;
434 return il_send_cmd_async(il, &cmd);
436 EXPORT_SYMBOL(il_send_cmd_pdu_async);
438 /* default: IL_LED_BLINK(0) using blinking idx table */
440 module_param(led_mode, int, S_IRUGO);
441 MODULE_PARM_DESC(led_mode,
442 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
444 /* Throughput OFF time(ms) ON time (ms)
457 static const struct ieee80211_tpt_blink il_blink[] = {
458 {.throughput = 0, .blink_time = 334},
459 {.throughput = 1 * 1024 - 1, .blink_time = 260},
460 {.throughput = 5 * 1024 - 1, .blink_time = 220},
461 {.throughput = 10 * 1024 - 1, .blink_time = 190},
462 {.throughput = 20 * 1024 - 1, .blink_time = 170},
463 {.throughput = 50 * 1024 - 1, .blink_time = 150},
464 {.throughput = 70 * 1024 - 1, .blink_time = 130},
465 {.throughput = 100 * 1024 - 1, .blink_time = 110},
466 {.throughput = 200 * 1024 - 1, .blink_time = 80},
467 {.throughput = 300 * 1024 - 1, .blink_time = 50},
471 * Adjust led blink rate to compensate for a MAC clock difference on each HW.
472 * Led blink rate analysis showed an average deviation of 0% on 3945,
474 * The led on/off time per HW needs to be compensated according to the deviation
475 * to achieve the desired led frequency.
476 * The calculation is: (100-averageDeviation)/100 * blinkTime
477 * For code efficiency the calculation will be:
478 * compensation = (100 - averageDeviation) * 64 / 100
479 * NewBlinkTime = (compensation * BlinkTime) / 64
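 *
 * Worked example (numbers are illustrative only): with an average deviation
 * of 5%, compensation = (100 - 5) * 64 / 100 = 60, so a nominal 220 ms blink
 * time becomes (60 * 220) >> 6 = 206 ms.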
482 il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
485 IL_ERR("undefined blink compensation: "
486 "use pre-defined blinking time\n");
490 return (u8) ((time * compensation) >> 6);
493 /* Set led pattern command */
495 il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
497 struct il_led_cmd led_cmd = {
499 .interval = IL_DEF_LED_INTRVL
503 if (!test_bit(S_READY, &il->status))
506 if (il->blink_on == on && il->blink_off == off)
510 /* led is SOLID_ON */
514 D_LED("Led blink time compensation=%u\n",
515 il->cfg->base_params->led_compensation);
517 il_blink_compensation(il, on,
518 il->cfg->base_params->led_compensation);
520 il_blink_compensation(il, off,
521 il->cfg->base_params->led_compensation);
523 ret = il->cfg->ops->led->cmd(il, &led_cmd);
532 il_led_brightness_set(struct led_classdev *led_cdev,
533 enum led_brightness brightness)
535 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
536 unsigned long on = 0;
541 il_led_cmd(il, on, 0);
545 il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
546 unsigned long *delay_off)
548 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
550 return il_led_cmd(il, *delay_on, *delay_off);
554 il_leds_init(struct il_priv *il)
559 if (mode == IL_LED_DEFAULT)
560 mode = il->cfg->led_mode;
563 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
564 il->led.brightness_set = il_led_brightness_set;
565 il->led.blink_set = il_led_blink_set;
566 il->led.max_brightness = 1;
573 il->led.default_trigger =
574 ieee80211_create_tpt_led_trigger(il->hw,
575 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
577 ARRAY_SIZE(il_blink));
579 case IL_LED_RF_STATE:
580 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
584 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
590 il->led_registered = true;
592 EXPORT_SYMBOL(il_leds_init);
595 il_leds_exit(struct il_priv *il)
597 if (!il->led_registered)
600 led_classdev_unregister(&il->led);
603 EXPORT_SYMBOL(il_leds_exit);
605 /************************** EEPROM BANDS ****************************
607 * The il_eeprom_band definitions below provide the mapping from the
608 * EEPROM contents to the specific channel number supported for each
611 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
612 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
613 * The specific geography and calibration information for that channel
614 * is contained in the eeprom map itself.
616 * During init, we copy the eeprom information and channel map
617 * information into il->channel_info_24/52 and il->channel_map_24/52
619 * channel_map_24/52 provides the idx in the channel_info array for a
620 * given channel. We have to have two separate maps as there is channel
621 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
624 * A value of 0xff stored in the channel_map indicates that the channel
625 * is not supported by the hardware at all.
627 * A value of 0xfe in the channel_map indicates that the channel is not
628 * valid for Tx with the current hardware. This means that
629 * while the system can tune and receive on a given channel, it may not
630 * be able to associate or transmit any frames on that
631 * channel. There is no corresponding channel information for that
634 *********************************************************************/
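/*
 * Conceptual lookup through the map described above (sketch only, using the
 * channel_map_24/52 names from that comment; the helper actually used in
 * this file is il_get_channel_info() further below):
 *
 *	u8 idx = il->channel_map_52[channel];
 *	if (idx != 0xff && idx != 0xfe)
 *		ch_info = &il->channel_info_52[idx];
 */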
637 const u8 il_eeprom_band_1[14] = {
638 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
642 static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
643 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
646 static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
647 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
650 static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
651 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
654 static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
655 145, 149, 153, 157, 161, 165
658 static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
662 static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
663 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
666 /******************************************************************************
668 * EEPROM related functions
670 ******************************************************************************/
673 il_eeprom_verify_signature(struct il_priv *il)
675 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
678 D_EEPROM("EEPROM signature=0x%08x\n", gp);
680 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
681 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
684 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
692 il_eeprom_query_addr(const struct il_priv *il, size_t offset)
694 BUG_ON(offset >= il->cfg->base_params->eeprom_size);
695 return &il->eeprom[offset];
697 EXPORT_SYMBOL(il_eeprom_query_addr);
700 il_eeprom_query16(const struct il_priv *il, size_t offset)
704 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
706 EXPORT_SYMBOL(il_eeprom_query16);
709 * il_eeprom_init - read EEPROM contents
711 * Load the EEPROM contents from adapter into il->eeprom
713 * NOTE: This routine uses the non-debug IO access functions.
716 il_eeprom_init(struct il_priv *il)
719 u32 gp = _il_rd(il, CSR_EEPROM_GP);
724 /* allocate eeprom */
725 sz = il->cfg->base_params->eeprom_size;
726 D_EEPROM("NVM size = %d\n", sz);
727 il->eeprom = kzalloc(sz, GFP_KERNEL);
732 e = (__le16 *) il->eeprom;
734 il->cfg->ops->lib->apm_ops.init(il);
736 ret = il_eeprom_verify_signature(il);
738 IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
743 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
744 ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
746 IL_ERR("Failed to acquire EEPROM semaphore.\n");
751 /* eeprom is an array of 16bit values */
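/* Each CSR_EEPROM_REG read returns the selected 16-bit word in
 * bits 31:16 of the register (hence the ">> 16" below). */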
752 for (addr = 0; addr < sz; addr += sizeof(u16)) {
755 _il_wr(il, CSR_EEPROM_REG,
756 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
759 _il_poll_bit(il, CSR_EEPROM_REG,
760 CSR_EEPROM_REG_READ_VALID_MSK,
761 CSR_EEPROM_REG_READ_VALID_MSK,
762 IL_EEPROM_ACCESS_TIMEOUT);
764 IL_ERR("Time out reading EEPROM[%d]\n", addr);
767 r = _il_rd(il, CSR_EEPROM_REG);
768 e[addr / 2] = cpu_to_le16(r >> 16);
771 D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
772 il_eeprom_query16(il, EEPROM_VERSION));
776 il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
781 /* Reset chip to save power until we load uCode during "up". */
786 EXPORT_SYMBOL(il_eeprom_init);
789 il_eeprom_free(struct il_priv *il)
794 EXPORT_SYMBOL(il_eeprom_free);
797 il_init_band_reference(const struct il_priv *il, int eep_band,
798 int *eeprom_ch_count,
799 const struct il_eeprom_channel **eeprom_ch_info,
800 const u8 **eeprom_ch_idx)
803 il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
805 case 1: /* 2.4GHz band */
806 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
808 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
810 *eeprom_ch_idx = il_eeprom_band_1;
812 case 2: /* 4.9GHz band */
813 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
815 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
817 *eeprom_ch_idx = il_eeprom_band_2;
819 case 3: /* 5.2GHz band */
820 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
822 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
824 *eeprom_ch_idx = il_eeprom_band_3;
826 case 4: /* 5.5GHz band */
827 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
829 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
831 *eeprom_ch_idx = il_eeprom_band_4;
833 case 5: /* 5.7GHz band */
834 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
836 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
838 *eeprom_ch_idx = il_eeprom_band_5;
840 case 6: /* 2.4GHz ht40 channels */
841 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
843 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
845 *eeprom_ch_idx = il_eeprom_band_6;
847 case 7: /* 5 GHz ht40 channels */
848 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
850 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
852 *eeprom_ch_idx = il_eeprom_band_7;
859 #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
862 * il_mod_ht40_chan_info - Copy HT40 channel info into the driver's il_priv.
864 * Does not set up a command, or touch hardware.
867 il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
868 const struct il_eeprom_channel *eeprom_ch,
869 u8 clear_ht40_extension_channel)
871 struct il_channel_info *ch_info;
874 (struct il_channel_info *)il_get_channel_info(il, band, channel);
876 if (!il_is_channel_valid(ch_info))
879 D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
880 " Ad-Hoc %ssupported\n", ch_info->channel,
881 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
882 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
883 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
884 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
885 eeprom_ch->max_power_avg,
886 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
887 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
889 ch_info->ht40_eeprom = *eeprom_ch;
890 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
891 ch_info->ht40_flags = eeprom_ch->flags;
892 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
893 ch_info->ht40_extension_channel &=
894 ~clear_ht40_extension_channel;
899 #define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
903 * il_init_channel_map - Set up driver's info for all possible channels
906 il_init_channel_map(struct il_priv *il)
908 int eeprom_ch_count = 0;
909 const u8 *eeprom_ch_idx = NULL;
910 const struct il_eeprom_channel *eeprom_ch_info = NULL;
912 struct il_channel_info *ch_info;
914 if (il->channel_count) {
915 D_EEPROM("Channel map already initialized.\n");
919 D_EEPROM("Initializing regulatory info from EEPROM\n");
922 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
923 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
924 ARRAY_SIZE(il_eeprom_band_5);
926 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
929 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
931 if (!il->channel_info) {
932 IL_ERR("Could not allocate channel_info\n");
933 il->channel_count = 0;
937 ch_info = il->channel_info;
939 /* Loop through the 5 EEPROM bands adding them in order to the
940 * channel map we maintain (which contains more information than
941 * just what is in the EEPROM) */
942 for (band = 1; band <= 5; band++) {
944 il_init_band_reference(il, band, &eeprom_ch_count,
945 &eeprom_ch_info, &eeprom_ch_idx);
947 /* Loop through each band adding each of the channels */
948 for (ch = 0; ch < eeprom_ch_count; ch++) {
949 ch_info->channel = eeprom_ch_idx[ch];
952 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
954 /* permanently store EEPROM's channel regulatory flags
955 * and max power in channel info database. */
956 ch_info->eeprom = eeprom_ch_info[ch];
958 /* Copy the run-time flags so they are there even on
959 * invalid channels */
960 ch_info->flags = eeprom_ch_info[ch].flags;
961 /* First write that ht40 is not enabled, and then enable
963 ch_info->ht40_extension_channel =
964 IEEE80211_CHAN_NO_HT40;
966 if (!(il_is_channel_valid(ch_info))) {
967 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
968 "No traffic\n", ch_info->channel,
970 il_is_channel_a_band(ch_info) ? "5.2" :
976 /* Initialize regulatory-based run-time data */
977 ch_info->max_power_avg = ch_info->curr_txpow =
978 eeprom_ch_info[ch].max_power_avg;
979 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
980 ch_info->min_power = 0;
982 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
983 " Ad-Hoc %ssupported\n", ch_info->channel,
984 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
985 CHECK_AND_PRINT_I(VALID),
986 CHECK_AND_PRINT_I(IBSS),
987 CHECK_AND_PRINT_I(ACTIVE),
988 CHECK_AND_PRINT_I(RADAR),
989 CHECK_AND_PRINT_I(WIDE),
990 CHECK_AND_PRINT_I(DFS),
991 eeprom_ch_info[ch].flags,
992 eeprom_ch_info[ch].max_power_avg,
993 ((eeprom_ch_info[ch].
994 flags & EEPROM_CHANNEL_IBSS) &&
995 !(eeprom_ch_info[ch].
996 flags & EEPROM_CHANNEL_RADAR)) ? "" :
1003 /* Check if we do have HT40 channels */
1004 if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
1005 EEPROM_REGULATORY_BAND_NO_HT40 &&
1006 il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
1007 EEPROM_REGULATORY_BAND_NO_HT40)
1010 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
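/* Each HT40 EEPROM entry names the lower (control) channel of the pair;
 * its extension channel sits 4 channel numbers (20 MHz) higher, e.g.
 * channel 36 pairs with 40 -- hence the "+ 4" below. */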
1011 for (band = 6; band <= 7; band++) {
1012 enum ieee80211_band ieeeband;
1014 il_init_band_reference(il, band, &eeprom_ch_count,
1015 &eeprom_ch_info, &eeprom_ch_idx);
1017 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
1019 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1021 /* Loop through each band adding each of the channels */
1022 for (ch = 0; ch < eeprom_ch_count; ch++) {
1023 /* Set up driver's info for lower half */
1024 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
1025 &eeprom_ch_info[ch],
1026 IEEE80211_CHAN_NO_HT40PLUS);
1028 /* Set up driver's info for upper half */
1029 il_mod_ht40_chan_info(il, ieeeband,
1030 eeprom_ch_idx[ch] + 4,
1031 &eeprom_ch_info[ch],
1032 IEEE80211_CHAN_NO_HT40MINUS);
1038 EXPORT_SYMBOL(il_init_channel_map);
1041 * il_free_channel_map - undo allocations in il_init_channel_map
1044 il_free_channel_map(struct il_priv *il)
1046 kfree(il->channel_info);
1047 il->channel_count = 0;
1049 EXPORT_SYMBOL(il_free_channel_map);
1052 * il_get_channel_info - Find driver's private channel info
1054 * Based on band and channel number.
1056 const struct il_channel_info *
1057 il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
1063 case IEEE80211_BAND_5GHZ:
1064 for (i = 14; i < il->channel_count; i++) {
1065 if (il->channel_info[i].channel == channel)
1066 return &il->channel_info[i];
1069 case IEEE80211_BAND_2GHZ:
1070 if (channel >= 1 && channel <= 14)
1071 return &il->channel_info[channel - 1];
1079 EXPORT_SYMBOL(il_get_channel_info);
1082 * Setting power level allows the card to go to sleep when not busy.
1084 * We calculate a sleep command based on the required latency, which
1085 * we get from mac80211. In order to handle thermal throttling, we can
1086 * also use pre-defined power levels.
1090 * This defines the old power levels. They are still used by default
1091 * (level 1) and for thermal throttle (levels 3 through 5)
1094 struct il_power_vec_entry {
1095 struct il_powertable_cmd cmd;
1096 u8 no_dtim; /* number of skip dtim */
1100 il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
1102 memset(cmd, 0, sizeof(*cmd));
1104 if (il->power_data.pci_pm)
1105 cmd->flags |= IL_POWER_PCI_PM_MSK;
1107 D_POWER("Sleep command for CAM\n");
1111 il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1113 D_POWER("Sending power/sleep command\n");
1114 D_POWER("Flags value = 0x%08X\n", cmd->flags);
1115 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
1116 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
1117 D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
1118 le32_to_cpu(cmd->sleep_interval[0]),
1119 le32_to_cpu(cmd->sleep_interval[1]),
1120 le32_to_cpu(cmd->sleep_interval[2]),
1121 le32_to_cpu(cmd->sleep_interval[3]),
1122 le32_to_cpu(cmd->sleep_interval[4]));
1124 return il_send_cmd_pdu(il, C_POWER_TBL,
1125 sizeof(struct il_powertable_cmd), cmd);
1129 il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
1134 lockdep_assert_held(&il->mutex);
1136 /* Don't update the RX chain when chain noise calibration is running */
1137 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
1138 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
1140 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
1143 if (!il_is_ready_rf(il))
1146 /* scan complete will apply sleep_cmd_next, so keep it updated */
1147 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
1148 if (test_bit(S_SCANNING, &il->status) && !force) {
1149 D_INFO("Defer power set mode while scanning\n");
1153 if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
1154 set_bit(S_POWER_PMI, &il->status);
1156 ret = il_set_power(il, cmd);
1158 if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
1159 clear_bit(S_POWER_PMI, &il->status);
1161 if (il->cfg->ops->lib->update_chain_flags && update_chains)
1162 il->cfg->ops->lib->update_chain_flags(il);
1163 else if (il->cfg->ops->lib->update_chain_flags)
1164 D_POWER("Cannot update the power, chain noise "
1165 "calibration running: %d\n",
1166 il->chain_noise_data.state);
1168 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
1170 IL_ERR("set power fail, ret = %d", ret);
1176 il_power_update_mode(struct il_priv *il, bool force)
1178 struct il_powertable_cmd cmd;
1180 il_power_sleep_cam_cmd(il, &cmd);
1181 return il_power_set_mode(il, &cmd, force);
1183 EXPORT_SYMBOL(il_power_update_mode);
1185 /* initialize to default */
1187 il_power_initialize(struct il_priv *il)
1189 u16 lctl = il_pcie_link_ctl(il);
1191 il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
1193 il->power_data.debug_sleep_level_override = -1;
1195 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1197 EXPORT_SYMBOL(il_power_initialize);
1199 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
1200 * sending probe req. This should be set long enough to hear probe responses
1201 * from more than one AP. */
1202 #define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
1203 #define IL_ACTIVE_DWELL_TIME_52 (20)
1205 #define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1206 #define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1208 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
1209 * Must be set longer than active dwell time.
1210 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
1211 #define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
1212 #define IL_PASSIVE_DWELL_TIME_52 (10)
1213 #define IL_PASSIVE_DWELL_BASE (100)
1214 #define IL_CHANNEL_TUNE_TIME 5
1217 il_send_scan_abort(struct il_priv *il)
1220 struct il_rx_pkt *pkt;
1221 struct il_host_cmd cmd = {
1223 .flags = CMD_WANT_SKB,
1226 /* Exit instantly with an error when the device is not ready
1227 * to receive the scan abort command or is not currently
1228 * performing a hardware scan */
1229 if (!test_bit(S_READY, &il->status) ||
1230 !test_bit(S_GEO_CONFIGURED, &il->status) ||
1231 !test_bit(S_SCAN_HW, &il->status) ||
1232 test_bit(S_FW_ERROR, &il->status) ||
1233 test_bit(S_EXIT_PENDING, &il->status))
1236 ret = il_send_cmd_sync(il, &cmd);
1240 pkt = (struct il_rx_pkt *)cmd.reply_page;
1241 if (pkt->u.status != CAN_ABORT_STATUS) {
1242 /* The scan abort will return 1 for success or
1243 * 2 for "failure". A failure condition can be
1244 * due to simply not being in an active scan which
1245 * can occur if we send the scan abort before the
1246 * microcode has notified us that a scan is
1248 D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
1252 il_free_pages(il, cmd.reply_page);
1257 il_complete_scan(struct il_priv *il, bool aborted)
1259 /* check if scan was requested from mac80211 */
1260 if (il->scan_request) {
1261 D_SCAN("Complete scan in mac80211\n");
1262 ieee80211_scan_completed(il->hw, aborted);
1265 il->scan_vif = NULL;
1266 il->scan_request = NULL;
1270 il_force_scan_end(struct il_priv *il)
1272 lockdep_assert_held(&il->mutex);
1274 if (!test_bit(S_SCANNING, &il->status)) {
1275 D_SCAN("Forcing scan end while not scanning\n");
1279 D_SCAN("Forcing scan end\n");
1280 clear_bit(S_SCANNING, &il->status);
1281 clear_bit(S_SCAN_HW, &il->status);
1282 clear_bit(S_SCAN_ABORTING, &il->status);
1283 il_complete_scan(il, true);
1287 il_do_scan_abort(struct il_priv *il)
1291 lockdep_assert_held(&il->mutex);
1293 if (!test_bit(S_SCANNING, &il->status)) {
1294 D_SCAN("Not performing scan to abort\n");
1298 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1299 D_SCAN("Scan abort in progress\n");
1303 ret = il_send_scan_abort(il);
1305 D_SCAN("Send scan abort failed %d\n", ret);
1306 il_force_scan_end(il);
1308 D_SCAN("Successfully send scan abort\n");
1312 * il_scan_cancel - Cancel any currently executing HW scan
1315 il_scan_cancel(struct il_priv *il)
1317 D_SCAN("Queuing abort scan\n");
1318 queue_work(il->workqueue, &il->abort_scan);
1321 EXPORT_SYMBOL(il_scan_cancel);
1324 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1325 * @ms: amount of time to wait (in milliseconds) for scan to abort
1329 il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1331 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1333 lockdep_assert_held(&il->mutex);
1335 D_SCAN("Scan cancel timeout\n");
1337 il_do_scan_abort(il);
1339 while (time_before_eq(jiffies, timeout)) {
1340 if (!test_bit(S_SCAN_HW, &il->status))
1345 return test_bit(S_SCAN_HW, &il->status);
1347 EXPORT_SYMBOL(il_scan_cancel_timeout);
1349 /* Service response to C_SCAN (0x80) */
1351 il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
1353 #ifdef CONFIG_IWLEGACY_DEBUG
1354 struct il_rx_pkt *pkt = rxb_addr(rxb);
1355 struct il_scanreq_notification *notif =
1356 (struct il_scanreq_notification *)pkt->u.raw;
1358 D_SCAN("Scan request status = 0x%x\n", notif->status);
1362 /* Service N_SCAN_START (0x82) */
1364 il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
1366 struct il_rx_pkt *pkt = rxb_addr(rxb);
1367 struct il_scanstart_notification *notif =
1368 (struct il_scanstart_notification *)pkt->u.raw;
1369 il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
1370 D_SCAN("Scan start: " "%d [802.11%s] "
1371 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
1372 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
1373 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
1376 /* Service N_SCAN_RESULTS (0x83) */
1378 il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
1380 #ifdef CONFIG_IWLEGACY_DEBUG
1381 struct il_rx_pkt *pkt = rxb_addr(rxb);
1382 struct il_scanresults_notification *notif =
1383 (struct il_scanresults_notification *)pkt->u.raw;
1385 D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
1386 "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
1387 le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
1388 le32_to_cpu(notif->stats[0]),
1389 le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
1393 /* Service N_SCAN_COMPLETE (0x84) */
1395 il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1398 #ifdef CONFIG_IWLEGACY_DEBUG
1399 struct il_rx_pkt *pkt = rxb_addr(rxb);
1400 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
1403 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
1404 scan_notif->scanned_channels, scan_notif->tsf_low,
1405 scan_notif->tsf_high, scan_notif->status);
1407 /* The HW is no longer scanning */
1408 clear_bit(S_SCAN_HW, &il->status);
1410 D_SCAN("Scan on %sGHz took %dms\n",
1411 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
1412 jiffies_to_msecs(jiffies - il->scan_start));
1414 queue_work(il->workqueue, &il->scan_completed);
1418 il_setup_rx_scan_handlers(struct il_priv *il)
1421 il->handlers[C_SCAN] = il_hdl_scan;
1422 il->handlers[N_SCAN_START] = il_hdl_scan_start;
1423 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
1424 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
1426 EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1429 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1432 if (band == IEEE80211_BAND_5GHZ)
1433 return IL_ACTIVE_DWELL_TIME_52 +
1434 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1436 return IL_ACTIVE_DWELL_TIME_24 +
1437 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1439 EXPORT_SYMBOL(il_get_active_dwell_time);
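/*
 * Example (illustrative numbers only): with n_probes == 2, a 2.4 GHz active
 * scan dwells 30 + 3 * (2 + 1) = 39 ms per channel.
 */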
1442 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1443 struct ieee80211_vif *vif)
1445 struct il_rxon_context *ctx = &il->ctx;
1450 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1451 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1452 IL_PASSIVE_DWELL_TIME_52;
1454 if (il_is_any_associated(il)) {
1456 * If we're associated, we clamp the maximum passive
1457 * dwell time to be 98% of the smallest beacon interval
1458 * (minus 2 * channel tune time)
1460 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
1461 if (value > IL_PASSIVE_DWELL_BASE || !value)
1462 value = IL_PASSIVE_DWELL_BASE;
1463 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
1464 passive = min(value, passive);
1469 EXPORT_SYMBOL(il_get_passive_dwell_time);
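/*
 * Worked example for il_get_passive_dwell_time() above (illustrative
 * numbers): with a beacon interval of 100 on 2.4 GHz, the base passive
 * dwell is 100 + 20 = 120 ms, while the associated clamp is
 * 100 * 98 / 100 - 2 * 5 = 88 ms, so 88 ms is used.
 */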
1472 il_init_scan_params(struct il_priv *il)
1474 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1475 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1476 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1477 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1478 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1480 EXPORT_SYMBOL(il_init_scan_params);
1483 il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
1487 lockdep_assert_held(&il->mutex);
1489 if (WARN_ON(!il->cfg->ops->utils->request_scan))
1492 cancel_delayed_work(&il->scan_check);
1494 if (!il_is_ready_rf(il)) {
1495 IL_WARN("Request scan called when driver not ready.\n");
1499 if (test_bit(S_SCAN_HW, &il->status)) {
1500 D_SCAN("Multiple concurrent scan requests in parallel.\n");
1504 if (test_bit(S_SCAN_ABORTING, &il->status)) {
1505 D_SCAN("Scan request while abort pending.\n");
1509 D_SCAN("Starting scan...\n");
1511 set_bit(S_SCANNING, &il->status);
1512 il->scan_start = jiffies;
1514 ret = il->cfg->ops->utils->request_scan(il, vif);
1516 clear_bit(S_SCANNING, &il->status);
1520 queue_delayed_work(il->workqueue, &il->scan_check,
1521 IL_SCAN_CHECK_WATCHDOG);
1527 il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1528 struct cfg80211_scan_request *req)
1530 struct il_priv *il = hw->priv;
1533 D_MAC80211("enter\n");
1535 if (req->n_channels == 0)
1538 mutex_lock(&il->mutex);
1540 if (test_bit(S_SCANNING, &il->status)) {
1541 D_SCAN("Scan already in progress.\n");
1546 /* mac80211 will only ask for one band at a time */
1547 il->scan_request = req;
1549 il->scan_band = req->channels[0]->band;
1551 ret = il_scan_initiate(il, vif);
1553 D_MAC80211("leave\n");
1556 mutex_unlock(&il->mutex);
1560 EXPORT_SYMBOL(il_mac_hw_scan);
1563 il_bg_scan_check(struct work_struct *data)
1565 struct il_priv *il =
1566 container_of(data, struct il_priv, scan_check.work);
1568 D_SCAN("Scan check work\n");
1570 /* Since we got here, the firmware has not finished the scan and
1571 * is most likely in bad shape, so we don't bother to send an
1572 * abort command; just force scan complete to mac80211 */
1573 mutex_lock(&il->mutex);
1574 il_force_scan_end(il);
1575 mutex_unlock(&il->mutex);
1579 * il_fill_probe_req - fill in all required fields and IE for probe request
1583 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1584 const u8 *ta, const u8 *ies, int ie_len, int left)
1589 /* Make sure there is enough space for the probe request,
1590 * two mandatory IEs and the data */
1595 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1596 memcpy(frame->da, il_bcast_addr, ETH_ALEN);
1597 memcpy(frame->sa, ta, ETH_ALEN);
1598 memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
1599 frame->seq_ctrl = 0;
1604 pos = &frame->u.probe_req.variable[0];
1606 /* fill in our indirect SSID IE */
1610 *pos++ = WLAN_EID_SSID;
1615 if (WARN_ON(left < ie_len))
1618 if (ies && ie_len) {
1619 memcpy(pos, ies, ie_len);
1625 EXPORT_SYMBOL(il_fill_probe_req);
1628 il_bg_abort_scan(struct work_struct *work)
1630 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1632 D_SCAN("Abort scan work\n");
1634 /* We keep the scan_check work queued in case the firmware fails to
1635 * report back a scan completed notification */
1636 mutex_lock(&il->mutex);
1637 il_scan_cancel_timeout(il, 200);
1638 mutex_unlock(&il->mutex);
1642 il_bg_scan_completed(struct work_struct *work)
1644 struct il_priv *il = container_of(work, struct il_priv, scan_completed);
1647 D_SCAN("Completed scan.\n");
1649 cancel_delayed_work(&il->scan_check);
1651 mutex_lock(&il->mutex);
1653 aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
1655 D_SCAN("Aborted scan completed.\n");
1657 if (!test_and_clear_bit(S_SCANNING, &il->status)) {
1658 D_SCAN("Scan already completed.\n");
1662 il_complete_scan(il, aborted);
1665 /* Can we still talk to firmware ? */
1666 if (!il_is_ready_rf(il))
1670 * We do not commit power settings while scan is pending,
1671 * do it now if the settings changed.
1673 il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
1674 il_set_tx_power(il, il->tx_power_next, false);
1676 il->cfg->ops->utils->post_scan(il);
1679 mutex_unlock(&il->mutex);
1683 il_setup_scan_deferred_work(struct il_priv *il)
1685 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1686 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1687 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1689 EXPORT_SYMBOL(il_setup_scan_deferred_work);
1692 il_cancel_scan_deferred_work(struct il_priv *il)
1694 cancel_work_sync(&il->abort_scan);
1695 cancel_work_sync(&il->scan_completed);
1697 if (cancel_delayed_work_sync(&il->scan_check)) {
1698 mutex_lock(&il->mutex);
1699 il_force_scan_end(il);
1700 mutex_unlock(&il->mutex);
1703 EXPORT_SYMBOL(il_cancel_scan_deferred_work);
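/*
 * Station bookkeeping below revolves around the per-entry "used" flags.
 * Lifecycle sketch as implemented by the helpers that follow (other flags
 * such as IL_STA_LOCAL and IL_STA_BCAST also exist):
 *
 *	IL_STA_DRIVER_ACTIVE    - il_prep_station() reserved the entry
 *	IL_STA_UCODE_INPROGRESS - C_ADD_STA sent, waiting for the reply
 *	IL_STA_UCODE_ACTIVE     - uCode acknowledged the station
 */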
1705 /* il->sta_lock must be held */
1707 il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1710 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1711 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1712 sta_id, il->stations[sta_id].sta.sta.addr);
1714 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1715 D_ASSOC("STA id %u addr %pM already present"
1716 " in uCode (according to driver)\n", sta_id,
1717 il->stations[sta_id].sta.sta.addr);
1719 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1720 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1721 il->stations[sta_id].sta.sta.addr);
1726 il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
1727 struct il_rx_pkt *pkt, bool sync)
1729 u8 sta_id = addsta->sta.sta_id;
1730 unsigned long flags;
1733 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1734 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
1738 D_INFO("Processing response for adding station %u\n", sta_id);
1740 spin_lock_irqsave(&il->sta_lock, flags);
1742 switch (pkt->u.add_sta.status) {
1743 case ADD_STA_SUCCESS_MSK:
1744 D_INFO("C_ADD_STA PASSED\n");
1745 il_sta_ucode_activate(il, sta_id);
1748 case ADD_STA_NO_ROOM_IN_TBL:
1749 IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
1751 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
1752 IL_ERR("Adding station %d failed, no block ack resource.\n",
1755 case ADD_STA_MODIFY_NON_EXIST_STA:
1756 IL_ERR("Attempting to modify non-existing station %d\n",
1760 D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
1764 D_INFO("%s station id %u addr %pM\n",
1765 il->stations[sta_id].sta.mode ==
1766 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
1767 il->stations[sta_id].sta.sta.addr);
1770 * XXX: The MAC address in the command buffer is often changed from
1771 * the original sent to the device. That is, the MAC address
1772 * written to the command buffer often is not the same MAC address
1773 * read from the command buffer when the command returns. This
1774 * issue has not yet been resolved and this debugging is left to
1775 * observe the problem.
1777 D_INFO("%s station according to cmd buffer %pM\n",
1778 il->stations[sta_id].sta.mode ==
1779 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
1780 spin_unlock_irqrestore(&il->sta_lock, flags);
1786 il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1787 struct il_rx_pkt *pkt)
1789 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1791 il_process_add_sta_resp(il, addsta, pkt, false);
1796 il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1798 struct il_rx_pkt *pkt = NULL;
1800 u8 data[sizeof(*sta)];
1801 struct il_host_cmd cmd = {
1806 u8 sta_id __maybe_unused = sta->sta.sta_id;
1808 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1809 flags & CMD_ASYNC ? "a" : "");
1811 if (flags & CMD_ASYNC)
1812 cmd.callback = il_add_sta_callback;
1814 cmd.flags |= CMD_WANT_SKB;
1818 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
1819 ret = il_send_cmd(il, &cmd);
1821 if (ret || (flags & CMD_ASYNC))
1825 pkt = (struct il_rx_pkt *)cmd.reply_page;
1826 ret = il_process_add_sta_resp(il, sta, pkt, true);
1828 il_free_pages(il, cmd.reply_page);
1832 EXPORT_SYMBOL(il_send_add_sta);
1835 il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
1836 struct il_rxon_context *ctx)
1838 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1842 if (!sta || !sta_ht_inf->ht_supported)
1845 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
1846 D_ASSOC("spatial multiplexing power save mode: %s\n",
1847 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
1848 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
1851 sta_flags = il->stations[idx].sta.station_flags;
1853 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1855 switch (mimo_ps_mode) {
1856 case WLAN_HT_CAP_SM_PS_STATIC:
1857 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1859 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1860 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1862 case WLAN_HT_CAP_SM_PS_DISABLED:
1865 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
1870 cpu_to_le32((u32) sta_ht_inf->
1871 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1874 cpu_to_le32((u32) sta_ht_inf->
1875 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1877 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1878 sta_flags |= STA_FLG_HT40_EN_MSK;
1880 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1882 il->stations[idx].sta.station_flags = sta_flags;
1888 * il_prep_station - Prepare station information for addition
1890 * should be called with sta_lock held
1893 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
1894 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
1896 struct il_station_entry *station;
1898 u8 sta_id = IL_INVALID_STATION;
1903 else if (is_broadcast_ether_addr(addr))
1904 sta_id = il->hw_params.bcast_id;
1906 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1907 if (!compare_ether_addr
1908 (il->stations[i].sta.sta.addr, addr)) {
1913 if (!il->stations[i].used &&
1914 sta_id == IL_INVALID_STATION)
1919 * These two conditions have the same outcome, but keep them
1922 if (unlikely(sta_id == IL_INVALID_STATION))
1926 * uCode is not able to deal with multiple requests to add a
1927 * station. Keep track if one is in progress so that we do not send
1930 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1931 D_INFO("STA %d already in process of being added.\n", sta_id);
1935 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1936 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1937 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
1938 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1943 station = &il->stations[sta_id];
1944 station->used = IL_STA_DRIVER_ACTIVE;
1945 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1948 /* Set up the C_ADD_STA command to send to device */
1949 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1950 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1951 station->sta.mode = 0;
1952 station->sta.sta.sta_id = sta_id;
1953 station->sta.station_flags = 0;
1956 struct il_station_priv_common *sta_priv;
1958 sta_priv = (void *)sta->drv_priv;
1959 sta_priv->ctx = ctx;
1963 * OK to call unconditionally, since local stations (IBSS BSSID
1964 * STA and broadcast STA) pass in a NULL sta, and mac80211
1965 * doesn't allow HT IBSS.
1967 il_set_ht_add_station(il, sta_id, sta, ctx);
1970 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
1971 /* Turn on both antennas for the station... */
1972 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
1977 EXPORT_SYMBOL_GPL(il_prep_station);
1979 #define STA_WAIT_TIMEOUT (HZ/2)
1982 * il_add_station_common - add a station to the uCode's station table
1985 il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
1986 const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
1989 unsigned long flags_spin;
1992 struct il_addsta_cmd sta_cmd;
1995 spin_lock_irqsave(&il->sta_lock, flags_spin);
1996 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
1997 if (sta_id == IL_INVALID_STATION) {
1998 IL_ERR("Unable to prepare station %pM for addition\n", addr);
1999 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2004 * uCode is not able to deal with multiple requests to add a
2005 * station. Keep track if one is in progress so that we do not send
2008 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
2009 D_INFO("STA %d already in process of being added.\n", sta_id);
2010 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2014 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
2015 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2016 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
2018 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2022 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
2023 memcpy(&sta_cmd, &il->stations[sta_id].sta,
2024 sizeof(struct il_addsta_cmd));
2025 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2027 /* Add station to device's station table */
2028 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2030 spin_lock_irqsave(&il->sta_lock, flags_spin);
2031 IL_ERR("Adding station %pM failed.\n",
2032 il->stations[sta_id].sta.sta.addr);
2033 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2034 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2035 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2040 EXPORT_SYMBOL(il_add_station_common);
2043 * il_sta_ucode_deactivate - deactivate ucode status for a station
2045 * il->sta_lock must be held
2048 il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
2050 /* uCode must be active and driver must be inactive */
2051 if ((il->stations[sta_id].
2052 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
2053 IL_STA_UCODE_ACTIVE)
2054 IL_ERR("removed non active STA %u\n", sta_id);
2056 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
2058 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
2059 D_ASSOC("Removed STA %u\n", sta_id);
2063 il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
2066 struct il_rx_pkt *pkt;
2069 unsigned long flags_spin;
2070 struct il_rem_sta_cmd rm_sta_cmd;
2072 struct il_host_cmd cmd = {
2074 .len = sizeof(struct il_rem_sta_cmd),
2076 .data = &rm_sta_cmd,
2079 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
2080 rm_sta_cmd.num_sta = 1;
2081 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
2083 cmd.flags |= CMD_WANT_SKB;
2085 ret = il_send_cmd(il, &cmd);
2090 pkt = (struct il_rx_pkt *)cmd.reply_page;
2091 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
2092 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
2097 switch (pkt->u.rem_sta.status) {
2098 case REM_STA_SUCCESS_MSK:
2100 spin_lock_irqsave(&il->sta_lock, flags_spin);
2101 il_sta_ucode_deactivate(il, sta_id);
2102 spin_unlock_irqrestore(&il->sta_lock,
2105 D_ASSOC("C_REM_STA PASSED\n");
2109 IL_ERR("C_REM_STA failed\n");
2113 il_free_pages(il, cmd.reply_page);
2119 * il_remove_station - Remove driver's knowledge of station.
2122 il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
2124 unsigned long flags;
2126 if (!il_is_ready(il)) {
2127 D_INFO("Unable to remove station %pM, device not ready.\n",
2130 * It is typical for stations to be removed when we are
2131 * going down. Return success since device will be down
2137 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
2139 if (WARN_ON(sta_id == IL_INVALID_STATION))
2142 spin_lock_irqsave(&il->sta_lock, flags);
2144 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2145 D_INFO("Removing %pM but non DRIVER active\n", addr);
2149 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
2150 D_INFO("Removing %pM but non UCODE active\n", addr);
2154 if (il->stations[sta_id].used & IL_STA_LOCAL) {
2155 kfree(il->stations[sta_id].lq);
2156 il->stations[sta_id].lq = NULL;
2159 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2163 BUG_ON(il->num_stations < 0);
2165 spin_unlock_irqrestore(&il->sta_lock, flags);
2167 return il_send_remove_station(il, addr, sta_id, false);
2169 spin_unlock_irqrestore(&il->sta_lock, flags);
2172 EXPORT_SYMBOL_GPL(il_remove_station);
2175 * il_clear_ucode_stations - clear ucode station table bits
2177 * This function clears all the bits in the driver indicating
2178 * which stations are active in the ucode. Call when something
2179 * other than explicit station management would cause this in
2180 * the ucode, e.g. unassociated RXON.
2183 il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
2186 unsigned long flags_spin;
2187 bool cleared = false;
2189 D_INFO("Clearing ucode stations in driver\n");
2191 spin_lock_irqsave(&il->sta_lock, flags_spin);
2192 for (i = 0; i < il->hw_params.max_stations; i++) {
2193 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2194 D_INFO("Clearing ucode active for station %d\n", i);
2195 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2199 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2202 D_INFO("No active stations found to be cleared\n");
2204 EXPORT_SYMBOL(il_clear_ucode_stations);
2207 * il_restore_stations() - Restore driver known stations to device
2209 * All stations considered active by the driver, but not present in ucode, are
2215 il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
2217 struct il_addsta_cmd sta_cmd;
2218 struct il_link_quality_cmd lq;
2219 unsigned long flags_spin;
2225 if (!il_is_ready(il)) {
2226 D_INFO("Not ready yet, not restoring any stations.\n");
2230 D_ASSOC("Restoring all known stations ... start.\n");
2231 spin_lock_irqsave(&il->sta_lock, flags_spin);
2232 for (i = 0; i < il->hw_params.max_stations; i++) {
2233 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2234 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2235 D_ASSOC("Restoring sta %pM\n",
2236 il->stations[i].sta.sta.addr);
2237 il->stations[i].sta.mode = 0;
2238 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2243 for (i = 0; i < il->hw_params.max_stations; i++) {
2244 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2245 memcpy(&sta_cmd, &il->stations[i].sta,
2246 sizeof(struct il_addsta_cmd));
2248 if (il->stations[i].lq) {
2249 memcpy(&lq, il->stations[i].lq,
2250 sizeof(struct il_link_quality_cmd));
2253 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2254 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2256 spin_lock_irqsave(&il->sta_lock, flags_spin);
2257 IL_ERR("Adding station %pM failed.\n",
2258 il->stations[i].sta.sta.addr);
2259 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2260 il->stations[i].used &=
2261 ~IL_STA_UCODE_INPROGRESS;
2262 spin_unlock_irqrestore(&il->sta_lock,
2266 * Rate scaling has already been initialized, send
2267 * current LQ command
2270 il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
2271 spin_lock_irqsave(&il->sta_lock, flags_spin);
2272 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2276 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2278 D_INFO("Restoring all known stations"
2279 " .... no stations to be restored.\n");
2281 D_INFO("Restoring all known stations" " .... complete.\n");
2283 EXPORT_SYMBOL(il_restore_stations);
2286 il_get_free_ucode_key_idx(struct il_priv *il)
2290 for (i = 0; i < il->sta_key_max_num; i++)
2291 if (!test_and_set_bit(i, &il->ucode_key_table))
2294 return WEP_INVALID_OFFSET;
2296 EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2299 il_dealloc_bcast_stations(struct il_priv *il)
2301 unsigned long flags;
2304 spin_lock_irqsave(&il->sta_lock, flags);
2305 for (i = 0; i < il->hw_params.max_stations; i++) {
2306 if (!(il->stations[i].used & IL_STA_BCAST))
2309 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2311 BUG_ON(il->num_stations < 0);
2312 kfree(il->stations[i].lq);
2313 il->stations[i].lq = NULL;
2315 spin_unlock_irqrestore(&il->sta_lock, flags);
2317 EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2319 #ifdef CONFIG_IWLEGACY_DEBUG
2321 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2324 D_RATE("lq station id 0x%x\n", lq->sta_id);
2325 D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
2326 lq->general_params.dual_stream_ant_msk);
2328 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2329 D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
2333 il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
2339 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2341 * It sometimes happens that an HT rate has been in use and we
2342 * lose connectivity with the AP; mac80211 will then first tell us that the
2343 * current channel is not HT anymore before removing the station. In such a
2344 * scenario the RXON flags will be updated to indicate we are not
2345 * communicating HT anymore, but the LQ command may still contain HT rates.
2346 * Test for this to prevent driver from sending LQ command between the time
2347 * RXON flags are updated and when LQ command is updated.
2350 il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
2351 struct il_link_quality_cmd *lq)
2355 if (ctx->ht.enabled)
2358 D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2359 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2360 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2361 D_INFO("idx %d of LQ expects HT channel\n", i);
2369 * il_send_lq_cmd() - Send link quality command
2370 * @init: This command is sent as part of station initialization right
2371 * after station has been added.
2373 * The link quality command is sent as the last step of station creation.
2374 * This is the special case in which init is set: once the command completes
2375 * we clear the state indicating that station creation is in
2379 il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2380 struct il_link_quality_cmd *lq, u8 flags, bool init)
2383 unsigned long flags_spin;
2385 struct il_host_cmd cmd = {
2386 .id = C_TX_LINK_QUALITY_CMD,
2387 .len = sizeof(struct il_link_quality_cmd),
2392 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2395 spin_lock_irqsave(&il->sta_lock, flags_spin);
2396 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2397 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2400 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2402 il_dump_lq_cmd(il, lq);
2403 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2405 if (il_is_lq_table_valid(il, ctx, lq))
2406 ret = il_send_cmd(il, &cmd);
2410 if (cmd.flags & CMD_ASYNC)
2414 D_INFO("init LQ command complete,"
2415 " clearing sta addition status for sta %d\n",
2417 spin_lock_irqsave(&il->sta_lock, flags_spin);
2418 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2419 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2423 EXPORT_SYMBOL(il_send_lq_cmd);
2426 il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2427 struct ieee80211_sta *sta)
2429 struct il_priv *il = hw->priv;
2430 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2433 D_INFO("received request to remove station %pM\n", sta->addr);
2434 mutex_lock(&il->mutex);
2435 D_INFO("proceeding to remove station %pM\n", sta->addr);
2436 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2438 IL_ERR("Error removing station %pM\n", sta->addr);
2439 mutex_unlock(&il->mutex);
2442 EXPORT_SYMBOL(il_mac_sta_remove);
2444 /************************** RX-FUNCTIONS ****************************/
2446 * Rx theory of operation
2448 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2449 * each of which points to a Receive Buffer to be filled by the NIC. These get
2450 * used not only for Rx frames, but for any command response or notification
2451 * from the NIC. The driver and NIC manage the Rx buffers by means
2452 * of idxes into the circular buffer.
2455 * The host/firmware share two idx registers for managing the Rx buffers.
2457 * The READ idx maps to the first position that the firmware may be writing
2458 * to -- the driver can read up to (but not including) this position and get
2460 * The READ idx is managed by the firmware once the card is enabled.
2462 * The WRITE idx maps to the last position the driver has read from -- the
2463 * position preceding WRITE is the last slot the firmware can place a packet.
2465 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2468 * During initialization, the host sets up the READ queue position to the first
2469 * IDX position, and WRITE to the last (READ - 1 wrapped)
2471 * When the firmware places a packet in a buffer, it will advance the READ idx
2472 * and fire the RX interrupt. The driver can then query the READ idx and
2473 * process as many packets as possible, moving the WRITE idx forward as it
2474 * resets the Rx queue buffers with new memory.
2476 * The management in the driver is as follows:
2477 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2478 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2479 * to replenish the iwl->rxq->rx_free.
2480 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2481 * iwl->rxq is replenished and the READ IDX is updated (updating the
2482 * 'processed' and 'read' driver idxes as well)
2483 * + A received packet is processed and handed to the kernel network stack,
2484 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2485 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2486 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2487 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2488 * were enough free buffers and RX_STALLED is set it is cleared.
2493 * il_rx_queue_alloc() Allocates rx_free
2494 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2495 * il_rx_queue_restock
2496 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2497 * queue, updates firmware pointers, and updates
2498 * the WRITE idx. If insufficient rx_free buffers
2499 * are available, schedules il_rx_replenish
2501 * -- enable interrupts --
2502 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2503 * READ IDX, detaching the SKB from the pool.
2504 * Moves the packet buffer from queue to rx_used.
2505 * Calls il_rx_queue_restock to refill any empty
2512 * il_rx_queue_space - Return number of free slots available in queue.
2515 il_rx_queue_space(const struct il_rx_queue *q)
2517 int s = q->read - q->write;
2520 /* keep some buffer to not confuse full and empty queue */
2526 EXPORT_SYMBOL(il_rx_queue_space);
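/*
 * Illustration only (not driver code): free-slot accounting for a circular
 * queue driven by 'read' and 'write' indexes, keeping a small reserve so a
 * full queue is never mistaken for an empty one.  The queue size of 256 is
 * an assumption made for this sketch.
 */
static inline int example_rx_space(unsigned int read, unsigned int write)
{
	const int queue_size = 256;	/* assumed RX queue size */
	int s = read - write;

	if (s <= 0)
		s += queue_size;	/* indexes wrapped around */
	s -= 2;				/* reserve: full != empty */
	return s < 0 ? 0 : s;
}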
2529 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2532 il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2534 unsigned long flags;
2535 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2538 spin_lock_irqsave(&q->lock, flags);
2540 if (q->need_update == 0)
2543 /* If power-saving is in use, make sure device is awake */
2544 if (test_bit(S_POWER_PMI, &il->status)) {
2545 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2547 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2548 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2550 il_set_bit(il, CSR_GP_CNTRL,
2551 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2555 q->write_actual = (q->write & ~0x7);
2556 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2558 /* Else device is assumed to be awake */
2560 /* Device expects a multiple of 8 */
2561 q->write_actual = (q->write & ~0x7);
2562 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2568 spin_unlock_irqrestore(&q->lock, flags);
2570 EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
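/*
 * Illustration only (not driver code): the device expects the RX write
 * pointer as a multiple of 8, so the code above rounds the software index
 * down (write & ~0x7) before handing it to the register.  Indexes 0..7 all
 * report 0, indexes 8..15 report 8, and so on:
 */
static inline unsigned int example_rx_write_actual(unsigned int write)
{
	return write & ~0x7;	/* round down to a multiple of 8 */
}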
2573 il_rx_queue_alloc(struct il_priv *il)
2575 struct il_rx_queue *rxq = &il->rxq;
2576 struct device *dev = &il->pci_dev->dev;
2579 spin_lock_init(&rxq->lock);
2580 INIT_LIST_HEAD(&rxq->rx_free);
2581 INIT_LIST_HEAD(&rxq->rx_used);
2583 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2585 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2591 dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2592 &rxq->rb_stts_dma, GFP_KERNEL);
2596 /* Fill the rx_used queue with _all_ of the Rx buffers */
2597 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2598 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2600 /* Set us so that we have processed and used all buffers, but have
2601 * not restocked the Rx queue with fresh buffers */
2602 rxq->read = rxq->write = 0;
2603 rxq->write_actual = 0;
2604 rxq->free_count = 0;
2605 rxq->need_update = 0;
2609 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2614 EXPORT_SYMBOL(il_rx_queue_alloc);
2617 il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2619 struct il_rx_pkt *pkt = rxb_addr(rxb);
2620 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2622 if (!report->state) {
2623 D_11H("Spectrum Measure Notification: Start\n");
2627 memcpy(&il->measure_report, report, sizeof(*report));
2628 il->measurement_status |= MEASUREMENT_READY;
2630 EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2633 * returns non-zero if packet should be dropped
2636 il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2637 u32 decrypt_res, struct ieee80211_rx_status *stats)
2639 u16 fc = le16_to_cpu(hdr->frame_control);
2642 * All contexts have the same setting here due to it being
2643 * a module parameter, so OK to check any context.
2645 if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2648 if (!(fc & IEEE80211_FCTL_PROTECTED))
2651 D_RX("decrypt_res:0x%x\n", decrypt_res);
2652 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2653 case RX_RES_STATUS_SEC_TYPE_TKIP:
2654 /* The uCode got a bad phase 1 key and pushes the packet up;
2655 * decryption will be done in SW. */
2656 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2657 RX_RES_STATUS_BAD_KEY_TTAK)
2660 case RX_RES_STATUS_SEC_TYPE_WEP:
2661 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2662 RX_RES_STATUS_BAD_ICV_MIC) {
2663 /* bad ICV, the packet is destroyed since the
2664 * decryption is done in place, drop it */
2665 D_RX("Packet destroyed\n");
2668 case RX_RES_STATUS_SEC_TYPE_CCMP:
2669 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2670 RX_RES_STATUS_DECRYPT_OK) {
2671 D_RX("hw decrypt successfully!!!\n");
2672 stats->flag |= RX_FLAG_DECRYPTED;
2681 EXPORT_SYMBOL(il_set_decrypted_flag);
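/*
 * Illustration only (not driver code): a simplified summary of the decisions
 * made above.  The real code also handles the TKIP -> WEP fall-through; the
 * enum and outcome names here are made up for the sketch.
 */
enum example_rx_outcome { EX_RX_DROP, EX_RX_SW_DECRYPT, EX_RX_HW_DECRYPTED };

static inline enum example_rx_outcome
example_classify(int bad_ttak, int bad_icv_mic, int decrypt_ok)
{
	if (bad_ttak)		/* bad TKIP phase-1 key: let mac80211 decrypt */
		return EX_RX_SW_DECRYPT;
	if (bad_icv_mic)	/* frame already damaged in place: drop it */
		return EX_RX_DROP;
	if (decrypt_ok)		/* hardware handled it: mark as decrypted */
		return EX_RX_HW_DECRYPTED;
	return EX_RX_SW_DECRYPT;
}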
2684 * il_txq_update_write_ptr - Send new write idx to hardware
2687 il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2690 int txq_id = txq->q.id;
2692 if (txq->need_update == 0)
2695 /* if we're trying to save power */
2696 if (test_bit(S_POWER_PMI, &il->status)) {
2697 /* wake up nic if it's powered down ...
2698 * uCode will wake up, and interrupt us again, so next
2699 * time we'll skip this part. */
2700 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2702 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2703 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2705 il_set_bit(il, CSR_GP_CNTRL,
2706 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2710 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2713 * else not in power-save mode,
2714 * uCode will never sleep when we're
2715 * trying to tx (during RFKILL, we're not trying to tx).
2718 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2719 txq->need_update = 0;
2721 EXPORT_SYMBOL(il_txq_update_write_ptr);
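/*
 * Illustration only (not driver code): the value written to HBUS_TARG_WRPTR
 * above carries both the queue's write pointer and the queue id, with the id
 * shifted into the upper byte (write_ptr | (txq_id << 8)):
 */
static inline unsigned int example_targ_wrptr(unsigned int write_ptr,
					      unsigned int txq_id)
{
	return write_ptr | (txq_id << 8);
}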
2724 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2727 il_tx_queue_unmap(struct il_priv *il, int txq_id)
2729 struct il_tx_queue *txq = &il->txq[txq_id];
2730 struct il_queue *q = &txq->q;
2735 while (q->write_ptr != q->read_ptr) {
2736 il->cfg->ops->lib->txq_free_tfd(il, txq);
2737 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2740 EXPORT_SYMBOL(il_tx_queue_unmap);
2743 * il_tx_queue_free - Deallocate DMA queue.
2744 * @txq: Transmit queue to deallocate.
2746 * Empty queue by removing and destroying all BD's.
2748 * 0-fill, but do not free "txq" descriptor structure.
2751 il_tx_queue_free(struct il_priv *il, int txq_id)
2753 struct il_tx_queue *txq = &il->txq[txq_id];
2754 struct device *dev = &il->pci_dev->dev;
2757 il_tx_queue_unmap(il, txq_id);
2759 /* De-alloc array of command/tx buffers */
2760 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2763 /* De-alloc circular buffer of TFDs */
2765 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2766 txq->tfds, txq->q.dma_addr);
2768 /* De-alloc array of per-TFD driver data */
2772 /* deallocate arrays */
2778 /* 0-fill queue descriptor structure */
2779 memset(txq, 0, sizeof(*txq));
2781 EXPORT_SYMBOL(il_tx_queue_free);
2784 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2787 il_cmd_queue_unmap(struct il_priv *il)
2789 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2790 struct il_queue *q = &txq->q;
2796 while (q->read_ptr != q->write_ptr) {
2797 i = il_get_cmd_idx(q, q->read_ptr, 0);
2799 if (txq->meta[i].flags & CMD_MAPPED) {
2800 pci_unmap_single(il->pci_dev,
2801 dma_unmap_addr(&txq->meta[i], mapping),
2802 dma_unmap_len(&txq->meta[i], len),
2803 PCI_DMA_BIDIRECTIONAL);
2804 txq->meta[i].flags = 0;
2807 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2811 if (txq->meta[i].flags & CMD_MAPPED) {
2812 pci_unmap_single(il->pci_dev,
2813 dma_unmap_addr(&txq->meta[i], mapping),
2814 dma_unmap_len(&txq->meta[i], len),
2815 PCI_DMA_BIDIRECTIONAL);
2816 txq->meta[i].flags = 0;
2819 EXPORT_SYMBOL(il_cmd_queue_unmap);
2822 * il_cmd_queue_free - Deallocate DMA queue.
2823 * @txq: Transmit queue to deallocate.
2825 * Empty queue by removing and destroying all BD's.
2827 * 0-fill, but do not free "txq" descriptor structure.
2830 il_cmd_queue_free(struct il_priv *il)
2832 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2833 struct device *dev = &il->pci_dev->dev;
2836 il_cmd_queue_unmap(il);
2838 /* De-alloc array of command/tx buffers */
2839 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2842 /* De-alloc circular buffer of TFDs */
2844 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2845 txq->tfds, txq->q.dma_addr);
2847 /* deallocate arrays */
2853 /* 0-fill queue descriptor structure */
2854 memset(txq, 0, sizeof(*txq));
2856 EXPORT_SYMBOL(il_cmd_queue_free);
2858 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2861 * Theory of operation
2863 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2864 * of buffer descriptors, each of which points to one or more data buffers for
2865 * the device to read from or fill. Driver and device exchange status of each
2866 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2867 * entries in each circular buffer, to protect against confusing empty and full
2870 * The device reads or writes the data in the queues via the device's several
2871 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2873 * For a Tx queue, there are low mark and high mark limits. If, after queuing
2874 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
2875 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
2878 * See more detailed info in 4965.h.
2879 ***************************************************/
2882 il_queue_space(const struct il_queue *q)
2884 int s = q->read_ptr - q->write_ptr;
2886 if (q->read_ptr > q->write_ptr)
2891 /* keep some reserve to not confuse empty and full situations */
2897 EXPORT_SYMBOL(il_queue_space);
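/*
 * Illustration only (not driver code): a sketch of the free-space accounting
 * used for Tx queues, under the assumption (stated in the theory comment
 * above) that the queue has n_bd hardware descriptors, the driver only uses
 * a window of n_win slots, and a 2-entry reserve keeps a full queue from
 * being confused with an empty one.
 */
static inline int example_tx_space(int read_ptr, int write_ptr,
				   int n_bd, int n_win)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;	/* write pointer has wrapped */
	if (s <= 0)
		s += n_win;	/* count free slots within the window */
	s -= 2;			/* reserve: full != empty */
	return s < 0 ? 0 : s;
}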
2901 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2904 il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
2908 q->n_win = slots_num;
2911 /* count must be power-of-two size, otherwise il_queue_inc_wrap
2912 * and il_queue_dec_wrap are broken. */
2913 BUG_ON(!is_power_of_2(count));
2915 /* slots_num must be power-of-two size, otherwise
2916 * il_get_cmd_idx is broken. */
2917 BUG_ON(!is_power_of_2(slots_num));
2919 q->low_mark = q->n_win / 4;
2920 if (q->low_mark < 4)
2923 q->high_mark = q->n_win / 8;
2924 if (q->high_mark < 2)
2927 q->write_ptr = q->read_ptr = 0;
2933 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2936 il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2938 struct device *dev = &il->pci_dev->dev;
2939 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2941 /* Driver private data, only for Tx (not command) queues,
2942 * not shared with device. */
2943 if (id != il->cmd_queue) {
2944 txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
2947 IL_ERR("kmalloc for auxiliary BD "
2948 "structures failed\n");
2955 /* Circular buffer of transmit frame descriptors (TFDs),
2956 * shared with device */
2958 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2960 IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
2975 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
2978 il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2983 int actual_slots = slots_num;
2986 * Alloc buffer array for commands (Tx or other types of commands).
2987 * For the command queue (#4/#9), allocate command space + one big
2988 * command for scan, since scan command is very huge; the system will
2989 * not have two scans at the same time, so only one is needed.
2990 * For normal Tx queues (all other queues), no super-size command
2993 if (txq_id == il->cmd_queue)
2997 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
2999 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
3001 if (!txq->meta || !txq->cmd)
3002 goto out_free_arrays;
3004 len = sizeof(struct il_device_cmd);
3005 for (i = 0; i < actual_slots; i++) {
3006 /* only happens for cmd queue */
3008 len = IL_MAX_CMD_SIZE;
3010 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3015 /* Alloc driver data array and TFD circular buffer */
3016 ret = il_tx_queue_alloc(il, txq, txq_id);
3020 txq->need_update = 0;
3023 * For the default queues 0-3, set up the swq_id
3024 * already -- all others need to get one later
3025 * (if they need one at all).
3028 il_set_swq_id(txq, txq_id, txq_id);
3030 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
3031 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
3032 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
3034 /* Initialize queue's high/low-water marks, and head/tail idxes */
3035 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
3037 /* Tell device where to find queue */
3038 il->cfg->ops->lib->txq_init(il, txq);
3042 for (i = 0; i < actual_slots; i++)
3050 EXPORT_SYMBOL(il_tx_queue_init);
3053 il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
3056 int actual_slots = slots_num;
3058 if (txq_id == il->cmd_queue)
3061 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
3063 txq->need_update = 0;
3065 /* Initialize queue's high/low-water marks, and head/tail idxes */
3066 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
3068 /* Tell device where to find queue */
3069 il->cfg->ops->lib->txq_init(il, txq);
3071 EXPORT_SYMBOL(il_tx_queue_reset);
3073 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
3076 * il_enqueue_hcmd - enqueue a uCode command
3077 * @il: device private data pointer
3078 * @cmd: a pointer to the ucode command structure
3080 * The function returns < 0 values to indicate the operation
3081 * failed. On success, it returns the idx (> 0) of the command in the
3085 il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
3087 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3088 struct il_queue *q = &txq->q;
3089 struct il_device_cmd *out_cmd;
3090 struct il_cmd_meta *out_meta;
3091 dma_addr_t phys_addr;
3092 unsigned long flags;
3097 cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
3098 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
3100 /* If any of the command structures ends up being larger than
3101 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
3102 * we will need to increase the size of the TFD entries.
3103 * Also check that the command buffer does not exceed the size
3104 * of device_cmd and max_cmd_size. */
3105 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
3106 !(cmd->flags & CMD_SIZE_HUGE));
3107 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
3109 if (il_is_rfkill(il) || il_is_ctkill(il)) {
3110 IL_WARN("Not sending command - %s KILL\n",
3111 il_is_rfkill(il) ? "RF" : "CT");
3115 spin_lock_irqsave(&il->hcmd_lock, flags);
3117 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
3118 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3120 IL_ERR("Restarting adapter due to command queue full\n");
3121 queue_work(il->workqueue, &il->restart);
3125 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3126 out_cmd = txq->cmd[idx];
3127 out_meta = &txq->meta[idx];
3129 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
3130 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3134 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to zero */
3135 out_meta->flags = cmd->flags | CMD_MAPPED;
3136 if (cmd->flags & CMD_WANT_SKB)
3137 out_meta->source = cmd;
3138 if (cmd->flags & CMD_ASYNC)
3139 out_meta->callback = cmd->callback;
3141 out_cmd->hdr.cmd = cmd->id;
3142 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
3144 /* At this point, the out_cmd now has all of the incoming cmd
3147 out_cmd->hdr.flags = 0;
3148 out_cmd->hdr.sequence =
3149 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
3150 if (cmd->flags & CMD_SIZE_HUGE)
3151 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
3152 len = sizeof(struct il_device_cmd);
3153 if (idx == TFD_CMD_SLOTS)
3154 len = IL_MAX_CMD_SIZE;
3156 #ifdef CONFIG_IWLEGACY_DEBUG
3157 switch (out_cmd->hdr.cmd) {
3158 case C_TX_LINK_QUALITY_CMD:
3160 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3161 "%d bytes at %d[%d]:%d\n",
3162 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3163 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3164 q->write_ptr, idx, il->cmd_queue);
3167 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3168 "%d bytes at %d[%d]:%d\n",
3169 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3170 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3171 idx, il->cmd_queue);
3174 txq->need_update = 1;
3176 if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
3177 /* Set up entry in queue's byte count circular buffer */
3178 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
3181 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3182 PCI_DMA_BIDIRECTIONAL);
3183 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3184 dma_unmap_len_set(out_meta, len, fix_size);
3186 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
3187 1, U32_PAD(cmd->len));
3189 /* Increment and update queue's write idx */
3190 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3191 il_txq_update_write_ptr(il, txq);
3193 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3198 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3200 * When FW advances 'R' idx, all entries between old and new 'R' idx
3201 * need to be reclaimed. As a result, some free space forms. If there is
3202 * enough free space (> low mark), wake the stack that feeds us.
3205 il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3207 struct il_tx_queue *txq = &il->txq[txq_id];
3208 struct il_queue *q = &txq->q;
3211 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3212 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3213 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3214 q->write_ptr, q->read_ptr);
3218 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3219 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3222 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3223 q->write_ptr, q->read_ptr);
3224 queue_work(il->workqueue, &il->restart);
3231 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3232 * @rxb: Rx buffer to reclaim
3234 * If an Rx buffer has an async callback associated with it, the callback
3235 * will be executed. The attached skb (if present) will only be freed
3236 * if the callback returns 1.
3239 il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3241 struct il_rx_pkt *pkt = rxb_addr(rxb);
3242 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3243 int txq_id = SEQ_TO_QUEUE(sequence);
3244 int idx = SEQ_TO_IDX(sequence);
3246 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3247 struct il_device_cmd *cmd;
3248 struct il_cmd_meta *meta;
3249 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3250 unsigned long flags;
3252 /* If a Tx command is being handled and it isn't in the actual
3253 * command queue, then a command routing bug has been introduced
3254 * in the queue management code. */
3256 (txq_id != il->cmd_queue,
3257 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3258 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3259 il->txq[il->cmd_queue].q.write_ptr)) {
3260 il_print_hex_error(il, pkt, 32);
3264 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3265 cmd = txq->cmd[cmd_idx];
3266 meta = &txq->meta[cmd_idx];
3268 txq->time_stamp = jiffies;
3270 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
3271 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
3273 /* Input error checking is done when commands are added to queue. */
3274 if (meta->flags & CMD_WANT_SKB) {
3275 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3277 } else if (meta->callback)
3278 meta->callback(il, cmd, pkt);
3280 spin_lock_irqsave(&il->hcmd_lock, flags);
3282 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3284 if (!(meta->flags & CMD_ASYNC)) {
3285 clear_bit(S_HCMD_ACTIVE, &il->status);
3286 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3287 il_get_cmd_string(cmd->hdr.cmd));
3288 wake_up(&il->wait_command_queue);
3291 /* Mark as unmapped */
3294 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3296 EXPORT_SYMBOL(il_tx_cmd_complete);
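/*
 * Illustration only (not driver code): il_enqueue_hcmd() packs the command
 * queue id and the write index into hdr.sequence, and il_tx_cmd_complete()
 * recovers them with SEQ_TO_QUEUE()/SEQ_TO_IDX().  A sketch of that round
 * trip, assuming the index lives in the low byte and the queue id in the
 * bits just above it:
 */
static inline unsigned int example_seq_pack(unsigned int queue, unsigned int idx)
{
	return ((queue & 0x1f) << 8) | (idx & 0xff);
}

static inline unsigned int example_seq_queue(unsigned int seq)
{
	return (seq >> 8) & 0x1f;	/* recover the queue id */
}

static inline unsigned int example_seq_idx(unsigned int seq)
{
	return seq & 0xff;		/* recover the ring index */
}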
3298 MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
3299 MODULE_VERSION(IWLWIFI_VERSION);
3300 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
3301 MODULE_LICENSE("GPL");
3304 * If bt_coex_active is set to true, uCode will do kill/defer
3305 * every time the priority line is asserted (BT is sending signals on the
3306 * priority line in the PCIx).
3307 * If bt_coex_active is set to false, uCode will ignore the BT activity and
3308 * perform normal operation.
3310 * Users might experience transmit issues on some platforms due to WiFi/BT
3311 * co-existence problems. The possible behaviors are:
3312 * Able to scan and find all the available APs
3313 * Not able to associate with any AP
3314 * On those platforms, WiFi communication can be restored by setting the
3315 * "bt_coex_active" module parameter to "false"
3317 * default: bt_coex_active = true (BT_COEX_ENABLE)
3319 static bool bt_coex_active = true;
3320 module_param(bt_coex_active, bool, S_IRUGO);
3321 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
3324 EXPORT_SYMBOL(il_debug_level);
3326 const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3327 EXPORT_SYMBOL(il_bcast_addr);
3329 /* This function both allocates and initializes hw and il. */
3330 struct ieee80211_hw *
3331 il_alloc_all(struct il_cfg *cfg)
3334 /* mac80211 allocates memory for this device instance, including
3335 * space for this driver's private structure */
3336 struct ieee80211_hw *hw;
3338 hw = ieee80211_alloc_hw(sizeof(struct il_priv),
3339 cfg->ops->ieee80211_ops);
3341 pr_err("%s: Can not allocate network device\n", cfg->name);
3351 EXPORT_SYMBOL(il_alloc_all);
3353 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
3354 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
3356 il_init_ht_hw_capab(const struct il_priv *il,
3357 struct ieee80211_sta_ht_cap *ht_info,
3358 enum ieee80211_band band)
3360 u16 max_bit_rate = 0;
3361 u8 rx_chains_num = il->hw_params.rx_chains_num;
3362 u8 tx_chains_num = il->hw_params.tx_chains_num;
3365 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3367 ht_info->ht_supported = true;
3369 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3370 max_bit_rate = MAX_BIT_RATE_20_MHZ;
3371 if (il->hw_params.ht40_channel & BIT(band)) {
3372 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3373 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3374 ht_info->mcs.rx_mask[4] = 0x01;
3375 max_bit_rate = MAX_BIT_RATE_40_MHZ;
3378 if (il->cfg->mod_params->amsdu_size_8K)
3379 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3381 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3382 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3384 ht_info->mcs.rx_mask[0] = 0xFF;
3385 if (rx_chains_num >= 2)
3386 ht_info->mcs.rx_mask[1] = 0xFF;
3387 if (rx_chains_num >= 3)
3388 ht_info->mcs.rx_mask[2] = 0xFF;
3390 /* Highest supported Rx data rate */
3391 max_bit_rate *= rx_chains_num;
3392 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3393 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3395 /* Tx MCS capabilities */
3396 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3397 if (tx_chains_num != rx_chains_num) {
3398 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3399 ht_info->mcs.tx_params |=
3401 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3406 * il_init_geos - Initialize mac80211's geo/channel info based on eeprom
3409 il_init_geos(struct il_priv *il)
3411 struct il_channel_info *ch;
3412 struct ieee80211_supported_band *sband;
3413 struct ieee80211_channel *channels;
3414 struct ieee80211_channel *geo_ch;
3415 struct ieee80211_rate *rates;
3417 s8 max_tx_power = 0;
3419 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3420 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3421 D_INFO("Geography modes already initialized.\n");
3422 set_bit(S_GEO_CONFIGURED, &il->status);
3427 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3433 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3440 /* 5.2GHz channels start after the 2.4GHz channels */
3441 sband = &il->bands[IEEE80211_BAND_5GHZ];
3442 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3444 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3445 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3447 if (il->cfg->sku & IL_SKU_N)
3448 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3450 sband = &il->bands[IEEE80211_BAND_2GHZ];
3451 sband->channels = channels;
3453 sband->bitrates = rates;
3454 sband->n_bitrates = RATE_COUNT_LEGACY;
3456 if (il->cfg->sku & IL_SKU_N)
3457 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3459 il->ieee_channels = channels;
3460 il->ieee_rates = rates;
3462 for (i = 0; i < il->channel_count; i++) {
3463 ch = &il->channel_info[i];
3465 if (!il_is_channel_valid(ch))
3468 sband = &il->bands[ch->band];
3470 geo_ch = &sband->channels[sband->n_channels++];
3472 geo_ch->center_freq =
3473 ieee80211_channel_to_frequency(ch->channel, ch->band);
3474 geo_ch->max_power = ch->max_power_avg;
3475 geo_ch->max_antenna_gain = 0xff;
3476 geo_ch->hw_value = ch->channel;
3478 if (il_is_channel_valid(ch)) {
3479 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3480 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3482 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3483 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3485 if (ch->flags & EEPROM_CHANNEL_RADAR)
3486 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3488 geo_ch->flags |= ch->ht40_extension_channel;
3490 if (ch->max_power_avg > max_tx_power)
3491 max_tx_power = ch->max_power_avg;
3493 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3496 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3497 geo_ch->center_freq,
3498 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3500 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3504 il->tx_power_device_lmt = max_tx_power;
3505 il->tx_power_user_lmt = max_tx_power;
3506 il->tx_power_next = max_tx_power;
3508 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3509 (il->cfg->sku & IL_SKU_A)) {
3510 IL_INFO("Incorrectly detected BG card as ABG. "
3511 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3512 il->pci_dev->device, il->pci_dev->subsystem_device);
3513 il->cfg->sku &= ~IL_SKU_A;
3516 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3517 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3518 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3520 set_bit(S_GEO_CONFIGURED, &il->status);
3524 EXPORT_SYMBOL(il_init_geos);
3527 * il_free_geos - undo allocations in il_init_geos
3530 il_free_geos(struct il_priv *il)
3532 kfree(il->ieee_channels);
3533 kfree(il->ieee_rates);
3534 clear_bit(S_GEO_CONFIGURED, &il->status);
3536 EXPORT_SYMBOL(il_free_geos);
3539 il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3540 u16 channel, u8 extension_chan_offset)
3542 const struct il_channel_info *ch_info;
3544 ch_info = il_get_channel_info(il, band, channel);
3545 if (!il_is_channel_valid(ch_info))
3548 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3550 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3551 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3553 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3559 il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
3560 struct ieee80211_sta_ht_cap *ht_cap)
3562 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
3566 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
3567 * the bit will not be set in the pure 40 MHz case.
3569 if (ht_cap && !ht_cap->ht_supported)
3572 #ifdef CONFIG_IWLEGACY_DEBUGFS
3573 if (il->disable_ht40)
3577 return il_is_channel_extension(il, il->band,
3578 le16_to_cpu(il->staging.channel),
3579 ctx->ht.extension_chan_offset);
3581 EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3584 il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3590 * If mac80211 hasn't given us a beacon interval, program
3591 * the default into the device.
3594 return DEFAULT_BEACON_INTERVAL;
3597 * If the beacon interval we obtained from the peer
3598 * is too large, we'll have to wake up more often
3599 * (and in the IBSS case, we'll beacon too much)
3601 * For example, if max_beacon_val is 4096, and the
3602 * requested beacon interval is 7000, we'll have to
3603 * use 3500 to be able to wake up on the beacons.
3605 * This could badly influence beacon detection stats.
3608 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3609 new_val = beacon_val / beacon_factor;
3612 new_val = max_beacon_val;
3618 il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
3621 s32 interval_tm, rem;
3622 struct ieee80211_conf *conf = NULL;
3624 struct ieee80211_vif *vif = ctx->vif;
3626 conf = &il->hw->conf;
3628 lockdep_assert_held(&il->mutex);
3630 memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
3632 il->timing.timestamp = cpu_to_le64(il->timestamp);
3633 il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
3635 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
3638 * TODO: For IBSS we need to get atim_win from mac80211,
3639 * for now just always use 0
3641 il->timing.atim_win = 0;
3644 il_adjust_beacon_interval(beacon_int,
3645 il->hw_params.max_beacon_itrvl *
3647 il->timing.beacon_interval = cpu_to_le16(beacon_int);
3649 tsf = il->timestamp; /* tsf is modified by do_div: copy it */
3650 interval_tm = beacon_int * TIME_UNIT;
3651 rem = do_div(tsf, interval_tm);
3652 il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
3654 il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
3656 D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
3657 le16_to_cpu(il->timing.beacon_interval),
3658 le32_to_cpu(il->timing.beacon_init_val),
3659 le16_to_cpu(il->timing.atim_win));
3661 return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
3664 EXPORT_SYMBOL(il_send_rxon_timing);
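/*
 * Illustration only (not driver code): the timing fields above boil down to
 * "how long until the next beacon".  The beacon interval is converted from
 * time units to microseconds (1 TU = 1024 usec is the assumption here) and
 * the remainder of the current TSF within that interval is subtracted:
 */
static inline unsigned long long
example_usec_to_next_beacon(unsigned long long tsf, unsigned int beacon_int_tu)
{
	unsigned long long interval_usec;

	if (!beacon_int_tu)	/* caller substitutes a default interval */
		return 0;

	interval_usec = (unsigned long long)beacon_int_tu * 1024;
	return interval_usec - (tsf % interval_usec);
}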
3667 il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
3670 struct il_rxon_cmd *rxon = &il->staging;
3673 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3675 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3678 EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3680 /* check that the RXON structure is valid */
3682 il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
3684 struct il_rxon_cmd *rxon = &il->staging;
3687 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3688 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3689 IL_WARN("check 2.4G: wrong narrow\n");
3692 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3693 IL_WARN("check 2.4G: wrong radar\n");
3697 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3698 IL_WARN("check 5.2G: not short slot!\n");
3701 if (rxon->flags & RXON_FLG_CCK_MSK) {
3702 IL_WARN("check 5.2G: CCK!\n");
3706 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3707 IL_WARN("mac/bssid mcast!\n");
3711 /* make sure basic rates 6Mbps and 1Mbps are supported */
3712 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3713 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3714 IL_WARN("neither 1 nor 6 are basic\n");
3718 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3719 IL_WARN("aid > 2007\n");
3723 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3724 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3725 IL_WARN("CCK and short slot\n");
3729 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3730 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3731 IL_WARN("CCK and auto detect\n");
3736 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3737 RXON_FLG_TGG_PROTECT_MSK) {
3738 IL_WARN("TGg but no auto-detect\n");
3743 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3746 IL_ERR("Invalid RXON\n");
3751 EXPORT_SYMBOL(il_check_rxon_cmd);
3754 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3755 * @il: staging_rxon is compared to active_rxon
3757 * If the RXON structure is changing enough to require a new tune,
3758 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3759 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3762 il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
3764 const struct il_rxon_cmd *staging = &il->staging;
3765 const struct il_rxon_cmd *active = &il->active;
3769 D_INFO("need full RXON - " #cond "\n"); \
3773 #define CHK_NEQ(c1, c2) \
3774 if ((c1) != (c2)) { \
3775 D_INFO("need full RXON - " \
3776 #c1 " != " #c2 " - %d != %d\n", \
3781 /* These items are only settable from the full RXON command */
3782 CHK(!il_is_associated(il));
3783 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
3784 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
3785 CHK(compare_ether_addr
3786 (staging->wlap_bssid_addr, active->wlap_bssid_addr));
3787 CHK_NEQ(staging->dev_type, active->dev_type);
3788 CHK_NEQ(staging->channel, active->channel);
3789 CHK_NEQ(staging->air_propagation, active->air_propagation);
3790 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3791 active->ofdm_ht_single_stream_basic_rates);
3792 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3793 active->ofdm_ht_dual_stream_basic_rates);
3794 CHK_NEQ(staging->assoc_id, active->assoc_id);
3796 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3797 * be updated with the RXON_ASSOC command -- however only some
3798 * flag transitions are allowed using RXON_ASSOC */
3800 /* Check if we are not switching bands */
3801 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3802 active->flags & RXON_FLG_BAND_24G_MSK);
3804 /* Check if we are switching association toggle */
3805 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3806 active->filter_flags & RXON_FILTER_ASSOC_MSK);
3813 EXPORT_SYMBOL(il_full_rxon_required);
3816 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
3819 * Assign the lowest rate -- should really get this from
3820 * the beacon skb from mac80211.
3822 if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
3823 return RATE_1M_PLCP;
3825 return RATE_6M_PLCP;
3827 EXPORT_SYMBOL(il_get_lowest_plcp);
3830 _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
3831 struct il_rxon_context *ctx)
3833 struct il_rxon_cmd *rxon = &il->staging;
3835 if (!ctx->ht.enabled) {
3837 ~(RXON_FLG_CHANNEL_MODE_MSK |
3838 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
3839 | RXON_FLG_HT_PROT_MSK);
3844 cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
3846 /* Set up channel bandwidth:
3847 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
3848 /* clear the HT channel mode before setting the mode */
3850 ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3851 if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
3853 if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
3854 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
3855 /* Note: control channel is opposite of extension channel */
3856 switch (ctx->ht.extension_chan_offset) {
3857 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3859 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3861 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3862 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3866 /* Note: control channel is opposite of extension channel */
3867 switch (ctx->ht.extension_chan_offset) {
3868 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3870 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
3871 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3873 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3874 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
3875 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
3877 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
3879 /* channel location only valid if in Mixed mode */
3880 IL_ERR("invalid extension channel offset\n");
3885 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
3888 if (il->cfg->ops->hcmd->set_rxon_chain)
3889 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
3891 D_ASSOC("rxon flags 0x%X operation mode :0x%X "
3892 "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
3893 ctx->ht.protection, ctx->ht.extension_chan_offset);
3897 il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
3899 _il_set_rxon_ht(il, ht_conf, &il->ctx);
3901 EXPORT_SYMBOL(il_set_rxon_ht);
3903 /* Return a valid, unused channel for a passive scan to reset the RF */
3905 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
3907 const struct il_channel_info *ch_info;
3912 if (band == IEEE80211_BAND_5GHZ) {
3914 max = il->channel_count;
3920 for (i = min; i < max; i++) {
3921 channel = il->channel_info[i].channel;
3922 if (channel == le16_to_cpu(il->staging.channel))
3925 ch_info = il_get_channel_info(il, band, channel);
3926 if (il_is_channel_valid(ch_info))
3932 EXPORT_SYMBOL(il_get_single_channel_number);
3935 * il_set_rxon_channel - Set the band and channel values in staging RXON
3936 * @ch: requested channel as a pointer to struct ieee80211_channel
3938 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
3939 * in the staging RXON flag structure based on the ch->band
3942 il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
3943 struct il_rxon_context *ctx)
3945 enum ieee80211_band band = ch->band;
3946 u16 channel = ch->hw_value;
3948 if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
3951 il->staging.channel = cpu_to_le16(channel);
3952 if (band == IEEE80211_BAND_5GHZ)
3953 il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3955 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3959 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3963 EXPORT_SYMBOL(il_set_rxon_channel);
3966 il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
3967 enum ieee80211_band band, struct ieee80211_vif *vif)
3969 if (band == IEEE80211_BAND_5GHZ) {
3970 il->staging.flags &=
3971 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3973 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3975 /* Copied from il_post_associate() */
3976 if (vif && vif->bss_conf.use_short_slot)
3977 il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3979 il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3981 il->staging.flags |= RXON_FLG_BAND_24G_MSK;
3982 il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3983 il->staging.flags &= ~RXON_FLG_CCK_MSK;
3986 EXPORT_SYMBOL(il_set_flags_for_band);
3989 * initialize rxon structure with default values from eeprom
3992 il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
3994 const struct il_channel_info *ch_info;
3996 memset(&il->staging, 0, sizeof(il->staging));
3999 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4001 switch (ctx->vif->type) {
4003 case NL80211_IFTYPE_STATION:
4004 il->staging.dev_type = RXON_DEV_TYPE_ESS;
4005 il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
4008 case NL80211_IFTYPE_ADHOC:
4009 il->staging.dev_type = RXON_DEV_TYPE_IBSS;
4010 il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
4011 il->staging.filter_flags =
4012 RXON_FILTER_BCON_AWARE_MSK |
4013 RXON_FILTER_ACCEPT_GRP_MSK;
4017 IL_ERR("Unsupported interface type %d\n",
4023 /* TODO: Figure out when short_preamble would be set and cache from
4025 if (!hw_to_local(il->hw)->short_preamble)
4026 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
4028 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
4032 il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
4035 ch_info = &il->channel_info[0];
4037 il->staging.channel = cpu_to_le16(ch_info->channel);
4038 il->band = ch_info->band;
4040 il_set_flags_for_band(il, ctx, il->band, ctx->vif);
4042 il->staging.ofdm_basic_rates =
4043 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4044 il->staging.cck_basic_rates =
4045 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4047 /* clear both MIX and PURE40 mode flags */
4048 il->staging.flags &=
4049 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
4051 memcpy(il->staging.node_addr, ctx->vif->addr, ETH_ALEN);
4053 il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
4054 il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
4056 EXPORT_SYMBOL(il_connection_init_rx_config);
4059 il_set_rate(struct il_priv *il)
4061 const struct ieee80211_supported_band *hw = NULL;
4062 struct ieee80211_rate *rate;
4065 hw = il_get_hw_mode(il, il->band);
4067 IL_ERR("Failed to set rate: unable to get hw mode\n");
4071 il->active_rate = 0;
4073 for (i = 0; i < hw->n_bitrates; i++) {
4074 rate = &(hw->bitrates[i]);
4075 if (rate->hw_value < RATE_COUNT_LEGACY)
4076 il->active_rate |= (1 << rate->hw_value);
4079 D_RATE("Set active_rate = %0x\n", il->active_rate);
4081 il->staging.cck_basic_rates =
4082 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
4084 il->staging.ofdm_basic_rates =
4085 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
4087 EXPORT_SYMBOL(il_set_rate);
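/*
 * Illustration only (not driver code): il_set_rate() folds mac80211's bitrate
 * table into a bitmap where bit N means "legacy rate with hw_value N is
 * usable".  A sketch of that fold, assuming the rate count fits in 32 bits;
 * the names here are made up for the illustration:
 */
static inline unsigned int example_build_rate_mask(const unsigned char *hw_values,
						   int n_bitrates, int rate_count)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n_bitrates; i++)
		if (hw_values[i] < rate_count)	/* only legacy rates */
			mask |= 1U << hw_values[i];
	return mask;
}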
4090 il_chswitch_done(struct il_priv *il, bool is_success)
4092 struct il_rxon_context *ctx = &il->ctx;
4094 if (test_bit(S_EXIT_PENDING, &il->status))
4097 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4098 ieee80211_chswitch_done(ctx->vif, is_success);
4100 EXPORT_SYMBOL(il_chswitch_done);
4103 il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
4105 struct il_rx_pkt *pkt = rxb_addr(rxb);
4106 struct il_csa_notification *csa = &(pkt->u.csa_notif);
4107 struct il_rxon_cmd *rxon = (void *)&il->active;
4109 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4112 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
4113 rxon->channel = csa->channel;
4114 il->staging.channel = csa->channel;
4115 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
4116 il_chswitch_done(il, true);
4118 IL_ERR("CSA notif (fail) : channel %d\n",
4119 le16_to_cpu(csa->channel));
4120 il_chswitch_done(il, false);
4123 EXPORT_SYMBOL(il_hdl_csa);
4125 #ifdef CONFIG_IWLEGACY_DEBUG
4127 il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
4129 struct il_rxon_cmd *rxon = &il->staging;
4131 D_RADIO("RX CONFIG:\n");
4132 il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4133 D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4134 D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4135 D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
4136 D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4137 D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
4138 D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4139 D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
4140 D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
4141 D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4143 EXPORT_SYMBOL(il_print_rx_config_cmd);
4146 * il_irq_handle_error - called for HW or SW error interrupt from card
4149 il_irq_handle_error(struct il_priv *il)
4151 /* Set the FW error flag -- cleared on il_down */
4152 set_bit(S_FW_ERROR, &il->status);
4154 /* Cancel currently queued command. */
4155 clear_bit(S_HCMD_ACTIVE, &il->status);
4157 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4159 il->cfg->ops->lib->dump_nic_error_log(il);
4160 if (il->cfg->ops->lib->dump_fh)
4161 il->cfg->ops->lib->dump_fh(il, NULL, false);
4162 #ifdef CONFIG_IWLEGACY_DEBUG
4163 if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4164 il_print_rx_config_cmd(il, &il->ctx);
4167 wake_up(&il->wait_command_queue);
4169 /* Keep the restart process from trying to send host
4170 * commands by clearing the INIT status bit */
4171 clear_bit(S_READY, &il->status);
4173 if (!test_bit(S_EXIT_PENDING, &il->status)) {
4174 IL_DBG(IL_DL_FW_ERRORS,
4175 "Restarting adapter due to uCode error.\n");
4177 if (il->cfg->mod_params->restart_fw)
4178 queue_work(il->workqueue, &il->restart);
4181 EXPORT_SYMBOL(il_irq_handle_error);
4184 il_apm_stop_master(struct il_priv *il)
4188 /* stop device's busmaster DMA activity */
4189 il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4192 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4193 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4195 IL_WARN("Master Disable Timed Out, 100 usec\n");
4197 D_INFO("stop master\n");
4203 il_apm_stop(struct il_priv *il)
4205 D_INFO("Stop card, put in low power state\n");
4207 /* Stop device's DMA activity */
4208 il_apm_stop_master(il);
4210 /* Reset the entire device */
4211 il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
4216 * Clear "initialization complete" bit to move adapter from
4217 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
4219 il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4221 EXPORT_SYMBOL(il_apm_stop);
4224 * Start up NIC's basic functionality after it has been reset
4225 * (e.g. after platform boot, or shutdown via il_apm_stop())
4226 * NOTE: This does not load uCode nor start the embedded processor
4229 il_apm_init(struct il_priv *il)
4234 D_INFO("Init card's basic functions\n");
4237 * Use "set_bit" below rather than "write", to preserve any hardware
4238 * bits already set by default after reset.
4241 /* Disable L0S exit timer (platform NMI Work/Around) */
4242 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4243 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4246 * Disable L0s without affecting L1;
4247 * don't wait for ICH L0s (ICH bug W/A)
4249 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4250 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
4252 /* Set FH wait threshold to maximum (HW error during stress W/A) */
4253 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4256 * Enable HAP INTA (interrupt from management bus) to
4257 * wake device's PCI Express link L1a -> L0s
4258 * NOTE: This is a no-op for 3945 (non-existent bit)
4260 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4261 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
4264 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
4265 * Check if BIOS (or OS) enabled L1-ASPM on this device.
4266 * If so (likely), disable L0S, so device moves directly L0->L1;
4267 * costs negligible amount of power savings.
4268 * If not (unlikely), enable L0S, so there is at least some
4269 * power savings, even without L1.
4271 if (il->cfg->base_params->set_l0s) {
4272 lctl = il_pcie_link_ctl(il);
4273 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
4274 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
4275 /* L1-ASPM enabled; disable(!) L0S */
4276 il_set_bit(il, CSR_GIO_REG,
4277 CSR_GIO_REG_VAL_L0S_ENABLED);
4278 D_POWER("L1 Enabled; Disabling L0S\n");
4280 /* L1-ASPM disabled; enable(!) L0S */
4281 il_clear_bit(il, CSR_GIO_REG,
4282 CSR_GIO_REG_VAL_L0S_ENABLED);
4283 D_POWER("L1 Disabled; Enabling L0S\n");
4287 /* Configure analog phase-lock-loop before activating to D0A */
4288 if (il->cfg->base_params->pll_cfg_val)
4289 il_set_bit(il, CSR_ANA_PLL_CFG,
4290 il->cfg->base_params->pll_cfg_val);
4293 * Set "initialization complete" bit to move adapter from
4294 * D0U* --> D0A* (powered-up active) state.
4296 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4299 * Wait for clock stabilization; once stabilized, access to
4300 * device-internal resources is supported, e.g. il_wr_prph()
4301 * and accesses to uCode SRAM.
4304 _il_poll_bit(il, CSR_GP_CNTRL,
4305 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4306 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4308 D_INFO("Failed to init the card\n");
4313 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4314 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
4316 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4317 * do not disable clocks. This preserves any hardware bits already
4318 * set by default in "CLK_CTRL_REG" after reset.
4320 if (il->cfg->base_params->use_bsm)
4321 il_wr_prph(il, APMG_CLK_EN_REG,
4322 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4324 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4327 /* Disable L1-Active */
4328 il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4329 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4334 EXPORT_SYMBOL(il_apm_init);
4337 il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4343 lockdep_assert_held(&il->mutex);
4345 if (il->tx_power_user_lmt == tx_power && !force)
4348 if (!il->cfg->ops->lib->send_tx_power)
4351 /* 0 dBm means 1 milliwatt */
4353 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4357 if (tx_power > il->tx_power_device_lmt) {
4358 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4359 tx_power, il->tx_power_device_lmt);
4363 if (!il_is_ready_rf(il))
4366 /* scan complete and commit_rxon use the tx_power_next value;
4367 * it always needs to be updated for the newest request */
4368 il->tx_power_next = tx_power;
4370 /* do not set tx power when scanning or channel changing */
4371 defer = test_bit(S_SCANNING, &il->status) ||
4372 memcmp(&il->active, &il->staging, sizeof(il->staging));
4373 if (defer && !force) {
4374 D_INFO("Deferring tx power set\n");
4378 prev_tx_power = il->tx_power_user_lmt;
4379 il->tx_power_user_lmt = tx_power;
4381 ret = il->cfg->ops->lib->send_tx_power(il);
4383 /* if setting tx_power fails, restore the original tx power */
4385 il->tx_power_user_lmt = prev_tx_power;
4386 il->tx_power_next = prev_tx_power;
4390 EXPORT_SYMBOL(il_set_tx_power);
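/*
 * Illustration only (not driver code): the checks above reduce to a small
 * decision.  Requests below 0 dBm or above the device limit are rejected;
 * otherwise the new value is recorded, but actually sending it to the
 * device is deferred while a scan or an RXON change is pending (unless
 * forced).  The names here are made up for the sketch.
 */
enum example_txpower_action { EX_TXPOWER_REJECT, EX_TXPOWER_DEFER, EX_TXPOWER_SEND };

static inline enum example_txpower_action
example_txpower_decision(int request_dbm, int device_limit_dbm,
			 int scanning_or_rxon_pending, int force)
{
	if (request_dbm < 0 || request_dbm > device_limit_dbm)
		return EX_TXPOWER_REJECT;	/* out of the allowed range */
	if (scanning_or_rxon_pending && !force)
		return EX_TXPOWER_DEFER;	/* remembered in tx_power_next */
	return EX_TXPOWER_SEND;			/* push to the device now */
}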
4393 il_send_bt_config(struct il_priv *il)
4395 struct il_bt_cmd bt_cmd = {
4396 .lead_time = BT_LEAD_TIME_DEF,
4397 .max_kill = BT_MAX_KILL_DEF,
4402 if (!bt_coex_active)
4403 bt_cmd.flags = BT_COEX_DISABLE;
4405 bt_cmd.flags = BT_COEX_ENABLE;
4407 D_INFO("BT coex %s\n",
4408 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4410 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4411 IL_ERR("failed to send BT Coex Config\n");
4413 EXPORT_SYMBOL(il_send_bt_config);
4416 il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4418 struct il_stats_cmd stats_cmd = {
4419 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4422 if (flags & CMD_ASYNC)
4423 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4426 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4429 EXPORT_SYMBOL(il_send_stats_request);
4432 il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
4434 #ifdef CONFIG_IWLEGACY_DEBUG
4435 struct il_rx_pkt *pkt = rxb_addr(rxb);
4436 struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
4437 D_RX("sleep mode: %d, src: %d\n",
4438 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
4441 EXPORT_SYMBOL(il_hdl_pm_sleep);
4444 il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4446 struct il_rx_pkt *pkt = rxb_addr(rxb);
4447 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4448 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4449 il_get_cmd_string(pkt->hdr.cmd));
4450 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4452 EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4455 il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
4457 struct il_rx_pkt *pkt = rxb_addr(rxb);
4459 IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
4460 "seq 0x%04X ser 0x%08X\n",
4461 le32_to_cpu(pkt->u.err_resp.error_type),
4462 il_get_cmd_string(pkt->u.err_resp.cmd_id),
4463 pkt->u.err_resp.cmd_id,
4464 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
4465 le32_to_cpu(pkt->u.err_resp.error_info));
4467 EXPORT_SYMBOL(il_hdl_error);
4470 il_clear_isr_stats(struct il_priv *il)
4472 memset(&il->isr_stats, 0, sizeof(il->isr_stats));
4476 il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
4477 const struct ieee80211_tx_queue_params *params)
4479 struct il_priv *il = hw->priv;
4480 unsigned long flags;
4483 D_MAC80211("enter\n");
4485 if (!il_is_ready_rf(il)) {
4486 D_MAC80211("leave - RF not ready\n");
4490 if (queue >= AC_NUM) {
4491 D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
4495 q = AC_NUM - 1 - queue;
4497 spin_lock_irqsave(&il->lock, flags);
4499 il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
4500 cpu_to_le16(params->cw_min);
4501 il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
4502 cpu_to_le16(params->cw_max);
4503 il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
4504 il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
4505 cpu_to_le16((params->txop * 32));
4507 il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
4509 spin_unlock_irqrestore(&il->lock, flags);
4511 D_MAC80211("leave\n");
4514 EXPORT_SYMBOL(il_mac_conf_tx);
4517 il_mac_tx_last_beacon(struct ieee80211_hw *hw)
4519 struct il_priv *il = hw->priv;
4521 return il->ibss_manager == IL_IBSS_MANAGER;
4523 EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4526 il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
4528 il_connection_init_rx_config(il, ctx);
4530 if (il->cfg->ops->hcmd->set_rxon_chain)
4531 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
4533 return il_commit_rxon(il, ctx);
4537 il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
4539 struct ieee80211_vif *vif = ctx->vif;
4542 lockdep_assert_held(&il->mutex);
4545 * This variable will be correct only when there's just
4546 * a single context, but all code using it is for hardware
4547 * that supports only one context.
4549 il->iw_mode = vif->type;
4551 ctx->is_active = true;
4553 err = il_set_mode(il, ctx);
4555 if (!ctx->always_active)
4556 ctx->is_active = false;
4564 il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4566 struct il_priv *il = hw->priv;
4567 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
4570 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4572 mutex_lock(&il->mutex);
4574 if (!il_is_ready_rf(il)) {
4575 IL_WARN("Try to add interface when device not ready\n");
4585 vif_priv->ctx = &il->ctx;
4588 err = il_setup_interface(il, &il->ctx);
4591 il->iw_mode = NL80211_IFTYPE_STATION;
4595 mutex_unlock(&il->mutex);
4597 D_MAC80211("leave\n");
4600 EXPORT_SYMBOL(il_mac_add_interface);
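/*
 * il_teardown_interface - undo il_setup_interface(): cancel any scan
 * owned by this vif, reprogram the RXON context and mark it inactive
 * unless it is flagged always_active. Caller must hold il->mutex.
 */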
4603 il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
4606 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
4608 lockdep_assert_held(&il->mutex);
4610 if (il->scan_vif == vif) {
4611 il_scan_cancel_timeout(il, 200);
4612 il_force_scan_end(il);
4616 il_set_mode(il, ctx);
4617 if (!ctx->always_active)
4618 ctx->is_active = false;
4623 il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4625 struct il_priv *il = hw->priv;
4626 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
4628 D_MAC80211("enter\n");
4630 mutex_lock(&il->mutex);
4632 WARN_ON(ctx->vif != vif);
4635 il_teardown_interface(il, vif, false);
4637 memset(il->bssid, 0, ETH_ALEN);
4638 mutex_unlock(&il->mutex);
4640 D_MAC80211("leave\n");
4643 EXPORT_SYMBOL(il_mac_remove_interface);
4646 il_alloc_txq_mem(struct il_priv *il)
4650 kzalloc(sizeof(struct il_tx_queue) *
4651 il->cfg->base_params->num_of_queues, GFP_KERNEL);
4653 IL_ERR("Not enough memory for txq\n");
4658 EXPORT_SYMBOL(il_alloc_txq_mem);
4661 il_txq_mem(struct il_priv *il)
4666 EXPORT_SYMBOL(il_txq_mem);
4668 #ifdef CONFIG_IWLEGACY_DEBUGFS
4670 #define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
4673 il_reset_traffic_log(struct il_priv *il)
4675 il->tx_traffic_idx = 0;
4676 il->rx_traffic_idx = 0;
4678 memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4680 memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4684 il_alloc_traffic_mem(struct il_priv *il)
4686 u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
4688 if (il_debug_level & IL_DL_TX) {
4689 if (!il->tx_traffic) {
4690 il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4691 if (!il->tx_traffic)
4695 if (il_debug_level & IL_DL_RX) {
4696 if (!il->rx_traffic) {
4697 il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4698 if (!il->rx_traffic)
4702 il_reset_traffic_log(il);
4705 EXPORT_SYMBOL(il_alloc_traffic_mem);
4708 il_free_traffic_mem(struct il_priv *il)
4710 kfree(il->tx_traffic);
4711 il->tx_traffic = NULL;
4713 kfree(il->rx_traffic);
4714 il->rx_traffic = NULL;
4716 EXPORT_SYMBOL(il_free_traffic_mem);
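/*
 * The two helpers below copy the 802.11 header of every data frame
 * (up to IL_TRAFFIC_ENTRY_SIZE bytes) into the tx_traffic/rx_traffic
 * ring buffers so the traffic log can be dumped through debugfs.
 * Logging only happens when the corresponding IL_DL_TX/IL_DL_RX debug
 * level bit is set and the buffer has been allocated.
 */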
4719 il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
4720 struct ieee80211_hdr *header)
4725 if (likely(!(il_debug_level & IL_DL_TX)))
4728 if (!il->tx_traffic)
4731 fc = header->frame_control;
4732 if (ieee80211_is_data(fc)) {
4735 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4736 memcpy((il->tx_traffic +
4737 (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4739 il->tx_traffic_idx =
4740 (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4743 EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
4746 il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
4747 struct ieee80211_hdr *header)
4752 if (likely(!(il_debug_level & IL_DL_RX)))
4755 if (!il->rx_traffic)
4758 fc = header->frame_control;
4759 if (ieee80211_is_data(fc)) {
4762 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4763 memcpy((il->rx_traffic +
4764 (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4766 il->rx_traffic_idx =
4767 (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4770 EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
4773 il_get_mgmt_string(int cmd)
4776 IL_CMD(MANAGEMENT_ASSOC_REQ);
4777 IL_CMD(MANAGEMENT_ASSOC_RESP);
4778 IL_CMD(MANAGEMENT_REASSOC_REQ);
4779 IL_CMD(MANAGEMENT_REASSOC_RESP);
4780 IL_CMD(MANAGEMENT_PROBE_REQ);
4781 IL_CMD(MANAGEMENT_PROBE_RESP);
4782 IL_CMD(MANAGEMENT_BEACON);
4783 IL_CMD(MANAGEMENT_ATIM);
4784 IL_CMD(MANAGEMENT_DISASSOC);
4785 IL_CMD(MANAGEMENT_AUTH);
4786 IL_CMD(MANAGEMENT_DEAUTH);
4787 IL_CMD(MANAGEMENT_ACTION);
4795 il_get_ctrl_string(int cmd)
4798 IL_CMD(CONTROL_BACK_REQ);
4799 IL_CMD(CONTROL_BACK);
4800 IL_CMD(CONTROL_PSPOLL);
4801 IL_CMD(CONTROL_RTS);
4802 IL_CMD(CONTROL_CTS);
4803 IL_CMD(CONTROL_ACK);
4804 IL_CMD(CONTROL_CFEND);
4805 IL_CMD(CONTROL_CFENDACK);
4813 il_clear_traffic_stats(struct il_priv *il)
4815 memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
4816 memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
4820 * If CONFIG_IWLEGACY_DEBUGFS is defined,
4821 * il_update_stats() will
4822 * record every MGMT, CTRL and DATA packet on both the TX and RX paths.
4823 * Use debugfs to display the tx/rx_stats.
4824 * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT or CTRL
4825 * information will be recorded, but DATA packets will still be counted
4826 * because il_led.c needs to control the LED blinking based on the
4827 * number of TX and RX data frames.
4831 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
4833 struct traffic_stats *stats;
4836 stats = &il->tx_stats;
4838 stats = &il->rx_stats;
4840 if (ieee80211_is_mgmt(fc)) {
4841 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
4842 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
4843 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
4845 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
4846 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
4848 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
4849 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
4851 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
4852 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
4854 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
4855 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
4857 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
4858 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
4860 case cpu_to_le16(IEEE80211_STYPE_BEACON):
4861 stats->mgmt[MANAGEMENT_BEACON]++;
4863 case cpu_to_le16(IEEE80211_STYPE_ATIM):
4864 stats->mgmt[MANAGEMENT_ATIM]++;
4866 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
4867 stats->mgmt[MANAGEMENT_DISASSOC]++;
4869 case cpu_to_le16(IEEE80211_STYPE_AUTH):
4870 stats->mgmt[MANAGEMENT_AUTH]++;
4872 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
4873 stats->mgmt[MANAGEMENT_DEAUTH]++;
4875 case cpu_to_le16(IEEE80211_STYPE_ACTION):
4876 stats->mgmt[MANAGEMENT_ACTION]++;
4879 } else if (ieee80211_is_ctl(fc)) {
4880 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
4881 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
4882 stats->ctrl[CONTROL_BACK_REQ]++;
4884 case cpu_to_le16(IEEE80211_STYPE_BACK):
4885 stats->ctrl[CONTROL_BACK]++;
4887 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
4888 stats->ctrl[CONTROL_PSPOLL]++;
4890 case cpu_to_le16(IEEE80211_STYPE_RTS):
4891 stats->ctrl[CONTROL_RTS]++;
4893 case cpu_to_le16(IEEE80211_STYPE_CTS):
4894 stats->ctrl[CONTROL_CTS]++;
4896 case cpu_to_le16(IEEE80211_STYPE_ACK):
4897 stats->ctrl[CONTROL_ACK]++;
4899 case cpu_to_le16(IEEE80211_STYPE_CFEND):
4900 stats->ctrl[CONTROL_CFEND]++;
4902 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
4903 stats->ctrl[CONTROL_CFENDACK]++;
4909 stats->data_bytes += len;
4912 EXPORT_SYMBOL(il_update_stats);
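/*
 * il_force_reset - request a firmware restart.
 * Requests arriving within reset_duration of the previous one are
 * rejected. External requests (e.g. from debugfs) are always honoured;
 * internal ones additionally require the fw_restart module parameter.
 * On success the S_FW_ERROR flag is set and the restart work is queued.
 */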
4916 il_force_reset(struct il_priv *il, bool external)
4918 struct il_force_reset *force_reset;
4920 if (test_bit(S_EXIT_PENDING, &il->status))
4923 force_reset = &il->force_reset;
4924 force_reset->reset_request_count++;
4926 if (force_reset->last_force_reset_jiffies &&
4927 time_after(force_reset->last_force_reset_jiffies +
4928 force_reset->reset_duration, jiffies)) {
4929 D_INFO("force reset rejected\n");
4930 force_reset->reset_reject_count++;
4934 force_reset->reset_success_count++;
4935 force_reset->last_force_reset_jiffies = jiffies;
4938 * If the request comes from outside the driver (e.g. debugfs),
4939 * always perform it regardless of the module parameter setting.
4941 * If the request is internal (uCode error or a failure detected by
4942 * the driver), the fw_restart module parameter must be checked
4943 * before reloading the firmware.
4946 if (!external && !il->cfg->mod_params->restart_fw) {
4947 D_INFO("Cancel firmware reload based on "
4948 "module parameter setting\n");
4952 IL_ERR("On demand firmware reload\n");
4954 /* Set the FW error flag -- cleared on il_down */
4955 set_bit(S_FW_ERROR, &il->status);
4956 wake_up(&il->wait_command_queue);
4958 * Keep the restart process from trying to send host
4959 * commands by clearing the S_READY status bit
4961 clear_bit(S_READY, &il->status);
4962 queue_work(il->workqueue, &il->restart);
4968 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4969 enum nl80211_iftype newtype, bool newp2p)
4971 struct il_priv *il = hw->priv;
4972 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
4978 mutex_lock(&il->mutex);
4980 if (!ctx->vif || !il_is_ready_rf(il)) {
4982 * Huh? But wait ... this can maybe happen when
4983 * we're in the middle of a firmware restart!
4990 il_teardown_interface(il, vif, true);
4991 vif->type = newtype;
4993 err = il_setup_interface(il, ctx);
4996 * We've switched internally, but submitting to the
4997 * device may have failed for some reason. Mask this
4998 * error, because otherwise mac80211 will not switch
4999 * (and set the interface type back) and we'll be
5000 * out of sync with it.
5005 mutex_unlock(&il->mutex);
5008 EXPORT_SYMBOL(il_mac_change_interface);
5011 * On every watchdog tick we check the (latest) time stamp. If it has not
5012 * changed during the timeout period and the queue is not empty, we reset the firmware.
5015 il_check_stuck_queue(struct il_priv *il, int cnt)
5017 struct il_tx_queue *txq = &il->txq[cnt];
5018 struct il_queue *q = &txq->q;
5019 unsigned long timeout;
5022 if (q->read_ptr == q->write_ptr) {
5023 txq->time_stamp = jiffies;
5029 msecs_to_jiffies(il->cfg->base_params->wd_timeout);
5031 if (time_after(jiffies, timeout)) {
5032 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
5033 il->cfg->base_params->wd_timeout);
5034 ret = il_force_reset(il, false);
5035 return (ret == -EAGAIN) ? 0 : 1;
5042 * Making the watchdog tick a quarter of the timeout ensures we will
5043 * detect a hung queue between timeout and 1.25 * timeout.
5045 #define IL_WD_TICK(timeout) ((timeout) / 4)
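/*
 * For example (illustrative values only): a wd_timeout of 2000 ms gives
 * a 500 ms watchdog tick, so a queue whose time stamp stops advancing
 * is declared stuck somewhere between roughly 2000 and 2500 ms.
 */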
5048 * Watchdog timer callback: check each TX queue for a stall; if one is hung,
5049 * we reset the firmware. If everything is fine, just rearm the timer.
5052 il_bg_watchdog(unsigned long data)
5054 struct il_priv *il = (struct il_priv *)data;
5056 unsigned long timeout;
5058 if (test_bit(S_EXIT_PENDING, &il->status))
5061 timeout = il->cfg->base_params->wd_timeout;
5065 /* monitor and check for stuck cmd queue */
5066 if (il_check_stuck_queue(il, il->cmd_queue))
5069 /* monitor and check for other stuck queues */
5070 if (il_is_any_associated(il)) {
5071 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
5072 /* skip as we already checked the command queue */
5073 if (cnt == il->cmd_queue)
5075 if (il_check_stuck_queue(il, cnt))
5080 mod_timer(&il->watchdog,
5081 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
5083 EXPORT_SYMBOL(il_bg_watchdog);
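/*
 * il_setup_watchdog - arm the watchdog timer one tick from now when a
 * non-zero wd_timeout is configured, otherwise make sure the timer is
 * stopped.
 */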
5086 il_setup_watchdog(struct il_priv *il)
5088 unsigned int timeout = il->cfg->base_params->wd_timeout;
5091 mod_timer(&il->watchdog,
5092 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
5094 del_timer(&il->watchdog);
5096 EXPORT_SYMBOL(il_setup_watchdog);
5099 * Extended beacon time format:
5100 * a time in usec is converted into a 32-bit value in extended:internal format;
5101 * the extended part is the number of elapsed beacon intervals,
5102 * the internal part is the time in usec within one beacon interval.
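 * For example (assuming TIME_UNIT is 1024 usec per TU and
 * beacon_time_tsf_bits is 22): with a 100 TU beacon interval
 * (102400 usec), usec = 250000 encodes as 2 elapsed beacon intervals
 * in the extended part and 45200 usec in the internal part,
 * i.e. (2 << 22) + 45200.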
5105 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
5109 u32 interval = beacon_interval * TIME_UNIT;
5111 if (!interval || !usec)
5116 interval) & (il_beacon_time_mask_high(il,
5118 beacon_time_tsf_bits) >> il->
5119 hw_params.beacon_time_tsf_bits);
5121 (usec % interval) & il_beacon_time_mask_low(il,
5123 beacon_time_tsf_bits);
5125 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
5127 EXPORT_SYMBOL(il_usecs_to_beacons);
5129 /* base is usually what we get from the uCode with each received frame,
5130 * i.e. the same HW timer counter that is counting down
5133 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
5134 u32 beacon_interval)
5136 u32 base_low = base & il_beacon_time_mask_low(il,
5138 beacon_time_tsf_bits);
5139 u32 addon_low = addon & il_beacon_time_mask_low(il,
5141 beacon_time_tsf_bits);
5142 u32 interval = beacon_interval * TIME_UNIT;
5143 u32 res = (base & il_beacon_time_mask_high(il,
5145 beacon_time_tsf_bits)) +
5146 (addon & il_beacon_time_mask_high(il,
5148 beacon_time_tsf_bits));
5150 if (base_low > addon_low)
5151 res += base_low - addon_low;
5152 else if (base_low < addon_low) {
5153 res += interval + base_low - addon_low;
5154 res += (1 << il->hw_params.beacon_time_tsf_bits);
5156 res += (1 << il->hw_params.beacon_time_tsf_bits);
5158 return cpu_to_le32(res);
5160 EXPORT_SYMBOL(il_add_beacon_time);
5165 il_pci_suspend(struct device *device)
5167 struct pci_dev *pdev = to_pci_dev(device);
5168 struct il_priv *il = pci_get_drvdata(pdev);
5171 * This function is called when the system goes into suspend state.
5172 * mac80211 first calls il_mac_stop() from its own suspend handler,
5173 * but since il_mac_stop() has no knowledge of who the caller is,
5174 * it will not call apm_ops.stop() to stop the DMA operation.
5175 * Call apm_ops.stop() here to make sure the DMA is stopped.
5181 EXPORT_SYMBOL(il_pci_suspend);
5184 il_pci_resume(struct device *device)
5186 struct pci_dev *pdev = to_pci_dev(device);
5187 struct il_priv *il = pci_get_drvdata(pdev);
5188 bool hw_rfkill = false;
5191 * We disable the RETRY_TIMEOUT register (0x41) to keep
5192 * PCI Tx retries from interfering with C3 CPU state.
5194 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
5196 il_enable_interrupts(il);
5198 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5202 set_bit(S_RF_KILL_HW, &il->status);
5204 clear_bit(S_RF_KILL_HW, &il->status);
5206 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
5210 EXPORT_SYMBOL(il_pci_resume);
5212 const struct dev_pm_ops il_pm_ops = {
5213 .suspend = il_pci_suspend,
5214 .resume = il_pci_resume,
5215 .freeze = il_pci_suspend,
5216 .thaw = il_pci_resume,
5217 .poweroff = il_pci_suspend,
5218 .restore = il_pci_resume,
5220 EXPORT_SYMBOL(il_pm_ops);
5222 #endif /* CONFIG_PM */
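/*
 * il_update_qos - push the default QoS (EDCA) parameters of an active
 * context to the device with an asynchronous C_QOS_PARAM command,
 * setting the update-EDCA and TGN flags as appropriate.
 */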
5225 il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
5227 if (test_bit(S_EXIT_PENDING, &il->status))
5230 if (!ctx->is_active)
5233 ctx->qos_data.def_qos_parm.qos_flags = 0;
5235 if (ctx->qos_data.qos_active)
5236 ctx->qos_data.def_qos_parm.qos_flags |=
5237 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5239 if (ctx->ht.enabled)
5240 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5242 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5243 ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
5245 il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
5246 &ctx->qos_data.def_qos_parm, NULL);
5250 * il_mac_config - mac80211 config callback
5253 il_mac_config(struct ieee80211_hw *hw, u32 changed)
5255 struct il_priv *il = hw->priv;
5256 const struct il_channel_info *ch_info;
5257 struct ieee80211_conf *conf = &hw->conf;
5258 struct ieee80211_channel *channel = conf->channel;
5259 struct il_ht_config *ht_conf = &il->current_ht_config;
5260 struct il_rxon_context *ctx = &il->ctx;
5261 unsigned long flags = 0;
5264 int scan_active = 0;
5265 bool ht_changed = false;
5267 if (WARN_ON(!il->cfg->ops->legacy))
5270 mutex_lock(&il->mutex);
5272 D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
5275 if (unlikely(test_bit(S_SCANNING, &il->status))) {
5277 D_MAC80211("scan active\n");
5281 (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5282 /* mac80211 uses static for non-HT which is what we want */
5283 il->current_ht_config.smps = conf->smps_mode;
5286 * Recalculate chain counts.
5288 * If monitor mode is enabled then mac80211 will
5289 * set up the SM PS mode to OFF if an HT channel is configured.
5292 if (il->cfg->ops->hcmd->set_rxon_chain)
5293 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
5296 /* during scanning mac80211 will delay channel setting until
5297 * the scan finishes, calling us with changed = 0
5299 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5304 ch = channel->hw_value;
5305 ch_info = il_get_channel_info(il, channel->band, ch);
5306 if (!il_is_channel_valid(ch_info)) {
5307 D_MAC80211("leave - invalid channel\n");
5312 if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5313 !il_is_channel_ibss(ch_info)) {
5314 D_MAC80211("leave - not IBSS channel\n");
5319 spin_lock_irqsave(&il->lock, flags);
5321 /* Configure HT40 channels */
5322 if (ctx->ht.enabled != conf_is_ht(conf)) {
5323 ctx->ht.enabled = conf_is_ht(conf);
5326 if (ctx->ht.enabled) {
5327 if (conf_is_ht40_minus(conf)) {
5328 ctx->ht.extension_chan_offset =
5329 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5330 ctx->ht.is_40mhz = true;
5331 } else if (conf_is_ht40_plus(conf)) {
5332 ctx->ht.extension_chan_offset =
5333 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5334 ctx->ht.is_40mhz = true;
5336 ctx->ht.extension_chan_offset =
5337 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5338 ctx->ht.is_40mhz = false;
5341 ctx->ht.is_40mhz = false;
5344 * Default to no protection. Protection mode will
5345 * later be set from BSS config in il_ht_conf
5347 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5349 /* if we are switching from HT to 2.4 GHz, clear flags
5350 * from any HT-related info since 2.4 GHz does not support HT */
5352 if ((le16_to_cpu(il->staging.channel) != ch))
5353 il->staging.flags = 0;
5355 il_set_rxon_channel(il, channel, ctx);
5356 il_set_rxon_ht(il, ht_conf);
5358 il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
5360 spin_unlock_irqrestore(&il->lock, flags);
5362 if (il->cfg->ops->legacy->update_bcast_stations)
5363 ret = il->cfg->ops->legacy->update_bcast_stations(il);
5366 /* The list of supported rates and rate mask can be different
5367 * for each band; since the band may have changed, reset
5368 * the rate mask to what mac80211 lists */
5372 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5373 ret = il_power_update_mode(il, false);
5375 D_MAC80211("Error setting sleep level\n");
5378 if (changed & IEEE80211_CONF_CHANGE_POWER) {
5379 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5382 il_set_tx_power(il, conf->power_level, false);
5385 if (!il_is_ready(il)) {
5386 D_MAC80211("leave - not ready\n");
5393 if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5394 il_commit_rxon(il, ctx);
5396 D_INFO("Not re-sending same RXON configuration.\n");
5398 il_update_qos(il, ctx);
5401 D_MAC80211("leave\n");
5402 mutex_unlock(&il->mutex);
5405 EXPORT_SYMBOL(il_mac_config);
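/*
 * il_mac_reset_tsf - mac80211 reset_tsf callback.
 * Drops the cached HT configuration and any IBSS beacon skb, cancels a
 * pending scan and clears RXON_FILTER_ASSOC_MSK so the association can
 * be restarted from scratch.
 */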
5408 il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5410 struct il_priv *il = hw->priv;
5411 unsigned long flags;
5412 struct il_rxon_context *ctx = &il->ctx;
5414 if (WARN_ON(!il->cfg->ops->legacy))
5417 mutex_lock(&il->mutex);
5418 D_MAC80211("enter\n");
5420 spin_lock_irqsave(&il->lock, flags);
5421 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5422 spin_unlock_irqrestore(&il->lock, flags);
5424 spin_lock_irqsave(&il->lock, flags);
5426 /* new association: get rid of the IBSS beacon skb */
5428 dev_kfree_skb(il->beacon_skb);
5430 il->beacon_skb = NULL;
5434 spin_unlock_irqrestore(&il->lock, flags);
5436 il_scan_cancel_timeout(il, 100);
5437 if (!il_is_ready_rf(il)) {
5438 D_MAC80211("leave - not ready\n");
5439 mutex_unlock(&il->mutex);
5443 /* we are restarting the association process;
5444 * clear the RXON_FILTER_ASSOC_MSK bit
5446 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5447 il_commit_rxon(il, ctx);
5451 mutex_unlock(&il->mutex);
5453 D_MAC80211("leave\n");
5455 EXPORT_SYMBOL(il_mac_reset_tsf);
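/*
 * il_ht_conf - refresh the HT protection mode and the non-GF-STA flag
 * from the BSS configuration, and decide whether a single RX chain is
 * sufficient for the peer (single-stream APs and IBSS).
 */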
5458 il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5460 struct il_ht_config *ht_conf = &il->current_ht_config;
5461 struct ieee80211_sta *sta;
5462 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5463 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5465 D_ASSOC("enter:\n");
5467 if (!ctx->ht.enabled)
5470 ctx->ht.protection =
5471 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5472 ctx->ht.non_gf_sta_present =
5474 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5476 ht_conf->single_chain_sufficient = false;
5478 switch (vif->type) {
5479 case NL80211_IFTYPE_STATION:
5481 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5483 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5488 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5489 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5492 if (ht_cap->mcs.rx_mask[1] == 0 &&
5493 ht_cap->mcs.rx_mask[2] == 0)
5494 ht_conf->single_chain_sufficient = true;
5495 if (maxstreams <= 1)
5496 ht_conf->single_chain_sufficient = true;
5499 * If at all, this can only happen through a race
5500 * when the AP disconnects us while we're still
5501 * setting up the connection, in that case mac80211
5502 * will soon tell us about that.
5504 ht_conf->single_chain_sufficient = true;
5508 case NL80211_IFTYPE_ADHOC:
5509 ht_conf->single_chain_sufficient = true;
5519 il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5521 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5524 * inform the uCode that there is no longer an
5525 * association and that no more packets should be sent out
5528 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5529 il->staging.assoc_id = 0;
5530 il_commit_rxon(il, ctx);
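/*
 * il_beacon_update - fetch a fresh beacon from mac80211, replace the
 * cached beacon skb, record its timestamp and let the driver-specific
 * post_associate hook reprogram the device.
 */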
5534 il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5536 struct il_priv *il = hw->priv;
5537 unsigned long flags;
5539 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
5544 D_MAC80211("enter\n");
5546 lockdep_assert_held(&il->mutex);
5548 if (!il->beacon_ctx) {
5549 IL_ERR("update beacon but no beacon context!\n");
5554 spin_lock_irqsave(&il->lock, flags);
5557 dev_kfree_skb(il->beacon_skb);
5559 il->beacon_skb = skb;
5561 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
5562 il->timestamp = le64_to_cpu(timestamp);
5564 D_MAC80211("leave\n");
5565 spin_unlock_irqrestore(&il->lock, flags);
5567 if (!il_is_ready_rf(il)) {
5568 D_MAC80211("leave - RF not ready\n");
5572 il->cfg->ops->legacy->post_associate(il);
5576 il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5577 struct ieee80211_bss_conf *bss_conf, u32 changes)
5579 struct il_priv *il = hw->priv;
5580 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5583 if (WARN_ON(!il->cfg->ops->legacy))
5586 D_MAC80211("changes = 0x%X\n", changes);
5588 mutex_lock(&il->mutex);
5590 if (!il_is_alive(il)) {
5591 mutex_unlock(&il->mutex);
5595 if (changes & BSS_CHANGED_QOS) {
5596 unsigned long flags;
5598 spin_lock_irqsave(&il->lock, flags);
5599 ctx->qos_data.qos_active = bss_conf->qos;
5600 il_update_qos(il, ctx);
5601 spin_unlock_irqrestore(&il->lock, flags);
5604 if (changes & BSS_CHANGED_BEACON_ENABLED) {
5606 * the add_interface code must make sure we only ever
5607 * have a single interface that could be beaconing at any time.
5610 if (vif->bss_conf.enable_beacon)
5611 il->beacon_ctx = ctx;
5613 il->beacon_ctx = NULL;
5616 if (changes & BSS_CHANGED_BSSID) {
5617 D_MAC80211("BSSID %pM\n", bss_conf->bssid);
5620 * If there is currently a HW scan going on in the
5621 * background then we need to cancel it else the RXON
5622 * below/in post_associate will fail.
5624 if (il_scan_cancel_timeout(il, 100)) {
5625 IL_WARN("Aborted scan still in progress after 100ms\n");
5626 D_MAC80211("leaving - scan abort failed.\n");
5627 mutex_unlock(&il->mutex);
5631 /* mac80211 only sets assoc when in STATION mode */
5632 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
5633 memcpy(il->staging.bssid_addr, bss_conf->bssid,
5636 /* currently needed in a few places */
5637 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5639 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5645 * This needs to be after setting the BSSID in case
5646 * mac80211 decides to do both changes at once because
5647 * it will invoke post_associate.
5649 if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
5650 il_beacon_update(hw, vif);
5652 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
5653 D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
5654 if (bss_conf->use_short_preamble)
5655 il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
5657 il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
5660 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
5661 D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
5662 if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
5663 il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
5665 il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
5666 if (bss_conf->use_cts_prot)
5667 il->staging.flags |= RXON_FLG_SELF_CTS_EN;
5669 il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
5672 if (changes & BSS_CHANGED_BASIC_RATES) {
5673 /* XXX use this information
5675 * To do that, remove code from il_set_rate() and put something like this here:
5679 il->staging.ofdm_basic_rates =
5680 bss_conf->basic_rates;
5682 il->staging.ofdm_basic_rates =
5683 bss_conf->basic_rates >> 4;
5684 il->staging.cck_basic_rates =
5685 bss_conf->basic_rates & 0xF;
5689 if (changes & BSS_CHANGED_HT) {
5690 il_ht_conf(il, vif);
5692 if (il->cfg->ops->hcmd->set_rxon_chain)
5693 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
5696 if (changes & BSS_CHANGED_ASSOC) {
5697 D_MAC80211("ASSOC %d\n", bss_conf->assoc);
5698 if (bss_conf->assoc) {
5699 il->timestamp = bss_conf->timestamp;
5701 if (!il_is_rfkill(il))
5702 il->cfg->ops->legacy->post_associate(il);
5704 il_set_no_assoc(il, vif);
5707 if (changes && il_is_associated(il) && bss_conf->aid) {
5708 D_MAC80211("Changes (%#x) while associated\n", changes);
5709 ret = il_send_rxon_assoc(il, ctx);
5711 /* Sync active_rxon with latest change. */
5712 memcpy((void *)&il->active, &il->staging,
5713 sizeof(struct il_rxon_cmd));
5717 if (changes & BSS_CHANGED_BEACON_ENABLED) {
5718 if (vif->bss_conf.enable_beacon) {
5719 memcpy(il->staging.bssid_addr, bss_conf->bssid,
5721 memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
5722 il->cfg->ops->legacy->config_ap(il);
5724 il_set_no_assoc(il, vif);
5727 if (changes & BSS_CHANGED_IBSS) {
5729 il->cfg->ops->legacy->manage_ibss_station(il, vif,
5733 IL_ERR("failed to %s IBSS station %pM\n",
5734 bss_conf->ibss_joined ? "add" : "remove",
5738 mutex_unlock(&il->mutex);
5740 D_MAC80211("leave\n");
5742 EXPORT_SYMBOL(il_mac_bss_info_changed);
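/*
 * il_isr - hardware interrupt handler (top half).
 * Disables (but does not clear) the interrupt mask, samples CSR_INT and
 * CSR_FH_INT_STATUS, and schedules the irq tasklet when there is work
 * to do; otherwise interrupts are re-enabled before returning.
 */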
5745 il_isr(int irq, void *data)
5747 struct il_priv *il = data;
5748 u32 inta, inta_mask;
5750 unsigned long flags;
5754 spin_lock_irqsave(&il->lock, flags);
5756 /* Disable (but don't clear!) interrupts here to avoid
5757 * back-to-back ISRs and sporadic interrupts from our NIC.
5758 * If we have something to service, the tasklet will re-enable ints.
5759 * If we *don't* have something, we'll re-enable before leaving here. */
5760 inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
5761 _il_wr(il, CSR_INT_MASK, 0x00000000);
5763 /* Discover which interrupts are active/pending */
5764 inta = _il_rd(il, CSR_INT);
5765 inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
5767 /* Ignore interrupt if there's nothing in NIC to service.
5768 * This may be due to IRQ shared with another device,
5769 * or due to sporadic interrupts thrown from our NIC. */
5770 if (!inta && !inta_fh) {
5771 D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5775 if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
5776 /* Hardware disappeared. It might have already raised an interrupt */
5778 IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
5782 D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
5785 inta &= ~CSR_INT_BIT_SCD;
5787 /* il_irq_tasklet() will service interrupts and re-enable them */
5788 if (likely(inta || inta_fh))
5789 tasklet_schedule(&il->irq_tasklet);
5792 spin_unlock_irqrestore(&il->lock, flags);
5796 /* re-enable interrupts here since we don't have anything to service. */
5797 /* only re-enable if they were disabled by irq */
5798 if (test_bit(S_INT_ENABLED, &il->status))
5799 il_enable_interrupts(il);
5800 spin_unlock_irqrestore(&il->lock, flags);
5803 EXPORT_SYMBOL(il_isr);
5806 * il_tx_cmd_protection: Set RTS/CTS. 3945 and 4965 only share this function.
5810 il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5811 __le16 fc, __le32 *tx_flags)
5813 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5814 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5815 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5816 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5818 if (!ieee80211_is_mgmt(fc))
5821 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5822 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5823 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5824 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5825 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5826 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5827 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5830 } else if (info->control.rates[0].
5831 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5832 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5833 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5834 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5837 EXPORT_SYMBOL(il_tx_cmd_protection);