wmi.o \
wmi-tlv.o \
bmi.o \
- hw.o
+ hw.o \
+ p2p.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
{
- int ret = 0;
+ char filename[100];
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
+ scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+
+ ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+ ar->spec_board_loaded = true;
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
+ return 0;
+}
+
+static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+{
+ if (!ar->hw_params.fw.board) {
+ ath10k_err(ar, "failed to find board file fw entry\n");
return -EINVAL;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data (%d)\n", ret);
- goto err;
- }
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
+ ar->spec_board_loaded = false;
+
+ return 0;
+}
+
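+/* Prefer a board file matching the device's specific board id
+ * ("board-<bus>-<id>.bin") and fall back to the generic board file from
+ * hw_params when it is missing.
+ */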
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (strlen(ar->spec_board_id) > 0) {
+ ret = ath10k_core_fetch_spec_board_file(ar);
+ if (ret) {
+ ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
+ ret);
+ goto generic;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
+ ar->spec_board_id);
+ return 0;
+ }
+
+generic:
+ ret = ath10k_core_fetch_generic_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err(ar, "firmware file not defined\n");
+ return -EINVAL;
+ }
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
ar->wmi.op_version);
break;
+ case ATH10K_FW_IE_HTT_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ ar->htt.op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+ ar->htt.op_version);
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
goto err;
}
- /* now fetch the board file */
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
- ret = -EINVAL;
- goto err;
- }
-
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
- ar->hw_params.fw.dir, ar->hw_params.fw.board,
- ret);
- goto err;
- }
-
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
-
return 0;
err:
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ return ret;
+ }
+
+ ar->fw_api = 5;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ if (ret == 0)
+ goto success;
+
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
return -EINVAL;
}
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_HTT_OP_VERSION.
+ */
+ if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_wmi_wait_for_service_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_warn(ar, "wmi service ready event not received");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
}
}
status = ath10k_wmi_wait_for_unified_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_err(ar, "wmi unified ready event not received\n");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
{
int ret;
+ unsigned long time_left;
reinit_completion(&ar->target_suspend);
return ret;
}
- ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
- if (ret == 0) {
+ if (!time_left) {
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
+ init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#include "thermal.h"
+#include "wow.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_NUM_CHANS 38
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
-/* number of failed packets */
-#define ATH10K_KICKOUT_THRESHOLD 50
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
/*
* Use insanely high numbers to make sure that the firmware implementation
dma_addr_t paddr;
u8 eid;
u8 vdev_id;
+ enum ath10k_hw_txrx_mode txmode;
+ bool is_protected;
struct {
u8 tid;
#endif
};
+struct ath10k_chanctx {
+ /* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
+ * mac80211 should allow some sort of explicit locking to guarantee
+ * that the publicly available chanctx_conf can be accessed safely at
+ * all times.
+ */
+ struct ieee80211_chanctx_conf conf;
+};
+
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state {
enum ath10k_beacon_state beacon_state;
void *beacon_buf;
dma_addr_t beacon_paddr;
+ unsigned long tx_paused; /* arbitrary values defined by target */
struct ath10k *ar;
struct ieee80211_vif *vif;
} ap;
} u;
- u8 fixed_rate;
- u8 fixed_nss;
- u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
+ struct work_struct ap_csa_work;
+ struct delayed_work connection_loss_work;
+ struct cfg80211_bitrate_mask bitrate_mask;
};
struct ath10k_vif_iter {
*/
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+ /* Some firmware revisions have an incomplete WoWLAN implementation
+ * despite WMI service bit being advertised. This feature flag is used
+ * to distinguish whether WoWLAN is really supported or not.
+ */
+ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
return "unknown";
}
+enum ath10k_tx_pause_reason {
+ ATH10K_TX_PAUSE_Q_FULL,
+ ATH10K_TX_PAUSE_MAX,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
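+ /* bitmask of stat ids requested from firmware, set per wmi op version */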
+ u32 fw_stats_req_mask;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
+ /* protected by conf_mutex */
+ bool ani_enabled;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
const struct firmware *cal_file;
+ char spec_board_id[100];
+ bool spec_board_loaded;
+
int fw_api;
enum ath10k_cal_mode cal_mode;
struct cfg80211_chan_def chandef;
unsigned long long free_vdev_map;
+ struct ath10k_vif *monitor_arvif;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
int max_num_peers;
int max_num_stations;
int max_num_vdevs;
+ int max_num_tdls_vdevs;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
struct dfs_pattern_detector *dfs_detector;
+ unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
} stats;
struct ath10k_thermal thermal;
+ struct ath10k_wow wow;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
+ (strlen(ar->spec_board_id) > 0 ? ", " : ""),
+ ar->spec_board_id,
+ (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
+ ? " fallback" : ""),
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
- unsigned long timeout;
+ unsigned long timeout, time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- timeout = jiffies + msecs_to_jiffies(1*HZ);
+ timeout = jiffies + msecs_to_jiffies(1 * HZ);
ath10k_debug_fw_stats_reset(ar);
reinit_completion(&ar->debug.fw_stats_complete);
- ret = ath10k_wmi_request_stats(ar,
- WMI_STAT_PDEV |
- WMI_STAT_VDEV |
- WMI_STAT_PEER);
+ ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
}
- ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1*HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
return 0;
}
+static ssize_t ath10k_write_ani_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u8 enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ani_enabled == enable) {
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+ enable);
+ if (ret) {
+ ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+ goto exit;
+ }
+ ar->ani_enabled = enable;
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->ani_enabled);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+ .read = ath10k_read_ani_enable,
+ .write = ath10k_write_ani_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
.open = simple_open
};
+static ssize_t ath10k_write_quiet_period(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 period;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &period))
+ return -EINVAL;
+
+ if (period < ATH10K_QUIET_PERIOD_MIN) {
+ ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
+ period);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.quiet_period = period;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->thermal.quiet_period);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+ .read = ath10k_read_quiet_period,
+ .write = ath10k_write_quiet_period,
+ .open = simple_open
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
+ debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+ debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
return 0;
}
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
-/* assumes tx_lock is held */
-static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
-{
- struct ath10k *ar = ep->htc->ar;
-
- if (!ep->tx_credit_flow_enabled)
- return false;
- if (ep->tx_credits >= ep->tx_credits_per_max_message)
- return false;
-
- ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
- ep->eid);
- return true;
-}
-
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
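+ /* Request a credit report with every packet now that the per-endpoint
+ * need-credit-update heuristic is gone.
+ */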
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
-
- if (ath10k_htc_ep_need_credit_update(ep))
- hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
-
spin_unlock_bh(&ep->htc->tx_lock);
}
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
- default:
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/*
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
}
goto out;
}
{
struct ath10k *ar = htc->ar;
int i, status = 0;
+ unsigned long time_left;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
u16 credit_count;
u16 credit_size;
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0) {
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0)
+ if (!time_left)
status = -ETIMEDOUT;
}
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
+ unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
}
/* wait for response */
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
- if (status == 0) {
- ath10k_err(ar, "Service connect timeout: %d\n", status);
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT;
}
#include "core.h"
#include "debug.h"
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+ [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+ [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+ [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
+ switch (ar->htt.op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_1:
+ ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_TLV:
+ ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAIN:
+ ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAX:
+ case ATH10K_FW_HTT_OP_VERSION_UNSET:
+ WARN_ON(1);
+ return -EINVAL;
+ }
return 0;
}
#include <net/mac80211.h>
#include "htc.h"
+#include "hw.h"
#include "rx_desc.h"
+#include "hw.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
/*=== target -> host messages ===============================================*/
-enum htt_t2h_msg_type {
- HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
- HTT_T2H_MSG_TYPE_RX_IND = 0x1,
- HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
- HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
- HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
- HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
- HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
- HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
- HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
- HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
- HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
- HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
- HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
- HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
- HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
- HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+enum htt_main_t2h_msg_type {
+ HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_MAIN_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+ HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
+ HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
+ HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
+ HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
+ /* keep this last */
+ HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+ HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
+ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
	/* 0x13 reserved */
- HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
+ HTT_TLV_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_TLV_T2H_NUM_MSGS
+};
- /* FIXME: Do not depend on this event id. Numbering of this event id is
- * broken across different firmware revisions and HTT version fails to
- * indicate this.
- */
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_IND,
+ HTT_T2H_MSG_TYPE_RX_FLUSH,
+ HTT_T2H_MSG_TYPE_PEER_MAP,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ HTT_T2H_MSG_TYPE_RX_ADDBA,
+ HTT_T2H_MSG_TYPE_RX_DELBA,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ HTT_T2H_MSG_TYPE_PKTLOG,
+ HTT_T2H_MSG_TYPE_STATS_CONF,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ HTT_T2H_MSG_TYPE_SEC_IND,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_RX_PN_IND,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ HTT_T2H_MSG_TYPE_AGGR_CONF,
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
-
/* keep this last */
HTT_T2H_NUM_MSGS
};
u32 msdu_id;
bool discard;
bool no_ack;
+ bool success;
};
struct htt_peer_map_event {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
+ enum ath10k_fw_htt_op_version op_version;
+
+ const enum htt_t2h_msg_type *t2h_msg_types;
+ u32 t2h_msg_types_max;
struct {
/*
return 0;
}
-struct rfc1042_hdr {
- u8 llc_dsap;
- u8 llc_ssap;
- u8 llc_ctrl;
- u8 snap_oui[3];
- __be16 snap_type;
-} __packed;
-
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
- enum ieee80211_band band;
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ struct ieee80211_supported_band *sband;
+ u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0;
u32 info1, info2, info3;
- /* Band value can't be set as undefined but freq can be 0 - use that to
- * determine whether band is provided.
- *
- * FIXME: Perhaps this can go away if CCK rate reporting is a little
- * reworked?
- */
- if (!status->freq)
- return;
-
- band = status->band;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
info3 = __le32_to_cpu(rxd->ppdu_start.info3);
switch (preamble) {
case HTT_RX_LEGACY:
+ /* The band is required to look up the legacy rate index. The band
+ * itself can't be stored as undefined, so use freq == 0 to detect
+ * whether it was provided.
+ */
+ if (!status->freq)
+ return;
+
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
+ rate &= ~RX_PPDU_START_RATE_FLAG;
- status->rate_idx = rate_idx;
+ sband = &ar->mac.sbands[status->band];
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
}
}
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+ u16 peer_id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!rxd)
+ return NULL;
+
+ if (rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+ return NULL;
+
+ if (!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+ return NULL;
+
+ peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_PEER_IDX);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer)
+ return NULL;
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (WARN_ON_ONCE(!arvif))
+ return NULL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return NULL;
+
+ return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_id == vdev_id &&
+ ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+ return def.chan;
+ }
+
+ return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def *def = data;
+
+ *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def def = {};
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_htt_rx_h_any_chan_iter,
+ &def);
+
+ return def.chan;
+}
+
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd,
+ u32 vdev_id)
{
struct ieee80211_channel *ch;
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
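+ /* Fall back to increasingly coarse sources: the rx peer's channel,
+ * the vdev's channel and finally any active channel context.
+ */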
+ if (!ch)
+ ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+ if (!ch)
+ ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
spin_unlock_bh(&ar->data_lock);
if (!ch)
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u32 vdev_id)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
break;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
return;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
}
}
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
- ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
+ enum htt_t2h_msg_type type;
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
- switch (resp->hdr.msg_type) {
+
+ if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
+ resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+ switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor;
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.no_ack = true;
break;
}
case HTT_T2H_MSG_TYPE_TEST:
- /* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len);
return;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
- /* FIXME: This WMI-TLV event is overlapping with 10.2
- * CHAN_CHANGE - both being 0xF. Neither is being used in
- * practice so no immediate action is necessary. Nevertheless
- * HTT may need an abstraction layer like WMI has one day.
- */
+ break;
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
- ieee80211_wake_queues(htt->ar->hw);
+ ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
- ieee80211_stop_queues(htt->ar->hw);
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
exit:
spin_unlock_bh(&htt->tx_lock);
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
- dma_addr_t paddr;
- u32 frags_paddr;
- bool use_frags;
+ dma_addr_t paddr = 0;
+ u32 frags_paddr = 0;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- /* Since HTT 3.0 there is no separate mgmt tx command. However in case
- * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
- * fragment list host driver specifies directly frame pointer. */
- use_frags = htt->target_version_major < 3 ||
- !ieee80211_is_mgmt(hdr->frame_control);
-
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf) {
if (res)
goto err_free_txbuf;
- if (likely(use_frags)) {
+ switch (skb_cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
frags[1].paddr = 0;
frags[1].len = 0;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
- } else {
+ break;
+ case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr;
+ break;
}
/* Normally all commands go through HTC which manages tx credits for
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
- if (!ieee80211_has_protected(hdr->frame_control))
+ if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin"
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE "firmware-5.bin"
+
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
* FW API 4 and above.
*/
ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+ /* HTT "operations" interface version, 32 bit value. Supported from
+ * FW API 5 and above.
+ */
+ ATH10K_FW_IE_HTT_OP_VERSION = 6,
};
enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX,
};
+enum ath10k_fw_htt_op_version {
+ ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+ /* also used in 10.2 and 10.2.4 branches */
+ ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+ ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+ /* keep last */
+ ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
u8 payload[0];
} __packed;
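+
+/* Rate codes used by the target in rx/tx descriptors. Note that the OFDM
+ * codes are not in bitrate order and CCK has separate long/short preamble
+ * codes.
+ */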
+enum ath10k_hw_rate_ofdm {
+ ATH10K_HW_RATE_OFDM_48M = 0,
+ ATH10K_HW_RATE_OFDM_24M,
+ ATH10K_HW_RATE_OFDM_12M,
+ ATH10K_HW_RATE_OFDM_6M,
+ ATH10K_HW_RATE_OFDM_54M,
+ ATH10K_HW_RATE_OFDM_36M,
+ ATH10K_HW_RATE_OFDM_18M,
+ ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+ ATH10K_HW_RATE_CCK_LP_11M = 0,
+ ATH10K_HW_RATE_CCK_LP_5_5M,
+ ATH10K_HW_RATE_CCK_LP_2M,
+ ATH10K_HW_RATE_CCK_LP_1M,
+ ATH10K_HW_RATE_CCK_SP_11M,
+ ATH10K_HW_RATE_CCK_SP_5_5M,
+ ATH10K_HW_RATE_CCK_SP_2M,
+};
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
-#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */
-#define TARGET_TLV_NUM_VDEVS 3
+#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
-#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
- (TARGET_TLV_NUM_VDEVS) + \
- 2)
+#define TARGET_TLV_NUM_PEERS 35
+#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Number of Copy Engines supported */
#define CE_COUNT 8
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
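+/* Rate table registered with mac80211. hw_value/hw_value_short carry the
+ * target rate codes from hw.h so reported rates can be mapped back to
+ * sband indices.
+ */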
+static struct ieee80211_rate ath10k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
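+/* Convert a bitrate in 100kbps units (as used by mac80211) into a
+ * 500kbps-unit rate code, with BIT(7) marking CCK rates.
+ */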
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
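+/* Map a target rate code back to an index into the sband bitrate table,
+ * matching both long and short preamble CCK encodings.
+ */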
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
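+/* Each NSS occupies a 2-bit field in the VHT MCS map; expand the supported
+ * MCS code into a bitmask of usable MCS indices.
+ */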
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
/**********/
/* Crypto */
static int ath10k_send_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
.key_idx = key->keyidx,
.key_len = key->keylen,
.key_data = key->key,
+ .key_flags = flags,
.macaddr = macaddr,
};
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- arg.key_flags = WMI_KEY_PAIRWISE;
- else
- arg.key_flags = WMI_KEY_GROUP;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
arg.key_cipher = WMI_CIPHER_WEP;
- /* AP/IBSS mode requires self-key to be groupwise
- * Otherwise pairwise key must be set */
- if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
- arg.key_flags = WMI_KEY_PAIRWISE;
-
- if (def_idx)
- arg.key_flags |= WMI_KEY_TX_USAGE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- /* this one needs to be done in software */
- return 1;
+ WARN_ON(1);
+ return -EINVAL;
default:
ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
static int ath10k_install_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->install_key_done);
- ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
struct ath10k_peer *peer;
int ret;
int i;
- bool def_idx;
+ u32 flags;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
if (arvif->wep_keys[i] == NULL)
continue;
- /* set TX_USAGE flag for default key id */
- if (arvif->def_wep_key_idx == i)
- def_idx = true;
- else
- def_idx = false;
+
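+ /* Install each WEP key twice so it is usable both as a pairwise
+ * and as a group key.
+ */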
+ flags = 0;
+ flags |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+ addr, flags);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ flags |= WMI_KEY_GROUP;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr, def_idx);
+ addr, flags);
if (ret)
return ret;
spin_unlock_bh(&ar->data_lock);
}
+ /* In some cases (notably with static WEP IBSS with multiple keys)
+ * multicast Tx becomes broken. Both pairwise and groupwise keys are
+ * installed already. Using WMI_KEY_TX_USAGE in different combinations
+ * didn't seem to help. Using the def_keyid vdev parameter seems to
+ * be effective so use that.
+ *
+ * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */
+ if (arvif->def_wep_key_idx == -1)
+ return 0;
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ arvif->def_wep_key_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
return 0;
}
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
- DISABLE_KEY, addr, false);
+ DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
if (i == ARRAY_SIZE(peer->keys))
break;
/* key flags are not required to delete the key */
- ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
return first_errno;
}
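+
+/* Push an updated WEP key to every peer that doesn't have it installed,
+ * skipping the self peer and the current bssid peer.
+ */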
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ continue;
+
+ if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ continue;
+
+ if (peer->keys[key->keyidx] == key)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+ arvif->vdev_id, key->keyidx);
+
+ ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+ arvif->vdev_id, peer->addr, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/*********************/
/* General utilities */
/*********************/
}
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
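+/* Snapshot the vif's current chanctx channel definition under RCU. */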
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ int *num = data;
+
+ (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+ int num = 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_num_chanctxs_iter,
+ &num);
+
+ return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+ enum wmi_peer_type peer_type)
{
int ret;
if (ar->num_peers >= ar->max_num_peers)
return -ENOBUFS;
- ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
ar->num_stations = 0;
}
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+ struct ieee80211_sta *sta,
+ enum wmi_tdls_peer_state state)
+{
+ int ret;
+ struct wmi_tdls_peer_update_cmd_arg arg = {};
+ struct wmi_tdls_peer_capab_arg cap = {};
+ struct wmi_channel_arg chan_arg = {};
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.peer_state = state;
+ ether_addr_copy(arg.addr, sta->addr);
+
+ cap.peer_max_sp = sta->max_sp;
+ cap.peer_uapsd_queues = sta->uapsd_queues;
+
+ if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+ !sta->tdls_initiator)
+ cap.is_peer_responder = 1;
+
+ ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+ arg.addr, vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/************************/
/* Interface management */
/************************/
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
return -ESHUTDOWN;
- ret = wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH10K_VDEV_SETUP_TIMEOUT_HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
- struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct cfg80211_chan_def *chandef = NULL;
- struct ieee80211_channel *channel = chandef->chan;
+ struct ieee80211_channel *channel = NULL;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &chandef);
+ if (WARN_ON_ONCE(!chandef))
+ return -ENOENT;
+
+ channel = chandef->chan;
+
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
return 0;
}
+static bool ath10k_mac_should_disable_promisc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ if (!(ar->filter_flags & FIF_PROMISC_IN_BSS))
+ return true;
+
+ if (!ar->num_started_vdevs)
+ return false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return false;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac disabling promiscuous mode because vdev is started\n");
+ return true;
+}
+
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ /* At least one chanctx is required to derive a channel to start
+ * the monitor vdev on.
+ */
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+ if (num_ctx == 0)
+ return false;
+
+ /* If there's already an existing special monitor interface then don't
+ * bother creating another monitor vdev.
+ */
+ if (ar->monitor_arvif)
+ return false;
+
+ return ar->monitor ||
+ !ath10k_mac_should_disable_promisc(ar) ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+
+ /* FIXME: Current interface combinations and cfg80211/mac80211 code
+ * shouldn't allow this but make sure to prevent handling the following
+ * case anyway since multi-channel DFS hasn't been tested at all.
+ */
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+ return false;
+
+ return true;
+}
+
static int ath10k_monitor_recalc(struct ath10k *ar)
{
- bool should_start;
+ bool needed;
+ bool allowed;
+ int ret;
lockdep_assert_held(&ar->conf_mutex);
- should_start = ar->monitor ||
- ar->filter_flags & FIF_PROMISC_IN_BSS ||
- test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ needed = ath10k_mac_monitor_vdev_is_needed(ar);
+ allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac monitor recalc started? %d should? %d\n",
- ar->monitor_started, should_start);
+ "mac monitor recalc started? %d needed? %d allowed? %d\n",
+ ar->monitor_started, needed, allowed);
+
+ if (WARN_ON(needed && !allowed)) {
+ if (ar->monitor_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+ /* not serious */
+ }
+
+ return -EPERM;
+ }
- if (should_start == ar->monitor_started)
+ if (needed == ar->monitor_started)
return 0;
- if (should_start)
+ if (needed)
return ath10k_monitor_start(ar);
-
- return ath10k_monitor_stop(ar);
+ else
+ return ath10k_monitor_stop(ar);
}
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
vdev_param = ar->wmi.vdev_param->enable_rtscts;
- if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
- rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
if (arvif->num_legacy_stations > 0)
rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
WMI_RTSCTS_PROFILE);
+ else
+ rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_PROFILE);
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
rts_cts);
return 0;
}
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ bool *ret = data;
+
+ if (!*ret && conf->radar_enabled)
+ *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+ bool has_radar = false;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_has_radar_iter,
+ &has_radar);
+
+ return has_radar;
+}
+
static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
int ret;
ath10k_stop_cac(ar);
- if (!ar->radar_enabled)
+ if (!ath10k_mac_has_radar_enabled(ar))
return;
if (ar->num_started_vdevs > 0)
}
}
-static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
{
struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
struct wmi_vdev_start_request_arg arg = {};
- int ret = 0;
+ int ret = 0, ret2;
lockdep_assert_held(&ar->conf_mutex);
ar->num_started_vdevs++;
ath10k_recalc_radar_detection(ar);
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "mac failed to recalc monitor for vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret);
+ ret2 = ath10k_vdev_stop(arvif);
+ if (ret2)
+ ath10k_warn(ar, "mac failed to stop vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret2);
+ }
+
return ret;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- return ath10k_vdev_start_restart(arvif, false);
+ return ath10k_vdev_start_restart(arvif, def, false);
}
-static int ath10k_vdev_restart(struct ath10k_vif *arvif)
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- return ath10k_vdev_start_restart(arvif, true);
+ return ath10k_vdev_start_restart(arvif, def, true);
}
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+ struct sk_buff *bcn)
{
struct ath10k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- if (ar->num_started_vdevs != 0) {
- ar->num_started_vdevs--;
- ath10k_recalc_radar_detection(ar);
- }
-
- return ret;
-}
-
-static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
- struct sk_buff *bcn)
-{
- struct ath10k *ar = arvif->ar;
- struct ieee80211_mgmt *mgmt;
- const u8 *p2p_ie;
- int ret;
-
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
- return 0;
-
- if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
- return 0;
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return 0;
mgmt = (void *)bcn->data;
p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
bcn = ieee80211_beacon_get_template(hw, vif, &offs);
if (!bcn) {
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
return 0;
}
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ int ret;
+
+ /* When the vdev is originally started during assign_vif_chanctx() some
+ * information is missing, notably the SSID. Firmware revisions with beacon
+ * offloading require the SSID to be provided during vdev (re)start to
+ * handle hidden SSID properly.
+ *
+ * Vdev restart must be done after vdev has been both started and
+ * upped. Otherwise some firmware revisions (at least 10.2) fail to
+ * deliver vdev restart response event causing timeouts during vdev
+ * syncing in ath10k.
+ *
+ * Note: The vdev down/up and template reinstallation could be skipped
+ * since only wmi-tlv firmware is known to have beacon offload and
+ * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+ * response delivery. It's probably more robust to keep it as is.
+ */
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EINVAL;
+
+ if (WARN_ON(!arvif->is_up))
+ return -EINVAL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return -EINVAL;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
+ * firmware will crash upon vdev up.
+ */
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_restart(arvif, &def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->enable_beacon) {
- ath10k_vdev_stop(arvif);
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
- arvif->is_started = false;
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
arvif->tx_seq_no = 0x1000;
- ret = ath10k_vdev_start(arvif);
- if (ret)
- return;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
if (ret) {
ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
- ath10k_vdev_stop(arvif);
return;
}
- arvif->is_started = true;
arvif->is_up = true;
+ ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->ibss_joined) {
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
- if (ret)
- ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
-
if (is_zero_ether_addr(arvif->bssid))
return;
return;
}
- ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
- if (ret) {
- ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
- return;
- }
-
vdev_param = arvif->ar->wmi.vdev_param->atim_window;
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
return 0;
}
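+/* With beacon offload the firmware transmits the uploaded template on its
+ * own, so the channel-switch countdown IE baked into it would go stale.
+ * Each run of the helper below decrements mac80211's CSA counter and
+ * re-uploads fresh beacon/probe response templates until the countdown
+ * completes, at which point ieee80211_csa_finish() lets mac80211 perform
+ * the actual switch.
+ */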
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+ return;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (!vif->csa_active)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ if (!ieee80211_csa_is_complete(vif)) {
+ ieee80211_csa_update_counter(vif);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+ } else {
+ ieee80211_csa_finish(vif);
+ }
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ ap_csa_work);
+ struct ath10k *ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_vif_ap_csa_count_down(arvif);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
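+/* The two handlers below implement a simple recovery scheme: a firmware
+ * beacon-miss event reports beacon loss to mac80211 and arms
+ * connection_loss_work with an ATH10K_CONNECTION_LOSS_HZ delay; if
+ * mac80211's AP probing brings beacons back, the beacon rx path disarms
+ * the work before it can declare the connection lost.
+ */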
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
/**********************/
/* Station management */
/**********************/
struct wmi_peer_assoc_complete_arg *arg)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 aid;
lockdep_assert_held(&ar->conf_mutex);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
- arg->peer_aid = sta->aid;
+ arg->peer_aid = aid;
arg->peer_flags |= WMI_PEER_AUTH;
arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
struct wmi_peer_assoc_complete_arg *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
lockdep_assert_held(&ar->conf_mutex);
- bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
- info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
}
static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ enum ieee80211_band band;
u32 ratemask;
+ u8 rate;
int i;
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
- ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
rateset->num_rates = 0;
if (!(ratemask & 1))
continue;
- rateset->rates[rateset->num_rates] = rates->hw_value;
+ rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
rateset->num_rates++;
}
}
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int i, n;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ int i, n, max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!ht_cap->ht_supported)
return;
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+ ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_HT;
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ }
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
else if (ht_cap->mcs.rx_mask[1])
arg->peer_rate_caps |= WMI_RC_DS_FLAG;
- for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
- if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
arg->peer_ht_rates.rates[n++] = i;
+ }
/*
* This is a workaround for HT-enabled STAs which break the spec
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = sta->rx_nss;
+ arg->peer_num_spatial_streams = max_nss;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
return 0;
}
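+/* A VHT MCS map packs one 2-bit field per spatial stream: 0 = MCS 0-7,
+ * 1 = MCS 0-8, 2 = MCS 0-9, 3 = unsupported. Worked example (values
+ * illustrative): a peer tx_mcs_set of 0xfffa advertises MCS 0-9 on NSS 1-2;
+ * if the user mask limits NSS 2 to MCS 0-7 (vht_mcs_limit[1] = 0x00ff) the
+ * merged bitmap gives fls(0x00ff) - 1 = 7, so that field is rewritten to
+ * IEEE80211_VHT_MCS_SUPPORT_0_7 while NSS 1 stays at SUPPORT_0_9.
+ */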
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ default:
+ /* see ath10k_mac_can_set_bitrate_mask() */
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u16 *vht_mcs_mask;
u8 ampdu_factor;
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!vht_cap->vht_supported)
return;
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_VHT;
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (def.chan->band == IEEE80211_BAND_2GHZ)
arg->peer_flags |= WMI_PEER_VHT_2G;
arg->peer_vht_caps = vht_cap->cap;
__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->peer_vht_rates.tx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
- arg->peer_vht_rates.tx_mcs_set =
- __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+ arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
}
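+/* The first ATH10K_MAC_FIRST_OFDM_RATE_IDX entries of the 2 GHz rate table
+ * are the CCK (11b) rates (presumably 1, 2, 5.5 and 11 Mbps), so shifting
+ * them out leaves only the OFDM bits; a non-zero result means the station
+ * supports at least one OFDM rate and can be treated as 11g.
+ */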
-static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- /* First 4 rates in ath10k_rates are CCK (11b) rates. */
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+ return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- switch (ar->hw->conf.chandef.chan->band) {
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
case IEEE80211_BAND_2GHZ:
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+ } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
/*
* Check VHT first.
*/
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AC_VHT80;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
ath10k_peer_assoc_h_crypto(ar, vif, arg);
- ath10k_peer_assoc_h_rates(ar, sta, arg);
- ath10k_peer_assoc_h_ht(ar, sta, arg);
- ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
}
arvif->is_up = false;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
}
static int ath10k_station_assoc(struct ath10k *ar,
return ret;
}
- peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
/* TX handlers */
/***************/
-static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
{
- if (ieee80211_is_mgmt(hdr->frame_control))
- return HTT_DATA_TX_EXT_TID_MGMT;
+ lockdep_assert_held(&ar->htt.tx_lock);
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused |= BIT(reason);
+ ieee80211_stop_queues(ar->hw);
+}
- if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
- return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_tx_unlock_iter,
+ ar);
+}
+
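+/* Tx pausing is tracked at two levels: ar->tx_paused is a device-wide
+ * bitmap of pause reasons that stops every queue, while arvif->tx_paused
+ * below only parks the vif's own hw queue. A queue wakes up again only
+ * once both bitmaps are clear, e.g. a vdev paused for P2P NoA stays
+ * stopped even after a device-wide pause reason is lifted.
+ */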
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused |= BIT(reason);
+ ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
+ break;
+ default:
+ ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
+ break;
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ default:
+ /* FIXME: Some pause_ids aren't vdev specific. Instead they
+ * target peer_id and tid. Implementing these could improve
+ * traffic scheduling fairness across multiple connected
+ * stations in AP/IBSS modes.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause vdev %i id %d\n",
+ arvif->vdev_id, pause_id);
+ break;
+ }
+}
+
+struct ath10k_mac_tx_pause {
+ u32 vdev_id;
+ enum wmi_tlv_tx_pause_id pause_id;
+ enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_mac_tx_pause *arg = data;
+
+ ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k_mac_tx_pause arg = {
+ .vdev_id = vdev_id,
+ .pause_id = pause_id,
+ .action = action,
+ };
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_handle_tx_pause_iter,
+ &arg);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
}
static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
return 0;
}
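+/* Roughly, the selection below maps: no vif/monitor -> RAW (frames passed
+ * through as-is), management frames (and NullFunc on pre-3.0 HTT without
+ * WMI mgmt tx) -> MGMT, TDLS data -> ETHERNET, and everything else ->
+ * NATIVE_WIFI, the 802.11-minus-QoS format the firmware expects.
+ */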
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+ return ATH10K_HW_TXRX_RAW;
+
+ if (ieee80211_is_mgmt(fc))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * NullFunc frames are mostly used to ping whether a client or AP is still
+ * reachable and responsive. This implies tx status reports must be
+ * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
+ * come to the conclusion that the other end disappeared and tear down the
+ * BSS connection, or never manage to disconnect from the BSS/client at all
+ * (which has been seen in practice).
+ *
+ * Firmware with HTT older than 3.0 delivers incorrect tx status for
+ * NullFunc frames to the driver. However there's an HTT Mgmt Tx command
+ * which seems to deliver correct tx reports for NullFunc frames. The
+ * downside of using it is it ignores client powersave state so it can
+ * end up disconnecting sleeping clients in AP mode. It should fix STA
+ * mode though because APs don't sleep.
+ */
+ if (ar->htt.target_version_major < 3 &&
+ (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+ * NativeWifi txmode - it selects AP key instead of peer key. It seems
+ * to work with Ethernet txmode so use it.
+ */
+ if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
- /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
- * frames. Powersave is handled by the fw/hw so QoS NyllFunc frames are
- * used only for CQM purposes (e.g. hostapd station keepalive ping) so
- * it is safe to downgrade to NullFunc.
+ /* Some firmware revisions don't handle sending QoS NullFunc well.
+ * These frames are mainly used for CQM purposes so it doesn't really
+ * matter whether a QoS NullFunc or a plain NullFunc is sent.
*/
hdr = (void *)skb->data;
- if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- }
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
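+/* Sketch of the header rewrite done below:
+ *
+ *   [802.11 hdr][RFC 1042 LLC/SNAP][payload]
+ *       -> [dest MAC][src MAC][ethertype][payload]
+ *
+ * DA/SA and the SNAP ethertype are saved first since skb_pull() discards
+ * the regions they live in.
+ */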
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rfc1042_hdr *rfc1042;
+ struct ethhdr *eth;
+ size_t hdrlen;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 type;
+
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ rfc1042 = (void *)skb->data + hdrlen;
+
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ type = rfc1042->snap_type;
+
+ skb_pull(skb, hdrlen + sizeof(*rfc1042));
+ skb_push(skb, sizeof(*eth));
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, da);
+ ether_addr_copy(eth->h_source, sa);
+ eth->h_proto = type;
}
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
ar->htt.target_version_minor >= 4);
}
-static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
int ret = 0;
- if (ar->htt.target_version_major >= 3) {
- /* Since HTT 3.0 there is no separate mgmt tx command */
- ret = ath10k_htt_tx(&ar->htt, skb);
- goto exit;
+ spin_lock_bh(&ar->data_lock);
+
+ if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+ ret = -ENOSPC;
+ goto unlock;
}
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features)) {
- if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
- ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn(ar, "reached WMI management transmit queue limit\n");
- ret = -EBUSY;
- goto exit;
- }
+ __skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- } else {
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- }
- } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features) &&
- ieee80211_is_nullfunc(hdr->frame_control)) {
- /* FW does not report tx status properly for NullFunc frames
- * unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends
- * on the tx status to be correct. */
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- } else {
- ret = ath10k_htt_tx(&ar->htt, skb);
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = 0;
+
+ switch (cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ case ATH10K_HW_TXRX_ETHERNET:
+ ret = ath10k_htt_tx(htt, skb);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features))
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ else if (ar->htt.target_version_major >= 3)
+ ret = ath10k_htt_tx(htt, skb);
+ else
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
}
-exit:
if (ret) {
ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
ret);
const u8 *peer_addr;
int vdev_id;
int ret;
+ unsigned long time_left;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
ar->offchan_tx_skb = skb;
spin_unlock_bh(&ar->data_lock);
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
- ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
- 3 * HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
struct ath10k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+ ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+ ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
- /* it makes no sense to process injected frames like that */
- if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+ switch (ATH10K_SKB_CB(skb)->txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
ath10k_tx_h_nwifi(hw, skb);
ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ /* FIXME: Packet injection isn't implemented. It should be
+ * doable with firmware 10.2 on qca988x.
+ */
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return;
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
}
}
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
}
/* Must not be called with conf_mutex held as workers can use that also. */
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
ar->filter_flags = 0;
ar->monitor = false;
+ ar->monitor_arvif = NULL;
if (ar->monitor_started)
ath10k_monitor_stop(ar);
ar->monitor_started = false;
+ ar->tx_paused = 0;
ath10k_scan_finish(ar);
ath10k_peer_cleanup_all(ar);
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 burst_enable;
int ret = 0;
/*
goto err_core_stop;
}
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_adaptive_qcs(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+ burst_enable = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+ goto err_core_stop;
+ }
+ }
+
if (ar->cfg_tx_chainmask)
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
ar->cfg_rx_chainmask);
goto err_core_stop;
}
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->ani_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ani by default: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->ani_enabled = true;
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
ath10k_spectral_start(ar);
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return 0;
return ret;
}
-static const char *chandef_get_width(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- return "20 (noht)";
- case NL80211_CHAN_WIDTH_20:
- return "20";
- case NL80211_CHAN_WIDTH_40:
- return "40";
- case NL80211_CHAN_WIDTH_80:
- return "80";
- case NL80211_CHAN_WIDTH_80P80:
- return "80+80";
- case NL80211_CHAN_WIDTH_160:
- return "160";
- case NL80211_CHAN_WIDTH_5:
- return "5";
- case NL80211_CHAN_WIDTH_10:
- return "10";
- }
- return "?";
-}
-
-static void ath10k_config_chan(struct ath10k *ar)
+static void ath10k_mac_chan_reconfigure(struct ath10k *ar)
{
struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
- ar->chandef.chan->center_freq,
- ar->chandef.center_freq1,
- ar->chandef.center_freq2,
- chandef_get_width(ar->chandef.width));
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chan reconfigure\n");
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
continue;
- ret = ath10k_vdev_restart(arvif);
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ continue;
+
+ ret = ath10k_vdev_restart(arvif, &def);
if (ret) {
ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
mutex_lock(&ar->conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel %dMHz flags 0x%x radar %d\n",
- conf->chandef.chan->center_freq,
- conf->chandef.chan->flags,
- conf->radar_enabled);
-
- spin_lock_bh(&ar->data_lock);
- ar->rx_channel = conf->chandef.chan;
- spin_unlock_bh(&ar->data_lock);
-
- ar->radar_enabled = conf->radar_enabled;
- ath10k_recalc_radar_detection(ar);
-
- if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
- ar->chandef = conf->chandef;
- ath10k_config_chan(ar);
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
int ret = 0;
u32 value;
int bit;
+ int i;
u32 vdev_param;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath10k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
break;
}
+ /* Using vdev_id as queue number will make it very easy to do per-vif
+ * tx queue locking. This shouldn't wrap due to interface combinations
+ * but do a modulo for correctness' sake and to prevent using offchannel tx
+ * queues for regular vif tx.
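+ *
+ * E.g. with IEEE80211_MAX_QUEUES at its usual value of 16, vdevs 0-14 map
+ * straight onto hw queues 0-14 while queue 15 stays reserved for
+ * offchannel tx.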
+ */
+ vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
/* Some firmware revisions don't wait for beacon tx completion before
* sending another SWBA event. This could lead to hardware using old
* (freed) beacon data in some cases, e.g. tx credit starvation
}
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret) {
- ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
goto err_peer_delete;
}
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = arvif;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ goto err_peer_delete;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_delete:
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
err_vdev_delete:
return ret;
}
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++)
+ ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
+ cancel_work_sync(&arvif->ap_csa_work);
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
vif->addr);
if (ret)
- ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
/* Some firmware revisions don't notify host about self-peer removal
* until after associated vdev is deleted.
*/
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
vif->addr);
if (ret)
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = NULL;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_mac_vif_tx_unlock_all(arvif);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
mutex_unlock(&ar->conf_mutex);
}
if (ret)
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ info->use_cts_prot ? 1 : 0);
+ if (ret)
+ ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+ info->use_cts_prot, arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
* frames with multi-vif APs. This is not required for main firmware
* branch (e.g. 636).
*
- * FIXME: This has been tested only in AP. It remains unknown if this
- * is required for multi-vif STA interfaces on 10.1 */
+ * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+ *
+ * FIXME: It remains unknown if this is required for multi-vif STA
+ * interfaces on 10.1.
+ */
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
return;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104;
- bool def_idx = false;
int ret = 0;
+ int ret2;
+ u32 flags = 0;
+ u32 flags2;
+
+ /* this one needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
key->hw_key_idx = key->keyidx;
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+ }
+
/* The peer should not disappear midway (unless FW goes awry) since
 * we already hold conf_mutex. We just make sure it's there now. */
spin_lock_bh(&ar->data_lock);
}
}
- if (is_wep) {
- if (cmd == SET_KEY)
- arvif->wep_keys[key->keyidx] = key;
- else
- arvif->wep_keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+ if (is_wep) {
if (cmd == DISABLE_KEY)
ath10k_clear_vdev_key(arvif, key);
- }
- /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
- * static WEP, do not set this flag for the keys whose key id
- * is greater than default key id.
- */
- if (arvif->def_wep_key_idx == -1)
- def_idx = true;
+ /* When WEP keys are uploaded it's possible that there are
+ * stations associated already (e.g. when merging) without any
+ * keys. Static WEP needs an explicit per-peer key upload.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ cmd == SET_KEY)
+ ath10k_mac_vif_update_wep_key(arvif, key);
+
+ /* 802.1x never sets the def_wep_key_idx so each set_key()
+ * call changes default tx key.
+ *
+ * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+ * after first set_key().
+ */
+ if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+ flags |= WMI_KEY_TX_USAGE;
+ }
- ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
+ /* mac80211 sets static WEP keys as groupwise while firmware requires
+ * them to be installed twice as both pairwise and groupwise.
+ */
+ if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+ flags2 = flags;
+ flags2 &= ~WMI_KEY_GROUP;
+ flags2 |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+ if (ret) {
+ ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+ peer_addr, flags);
+ if (ret2)
+ ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret2);
+ goto exit;
+ }
+ }
+
ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
spin_lock_bh(&ar->data_lock);
}
arvif->def_wep_key_idx = keyidx;
+
unlock:
mutex_unlock(&arvif->ar->conf_mutex);
}
struct ath10k_vif *arvif;
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
int err;
arvif = arsta->arvif;
ar = arvif->ar;
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
spin_lock_bh(&ar->data_lock);
changed = arsta->changed;
mutex_lock(&ar->conf_mutex);
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
if (changed & IEEE80211_RC_BW_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
sta->addr, bw);
mutex_unlock(&ar->conf_mutex);
}
-static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
return 0;
}
-static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
-static int ath10k_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
{
- struct ath10k *ar = hw->priv;
- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- int ret = 0;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
- if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
- }
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int *num_tdls_vifs = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+ (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+ int num_tdls_vifs = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_tdls_vifs_count_iter,
+ &num_tdls_vifs);
+ return num_tdls_vifs;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ }
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
/*
* New station addition.
*/
+ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+ u32 num_tdls_stations;
+ u32 num_tdls_vifs;
+
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
arvif->vdev_id, sta->addr,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
- ret = ath10k_mac_inc_num_stations(arvif);
+ ret = ath10k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+ if (sta->tdls)
+ peer_type = WMI_PEER_TYPE_TDLS;
+
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+ peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ goto exit;
+ }
+
+ if (!sta->tdls)
+ goto exit;
+
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+ num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+ if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+ num_tdls_stations == 0) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id, ar->max_num_tdls_vdevs);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOBUFS;
goto exit;
}
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(arvif->is_started);
+ if (num_tdls_stations == 0) {
+ /* This is the first tdls peer in current vif */
+ enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
- ret = ath10k_vdev_start(arvif);
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ state);
if (ret) {
- ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
arvif->vdev_id, ret);
- WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
- sta->addr));
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_PEERING);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
- arvif->is_started = true;
+ if (num_tdls_stations != 0)
+ goto exit;
+ ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
}
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(!arvif->is_started);
-
- ret = ath10k_vdev_stop(arvif);
- if (ret)
- ath10k_warn(ar, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
- }
-
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ if (!sta->tdls)
+ goto exit;
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+ goto exit;
+
+ /* This was the last tdls peer in current vif */
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ }
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
+ new_state == IEEE80211_STA_AUTHORIZED &&
+ sta->tdls) {
+ /*
+ * TDLS station authorized.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_CONNECTED);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
/*
* Disassociation.
*/
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_start_scan_arg arg;
int ret = 0;
+ u32 scan_time_msec;
mutex_lock(&ar->conf_mutex);
if (ret)
goto exit;
- duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
arg.scan_id = ATH10K_SCAN_ID;
arg.n_channels = 1;
arg.channels[0] = chan->center_freq;
- arg.dwell_time_active = duration;
- arg.dwell_time_passive = duration;
- arg.max_scan_time = 2 * duration;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration_ms = duration;
ret = ath10k_start_scan(ar, &arg);
if (ret) {
goto exit;
}
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
ret = 0;
exit:
mutex_unlock(&ar->conf_mutex);
return 1;
}
-#ifdef CONFIG_PM
-static int ath10k_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
- if (ret) {
- if (ret == -ETIMEDOUT)
- goto resume;
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_hif_suspend(ar);
- if (ret) {
- ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
- goto resume;
- }
-
- ret = 0;
- goto exit;
-resume:
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret)
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
-
- ret = 1;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath10k_resume(struct ieee80211_hw *hw)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_hif_resume(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume hif: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = 0;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-#endif
-
static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
return ret;
}
-/* Helper table for legacy fixed_rate/bitrate_mask */
-static const u8 cck_ofdm_rate[] = {
- /* CCK */
- 3, /* 1Mbps */
- 2, /* 2Mbps */
- 1, /* 5.5Mbps */
- 0, /* 11Mbps */
- /* OFDM */
- 3, /* 6Mbps */
- 7, /* 9Mbps */
- 2, /* 12Mbps */
- 6, /* 18Mbps */
- 1, /* 24Mbps */
- 5, /* 36Mbps */
- 0, /* 48Mbps */
- 4, /* 54Mbps */
-};
-
-/* Check if only one bit set */
-static int ath10k_check_single_mask(u32 mask)
-{
- int bit;
-
- bit = ffs(mask);
- if (!bit)
- return 0;
-
- mask &= ~BIT(bit - 1);
- if (mask)
- return 2;
-
- return 1;
-}
-
static bool
-ath10k_default_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
- const struct cfg80211_bitrate_mask *mask)
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
{
- u32 legacy = 0x00ff;
- u8 ht = 0xff, i;
- u16 vht = 0x3ff;
- u16 nrf = ar->num_rf_chains;
-
- if (ar->cfg_tx_chainmask)
- nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- legacy = 0x00fff;
- vht = 0;
- break;
- case IEEE80211_BAND_5GHZ:
- break;
- default:
- return false;
- }
+ int num_rates = 0;
+ int i;
- if (mask->control[band].legacy != legacy)
- return false;
+ num_rates += hweight32(mask->control[band].legacy);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].ht_mcs[i] != ht)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].vht_mcs[i] != vht)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
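+
+ /* E.g. a fixed-rate request like "iw dev wlan0 set bitrates legacy-2.4 6"
+ * (which, assuming nl80211's usual parsing, zeroes the unspecified HT/VHT
+ * parts of the mask) leaves a single bit set overall, so the popcount
+ * below comes out as exactly 1.
+ */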
- return true;
+ return num_rates == 1;
}
static bool
-ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_nss)
-{
- int ht_nss = 0, vht_nss = 0, i;
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
- /* check legacy */
- if (ath10k_check_single_mask(mask->control[band].legacy))
+ if (mask->control[band].legacy)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (mask->control[band].ht_mcs[i] == 0xff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
continue;
- else if (mask->control[band].ht_mcs[i] == 0x00)
- break;
-
- return false;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
}
- ht_nss = i;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if (mask->control[band].vht_mcs[i] == 0x03ff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
continue;
- else if (mask->control[band].vht_mcs[i] == 0x0000)
- break;
-
- return false;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
}
- vht_nss = i;
-
- if (ht_nss > 0 && vht_nss > 0)
+ if (ht_nss_mask != vht_nss_mask)
return false;
- if (ht_nss)
- *fixed_nss = ht_nss;
- else if (vht_nss)
- *fixed_nss = vht_nss;
- else
+ if (ht_nss_mask == 0)
return false;
- return true;
-}
-
-static bool
-ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- enum wmi_rate_preamble *preamble)
-{
- int legacy = 0, ht = 0, vht = 0, i;
-
- *preamble = WMI_RATE_PREAMBLE_OFDM;
-
- /* check legacy */
- legacy = ath10k_check_single_mask(mask->control[band].legacy);
- if (legacy > 1)
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
- if (ht > 1)
- return false;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
- if (vht > 1)
- return false;
-
- /* Currently we support only one fixed_rate */
- if ((legacy + ht + vht) != 1)
- return false;
-
- if (ht)
- *preamble = WMI_RATE_PREAMBLE_HT;
- else if (vht)
- *preamble = WMI_RATE_PREAMBLE_VHT;
+ *nss = fls(ht_nss_mask);
return true;
}
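
The BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask comparison above is a compact
contiguity test: the mask is accepted only if every spatial stream up to the
highest enabled one is set, which is what lets it collapse into a single nss
value. A minimal standalone sketch of the same check, using GCC builtins as
stand-ins for the kernel's fls()/BIT() helpers:

#include <assert.h>

/* fls(): 1-based index of the highest set bit, 0 for an empty mask. */
static int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* A mask contiguous from bit 0 satisfies (1 << fls(m)) - 1 == m. */
static int nss_mask_is_contiguous(unsigned int m)
{
	return ((1u << fls_u32(m)) - 1) == m;
}

int main(void)
{
	assert(nss_mask_is_contiguous(0x1));  /* stream 1 only -> nss 1 */
	assert(nss_mask_is_contiguous(0x3));  /* streams 1-2   -> nss 2 */
	assert(!nss_mask_is_contiguous(0x5)); /* streams 1 and 3, hole at 2 */
	return 0;
}

(A zero mask would also pass this test, but the code above has already
rejected ht_nss_mask == 0 before reaching the contiguity check.)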
-static bool
-ath10k_bitrate_mask_rate(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss)
{
- u8 rate = 0, pream = 0, nss = 0, i;
- enum wmi_rate_preamble preamble;
-
- /* Check if single rate correct */
- if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
- return false;
-
- pream = preamble;
-
- switch (preamble) {
- case WMI_RATE_PREAMBLE_CCK:
- case WMI_RATE_PREAMBLE_OFDM:
- i = ffs(mask->control[band].legacy) - 1;
-
- if (band == IEEE80211_BAND_2GHZ && i < 4)
- pream = WMI_RATE_PREAMBLE_CCK;
-
- if (band == IEEE80211_BAND_5GHZ)
- i += 4;
-
- if (i >= ARRAY_SIZE(cck_ofdm_rate))
- return false;
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
- rate = cck_ofdm_rate[i];
- break;
- case WMI_RATE_PREAMBLE_HT:
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask->control[band].ht_mcs[i])
- break;
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
- if (i == IEEE80211_HT_MCS_MASK_LEN)
- return false;
+ hw_rate = sband->bitrates[rate_idx].hw_value;
+ bitrate = sband->bitrates[rate_idx].bitrate;
- rate = ffs(mask->control[band].ht_mcs[i]) - 1;
- nss = i;
- break;
- case WMI_RATE_PREAMBLE_VHT:
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask->control[band].vht_mcs[i])
- break;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
- if (i == NL80211_VHT_NSS_MAX)
- return false;
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
- rate = ffs(mask->control[band].vht_mcs[i]) - 1;
- nss = i;
- break;
+ return 0;
}
- *fixed_nss = nss + 1;
- nss <<= 4;
- pream <<= 6;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
- pream, nss, rate);
-
- *fixed_rate = pream | nss | rate;
+ return 0;
+ }
+ }
- return true;
-}
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
-static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
-{
- /* First check full NSS mask, if we can simply limit NSS */
- if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
- return true;
+ return 0;
+ }
+ }
- /* Next Check single rate is set */
- return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+ return -EINVAL;
}
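
The *rate value built in each branch packs three fields into one byte:
preamble in bits 7:6, nss - 1 in bits 5:4 and the rate/MCS index in bits 3:0.
A hedged worked example of the encoding, assuming the conventional
wmi_rate_preamble numbering (OFDM=0, CCK=1, HT=2, VHT=3):

#include <stdio.h>

enum { PREAM_OFDM = 0, PREAM_CCK = 1, PREAM_HT = 2, PREAM_VHT = 3 };

/* Pack a fixed-rate byte: [7:6] preamble, [5:4] nss - 1, [3:0] rate index. */
static unsigned char wmi_fixed_rate(unsigned int pream, unsigned int nss,
				    unsigned int rate)
{
	return (unsigned char)(pream << 6 | (nss - 1) << 4 | rate);
}

int main(void)
{
	/* HT MCS 7 on two spatial streams: 0x80 | 0x10 | 0x07 = 0x97 */
	printf("0x%02x\n", wmi_fixed_rate(PREAM_HT, 2, 7));
	return 0;
}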
-static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
- u8 fixed_rate,
- u8 fixed_nss,
- u8 force_sgi)
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+ u8 rate, u8 nss, u8 sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
- int ret = 0;
-
- mutex_lock(&ar->conf_mutex);
-
- if (arvif->fixed_rate == fixed_rate &&
- arvif->fixed_nss == fixed_nss &&
- arvif->force_sgi == force_sgi)
- goto exit;
+ int ret;
- if (fixed_rate == WMI_FIXED_RATE_NONE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ lockdep_assert_held(&ar->conf_mutex);
- if (force_sgi)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
vdev_param = ar->wmi.vdev_param->fixed_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_rate);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
if (ret) {
ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
- fixed_rate, ret);
- ret = -EINVAL;
- goto exit;
+ rate, ret);
+ return ret;
}
- arvif->fixed_rate = fixed_rate;
-
vdev_param = ar->wmi.vdev_param->nss;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_nss);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+ return ret;
+ }
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
if (ret) {
- ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
- fixed_nss, ret);
- ret = -EINVAL;
- goto exit;
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+ return ret;
}
- arvif->fixed_nss = fixed_nss;
+ return 0;
+}
- vdev_param = ar->wmi.vdev_param->sgi;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- force_sgi);
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
- if (ret) {
- ath10k_warn(ar, "failed to set sgi param %d: %d\n",
- force_sgi, ret);
- ret = -EINVAL;
- goto exit;
+ /* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
+ * to express all VHT MCS rate masks. Effectively only the following
+ * ranges can be used: none, 0-7, 0-8 and 0-9.
+ */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ return false;
+ }
}
- arvif->force_sgi = force_sgi;
+ return true;
+}
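
For reference, the three non-empty masks the switch accepts expand to 0x00ff
(MCS 0-7), 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9); a one-liner confirming the
BIT(n) - 1 arithmetic:

#include <stdio.h>

int main(void)
{
	/* BIT(n) - 1 sets bits 0..n-1, i.e. MCS 0..n-1 enabled. */
	printf("0x%04x 0x%04x 0x%04x\n",
	       (1 << 8) - 1, (1 << 9) - 1, (1 << 10) - 1);
	return 0;
}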
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+
+ if (arsta->arvif != arvif)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
-static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const struct cfg80211_bitrate_mask *mask)
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
- u8 fixed_rate = WMI_FIXED_RATE_NONE;
- u8 fixed_nss = ar->num_rf_chains;
- u8 force_sgi;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u8 rate;
+ u8 nss;
+ u8 sgi;
+ int single_nss;
+ int ret;
- if (ar->cfg_tx_chainmask)
- fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ if (ath10k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
- force_sgi = mask->control[band].gi;
- if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
- if (!ath10k_default_bitrate_mask(ar, band, mask)) {
- if (!ath10k_get_fixed_rate_nss(ar, mask, band,
- &fixed_rate,
- &fixed_nss))
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min(ar->num_rf_chains,
+ max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
}
- if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
- return -EINVAL;
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
}
- return ath10k_set_fixed_rate_param(arvif, fixed_rate,
- fixed_nss, force_sgi);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
}
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
return -EINVAL;
}
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def *def = NULL;
+
+ /* Both locks are required because ar->rx_channel is modified. This
+ * allows readers to hold either lock.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+ * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+ * ppdu on Rx may reduce performance on low-end systems. It should be
+ * possible to build tables/hashmaps to speed the lookup up (be wary of
+ * CPU data cache line sizes, though), but to keep the initial
+ * implementation simple and less intrusive, fall back to the slow lookup
+ * only for multi-channel cases. Single-channel cases keep using the old
+ * channel derivation, so performance should not be affected much.
+ */
+ rcu_read_lock();
+ if (ath10k_mac_num_chanctxs(ar) == 1) {
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &def);
+ ar->rx_channel = def->chan;
+ } else {
+ ar->rx_channel = NULL;
+ }
+ rcu_read_unlock();
+}
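
The dual-lock rule in the comment above is the classic writer-takes-both
pattern: because the writer holds conf_mutex and data_lock at the same time,
a reader holding either one of them observes a stable rx_channel. A minimal
userspace sketch of the idea; pthread mutexes stand in for the kernel
primitives:

#include <pthread.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static void *rx_channel;

/* Writer: takes both locks, so holding either one suffices to read. */
static void update_rx_channel(void *chan)
{
	pthread_mutex_lock(&conf_mutex);
	pthread_mutex_lock(&data_lock);
	rx_channel = chan;
	pthread_mutex_unlock(&data_lock);
	pthread_mutex_unlock(&conf_mutex);
}

/* Reader: either lock alone stabilizes rx_channel; data_lock shown here. */
static void *read_rx_channel(void)
{
	void *chan;

	pthread_mutex_lock(&data_lock);
	chan = rx_channel;
	pthread_mutex_unlock(&data_lock);
	return chan;
}

int main(void)
{
	int chan = 36;

	update_rx_channel(&chan);
	return read_rx_channel() == &chan ? 0 : 1;
}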
+
+static void
+ath10k_mac_chan_ctx_init(struct ath10k *ar,
+ struct ath10k_chanctx *arctx,
+ struct ieee80211_chanctx_conf *conf)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ memset(arctx, 0, sizeof(*arctx));
+
+ arctx->conf = *conf;
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_chan_ctx_init(ar, arctx, ctx);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx change freq %hu->%hu width %d->%d ptr %p changed %x\n",
+ arctx->conf.def.chan->center_freq,
+ ctx->def.chan->center_freq,
+ arctx->conf.def.width, ctx->def.width,
+ ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ spin_lock_bh(&ar->data_lock);
+ arctx->conf = *ctx;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+
+ /* FIXME: How to configure Rx chains properly? */
+
+ /* No other actions are actually necessary. Firmware maintains channel
+ * definitions per vdev internally and there's no host-side channel
+ * context abstraction to configure, e.g. channel width.
+ */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath10k_vdev_start(arvif, &arctx->conf.def);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arctx->conf.def.chan->center_freq, ret);
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ arvif->is_up = true;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_stop:
+ ath10k_vdev_stop(arvif);
+ arvif->is_started = false;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ WARN_ON(!arvif->is_up);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ }
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ struct ath10k_chanctx *arctx_new, *arctx_old;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < n_vifs; i++) {
+ arvif = ath10k_vif_to_arvif(vifs[i].vif);
+ arctx_new = (void *)vifs[i].new_ctx->drv_priv;
+ arctx_old = (void *)vifs[i].old_ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d ptr %p->%p\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width,
+ arctx_old, arctx_new);
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+ ath10k_mac_chan_ctx_init(ar, arctx_new,
+ vifs[i].new_ctx);
+ }
+
+ arctx_new->conf = *vifs[i].new_ctx;
+
+ /* FIXME: ath10k_mac_chan_reconfigure() uses current, i.e. not
+ * yet updated chanctx_conf pointer.
+ */
+ arctx_old->conf = *vifs[i].new_ctx;
+ }
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* FIXME: Reconfigure only affected vifs */
+ ath10k_mac_chan_reconfigure(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
.get_antenna = ath10k_get_antenna,
.reconfig_complete = ath10k_reconfig_complete,
.get_survey = ath10k_get_survey,
- .set_bitrate_mask = ath10k_set_bitrate_mask,
+ .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
.get_et_strings = ath10k_debug_get_et_strings,
+ .add_chanctx = ath10k_mac_op_add_chanctx,
+ .remove_chanctx = ath10k_mac_op_remove_chanctx,
+ .change_chanctx = ath10k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ath10k_suspend,
- .resume = ath10k_resume,
+ .suspend = ath10k_wow_op_suspend,
+ .resume = ath10k_wow_op_resume,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
};
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .flags = (_flags), \
- .hw_value = (_rateid), \
-}
-
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.hw_value = (_channel), \
CHAN5G(132, 5660, 0),
CHAN5G(136, 5680, 0),
CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
CHAN5G(149, 5745, 0),
CHAN5G(153, 5765, 0),
CHAN5G(157, 5785, 0),
CHAN5G(165, 5825, 0),
};
-/* Note: Be careful if you re-order these. There is code which depends on this
- * ordering.
- */
-static struct ieee80211_rate ath10k_rates[] = {
- /* CCK */
- RATETAB_ENT(10, 0x82, 0),
- RATETAB_ENT(20, 0x84, 0),
- RATETAB_ENT(55, 0x8b, 0),
- RATETAB_ENT(110, 0x96, 0),
- /* OFDM */
- RATETAB_ENT(60, 0x0c, 0),
- RATETAB_ENT(90, 0x12, 0),
- RATETAB_ENT(120, 0x18, 0),
- RATETAB_ENT(180, 0x24, 0),
- RATETAB_ENT(240, 0x30, 0),
- RATETAB_ENT(360, 0x48, 0),
- RATETAB_ENT(480, 0x60, 0),
- RATETAB_ENT(540, 0x6c, 0),
-};
-
-#define ath10k_a_rates (ath10k_rates + 4)
-#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
-#define ath10k_g_rates (ath10k_rates + 0)
-#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
-
struct ath10k *ath10k_mac_create(size_t priv_size)
{
struct ieee80211_hw *hw;
},
};
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
+ u32 val;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
mcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_rf_chains)
ht_cap = ath10k_get_ht_cap(ar);
vht_cap = ath10k_create_vht_cap(ar);
+ BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+ ARRAY_SIZE(ath10k_5ghz_channels)) !=
+ ATH10K_NUM_CHANS);
+
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SW_CRYPTO_CONTROL;
+ IEEE80211_HW_SW_CRYPTO_CONTROL |
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK |
+ IEEE80211_HW_WANT_MONITOR_VIF |
+ IEEE80211_HW_CHANCTX_STA_CSA |
+ IEEE80211_HW_QUEUE_CONTROL;
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->chanctx_data_size = sizeof(struct ath10k_chanctx);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
}
+ if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+ ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ret = ath10k_wow_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init wow: %d\n", ret);
+ goto err_free;
+ }
+
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
*/
- ar->hw->queues = 4;
+ ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+ /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+ * something that vdev_ids can't reach so that we don't stop the queue
+ * accidentally.
+ */
+ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
switch (ar->wmi.op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_if_comb);
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_tlv_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_if_comb);
+ }
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
#define WEP_KEYID_SHIFT 6
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
--- /dev/null
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!noa->num_descriptors &&
+ !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
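
The length computation works out to a fixed 11-byte IE header plus one
descriptor record per NoA entry. A small sketch of the same arithmetic; the
13-byte descriptor size is an assumption based on the packed
ieee80211_p2p_noa_desc layout (u8 count plus three __le32 fields):

#include <stdio.h>
#include <stddef.h>

#define NOA_DESC_SIZE 13 /* assumed packed size of ieee80211_p2p_noa_desc */

static size_t p2p_noa_ie_len(unsigned int num_descriptors)
{
	size_t len = 0;

	len += 1 + 1 + 4; /* EID + len + OUI(3) + OUI type */
	len += 1 + 2;     /* attribute id + 2-byte attribute length */
	len += 1 + 1;     /* index + oppps/ctwindow */
	len += num_descriptors * NOA_DESC_SIZE;

	return len;       /* i.e. 11 + 13 * n */
}

int main(void)
{
	printf("%zu %zu\n", p2p_noa_ie_len(1), p2p_noa_ie_len(2)); /* 24 37 */
	return 0;
}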
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath10k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath10k_p2p_noa_ie_fill(ie, len, noa);
+ ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_p2p_noa_update_vdev_iter,
+ &arg);
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa);
+
+#endif
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
- .dest_nentries = 32,
+ .dest_nentries = 128,
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
+ .nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
return -ETIMEDOUT;
}
+/* The rule is that the host is forbidden from accessing device registers
+ * while the device is asleep. Currently the ath10k_pci_wake() and
+ * ath10k_pci_sleep() calls aren't balanced and the device is kept awake all
+ * the time. This is intended as a simpler solution to the following problems:
+ *
+ * * the device can enter sleep during s2ram without the host knowing,
+ *
+ * * irq handlers access registers, which is a problem if another device
+ * asserts a shared irq line while ath10k is between hif_power_down() and
+ * hif_power_up().
+ *
+ * FIXME: If power consumption is a concern (and there are *real* gains) then a
+ * refcounted wake/sleep needs to be implemented.
+ */
+
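
The FIXME above points at the eventual shape of the fix: reference-counted
wake/sleep, so only the first user actually wakes the device and only the
last one lets it doze again. A hedged illustration of that pattern, outside
the driver; the names and pthread locking are placeholders, not ath10k API:

#include <pthread.h>

struct wake_ref {
	pthread_mutex_t lock;
	unsigned int count;
};

static void device_wake(struct wake_ref *w)
{
	pthread_mutex_lock(&w->lock);
	if (w->count++ == 0) {
		/* first user: assert the wake register and poll until awake */
	}
	pthread_mutex_unlock(&w->lock);
}

static void device_sleep(struct wake_ref *w)
{
	pthread_mutex_lock(&w->lock);
	if (--w->count == 0) {
		/* last user: allow the device to doze again */
	}
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	struct wake_ref w = { PTHREAD_MUTEX_INITIALIZER, 0 };

	device_wake(&w);  /* wakes the hardware */
	device_wake(&w);  /* nested user, no extra wake needed */
	device_sleep(&w);
	device_sleep(&w); /* refcount hits zero, hardware may doze */
	return 0;
}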
static int ath10k_pci_wake(struct ath10k *ar)
{
ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
- case QCA6174_HW_2_1_CHIP_ID_REV:
- case QCA6174_HW_2_2_CHIP_ID_REV:
- return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
/* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here.
*/
-
- ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
-#define ATH10K_PCI_PM_CONTROL 0x44
-
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct pci_dev *pdev = ar_pci->pdev;
- u32 val;
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0x3) {
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- (val & 0xffffff00) | 0x03);
- }
+ ath10k_pci_sleep(ar);
return 0;
}
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
+ int ret;
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0) {
- pci_restore_state(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- val & 0xffffff00);
- /*
- * Suspend/Resume resets the PCI configuration space,
- * so we have to re-disable the RETRY_TIMEOUT register (0x41)
- * to keep PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
-
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake device up on resume: %d\n", ret);
+ return ret;
}
- return 0;
+ /* Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+ * from interfering with C3 CPU state. pci_restore_state won't help
+ * here since it only restores the first 64 bytes of the PCI config header.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ return ret;
}
#endif
{
struct ath10k *ar = arg;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
+ return IRQ_NONE;
+ }
if (ar_pci->num_msi_intrs == 0) {
if (!ath10k_pci_irq_pending(ar))
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
+ if (pdev->subsystem_vendor || pdev->subsystem_device)
+ scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
+ "%04x:%04x:%04x:%04x",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
spin_lock_init(&ar_pci->ce_lock);
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
- goto err_sleep;
+ goto err_free_irq;
}
- ath10k_pci_sleep(ar);
-
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+/* No idea what this flag means. It seems to be always set in rate. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+ RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+ RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+ RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+ RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+ RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+ RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+ RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+ RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+ RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+ RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+ RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+ RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+ RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+ RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+ RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
+
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
#include "debug.h"
#include "wmi-ops.h"
-static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
- enum wmi_vdev_type type)
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
- struct ath10k_vif *arvif;
- int count = 0;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->is_started)
- continue;
-
- if (!arvif->is_up)
- continue;
-
- if (arvif->vdev_type != type)
- continue;
-
- count++;
- }
- return count;
-}
-
-static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+ *state = ATH10K_THERMAL_THROTTLE_MAX;
return 0;
}
-static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
struct ath10k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex);
- *state = ar->thermal.duty_cycle;
+ *state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex);
return 0;
}
-static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long duty_cycle)
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
{
struct ath10k *ar = cdev->devdata;
- u32 period, duration, enabled;
- int num_bss, ret = 0;
- mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH10K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
- ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
- duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
- ret = -EINVAL;
- goto out;
- }
- /* TODO: Right now, thermal mitigation is handled only for single/multi
- * vif AP mode. Since quiet param is not validated in STA mode, it needs
- * to be investigated further to handle multi STA and multi-vif (AP+STA)
- * mode properly.
- */
- num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
- if (!num_bss) {
- ath10k_warn(ar, "no active AP interfaces\n");
- ret = -ENETDOWN;
- goto out;
- }
- period = max(ATH10K_QUIET_PERIOD_MIN,
- (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
- duration = (period * duty_cycle) / 100;
- enabled = duration ? 1 : 0;
-
- ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
- ATH10K_QUIET_START_OFFSET,
- enabled);
- if (ret) {
- ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
- period, duration, enabled, ret);
- goto out;
+ if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+ ath10k_warn(ar, "throttle state %ld exceeds the limit %d\n",
+ throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
}
- ar->thermal.duty_cycle = duty_cycle;
-out:
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.throttle_state = throttle_state;
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static struct thermal_cooling_device_ops ath10k_thermal_ops = {
- .get_max_state = ath10k_thermal_get_max_dutycycle,
- .get_cur_state = ath10k_thermal_get_cur_dutycycle,
- .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+ .get_max_state = ath10k_thermal_get_max_throttle_state,
+ .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath10k_thermal_set_cur_throttle_state,
};
static ssize_t ath10k_thermal_show_temp(struct device *dev,
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret, temperature;
+ unsigned long time_left;
mutex_lock(&ar->conf_mutex);
goto out;
}
- ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
- ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
- if (ret == 0) {
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
ath10k_warn(ar, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT;
goto out;
};
ATTRIBUTE_GROUPS(ath10k_hwmon);
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+ u32 period, duration, enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
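
The duty-cycle math here is plain percentage scaling: the radio is quieted
for throttle_state percent of each quiet period. A quick check of the
arithmetic at the default 100 ms period:

#include <stdio.h>

/* duration = period * throttle_state / 100, as in the code above */
static unsigned int quiet_duration(unsigned int period_ms,
				   unsigned int throttle_state)
{
	return period_ms * throttle_state / 100;
}

int main(void)
{
	/* 100 ms period at throttle state 30 -> 30 ms of quiet time */
	printf("%u\n", quiet_duration(100, 30));
	return 0;
}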
+
int ath10k_thermal_register(struct ath10k *ar)
{
struct thermal_cooling_device *cdev;
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
- ath10k_err(ar, "failed to create thermal symlink\n");
+ ath10k_err(ar, "failed to create cooling device symlink\n");
goto err_cooling_destroy;
}
ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
/* Do not register hwmon device when temperature reading is not
* supported by firmware
return 0;
err_remove_link:
- sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
err_cooling_destroy:
thermal_cooling_device_unregister(cdev);
return ret;
#define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
-#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
struct thermal_cooling_device *cdev;
struct completion wmi_sync;
/* protected by conf_mutex */
- u32 duty_cycle;
+ u32 throttle_state;
+ u32 quiet_period;
/* temperature value in degrees Celsius
* protected by data_lock
*/
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
#else
static inline int ath10k_thermal_register(struct ath10k *ar)
{
{
}
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
#endif
#endif /* _THERMAL_ */
#include "core.h"
#if !defined(_TRACE_H_)
-static inline u32 ath10k_frm_hdr_len(const void *buf)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
{
const struct ieee80211_hdr *hdr = buf;
- return ieee80211_hdrlen(hdr->frame_control);
+ /* In some rare cases (e.g. fcs error) the device reports a frame buffer
+ * shorter than what the frame header implies (e.g. len = 0). The buffer
+ * can still be accessed, so do a simple min() to guarantee the caller
+ * doesn't get a value greater than len.
+ */
+ return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
}
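
The clamp matters exactly for the fcs-error case the comment mentions: with
len = 0 the helper now returns 0 rather than the header length, so the trace
arrays never over-read the buffer. A trivial check of the guard, with a
stand-in header length since the real value comes from ieee80211_hdrlen():

#include <assert.h>
#include <stddef.h>

#define min_sz(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	size_t hdrlen = 24; /* stand-in: a typical data frame header length */

	assert(min_sz((size_t)0, hdrlen) == 0);    /* fcs-error case, len = 0 */
	assert(min_sz((size_t)512, hdrlen) == 24); /* normal case */
	return 0;
}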
#endif
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
-#define ATH10K_MSG_MAX 200
+#define ATH10K_MSG_MAX 400
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = ath10k_frm_hdr_len(data);
+ __entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ __dynamic_array(u8, payload, (len -
+ ath10k_frm_hdr_len(data, len)))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = len - ath10k_frm_hdr_len(data);
+ __entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload),
- data + ath10k_frm_hdr_len(data), __entry->len);
+ data + ath10k_frm_hdr_len(data, len), __entry->len);
),
TP_printk(
lockdep_assert_held(&htt->tx_lock);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+ tx_done->msdu_id, !!tx_done->discard,
+ !!tx_done->no_ack, !!tx_done->success);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
+ if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
+ int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg);
+ int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN]);
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
+ struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+ struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id,
+ const u8 *pattern,
+ const u8 *mask,
+ int pattern_len,
+ int pattern_offset);
+ struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id);
+ struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_tdls_state state);
+ struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan);
+ struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}
+static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_roam_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_wow_event)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP;
- skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_enable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+ pattern, mask, pattern_len,
+ pattern_offset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_del_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_update_fw_tdls_state)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_tdls_peer_update)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_adaptive_qcs)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
#endif
*/
#include "core.h"
#include "debug.h"
+#include "mac.h"
#include "hw.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
+#include "p2p.h"
/***************/
/* TLV helpers */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TLV_TAG_ARRAY_BYTE]
- = { .min_len = sizeof(u8) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_ARRAY_UINT32]
- = { .min_len = sizeof(u32) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
= { .min_len = sizeof(struct wmi_scan_event) },
[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+ [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+ [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+ = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+ [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
static int
{
const void **tb;
const struct wmi_tlv_bcn_tx_status_ev *ev;
+ struct ath10k_vif *arvif;
u32 vdev_id, tx_status;
int ret;
break;
}
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif && arvif->is_up && arvif->vif->csa_active)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
kfree(tb);
return 0;
}
return 0;
}
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_p2p_noa_ev *ev;
+ const struct wmi_p2p_noa_info *noa;
+ int ret, vdev_id;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+ noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+ vdev_id, noa->num_descriptors);
+
+ ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_tx_pause_ev *ev;
+ int ret, vdev_id;
+ u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ pause_id = __le32_to_cpu(ev->pause_id);
+ action = __le32_to_cpu(ev->action);
+ vdev_map = __le32_to_cpu(ev->vdev_map);
+ peer_id = __le32_to_cpu(ev->peer_id);
+ tid_map = __le32_to_cpu(ev->tid_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+ pause_id, action, vdev_map, peer_id, tid_map);
+
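+ /* vdev_map is a bitmask of affected vdevs; walk every set bit */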
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
/***********/
/* TLV ops */
/***********/
case WMI_TLV_DIAG_EVENTID:
ath10k_wmi_tlv_event_diag(ar, skb);
break;
+ case WMI_TLV_P2P_NOA_EVENTID:
+ ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+ break;
+ case WMI_TLV_TX_PAUSE_EVENTID:
+ ath10k_wmi_tlv_event_tx_pause(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
return 0;
}
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_roam_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+ arg->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_wow_event_info *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+ arg->flag = __le32_to_cpu(ev->flag);
+ arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+ arg->data_len = __le32_to_cpu(ev->data_len);
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
- cfg->num_offload_peers = __cpu_to_le32(3);
- cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+ cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
} else {
cfg->num_offload_peers = __cpu_to_le32(0);
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
- cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
- cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
cfg->num_mcast_groups = __cpu_to_le32(0);
cfg->num_mcast_table_elems = __cpu_to_le32(0);
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
cfg->max_frag_entries = __cpu_to_le32(2);
- cfg->num_tdls_vdevs = __cpu_to_le32(1);
+ cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
cfg->num_multicast_filter_entries = __cpu_to_le32(5);
- cfg->num_wow_filters = __cpu_to_le32(0x16);
+ cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
cfg->num_keep_alive_pattern = __cpu_to_le32(6);
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cmd = (void *)tlv->value;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- cmd->burst_duration_ms = __cpu_to_le32(0);
+ cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
cmd->num_channels = __cpu_to_le32(arg->n_channels);
cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
void *ptr;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_tlv_peer_create_cmd *cmd;
struct wmi_tlv *tlv;
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+ cmd->peer_type = __cpu_to_le32(peer_type);
ether_addr_copy(cmd->peer_addr.addr, peer_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
if (!mac)
return ERR_PTR(-EINVAL);
- skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_tdls_set_state_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+ /* Options come from enum wmi_tlv_tdls_options;
+ * none of them are enabled for now.
+ */
+ u32 options = 0;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
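+ /* the remaining thresholds and timeouts below are fixed reference
+  * defaults: intervals in ms, tx thresholds in frames, rssi in dB
+  */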
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
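+ /* Pack the WMM U-APSD per-AC flags and the max service period into
+  * the firmware's peer_qos word.
+  */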
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+ return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capab *peer_cap;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ void *ptr;
+ int len;
+ int i;
+
+ /* each channel in the array is individually TLV-wrapped, so account
+  * for one TLV header per channel as well
+  */
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*peer_cap) +
+ sizeof(*tlv) + cap->peer_chan_len * (sizeof(*tlv) + sizeof(*chan));
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+ tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+ peer_cap = (void *)tlv->value;
+ peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*peer_cap);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(cap->peer_chan_len *
+ (sizeof(*tlv) + sizeof(*chan)));
+
+ ptr += sizeof(*tlv);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*chan));
+ chan = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*chan);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->enable = __cpu_to_le32(1);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_tlv_wow_add_del_event_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
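+ /* one wakeup event per command: is_add selects add/remove and
+  * event_bitmap carries exactly one bit
+  */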
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->is_add = __cpu_to_le32(enable);
+ cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_host_wakeup_ind *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id, const u8 *pattern,
+ const u8 *bitmask, int pattern_len,
+ int pattern_offset)
+{
+ struct wmi_tlv_wow_add_pattern_cmd *cmd;
+ struct wmi_tlv_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* cmd */
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+ tlv->len = __cpu_to_le16(sizeof(*bitmap));
+ bitmap = (void *)tlv->value;
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+ bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = __cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+ bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(u32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id)
+{
+ struct wmi_tlv_wow_del_pattern_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct wmi_tlv_adaptive_qcs *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+ .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+ .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+ .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+ .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+ .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+ .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+ .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
};
/************/
__le32 num_chan_stats;
} __packed;
+struct wmi_tlv_p2p_noa_ev {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+ __le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+struct wmi_tlv_wow_bitmap_pattern {
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+ WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+} __packed;
+
+enum {
+ WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB 5
+
+struct wmi_tdls_peer_capab {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+ __le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ * Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ * Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+ WMI_TLV_TX_PAUSE_ID_MCC = 1,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+ WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+ WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+ WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+ WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+ WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+ WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+ WMI_TLV_TX_PAUSE_ACTION_STOP,
+ WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+ __le32 pause_id;
+ __le32 action;
+ __le32 vdev_map;
+ __le32 peer_id;
+ __le32 tid_map;
+} __packed;
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "p2p.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
- WMI_UNIFIED_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
return band;
}
-static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
-{
- u8 rate_idx = 0;
-
- /* rate in Kbps */
- switch (rate) {
- case 1000:
- rate_idx = 0;
- break;
- case 2000:
- rate_idx = 1;
- break;
- case 5500:
- rate_idx = 2;
- break;
- case 11000:
- rate_idx = 3;
- break;
- case 6000:
- rate_idx = 4;
- break;
- case 9000:
- rate_idx = 5;
- break;
- case 12000:
- rate_idx = 6;
- break;
- case 18000:
- rate_idx = 7;
- break;
- case 24000:
- rate_idx = 8;
- break;
- case 36000:
- rate_idx = 9;
- break;
- case 48000:
- rate_idx = 10;
- break;
- case 54000:
- rate_idx = 11;
- break;
- default:
- break;
- }
-
- if (band == IEEE80211_BAND_5GHZ) {
- if (rate_idx > 3)
- /* Omit CCK rates */
- rate_idx -= 4;
- else
- rate_idx = 0;
- }
-
- return rate_idx;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
struct wmi_mgmt_rx_ev_arg arg = {};
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
+ struct ieee80211_supported_band *sband;
u32 rx_status;
u32 channel;
u32 phy_mode;
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ sband = &ar->mac.sbands[status->band];
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
- status->rate_idx = get_rate_idx(rate, status->band);
+ status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
}
}
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath10k_mac_handle_beacon(ar, skb);
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
tim->bitmap_ctrl, pvm_len);
}
-static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
- const struct wmi_p2p_noa_info *noa)
-{
- struct ieee80211_p2p_noa_attr *noa_attr;
- u8 ctwindow_oppps = noa->ctwindow_oppps;
- u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
- bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
- __le16 *noa_attr_len;
- u16 attr_len;
- u8 noa_descriptors = noa->num_descriptors;
- int i;
-
- /* P2P IE */
- data[0] = WLAN_EID_VENDOR_SPECIFIC;
- data[1] = len - 2;
- data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
- data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
- data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
- data[5] = WLAN_OUI_TYPE_WFA_P2P;
-
- /* NOA ATTR */
- data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
- noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
- noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
-
- noa_attr->index = noa->index;
- noa_attr->oppps_ctwindow = ctwindow;
- if (oppps)
- noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
-
- for (i = 0; i < noa_descriptors; i++) {
- noa_attr->desc[i].count =
- __le32_to_cpu(noa->descriptors[i].type_count);
- noa_attr->desc[i].duration = noa->descriptors[i].duration;
- noa_attr->desc[i].interval = noa->descriptors[i].interval;
- noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
- }
-
- attr_len = 2; /* index + oppps_ctwindow */
- attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
- *noa_attr_len = __cpu_to_le16(attr_len);
-}
-
-static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
-{
- u32 len = 0;
- u8 noa_descriptors = noa->num_descriptors;
- u8 opp_ps_info = noa->ctwindow_oppps;
- bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
-
- if (!noa_descriptors && !opps_enabled)
- return len;
-
- len += 1 + 1 + 4; /* EID + len + OUI */
- len += 1 + 2; /* noa attr + attr len */
- len += 1 + 1; /* index + oppps_ctwindow */
- len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-
- return len;
-}
-
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
- u8 *new_data, *old_data = arvif->u.ap.noa_data;
- u32 new_len;
-
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
- if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
- new_len = ath10k_p2p_calc_noa_ie_len(noa);
- if (!new_len)
- goto cleanup;
- new_data = kmalloc(new_len, GFP_ATOMIC);
- if (!new_data)
- goto cleanup;
-
- ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
-
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = new_data;
- arvif->u.ap.noa_len = new_len;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
- }
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+ ath10k_p2p_noa_update(arvif, noa);
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
- return;
-cleanup:
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = NULL;
- arvif->u.ap.noa_len = 0;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
+ return;
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
struct pulse_event pe;
u64 tsf64;
u8 rssi, width;
if (!ar->dfs_detector)
return;
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
/* report event to DFS pattern detector */
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
rssi = 0;
pe.ts = tsf64;
- pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.freq = ch->center_freq;
pe.width = width;
pe.rssi = rssi;
-
+ pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
pe.freq, pe.width, pe.rssi, pe.ts);
return;
}
+radar_detected:
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
ATH10K_DFS_STAT_INC(ar, radar_detected);
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ struct wmi_roam_ev_arg arg = {};
+ int ret;
+ u32 vdev_id;
+ u32 reason;
+ s32 rssi;
+
+ ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+ return;
+ }
+
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+ reason = __le32_to_cpu(arg.reason);
+ rssi = __le32_to_cpu(arg.rssi);
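+ /* firmware reports SNR; add the reference noise floor to get dBm */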
+ rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ vdev_id, reason, rssi);
+
+ if (reason >= WMI_ROAM_REASON_MAX)
+ ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+
+ switch (reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath10k_mac_handle_beacon_miss(ar, vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+ break;
+ }
}
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+ struct wmi_wow_ev_arg ev = {};
+ int ret;
+
+ complete(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+ wow_reason(ev.wake_reason));
}
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ struct wmi_roam_ev *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+
+ return 0;
+}
+
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
features = WMI_10_2_RX_BATCH_MODE;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ features |= WMI_10_2_COEX_GPIO;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
const char *cmdname;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_1_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_2_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
int ath10k_wmi_attach(struct ath10k *ar)
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_SERVICE_MDNS_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_ATF,
+ WMI_SERVICE_COEX_GPIO,
/* keep last */
WMI_SERVICE_MAX,
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10X_SERVICE_ATF,
+ WMI_10X_SERVICE_COEX_GPIO,
};
enum wmi_main_service {
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ATF);
+ SVCSTR(WMI_SERVICE_COEX_GPIO);
default:
return NULL;
}
WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
u32 gpio_output_cmdid;
u32 pdev_get_temperature_cmdid;
u32 vdev_set_wmm_params_cmdid;
+ u32 tdls_set_state_cmdid;
+ u32 tdls_peer_update_cmdid;
+ u32 adaptive_qcs_cmdid;
};
/*
enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
+ WMI_10_2_COEX_GPIO = BIT(3),
};
struct wmi_resource_config_10_2 {
u32 max_scan_time;
u32 probe_delay;
u32 scan_ctrl_flags;
+ u32 burst_duration_ms;
u32 ie_len;
u32 n_channels;
struct wmi_mac_addr peer_macaddr;
} __packed;
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
struct wmi_peer_delete_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
-
-/* FIXME: empirically extrapolated */
-#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
__le32 config_valid;
} __packed;
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+} __packed;
+
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
const u8 *mac_addr;
};
+struct wmi_roam_ev_arg {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+};
+
struct wmi_pdev_temperature_event {
/* temperature value in Celsius degrees */
__le32 temperature;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+
+enum wmi_tdls_state {
+ WMI_TDLS_DISABLE,
+ WMI_TDLS_ENABLE_PASSIVE,
+ WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+ WMI_TDLS_PEER_STATE_PEERING,
+ WMI_TDLS_PEER_STATE_CONNECTED,
+ WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+ u32 vdev_id;
+ enum wmi_tdls_peer_state peer_state;
+ u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+ u8 peer_uapsd_queues;
+ u8 peer_max_sp;
+ u32 buff_sta_support;
+ u32 off_chan_support;
+ u32 peer_curr_operclass;
+ u32 self_curr_operclass;
+ u32 peer_chan_len;
+ u32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ u32 is_peer_responder;
+ u32 pref_offchan_num;
+ u32 pref_offchan_bw;
+};
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
--- /dev/null
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath10k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ /* fall through */
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ int j;
+
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+
+ /* convert cfg80211's per-byte mask bits into the firmware's
+  * byte-wide bitmask: every masked byte becomes 0xff
+  */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+
+ ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ patterns[i].pattern,
+ bitmask,
+ patterns[i].pattern_len,
+ patterns[i].pkt_offset);
+ if (ret) {
+ ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
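+ /* at least one pattern is installed, so arm the pattern-match wakeup */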
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
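+ /* firmware acknowledges the suspend asynchronously, so re-arm the
+  * completion before issuing the command
+  */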
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath10k_wow_wakeup(ar);
+
+cleanup:
+ ath10k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
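+ /* mac80211 interprets 1 as "wowlan not enabled, reconfigure on resume" */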
+ return ret ? 1 : 0;
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_wakeup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
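+ /* a non-zero return asks mac80211 to fully restart the hardware */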
+ return ret ? 1 : 0;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ return 0;
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+ return -EINVAL;
+
+ ar->wow.wowlan_support = ath10k_wowlan_support;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
/* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50
-#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
/* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
-#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
- ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
- ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
- ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
- ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
- ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
- ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
- ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
+ ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
+ ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
+ ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
+ ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
+ ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+ ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
+ ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
};
static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
-#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
+/* FCC radar types as updated on August 14, 2014.
+ * Type 1 PRI values are randomly selected within the range of 518 and 3066.
+ * Dividing the range into 3 groups is good enough for both radar detection
+ * and avoiding false detections, based on practical test results collected
+ * for more than a year.
+ */
static const struct radar_detector_specs fcc_radar_ref_types[] = {
- FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
- FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
- FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
- FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
+ FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
+ FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
+ FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
+ FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
+ FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
+ FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
+ FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
+ FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
};
static const struct radar_types fcc_radar_types = {
.radar_types = fcc_radar_ref_types,
};
-#define JP_PATTERN FCC_PATTERN
+#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
+}
static const struct radar_detector_specs jp_radar_ref_types[] = {
- JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
- JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
- JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
- JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
- JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
- JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
- JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
};
static const struct radar_types jp_radar_types = {
* @freq: channel frequency in MHz
* @width: pulse duration in us
* @rssi: rssi of radar event
+ * @chirp: chirp detected in pulse
*/
struct pulse_event {
u64 ts;
u16 freq;
u8 width;
u8 rssi;
+ bool chirp;
};
/**
* @ppb: pulses per bursts for this type
* @ppb_thresh: number of pulses required to trigger detection
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ * @chirp: chirp required for the radar pattern
*/
struct radar_detector_specs {
u8 type_id;
u8 ppb;
u8 ppb_thresh;
u8 max_pri_tolerance;
+ bool chirp;
};
/**
if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */
return NULL;
+ /* if the spec requires a chirp, drop pulses not detected as chirping */
+ if (rs->chirp && rs->chirp != event->chirp)
+ return NULL;
+
de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);