# bcmdhd
# 1. WL_IFACE_COMB_NUM_CHANNELS must be defined when building for Android 4.4 with
#    kernel 3.0~3.4; otherwise remove it (see the commented-out DHDCFLAGS line in the
#    CFG80211 block below).
-MODULE_NAME = bcmdhd
-#CONFIG_RKWIFI = m
-CONFIG_CFG80211 = y
-CONFIG_BCMDHD_OOB = y
+CONFIG_BCMDHD := y
CONFIG_BCMDHD_SDIO := y
#CONFIG_BCMDHD_PCIE := y
+CONFIG_BCMDHD_OOB := y
DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER -DSDTEST \
-DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
- -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT -DDHDTCPACK_SUPPRESS \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT \
-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \
- -DSET_RANDOM_MAC_SOFTAP \
+ -DSWTXGLOM \
-DENABLE_INSMOD_NO_FW_LOAD \
-Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \
-Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include
ifneq ($(CONFIG_BCMDHD_PCIE),)
DHDCFLAGS += \
- -DPCIE_FULL_DONGLE -DBCMPCIE -DSHOW_LOGTRACE -DDPCIE_TX_DEFERRAL \
- -DCUSTOM_DPC_PRIO_SETTING=-1
+ -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1
DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
dhd_msgbuf.o
endif
-$(MODULE_NAME)-y += $(DHDOFILES)
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
#ifeq ($(CONFIG_MACH_ODROID_4210),y)
DHDOFILES += dhd_gpio.o
-DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT -DGET_CUSTOM_MAC_ENABLE
+DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
#DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
#endif
-#DHDCFLAGS += -DBAND_AG
+ifeq ($(CONFIG_BCMDHD_AG),y)
+DHDCFLAGS += -DBAND_AG
+endif
ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
# add dhd_static_buf to kernel image build
-#obj-y += dhd_static_buf.o
+#DHDOFILES += dhd_static_buf.o
DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
endif
ifneq ($(CONFIG_WIRELESS_EXT),)
-bcmdhd-objs += wl_iw.o
+DHDOFILES += wl_iw.o
DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
endif
ifneq ($(CONFIG_CFG80211),)
-bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o
-bcmdhd-objs += dhd_cfg80211.o dhd_cfg_vendor.o
+DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o
+DHDOFILES += dhd_cfg80211.o dhd_cfg_vendor.o
DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
-DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+#DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
else
DHDCFLAGS += -DBUILD_IN_KERNEL
endif
-
-$(MODULE_NAME)-y += rkversion.o
-
-obj-$(CONFIG_RKWIFI) += $(MODULE_NAME).o
-
-KERNEL_DIR = /home/duke/jb_4.2/kernel
-
-all:
- make -j4 -C $(KERNEL_DIR) M=`pwd` modules
- mv wlan.ko rkwifi.oob.ko
-clean:
- rm -fr *.mod.c *.mod *.o .*.cmd *.ko *~
- rm .tmp_versions -fr ; rm Module.symvers -fr
- rm -fr Module.markers ; rm -fr modules.order
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: aiutils.c 607900 2015-12-22 13:38:53Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
+#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
+#define GCI_DMP() (0)
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)
{
uint32 ent;
uint inv = 0, nom = 0;
+ uint32 size = 0;
while (TRUE) {
ent = R_REG(si_osh(sih), *eromptr);
if ((ent & mask) == match)
break;
+		/* escape condition: stop scanning once we have walked past the maximum EROM size (corrupt or unterminated EROM) */
+ size += sizeof(*eromptr);
+ if (size >= ER_SZ_MAX) {
+ SI_ERROR(("Failed to find end of EROM marker\n"));
+ break;
+ }
+
nom++;
}
}
if (i == 0)
cores_info->wrapba[idx] = addrl;
+ else if (i == 1)
+ cores_info->wrapba2[idx] = addrl;
}
/* And finally slave wrappers */
uint fwp = (nsp == 1) ? 0 : 1;
asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
&sizel, &sizeh);
+
+ /* cache APB bridge wrapper address for set/clear timeout */
+ if ((mfg == MFGID_ARM) && (cid == APB_BRIDGE_ID)) {
+ ASSERT(sii->num_br < SI_MAXBR);
+ sii->br_wrapba[sii->num_br++] = addrl;
+ }
if (asd == 0) {
SI_ERROR(("Missing descriptor for SW %d\n", i));
goto error;
}
if ((nmw == 0) && (i == 0))
cores_info->wrapba[idx] = addrl;
+ else if ((nmw == 0) && (i == 1))
+ cores_info->wrapba2[idx] = addrl;
}
/* This function changes the logical "focus" to the indicated core.
* Return the current core's virtual address.
*/
-void *
-ai_setcoreidx(si_t *sih, uint coreidx)
+static void *
+_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
{
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- uint32 addr, wrap;
+ uint32 addr, wrap, wrap2;
void *regs;
if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
addr = cores_info->coresba[coreidx];
wrap = cores_info->wrapba[coreidx];
+ wrap2 = cores_info->wrapba2[coreidx];
/*
* If the user has provided an interrupt mask enabled function,
cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
}
- sii->curwrap = cores_info->wrappers[coreidx];
+ if (!cores_info->wrappers2[coreidx] && (wrap2 != 0)) {
+ cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
+ }
+ if (use_wrap2)
+ sii->curwrap = cores_info->wrappers2[coreidx];
+ else
+ sii->curwrap = cores_info->wrappers[coreidx];
break;
case PCI_BUS:
OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
regs = sii->curmap;
/* point bar0 2nd 4KB window to the primary wrapper */
+ if (use_wrap2)
+ wrap = wrap2;
if (PCIE_GEN2(sii))
OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
else
case SPI_BUS:
case SDIO_BUS:
sii->curmap = regs = (void *)((uintptr)addr);
- sii->curwrap = (void *)((uintptr)wrap);
+ if (use_wrap2)
+ sii->curwrap = (void *)((uintptr)wrap2);
+ else
+ sii->curwrap = (void *)((uintptr)wrap);
break;
#endif /* BCMSDIO */
return regs;
}
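+/* Select a core through its primary wrapper, or through its secondary wrapper. */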
+void *
+ai_setcoreidx(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 0);
+}
+
+void *
+ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx)
+{
+ return _ai_setcoreidx(sih, coreidx, 1);
+}
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
__FUNCTION__));
return sii->curidx;
}
-
+ if (BCM53573_DMP()) {
+ SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
+ return sii->curidx;
+ }
#ifdef REROUTE_OOBINT
if (PMU_DMP()) {
SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
__FUNCTION__));
return PMU_OOB_BIT;
}
+#else
+ if (PMU_DMP()) {
+ uint idx, flag;
+ idx = sii->curidx;
+ ai_setcoreidx(sih, SI_CC_IDX);
+ flag = ai_flag_alt(sih);
+ ai_setcoreidx(sih, idx);
+ return flag;
+ }
#endif /* REROUTE_OOBINT */
ai = sii->curwrap;
}
}
- if (!fast)
- return 0;
+ if (!fast) {
+ ASSERT(sii->curidx == coreidx);
+ r = (uint32*) ((uchar*)sii->curmap + regoff);
+ }
return (r);
}
* bits - core specific bits that are set during and after reset sequence
* resetbits - core specific bits that are set only during reset sequence
*/
-void
-ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+static void
+_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
volatile uint32 dummy;
uint loop_counter = 10;
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: bits: 0x%x, resetbits: 0x%x\n", __FUNCTION__, bits, resetbits);
+#endif
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
/* ensure there are no pending backplane operations */
SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: resetstatus: %p dummy: %x\n", __FUNCTION__, &ai->resetstatus, dummy);
+#endif
/* put core into reset state */
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: resetctrl: %p\n", __FUNCTION__, &ai->resetctrl);
+#endif
W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
OSL_DELAY(10);
W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: ioctrl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy);
+#endif
BCM_REFERENCE(dummy);
/* ensure there are no pending backplane operations */
/* take core out of reset */
W_REG(sii->osh, &ai->resetctrl, 0);
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: loop_counter: %d resetstatus: %p resetctrl: %p\n",
+ __FUNCTION__, loop_counter, &ai->resetstatus, &ai->resetctrl);
+#endif
/* ensure there are no pending backplane operations */
SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
+#ifdef CUSTOMER_HW4_DEBUG
+ printf("%s: ioctl: %p dummy: 0x%x\n", __FUNCTION__, &ai->ioctrl, dummy);
+#endif
BCM_REFERENCE(dummy);
OSL_DELAY(1);
}
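+/* Reset the currently selected core. If the core has a second wrapper (wrapba2),
+ * run the reset sequence through that wrapper first, then through the primary wrapper.
+ */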
+void
+ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint idx = sii->curidx;
+
+ if (cores_info->wrapba2[idx] != 0) {
+ ai_setcoreidx_2ndwrap(sih, idx);
+ _ai_core_reset(sih, bits, resetbits);
+ ai_setcoreidx(sih, idx);
+ }
+
+ _ai_core_reset(sih, bits, resetbits);
+}
+
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
}
}
#endif
+
+
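+/* Enable backplane (AXI) timeout detection on every cached APB bridge wrapper;
+ * compiled out unless AXI_TIMEOUTS is defined.
+ */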
+void
+ai_enable_backplane_timeouts(si_t *sih)
+{
+#ifdef AXI_TIMEOUTS
+ si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ int i;
+
+ for (i = 0; i < sii->num_br; ++i) {
+ ai = (aidmp_t *) sii->br_wrapba[i];
+ W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
+ ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));
+ }
+#endif /* AXI_TIMEOUTS */
+}
+
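+/* Check each APB bridge wrapper for a logged backplane error; clear the error,
+ * reset the bridge on a timeout (but not on slave/decode errors), and print the error log.
+ */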
+void
+ai_clear_backplane_to(si_t *sih)
+{
+#ifdef AXI_TIMEOUTS
+ si_info_t *sii = SI_INFO(sih);
+ aidmp_t *ai;
+ int i;
+ uint32 errlogstatus;
+
+ for (i = 0; i < sii->num_br; ++i) {
+ ai = (aidmp_t *) sii->br_wrapba[i];
+ /* check for backplane timeout & clear backplane hang */
+ errlogstatus = R_REG(sii->osh, &ai->errlogstatus);
+
+ if ((errlogstatus & AIELS_TIMEOUT_MASK) != 0) {
+ /* set ErrDone to clear the condition */
+ W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
+
+ /* SPINWAIT on errlogstatus timeout status bits */
+ while (R_REG(sii->osh, &ai->errlogstatus) & AIELS_TIMEOUT_MASK)
+ ;
+
+ /* only reset APB Bridge on timeout (not slave error, or dec error) */
+ switch (errlogstatus & AIELS_TIMEOUT_MASK) {
+ case 0x1:
+ printf("AXI slave error");
+ break;
+ case 0x2:
+ /* reset APB Bridge */
+ OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ /* clear Reset bit */
+ AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ printf("AXI timeout");
+ break;
+ case 0x3:
+ printf("AXI decode error");
+ break;
+ default:
+ ; /* should be impossible */
+ }
+ printf("; APB Bridge %d\n", i);
+ printf("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
+ R_REG(sii->osh, &ai->errlogaddrlo),
+ R_REG(sii->osh, &ai->errlogaddrhi),
+ R_REG(sii->osh, &ai->errlogid),
+ R_REG(sii->osh, &ai->errlogflags));
+ printf(", status 0x%08x\n", errlogstatus);
+ }
+ }
+#endif /* AXI_TIMEOUTS */
+}
--- /dev/null
+/*
+ * Misc utility routines used by kernel or app-level.
+ * Contents are wifi-specific, used by any kernel or app-level
+ * software that might want wifi things as it grows.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_app_utils.c 547371 2015-04-08 12:51:39Z $
+ */
+
+#include <typedefs.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+#define tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
+#else /* BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+#endif /* BCMDRIVER */
+#include <bcmwifi_channels.h>
+
+#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
+#include <bcmstdlib.h> /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
+#endif
+
+#include <bcmutils.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+
+#ifndef BCMDRIVER
+/* Take an array of measurements representing a single channel over time and return
+   a summary. Currently implemented as a simple average but could easily evolve
+   into more complex algorithms.
+*/
+cca_congest_channel_req_t *
+cca_per_chan_summary(cca_congest_channel_req_t *input, cca_congest_channel_req_t *avg, bool percent)
+{
+ int sec;
+ cca_congest_t totals;
+
+ totals.duration = 0;
+ totals.congest_ibss = 0;
+ totals.congest_obss = 0;
+ totals.interference = 0;
+ avg->num_secs = 0;
+
+ for (sec = 0; sec < input->num_secs; sec++) {
+ if (input->secs[sec].duration) {
+ totals.duration += input->secs[sec].duration;
+ totals.congest_ibss += input->secs[sec].congest_ibss;
+ totals.congest_obss += input->secs[sec].congest_obss;
+ totals.interference += input->secs[sec].interference;
+ avg->num_secs++;
+ }
+ }
+ avg->chanspec = input->chanspec;
+
+ if (!avg->num_secs || !totals.duration)
+ return (avg);
+
+ if (percent) {
+ avg->secs[0].duration = totals.duration / avg->num_secs;
+ avg->secs[0].congest_ibss = totals.congest_ibss * 100/totals.duration;
+ avg->secs[0].congest_obss = totals.congest_obss * 100/totals.duration;
+ avg->secs[0].interference = totals.interference * 100/totals.duration;
+ } else {
+ avg->secs[0].duration = totals.duration / avg->num_secs;
+ avg->secs[0].congest_ibss = totals.congest_ibss / avg->num_secs;
+ avg->secs[0].congest_obss = totals.congest_obss / avg->num_secs;
+ avg->secs[0].interference = totals.interference / avg->num_secs;
+ }
+
+ return (avg);
+}
+
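+/* Count how many channels are still set in 'bitmap' and report the index of the last one found. */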
+static void
+cca_info(uint8 *bitmap, int num_bits, int *left, int *bit_pos)
+{
+ int i;
+ for (*left = 0, i = 0; i < num_bits; i++) {
+ if (isset(bitmap, i)) {
+ (*left)++;
+ *bit_pos = i;
+ }
+ }
+}
+
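+/* Return the primary (control) 20MHz channel number for a 20/40/80MHz chanspec. */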
+static uint8
+spec_to_chan(chanspec_t chspec)
+{
+ uint8 center_ch, edge, primary, sb;
+
+ center_ch = CHSPEC_CHANNEL(chspec);
+
+ if (CHSPEC_IS20(chspec)) {
+ return center_ch;
+ } else {
+ /* the lower edge of the wide channel is half the bw from
+ * the center channel.
+ */
+ if (CHSPEC_IS40(chspec)) {
+ edge = center_ch - CH_20MHZ_APART;
+ } else {
+ /* must be 80MHz (until we support more) */
+ ASSERT(CHSPEC_IS80(chspec));
+ edge = center_ch - CH_40MHZ_APART;
+ }
+
+ /* find the channel number of the lowest 20MHz primary channel */
+ primary = edge + CH_10MHZ_APART;
+
+ /* select the actual subband */
+ sb = (chspec & WL_CHANSPEC_CTL_SB_MASK) >> WL_CHANSPEC_CTL_SB_SHIFT;
+ primary = primary + sb * CH_20MHZ_APART;
+
+ return primary;
+ }
+}
+
+/*
+  Take an array of measurements representing summaries of different channels.
+  Return a recommended channel.
+  Interference is evil, get rid of that first.
+  Then hunt for the lowest other-BSS traffic.
+  Don't forget that channels with low duration times may not have accurate readings.
+  For the moment, do not overwrite the input array.
+*/
+int
+cca_analyze(cca_congest_channel_req_t *input[], int num_chans, uint flags, chanspec_t *answer)
+{
+ uint8 *bitmap = NULL; /* 38 Max channels needs 5 bytes = 40 */
+ int i, left, winner, ret_val = 0;
+ uint32 min_obss = 1 << 30;
+ uint bitmap_sz;
+
+ bitmap_sz = CEIL(num_chans, NBBY);
+ bitmap = (uint8 *)malloc(bitmap_sz);
+ if (bitmap == NULL) {
+ printf("unable to allocate memory\n");
+ return BCME_NOMEM;
+ }
+
+ memset(bitmap, 0, bitmap_sz);
+ /* Initially, all channels are up for consideration */
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->chanspec)
+ setbit(bitmap, i);
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_TOO_FEW;
+ goto f_exit;
+ }
+
+ /* Filter for 2.4 GHz Band */
+ if (flags & CCA_FLAG_2G_ONLY) {
+ for (i = 0; i < num_chans; i++) {
+ if (!CHSPEC_IS2G(input[i]->chanspec))
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_BAND;
+ goto f_exit;
+ }
+
+ /* Filter for 5 GHz Band */
+ if (flags & CCA_FLAG_5G_ONLY) {
+ for (i = 0; i < num_chans; i++) {
+ if (!CHSPEC_IS5G(input[i]->chanspec))
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_BAND;
+ goto f_exit;
+ }
+
+ /* Filter for Duration */
+ if (!(flags & CCA_FLAG_IGNORE_DURATION)) {
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->secs[0].duration < CCA_THRESH_MILLI)
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_DURATION;
+ goto f_exit;
+ }
+
+ /* Filter for 1 6 11 on 2.4 Band */
+ if (flags & CCA_FLAGS_PREFER_1_6_11) {
+ int tmp_channel = spec_to_chan(input[i]->chanspec);
+ int is2g = CHSPEC_IS2G(input[i]->chanspec);
+ for (i = 0; i < num_chans; i++) {
+ if (is2g && tmp_channel != 1 && tmp_channel != 6 && tmp_channel != 11)
+ clrbit(bitmap, i);
+ }
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_PREF_CHAN;
+ goto f_exit;
+ }
+
+	/* Toss channels with high interference */
+ if (!(flags & CCA_FLAG_IGNORE_INTERFER)) {
+ for (i = 0; i < num_chans; i++) {
+ if (input[i]->secs[0].interference > CCA_THRESH_INTERFERE)
+ clrbit(bitmap, i);
+ }
+ cca_info(bitmap, num_chans, &left, &i);
+ if (!left) {
+ ret_val = CCA_ERRNO_INTERFER;
+ goto f_exit;
+ }
+ }
+
+ /* Now find lowest obss */
+ winner = 0;
+ for (i = 0; i < num_chans; i++) {
+ if (isset(bitmap, i) && input[i]->secs[0].congest_obss < min_obss) {
+ winner = i;
+ min_obss = input[i]->secs[0].congest_obss;
+ }
+ }
+ *answer = input[winner]->chanspec;
+ f_exit:
+ free(bitmap); /* free the allocated memory for bitmap */
+ return ret_val;
+}
+#endif /* !BCMDRIVER */
+
+/* Index (in units of uint32) of cntmember, counted from the first counter variable, txframe. */
+#define IDX_IN_WL_CNT_VER_6_T(cntmember) \
+ ((OFFSETOF(wl_cnt_ver_6_t, cntmember) - OFFSETOF(wl_cnt_ver_6_t, txframe)) / sizeof(uint32))
+
+#define IDX_IN_WL_CNT_VER_11_T(cntmember) \
+ ((OFFSETOF(wl_cnt_ver_11_t, cntmember) - OFFSETOF(wl_cnt_ver_11_t, txframe)) \
+ / sizeof(uint32))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_6_T \
+ ((sizeof(wl_cnt_ver_6_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude macstat cnt variables. wl_cnt_ver_6_t only has 62 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T \
+ (NUM_OF_CNT_IN_WL_CNT_VER_6_T - (WL_CNT_MCST_VAR_NUM - 2))
+
+/* Exclude version and length fields */
+#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \
+ ((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32))
+/* Exclude 64 macstat cnt variables. */
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \
+ (NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM)
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */
+static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = {
+ IDX_IN_WL_CNT_VER_6_T(txframe),
+ IDX_IN_WL_CNT_VER_6_T(txbyte),
+ IDX_IN_WL_CNT_VER_6_T(txretrans),
+ IDX_IN_WL_CNT_VER_6_T(txerror),
+ IDX_IN_WL_CNT_VER_6_T(txctl),
+ IDX_IN_WL_CNT_VER_6_T(txprshort),
+ IDX_IN_WL_CNT_VER_6_T(txserr),
+ IDX_IN_WL_CNT_VER_6_T(txnobuf),
+ IDX_IN_WL_CNT_VER_6_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_6_T(txrunt),
+ IDX_IN_WL_CNT_VER_6_T(txchit),
+ IDX_IN_WL_CNT_VER_6_T(txcmiss),
+ IDX_IN_WL_CNT_VER_6_T(txuflo),
+ IDX_IN_WL_CNT_VER_6_T(txphyerr),
+ IDX_IN_WL_CNT_VER_6_T(txphycrs),
+ IDX_IN_WL_CNT_VER_6_T(rxframe),
+ IDX_IN_WL_CNT_VER_6_T(rxbyte),
+ IDX_IN_WL_CNT_VER_6_T(rxerror),
+ IDX_IN_WL_CNT_VER_6_T(rxctl),
+ IDX_IN_WL_CNT_VER_6_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_6_T(rxnondata),
+ IDX_IN_WL_CNT_VER_6_T(rxbadds),
+ IDX_IN_WL_CNT_VER_6_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_6_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_6_T(rxrunt),
+ IDX_IN_WL_CNT_VER_6_T(rxgiant),
+ IDX_IN_WL_CNT_VER_6_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_6_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_6_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_6_T(rxbadda),
+ IDX_IN_WL_CNT_VER_6_T(rxfilter),
+ IDX_IN_WL_CNT_VER_6_T(rxoflo),
+ IDX_IN_WL_CNT_VER_6_T(rxuflo),
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_6_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_6_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_6_T(dmade),
+ IDX_IN_WL_CNT_VER_6_T(dmada),
+ IDX_IN_WL_CNT_VER_6_T(dmape),
+ IDX_IN_WL_CNT_VER_6_T(reset),
+ IDX_IN_WL_CNT_VER_6_T(tbtt),
+ IDX_IN_WL_CNT_VER_6_T(txdmawar),
+ IDX_IN_WL_CNT_VER_6_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_6_T(txfrag),
+ IDX_IN_WL_CNT_VER_6_T(txmulti),
+ IDX_IN_WL_CNT_VER_6_T(txfail),
+ IDX_IN_WL_CNT_VER_6_T(txretry),
+ IDX_IN_WL_CNT_VER_6_T(txretrie),
+ IDX_IN_WL_CNT_VER_6_T(rxdup),
+ IDX_IN_WL_CNT_VER_6_T(txrts),
+ IDX_IN_WL_CNT_VER_6_T(txnocts),
+ IDX_IN_WL_CNT_VER_6_T(txnoack),
+ IDX_IN_WL_CNT_VER_6_T(rxfrag),
+ IDX_IN_WL_CNT_VER_6_T(rxmulti),
+ IDX_IN_WL_CNT_VER_6_T(rxcrc),
+ IDX_IN_WL_CNT_VER_6_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_6_T(rxundec),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_6_T(wepundec),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_6_T(txchanrej),
+ IDX_IN_WL_CNT_VER_6_T(psmwds),
+ IDX_IN_WL_CNT_VER_6_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_6_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_6_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_6_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_6_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_6_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_6_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_6_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_6_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_6_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_6_T(rfdisable),
+ IDX_IN_WL_CNT_VER_6_T(txexptime),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_6_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_6_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_6_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_6_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */
+static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = {
+ IDX_IN_WL_CNT_VER_11_T(txframe),
+ IDX_IN_WL_CNT_VER_11_T(txbyte),
+ IDX_IN_WL_CNT_VER_11_T(txretrans),
+ IDX_IN_WL_CNT_VER_11_T(txerror),
+ IDX_IN_WL_CNT_VER_11_T(txctl),
+ IDX_IN_WL_CNT_VER_11_T(txprshort),
+ IDX_IN_WL_CNT_VER_11_T(txserr),
+ IDX_IN_WL_CNT_VER_11_T(txnobuf),
+ IDX_IN_WL_CNT_VER_11_T(txnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(txrunt),
+ IDX_IN_WL_CNT_VER_11_T(txchit),
+ IDX_IN_WL_CNT_VER_11_T(txcmiss),
+ IDX_IN_WL_CNT_VER_11_T(txuflo),
+ IDX_IN_WL_CNT_VER_11_T(txphyerr),
+ IDX_IN_WL_CNT_VER_11_T(txphycrs),
+ IDX_IN_WL_CNT_VER_11_T(rxframe),
+ IDX_IN_WL_CNT_VER_11_T(rxbyte),
+ IDX_IN_WL_CNT_VER_11_T(rxerror),
+ IDX_IN_WL_CNT_VER_11_T(rxctl),
+ IDX_IN_WL_CNT_VER_11_T(rxnobuf),
+ IDX_IN_WL_CNT_VER_11_T(rxnondata),
+ IDX_IN_WL_CNT_VER_11_T(rxbadds),
+ IDX_IN_WL_CNT_VER_11_T(rxbadcm),
+ IDX_IN_WL_CNT_VER_11_T(rxfragerr),
+ IDX_IN_WL_CNT_VER_11_T(rxrunt),
+ IDX_IN_WL_CNT_VER_11_T(rxgiant),
+ IDX_IN_WL_CNT_VER_11_T(rxnoscb),
+ IDX_IN_WL_CNT_VER_11_T(rxbadproto),
+ IDX_IN_WL_CNT_VER_11_T(rxbadsrcmac),
+ IDX_IN_WL_CNT_VER_11_T(rxbadda),
+ IDX_IN_WL_CNT_VER_11_T(rxfilter),
+ IDX_IN_WL_CNT_VER_11_T(rxoflo),
+ IDX_IN_WL_CNT_VER_11_T(rxuflo),
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 1,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 2,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 3,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 4,
+ IDX_IN_WL_CNT_VER_11_T(rxuflo) + 5,
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txrts_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_rxcrc_off),
+ IDX_IN_WL_CNT_VER_11_T(d11cnt_txnocts_off),
+ IDX_IN_WL_CNT_VER_11_T(dmade),
+ IDX_IN_WL_CNT_VER_11_T(dmada),
+ IDX_IN_WL_CNT_VER_11_T(dmape),
+ IDX_IN_WL_CNT_VER_11_T(reset),
+ IDX_IN_WL_CNT_VER_11_T(tbtt),
+ IDX_IN_WL_CNT_VER_11_T(txdmawar),
+ IDX_IN_WL_CNT_VER_11_T(pkt_callback_reg_fail),
+ IDX_IN_WL_CNT_VER_11_T(txfrag),
+ IDX_IN_WL_CNT_VER_11_T(txmulti),
+ IDX_IN_WL_CNT_VER_11_T(txfail),
+ IDX_IN_WL_CNT_VER_11_T(txretry),
+ IDX_IN_WL_CNT_VER_11_T(txretrie),
+ IDX_IN_WL_CNT_VER_11_T(rxdup),
+ IDX_IN_WL_CNT_VER_11_T(txrts),
+ IDX_IN_WL_CNT_VER_11_T(txnocts),
+ IDX_IN_WL_CNT_VER_11_T(txnoack),
+ IDX_IN_WL_CNT_VER_11_T(rxfrag),
+ IDX_IN_WL_CNT_VER_11_T(rxmulti),
+ IDX_IN_WL_CNT_VER_11_T(rxcrc),
+ IDX_IN_WL_CNT_VER_11_T(txfrmsnt),
+ IDX_IN_WL_CNT_VER_11_T(rxundec),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail),
+ IDX_IN_WL_CNT_VER_11_T(wepundec),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded),
+ IDX_IN_WL_CNT_VER_11_T(txchanrej),
+ IDX_IN_WL_CNT_VER_11_T(psmwds),
+ IDX_IN_WL_CNT_VER_11_T(phywatchdog),
+ IDX_IN_WL_CNT_VER_11_T(prq_entries_handled),
+ IDX_IN_WL_CNT_VER_11_T(prq_undirected_entries),
+ IDX_IN_WL_CNT_VER_11_T(prq_bad_entries),
+ IDX_IN_WL_CNT_VER_11_T(atim_suppress_count),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready),
+ IDX_IN_WL_CNT_VER_11_T(bcn_template_not_ready_done),
+ IDX_IN_WL_CNT_VER_11_T(late_tbtt_dpc),
+ IDX_IN_WL_CNT_VER_11_T(rx1mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx2mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx5mbps5),
+ IDX_IN_WL_CNT_VER_11_T(rx6mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx9mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx11mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx12mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx18mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx24mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx36mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx48mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx54mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx108mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx162mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx216mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx270mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx324mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx378mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx432mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx486mbps),
+ IDX_IN_WL_CNT_VER_11_T(rx540mbps),
+ IDX_IN_WL_CNT_VER_11_T(rfdisable),
+ IDX_IN_WL_CNT_VER_11_T(txexptime),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_sgi),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_stbc),
+ IDX_IN_WL_CNT_VER_11_T(rxundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipmicfaill_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipcntrmsr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpfmterr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpreplay_mcst),
+ IDX_IN_WL_CNT_VER_11_T(ccmpundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(fourwayfail_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepundec_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(decsuccess_mcst),
+ IDX_IN_WL_CNT_VER_11_T(tkipicverr_mcst),
+ IDX_IN_WL_CNT_VER_11_T(wepexcluded_mcst),
+ IDX_IN_WL_CNT_VER_11_T(dma_hang),
+ IDX_IN_WL_CNT_VER_11_T(reinit),
+ IDX_IN_WL_CNT_VER_11_T(pstatxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstatxnoassoc),
+ IDX_IN_WL_CNT_VER_11_T(pstarxucast),
+ IDX_IN_WL_CNT_VER_11_T(pstarxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(pstatxbcmc),
+ IDX_IN_WL_CNT_VER_11_T(cso_passthrough),
+ IDX_IN_WL_CNT_VER_11_T(cso_normal),
+ IDX_IN_WL_CNT_VER_11_T(chained),
+ IDX_IN_WL_CNT_VER_11_T(chainedsz1),
+ IDX_IN_WL_CNT_VER_11_T(unchained),
+ IDX_IN_WL_CNT_VER_11_T(maxchainsz),
+ IDX_IN_WL_CNT_VER_11_T(currchainsz),
+ IDX_IN_WL_CNT_VER_11_T(pciereset),
+ IDX_IN_WL_CNT_VER_11_T(cfgrestore),
+ IDX_IN_WL_CNT_VER_11_T(reinitreason),
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 1,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 2,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 3,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 4,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 5,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 6,
+ IDX_IN_WL_CNT_VER_11_T(reinitreason) + 7,
+ IDX_IN_WL_CNT_VER_11_T(rxrtry),
+ IDX_IN_WL_CNT_VER_11_T(rxmpdu_mu),
+ IDX_IN_WL_CNT_VER_11_T(txbar),
+ IDX_IN_WL_CNT_VER_11_T(rxbar),
+ IDX_IN_WL_CNT_VER_11_T(txpspoll),
+ IDX_IN_WL_CNT_VER_11_T(rxpspoll),
+ IDX_IN_WL_CNT_VER_11_T(txnull),
+ IDX_IN_WL_CNT_VER_11_T(rxnull),
+ IDX_IN_WL_CNT_VER_11_T(txqosnull),
+ IDX_IN_WL_CNT_VER_11_T(rxqosnull),
+ IDX_IN_WL_CNT_VER_11_T(txassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocreq),
+ IDX_IN_WL_CNT_VER_11_T(txdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(rxdisassoc),
+ IDX_IN_WL_CNT_VER_11_T(txassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(rxreassocrsp),
+ IDX_IN_WL_CNT_VER_11_T(txauth),
+ IDX_IN_WL_CNT_VER_11_T(rxauth),
+ IDX_IN_WL_CNT_VER_11_T(txdeauth),
+ IDX_IN_WL_CNT_VER_11_T(rxdeauth),
+ IDX_IN_WL_CNT_VER_11_T(txprobereq),
+ IDX_IN_WL_CNT_VER_11_T(rxprobereq),
+ IDX_IN_WL_CNT_VER_11_T(txprobersp),
+ IDX_IN_WL_CNT_VER_11_T(rxprobersp),
+ IDX_IN_WL_CNT_VER_11_T(txaction),
+ IDX_IN_WL_CNT_VER_11_T(rxaction)
+};
+
+/* Index conversion table from wl_cnt_ver_11_t to
+ * either wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t
+ */
+static const uint8 wlcntver11t_to_wlcntXX40mcstv1t[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ IDX_IN_WL_CNT_VER_11_T(txmpdu),
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ IDX_IN_WL_CNT_VER_11_T(rxnodelim),
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+/* For mcst offsets that were not used. (2 Pads) */
+#define INVALID_MCST_IDX ((uint8)(-1))
+/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver11t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_11_T(txallfrm),
+ IDX_IN_WL_CNT_VER_11_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_11_T(txackfrm),
+ IDX_IN_WL_CNT_VER_11_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_11_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl),
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_11_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_11_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_11_T(txphyerror),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_11_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_11_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_11_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_11_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_11_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_11_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxstrt),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_11_T(rxackucast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_11_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_11_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_11_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_11_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_11_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_11_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_11_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_11_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_11_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_11_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_11_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_11_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_11_T(rxnack),
+ IDX_IN_WL_CNT_VER_11_T(frmscons),
+ IDX_IN_WL_CNT_VER_11_T(txnack),
+ IDX_IN_WL_CNT_VER_11_T(rxback),
+ IDX_IN_WL_CNT_VER_11_T(txback),
+ IDX_IN_WL_CNT_VER_11_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_11_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_11_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
+};
+
+
+/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */
+static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
+ IDX_IN_WL_CNT_VER_6_T(txallfrm),
+ IDX_IN_WL_CNT_VER_6_T(txrtsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txctsfrm),
+ IDX_IN_WL_CNT_VER_6_T(txackfrm),
+ IDX_IN_WL_CNT_VER_6_T(txdnlfrm),
+ IDX_IN_WL_CNT_VER_6_T(txbcnfrm),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl),
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 1,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 2,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 3,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 4,
+ IDX_IN_WL_CNT_VER_6_T(txfunfl) + 5,
+ IDX_IN_WL_CNT_VER_6_T(txfbw),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(txtplunfl),
+ IDX_IN_WL_CNT_VER_6_T(txphyerror),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxducast),
+ IDX_IN_WL_CNT_VER_6_T(pktengrxdmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtoolong),
+ IDX_IN_WL_CNT_VER_6_T(rxfrmtooshrt),
+ IDX_IN_WL_CNT_VER_6_T(rxinvmachdr),
+ IDX_IN_WL_CNT_VER_6_T(rxbadfcs),
+ IDX_IN_WL_CNT_VER_6_T(rxbadplcp),
+ IDX_IN_WL_CNT_VER_6_T(rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxstrt),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmucastmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmucast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsucast),
+ IDX_IN_WL_CNT_VER_6_T(rxackucast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmocast),
+ IDX_IN_WL_CNT_VER_6_T(rxrtsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxctsocast),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxmfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxcfrmmcast),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconmbss),
+ IDX_IN_WL_CNT_VER_6_T(rxdfrmucastobss),
+ IDX_IN_WL_CNT_VER_6_T(rxbeaconobss),
+ IDX_IN_WL_CNT_VER_6_T(rxrsptmout),
+ IDX_IN_WL_CNT_VER_6_T(bcntxcancl),
+ INVALID_MCST_IDX,
+ IDX_IN_WL_CNT_VER_6_T(rxf0ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf1ovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxf2ovfl),
+ IDX_IN_WL_CNT_VER_6_T(txsfovfl),
+ IDX_IN_WL_CNT_VER_6_T(pmqovfl),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprqfrm),
+ IDX_IN_WL_CNT_VER_6_T(rxcgprsqovfl),
+ IDX_IN_WL_CNT_VER_6_T(txcgprsfail),
+ IDX_IN_WL_CNT_VER_6_T(txcgprssuc),
+ IDX_IN_WL_CNT_VER_6_T(prs_timeout),
+ IDX_IN_WL_CNT_VER_6_T(rxnack),
+ IDX_IN_WL_CNT_VER_6_T(frmscons),
+ IDX_IN_WL_CNT_VER_6_T(txnack),
+ IDX_IN_WL_CNT_VER_6_T(rxback),
+ IDX_IN_WL_CNT_VER_6_T(txback),
+ IDX_IN_WL_CNT_VER_6_T(bphy_rxcrsglitch),
+ IDX_IN_WL_CNT_VER_6_T(rxdrop20s),
+ IDX_IN_WL_CNT_VER_6_T(rxtoolate),
+ IDX_IN_WL_CNT_VER_6_T(bphy_badplcp)
+};
+
+/* copy wlc layer counters from old type cntbuf to wl_cnt_wlc_t type. */
+static int
+wl_copy_wlccnt(uint16 cntver, uint32 *dst, uint32 *src, uint8 src_max_idx)
+{
+ uint i;
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ /* Init wlccnt with invalid value. Unchanged value will not be printed out */
+ for (i = 0; i < (sizeof(wl_cnt_wlc_t) / sizeof(uint32)); i++) {
+ dst[i] = INVALID_CNT_VAL;
+ }
+
+ if (cntver == WL_CNT_VERSION_6) {
+ for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T; i++) {
+ if (wlcntver6t_to_wlcntwlct[i] >= src_max_idx) {
+ /* src buffer does not have counters from here */
+ break;
+ }
+ dst[i] = src[wlcntver6t_to_wlcntwlct[i]];
+ }
+ } else {
+ for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) {
+ if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) {
+ /* src buffer does not have counters from here */
+ break;
+ }
+ dst[i] = src[wlcntver11t_to_wlcntwlct[i]];
+ }
+ }
+ return BCME_OK;
+}
+
+/* copy macstat counters from old type cntbuf to wl_cnt_v_le10_mcst_t type. */
+static int
+wl_copy_macstat_upto_ver10(uint16 cntver, uint32 *dst, uint32 *src)
+{
+ uint i;
+
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ if (cntver == WL_CNT_VERSION_6) {
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ if (wlcntver6t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+ /* This mcst counter does not exist in wl_cnt_ver_6_t */
+ dst[i] = INVALID_CNT_VAL;
+ } else {
+ dst[i] = src[wlcntver6t_to_wlcntvle10mcstt[i]];
+ }
+ }
+ } else {
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ if (wlcntver11t_to_wlcntvle10mcstt[i] == INVALID_MCST_IDX) {
+ /* This mcst counter does not exist in wl_cnt_ver_11_t */
+ dst[i] = INVALID_CNT_VAL;
+ } else {
+ dst[i] = src[wlcntver11t_to_wlcntvle10mcstt[i]];
+ }
+ }
+ }
+ return BCME_OK;
+}
+
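+/* copy macstat counters from wl_cnt_ver_11_t type cntbuf to either
+ * wl_cnt_ge40mcst_v1_t or wl_cnt_lt40mcst_v1_t layout.
+ */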
+static int
+wl_copy_macstat_ver11(uint32 *dst, uint32 *src)
+{
+ uint i;
+
+ if (dst == NULL || src == NULL) {
+ return BCME_ERROR;
+ }
+
+ for (i = 0; i < WL_CNT_MCST_VAR_NUM; i++) {
+ dst[i] = src[wlcntver11t_to_wlcntXX40mcstv1t[i]];
+ }
+ return BCME_OK;
+}
+
+/**
+ * Translate non-xtlv 'wl counters' IOVar buffer received by old driver/FW to xtlv format.
+ * Parameters:
+ * cntbuf: pointer to non-xtlv 'wl counters' IOVar buffer received by old driver/FW.
+ * Newly translated xtlv format is written to this pointer.
+ * buflen: length of the "cntbuf" without any padding.
+ * corerev: chip core revision of the driver/FW.
+ */
+int
+wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf, int buflen, uint32 corerev)
+{
+ wl_cnt_wlc_t *wlccnt = NULL;
+ uint32 *macstat = NULL;
+ xtlv_desc_t xtlv_desc[3];
+ uint16 mcst_xtlv_id;
+ int res = BCME_OK;
+ wl_cnt_info_t *cntinfo = cntbuf;
+ void *xtlvbuf_p = cntinfo->data;
+ uint16 ver = cntinfo->version;
+ uint16 xtlvbuflen = (uint16)buflen;
+ uint16 src_max_idx;
+#ifdef BCMDRIVER
+ osl_t *osh = ctx;
+#else
+ BCM_REFERENCE(ctx);
+#endif
+
+ if (ver == WL_CNT_T_VERSION) {
+ /* Already in xtlv format. */
+ goto exit;
+ }
+
+#ifdef BCMDRIVER
+ wlccnt = MALLOC(osh, sizeof(*wlccnt));
+ macstat = MALLOC(osh, WL_CNT_MCST_STRUCT_SZ);
+#else
+ wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
+ macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
+#endif
+ if (!wlccnt) {
+ printf("wl_cntbuf_to_xtlv_format malloc fail!\n");
+ res = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* Check if the max idx in the struct exceeds the boundary of uint8 */
+ if (NUM_OF_CNT_IN_WL_CNT_VER_6_T > ((uint8)(-1) + 1) ||
+ NUM_OF_CNT_IN_WL_CNT_VER_11_T > ((uint8)(-1) + 1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n");
+ res = BCME_ERROR;
+ goto exit;
+ }
+
+	/* Exclude version and length fields in either wl_cnt_ver_6_t or wl_cnt_ver_11_t */
+ src_max_idx = (cntinfo->datalen - OFFSETOF(wl_cnt_info_t, data)) / sizeof(uint32);
+
+ if (src_max_idx > (uint8)(-1)) {
+ printf("wlcntverXXt_to_wlcntwlct and src_max_idx need"
+ " to be of uint16 instead of uint8\n"
+ "Try updating wl utility to the latest.\n");
+ res = BCME_ERROR;
+ }
+
+ /* Copy wlc layer counters to wl_cnt_wlc_t */
+ res = wl_copy_wlccnt(ver, (uint32 *)wlccnt, (uint32 *)cntinfo->data, (uint8)src_max_idx);
+ if (res != BCME_OK) {
+ printf("wl_copy_wlccnt fail!\n");
+ goto exit;
+ }
+
+ /* Copy macstat counters to wl_cnt_wlc_t */
+ if (ver == WL_CNT_VERSION_11) {
+ res = wl_copy_macstat_ver11(macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_ver11 fail!\n");
+ goto exit;
+ }
+ if (corerev >= 40) {
+ mcst_xtlv_id = WL_CNT_XTLV_GE40_UCODE_V1;
+ } else {
+ mcst_xtlv_id = WL_CNT_XTLV_LT40_UCODE_V1;
+ }
+ } else {
+ res = wl_copy_macstat_upto_ver10(ver, macstat, (uint32 *)cntinfo->data);
+ if (res != BCME_OK) {
+ printf("wl_copy_macstat_upto_ver10 fail!\n");
+ goto exit;
+ }
+ mcst_xtlv_id = WL_CNT_XTLV_CNTV_LE10_UCODE;
+ }
+
+ xtlv_desc[0].type = WL_CNT_XTLV_WLC;
+ xtlv_desc[0].len = sizeof(*wlccnt);
+ xtlv_desc[0].ptr = wlccnt;
+
+ xtlv_desc[1].type = mcst_xtlv_id;
+ xtlv_desc[1].len = WL_CNT_MCST_STRUCT_SZ;
+ xtlv_desc[1].ptr = macstat;
+
+ xtlv_desc[2].type = 0;
+ xtlv_desc[2].len = 0;
+ xtlv_desc[2].ptr = NULL;
+
+ memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE);
+
+ res = bcm_pack_xtlv_buf_from_mem(&xtlvbuf_p, &xtlvbuflen,
+ xtlv_desc, BCM_XTLV_OPTION_ALIGN32);
+ cntinfo->datalen = (buflen - xtlvbuflen);
+exit:
+#ifdef BCMDRIVER
+ if (wlccnt) {
+ MFREE(osh, wlccnt, sizeof(*wlccnt));
+ }
+ if (macstat) {
+ MFREE(osh, macstat, WL_CNT_MCST_STRUCT_SZ);
+ }
+#else
+ if (wlccnt) {
+ free(wlccnt);
+ }
+ if (macstat) {
+ free(macstat);
+ }
+#endif
+ return res;
+}
/*
* bcmevent read-only data shared by kernel or app layers
*
- * $Copyright Open Broadcom Corporation$
- * $Id: bcmevent.c 492377 2014-07-21 19:54:06Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmevent.c 530174 2015-01-29 09:47:55Z $
*/
#include <typedefs.h>
#include <bcmutils.h>
+#include <bcmendian.h>
#include <proto/ethernet.h>
#include <proto/bcmeth.h>
#include <proto/bcmevent.h>
BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
BCMEVENT_NAME(WLC_E_ROAM_PREP),
BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
#if defined(IBSS_PEER_DISCOVERY_EVENT)
BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
BCMEVENT_NAME(WLC_E_RADIO),
BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
-#if defined(BCMCCX) && defined(CCX_SDK)
- BCMEVENT_NAME(WLC_E_CCX_ASSOC_START),
- BCMEVENT_NAME(WLC_E_CCX_ASSOC_ABORT),
-#endif /* BCMCCX && CCX_SDK */
BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
BCMEVENT_NAME(WLC_E_PSK_SUP),
BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
BCMEVENT_NAME(WLC_E_TRACE),
-#ifdef WLBTAMP
- BCMEVENT_NAME(WLC_E_BTA_HCI_EVENT),
-#endif
BCMEVENT_NAME(WLC_E_IF),
#ifdef WLP2P
BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
#endif
BCMEVENT_NAME(WLC_E_RSSI),
- BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
-#ifdef WIFI_ACT_FRAME
BCMEVENT_NAME(WLC_E_ACTION_FRAME),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
-#endif
-#ifdef BCMWAPI_WAI
- BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
- BCMEVENT_NAME(WLC_E_WAI_MSG),
-#endif /* BCMWAPI_WAI */
BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
#ifdef WLP2P
#ifdef PROP_TXSTATUS
BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
#endif
+ BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
-#ifdef WLAIBSS
- BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL),
-#endif /* WLAIBSS */
+#ifdef GSCAN_SUPPORT
+ BCMEVENT_NAME(WLC_E_PFN_GSCAN_FULL_RESULT),
+ BCMEVENT_NAME(WLC_E_PFN_SWC),
+#endif /* GSCAN_SUPPORT */
#ifdef WLBSSLOAD_REPORT
BCMEVENT_NAME(WLC_E_BSS_LOAD),
#endif
#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
#endif
-#ifdef WLFBT
- BCMEVENT_NAME(WLC_E_FBT_AUTH_REQ_IND),
-#endif /* WLFBT */
+ BCMEVENT_NAME(WLC_E_AUTHORIZED),
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
+ BCMEVENT_NAME(WLC_E_CSA_START_IND),
+ BCMEVENT_NAME(WLC_E_CSA_DONE_IND),
+ BCMEVENT_NAME(WLC_E_CSA_FAILURE_IND),
BCMEVENT_NAME(WLC_E_RMC_EVENT),
+ BCMEVENT_NAME(WLC_E_DPSTA_INTF_IND),
};
*/
return ((event_name) ? event_name : "Unknown Event");
}
+
+void
+wl_event_to_host_order(wl_event_msg_t * evt)
+{
+ /* Event struct members passed from dongle to host are stored in network
+ * byte order. Convert all members to host-order.
+ */
+ evt->event_type = ntoh32(evt->event_type);
+ evt->flags = ntoh16(evt->flags);
+ evt->status = ntoh32(evt->status);
+ evt->reason = ntoh32(evt->reason);
+ evt->auth_type = ntoh32(evt->auth_type);
+ evt->datalen = ntoh32(evt->datalen);
+ evt->version = ntoh16(evt->version);
+}
+
+void
+wl_event_to_network_order(wl_event_msg_t * evt)
+{
+	/* Event struct members are carried over the dongle-host interface in network
+	 * byte order. Convert all members back to network order.
+	 */
+ evt->event_type = hton32(evt->event_type);
+ evt->flags = hton16(evt->flags);
+ evt->status = hton32(evt->status);
+ evt->reason = hton32(evt->reason);
+ evt->auth_type = hton32(evt->auth_type);
+ evt->datalen = hton32(evt->datalen);
+ evt->version = hton16(evt->version);
+}
* BCMSDH interface glue
* implement bcmsdh API for SDIOH driver
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh.c 450676 2014-01-22 22:45:13Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh.c 514727 2014-11-12 03:02:48Z $
*/
/**
/* local copy of bcm sd handler */
bcmsdh_info_t * l_bcmsdh = NULL;
-#if 0 && (NDISVER < 0x0630)
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
-#endif
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN)
extern int
sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
if (bcmsdh != NULL) {
-#if 0 && (NDISVER < 0x0630)
- if (bcmsdh->sdioh)
- sdioh_detach(osh, bcmsdh->sdioh);
-#endif
MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
}
return sdioh_gpioout(sd, gpio, enab);
}
+
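+/* Pass the requested mode straight through to the SDIOH layer (sdioh_set_mode). */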
+uint
+bcmsdh_set_mode(void *sdh, uint mode)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ return (sdioh_set_mode(bcmsdh->sdioh, mode));
+}
+
+#if defined(SWTXGLOM)
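+/* Send a software tx-glom buffer to the dongle over SDIO using PIO; asynchronous
+ * requests are not supported.
+ */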
+int
+bcmsdh_send_swtxglom_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ SDIOH_API_RC status;
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ ASSERT(bcmsdh);
+ ASSERT(bcmsdh->init_success);
+
+ BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ __FUNCTION__, fn, addr, nbytes));
+
+ /* Async not implemented yet */
+ ASSERT(!(flags & SDIO_REQ_ASYNC));
+ if (flags & SDIO_REQ_ASYNC)
+ return BCME_UNSUPPORTED;
+
+ if ((err = bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)))
+ return err;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = sdioh_request_swtxglom_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
+ SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
+
+ return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
+}
+
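+/* Thin wrappers that forward software tx-glom bookkeeping to the SDIOH layer. */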
+void
+bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ sdioh_glom_post(bcmsdh->sdioh, frame, pkt, len);
+}
+
+void
+bcmsdh_glom_clear(void *sdh)
+{
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
+ sdioh_glom_clear(bcmsdh->sdioh);
+}
+#endif /* SWTXGLOM */
/*
* SDIO access interface for drivers - linux specific (pci only)
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_linux.c 461444 2014-03-12 02:55:28Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh_linux.c 514727 2014-11-12 03:02:48Z $
*/
/**
free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
bcmsdh_osinfo->oob_irq_registered = FALSE;
}
-#endif
+#endif
/* Module parameters specific to each host-controller driver */
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2014, Broadcom Corporation
+ * Copyright (C) 1999-2016, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc.c 459285 2014-03-03 02:54:39Z $
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc.c 591104 2015-10-07 04:45:18Z $
*/
#include <typedefs.h>
sdio_claim_host(sd->func[2]);
sd->client_block_size[2] = sd_f2_blocksize;
+ printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
sdio_release_host(sd->func[2]);
if (err_ret) {
/* Now set it */
si->client_block_size[func] = blksize;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ if (si->func[func] == NULL) {
+ sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ bcmerror = BCME_NORESOURCE;
+ break;
+ }
+ sdio_claim_host(si->func[func]);
+ bcmerror = sdio_set_block_size(si->func[func], blksize);
+ if (bcmerror)
+ sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
+ __FUNCTION__, func, blksize, bcmerror));
+ sdio_release_host(si->func[func]);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
break;
}
return bcmerror;
}
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
SDIOH_API_RC
sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
uint8 data;
if (enable)
+#ifdef HW_OOB_LOW_LEVEL
+ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+#else
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE | SDIO_SEPINT_ACT_HI;
+#endif
else
data = SDIO_SEPINT_ACT_HI; /* disable hw oob interrupt */
return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
+#if defined(SWTXGLOM)
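+/* Round a transfer length up for the SDIO host: reads and writes shorter than 32 bytes are
+ * padded to a 4-byte boundary, while writes larger than one block are padded up to a whole
+ * number of blocks (on MSM7x00A hosts a length ending on a 32-byte half block is bumped by
+ * another 32 bytes).
+ */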
+static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size)
+{
+ /* Align Patch */
+ if (!write || pkt_len < 32)
+ pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+ else if ((pkt_len > blk_size) && (pkt_len % blk_size)) {
+ if (func == SDIO_FUNC_2) {
+			sd_err(("%s: [%s] dhd_sdio must pad a %d byte packet"
+				" larger than the %d byte block size up to a block multiple\n",
+ __FUNCTION__, write ? "W" : "R", pkt_len, blk_size));
+ }
+ pkt_len += blk_size - (pkt_len % blk_size);
+ }
+#ifdef CONFIG_MMC_MSM7X00A
+ if ((pkt_len % 64) == 32) {
+ sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
+ pkt_len += 32;
+ }
+#endif /* CONFIG_MMC_MSM7X00A */
+ return pkt_len;
+}
+
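+/* Queue one packet at the tail of the software tx-glom chain kept in sd->glom_info; the
+ * chain is later transferred in one go by sdioh_request_swtxglom_packet().
+ */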
+void
+sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
+{
+ void *phead = sd->glom_info.glom_pkt_head;
+ void *ptail = sd->glom_info.glom_pkt_tail;
+
+ BCM_REFERENCE(frame);
+
+ ASSERT(!PKTLINK(pkt));
+ if (!phead) {
+ ASSERT(!phead);
+ sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt;
+ }
+ else {
+ ASSERT(ptail);
+ PKTSETNEXT(sd->osh, ptail, pkt);
+ sd->glom_info.glom_pkt_tail = pkt;
+ }
+ sd->glom_info.count++;
+}
+
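+/* Unlink every packet queued by sdioh_glom_post() and reset the glom head/tail/count;
+ * the packets themselves are not freed here, only the chain links are cleared.
+ */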
+void
+sdioh_glom_clear(sdioh_info_t *sd)
+{
+ void *pnow, *pnext;
+
+ pnext = sd->glom_info.glom_pkt_head;
+
+ if (!pnext) {
+ sd_err(("sdioh_glom_clear: no first packet to clear!\n"));
+ return;
+ }
+
+ while (pnext) {
+ pnow = pnext;
+ pnext = PKTNEXT(sd->osh, pnow);
+ PKTSETNEXT(sd->osh, pnow, NULL);
+ sd->glom_info.count--;
+ }
+
+ sd->glom_info.glom_pkt_head = NULL;
+ sd->glom_info.glom_pkt_tail = NULL;
+ if (sd->glom_info.count != 0) {
+ sd_err(("sdioh_glom_clear: glom count mismatch!\n"));
+ sd->glom_info.count = 0;
+ }
+}
+
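+/* Move a packet (or a tx-glommed chain) over the bus for the SWTXGLOM path: whole blocks
+ * go out as a hand-built scatter-gather CMD53, and any leftover bytes are transferred with
+ * the kernel's sdio_memcpy_toio/sdio_readsb/sdio_memcpy_fromio helpers.
+ */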
+static SDIOH_API_RC
+sdioh_request_swtxglom_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, void *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ uint32 SGCount = 0;
+ int err_ret = 0;
+ void *pnext;
+ uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
+ uint blk_num;
+ int blk_size;
+ struct mmc_request mmc_req;
+ struct mmc_command mmc_cmd;
+ struct mmc_data mmc_dat;
+#ifdef BCMSDIOH_TXGLOM
+ uint8 *localbuf = NULL;
+ uint local_plen = 0;
+ bool need_txglom = write &&
+ (pkt == sd->glom_info.glom_pkt_tail) &&
+ (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail);
+#endif /* BCMSDIOH_TXGLOM */
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(pkt);
+ DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ ttl_len = xfred_len = 0;
+#ifdef BCMSDIOH_TXGLOM
+ if (need_txglom) {
+ pkt = sd->glom_info.glom_pkt_head;
+ }
+#endif /* BCMSDIOH_TXGLOM */
+
+ /* at least 4 bytes alignment of skb buff is guaranteed */
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
+ ttl_len += PKTLEN(sd->osh, pnext);
+
+ blk_size = sd->client_block_size[func];
+ if (((!write && sd->use_rxchain) ||
+#ifdef BCMSDIOH_TXGLOM
+ (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) ||
+#endif
+ 0) && (ttl_len >= blk_size)) {
+ blk_num = ttl_len / blk_size;
+ dma_len = blk_num * blk_size;
+ } else {
+ blk_num = 0;
+ dma_len = 0;
+ }
+
+ lft_len = ttl_len - dma_len;
+
+ sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
+ __FUNCTION__, write ? "W" : "R",
+ ttl_len, func, addr, blk_num, lft_len));
+
+ if (0 != dma_len) {
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+ /* Set up DMA descriptors */
+ for (pnext = pkt;
+ pnext && dma_len;
+ pnext = PKTNEXT(sd->osh, pnext)) {
+ pkt_len = PKTLEN(sd->osh, pnext);
+
+ if (dma_len > pkt_len)
+ dma_len -= pkt_len;
+ else {
+ pkt_len = xfred_len = dma_len;
+ dma_len = 0;
+ pkt = pnext;
+ }
+
+ sg_set_buf(&sd->sg_list[SGCount++],
+ (uint8*)PKTDATA(sd->osh, pnext),
+ pkt_len);
+
+ if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
+ sd_err(("%s: sg list entries exceed limit\n",
+ __FUNCTION__));
+ return (SDIOH_API_RC_FAIL);
+ }
+ }
+
+ mmc_dat.sg = sd->sg_list;
+ mmc_dat.sg_len = SGCount;
+ mmc_dat.blksz = blk_size;
+ mmc_dat.blocks = blk_num;
+ mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+
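+		/* CMD53 argument layout: bit 31 R/W, bits 30:28 function number, bit 27 block
+		 * mode, bit 26 incrementing address (0 for FIFO), bits 25:9 register address,
+		 * bits 8:0 block count.
+		 */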
+ mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+ mmc_cmd.arg = write ? 1<<31 : 0;
+ mmc_cmd.arg |= (func & 0x7) << 28;
+ mmc_cmd.arg |= 1<<27;
+ mmc_cmd.arg |= fifo ? 0 : 1<<26;
+ mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
+ mmc_cmd.arg |= blk_num & 0x1FF;
+ mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &mmc_cmd;
+ mmc_req.data = &mmc_dat;
+
+ sdio_claim_host(sd->func[func]);
+ mmc_set_data_timeout(&mmc_dat, sd->func[func]->card);
+ mmc_wait_for_req(sd->func[func]->card->host, &mmc_req);
+ sdio_release_host(sd->func[func]);
+
+ err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
+ if (0 != err_ret) {
+ sd_err(("%s:CMD53 %s failed with code %d\n",
+ __FUNCTION__,
+ write ? "write" : "read",
+ err_ret));
+ }
+ if (!fifo) {
+ addr = addr + ttl_len - lft_len - dma_len;
+ }
+ }
+
+ /* PIO mode */
+ if (0 != lft_len) {
+ /* Claim host controller */
+ sdio_claim_host(sd->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
+ xfred_len;
+ uint pad = 0;
+ pkt_len = PKTLEN(sd->osh, pnext);
+ if (0 != xfred_len) {
+ pkt_len -= xfred_len;
+ xfred_len = 0;
+ }
+#ifdef BCMSDIOH_TXGLOM
+ if (need_txglom) {
+ if (!localbuf) {
+ uint prev_lft_len = lft_len;
+ lft_len = sdioh_request_packet_align(lft_len, write,
+ func, blk_size);
+
+ if (lft_len > prev_lft_len) {
+ sd_err(("%s: padding is unexpected! lft_len %d,"
+ " prev_lft_len %d %s\n",
+ __FUNCTION__, lft_len, prev_lft_len,
+ write ? "Write" : "Read"));
+ }
+
+ localbuf = (uint8 *)MALLOC(sd->osh, lft_len);
+ if (localbuf == NULL) {
+ sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+ __FUNCTION__, (write) ? "TX" : "RX"));
+ need_txglom = FALSE;
+ goto txglomfail;
+ }
+ }
+ bcopy(buf, (localbuf + local_plen), pkt_len);
+ local_plen += pkt_len;
+
+ if (PKTNEXT(sd->osh, pnext)) {
+ continue;
+ }
+
+ buf = localbuf;
+ pkt_len = local_plen;
+ }
+
+txglomfail:
+#endif /* BCMSDIOH_TXGLOM */
+
+ if (
+#ifdef BCMSDIOH_TXGLOM
+ !need_txglom &&
+#endif
+ TRUE) {
+ pkt_len = sdioh_request_packet_align(pkt_len, write,
+ func, blk_size);
+
+ pad = pkt_len - PKTLEN(sd->osh, pnext);
+
+ if (pad > 0) {
+ if (func == SDIO_FUNC_2) {
+ sd_err(("%s: padding is unexpected! pkt_len %d,"
+ " PKTLEN %d lft_len %d %s\n",
+ __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext),
+ lft_len, write ? "Write" : "Read"));
+ }
+ if (PKTTAILROOM(sd->osh, pkt) < pad) {
+ sd_info(("%s: insufficient tailroom %d, pad %d,"
+ " lft_len %d pktlen %d, func %d %s\n",
+ __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt),
+ pad, lft_len, PKTLEN(sd->osh, pnext), func,
+ write ? "W" : "R"));
+ if (PKTPADTAILROOM(sd->osh, pkt, pad)) {
+ sd_err(("%s: padding error size %d.\n",
+ __FUNCTION__, pad));
+ return SDIOH_API_RC_FAIL;
+ }
+ }
+ }
+ }
+
+ if ((write) && (!fifo))
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
+ else if (write)
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
+ else if (fifo)
+ err_ret = sdio_readsb(
+ sd->func[func],
+ buf, addr, pkt_len);
+ else
+ err_ret = sdio_memcpy_fromio(
+ sd->func[func],
+ buf, addr, pkt_len);
+
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, SGCount, addr, pkt_len));
+
+ if (!fifo)
+ addr += pkt_len;
+ SGCount ++;
+ }
+ sdio_release_host(sd->func[func]);
+ }
+#ifdef BCMSDIOH_TXGLOM
+ if (localbuf)
+ MFREE(sd->osh, localbuf, lft_len);
+#endif /* BCMSDIOH_TXGLOM */
+
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up so that in the
+ * end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer, and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
+ * then all the packets in the chain must be properly aligned. If the packet data is not
+ * aligned, then there may only be one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+extern SDIOH_API_RC
+sdioh_request_swtxglom_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ SDIOH_API_RC Status;
+ void *tmppkt;
+ void *orig_buf = NULL;
+ uint copylen = 0;
+
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+
+ DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+
+ if (pkt == NULL) {
+ /* Case 1: we don't have a packet. */
+ orig_buf = buffer;
+ copylen = buflen_u;
+ } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) {
+ /* Case 2: We have a packet, but it is unaligned.
+ * in this case, we cannot have a chain.
+ */
+ ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+
+ orig_buf = PKTDATA(sd->osh, pkt);
+ copylen = PKTLEN(sd->osh, pkt);
+ }
+
+ tmppkt = pkt;
+ if (copylen) {
+ tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE);
+ if (tmppkt == NULL) {
+ sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen));
+ return SDIOH_API_RC_FAIL;
+ }
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen);
+ }
+
+ Status = sdioh_request_swtxglom_packet(sd, fix_inc, write, func, addr, tmppkt);
+
+ if (copylen) {
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write)
+ bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt));
+ PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+ }
+
+ return (Status);
+}
+#endif /* SWTXGLOM */
+
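+/* Select the tx-glom flavour used by the glom code paths: SDPCM_TXGLOM_CPY copies the chain
+ * into one contiguous buffer, SDPCM_TXGLOM_MDESC sends it as multiple DMA descriptors; any
+ * other value leaves the current mode unchanged.
+ */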
+uint
+sdioh_set_mode(sdioh_info_t *sd, uint mode)
+{
+ if (mode == SDPCM_TXGLOM_CPY)
+ sd->txglom_mode = mode;
+ else if (mode == SDPCM_TXGLOM_MDESC)
+ sd->txglom_mode = mode;
+	printf("%s: set txglom_mode to %s\n", __FUNCTION__,
+		mode == SDPCM_TXGLOM_MDESC ? "multi-desc" : "copy");
+
+ return (sd->txglom_mode);
+}
+
extern SDIOH_API_RC
sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
uint32 *word, uint nbytes)
if (err_ret)
#endif /* MMC_SDIO_ABORT */
{
- sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x\n",
- rw ? "Write" : "Read", err_ret));
+ sd_err(("bcmsdh_sdmmc: Failed to %s word F%d:@0x%05x=%02x, Err: 0x%08x\n",
+ rw ? "Write" : "Read", func, addr, *word, err_ret));
}
}
uint32 sg_count;
struct sdio_func *sdio_func = sd->func[func];
struct mmc_host *host = sdio_func->card->host;
+#ifdef BCMSDIOH_TXGLOM
+ uint8 *localbuf = NULL;
+ uint local_plen = 0;
+ uint pkt_len = 0;
+#endif /* BCMSDIOH_TXGLOM */
sd_trace(("%s: Enter\n", __FUNCTION__));
ASSERT(pkt);
pkt_offset = 0;
pnext = pkt;
+#ifdef BCMSDIOH_TXGLOM
+ ttl_len = 0;
+ sg_count = 0;
+	if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
+#endif
while (pnext != NULL) {
ttl_len = 0;
sg_count = 0;
return SDIOH_API_RC_FAIL;
}
}
+#ifdef BCMSDIOH_TXGLOM
+	} else if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
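+		/* Copy mode: gather the whole packet chain into one contiguous local buffer
+		 * and issue it as a single transfer.
+		 */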
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ ttl_len += PKTLEN(sd->osh, pnext);
+ }
+ /* Claim host controller */
+ sdio_claim_host(sd->func[func]);
+ for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
+ uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext);
+ pkt_len = PKTLEN(sd->osh, pnext);
+
+ if (!localbuf) {
+ localbuf = (uint8 *)MALLOC(sd->osh, ttl_len);
+ if (localbuf == NULL) {
+ sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
+ __FUNCTION__, (write) ? "TX" : "RX"));
+ goto txglomfail;
+ }
+ }
+
+ bcopy(buf, (localbuf + local_plen), pkt_len);
+ local_plen += pkt_len;
+ if (PKTNEXT(sd->osh, pnext))
+ continue;
+
+ buf = localbuf;
+ pkt_len = local_plen;
+txglomfail:
+ /* Align Patch */
+ if (!write || pkt_len < 32)
+ pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
+ else if (pkt_len % blk_size)
+ pkt_len += blk_size - (pkt_len % blk_size);
+
+ if ((write) && (!fifo))
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
+ else if (write)
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
+ else if (fifo)
+ err_ret = sdio_readsb(
+ sd->func[func],
+ buf, addr, pkt_len);
+ else
+ err_ret = sdio_memcpy_fromio(
+ sd->func[func],
+ buf, addr, pkt_len);
+
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, sg_count, addr, pkt_len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ __FUNCTION__,
+ (write) ? "TX" : "RX",
+ pnext, sg_count, addr, pkt_len));
+
+ if (!fifo)
+ addr += pkt_len;
+ sg_count ++;
+ }
+ sdio_release_host(sd->func[func]);
+ } else {
+ sd_err(("%s: set to wrong glom mode %d\n", __FUNCTION__, sd->txglom_mode));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (localbuf)
+ MFREE(sd->osh, localbuf, ttl_len);
+#endif /* BCMSDIOH_TXGLOM */
sd_trace(("%s: Exit\n", __FUNCTION__));
return SDIOH_API_RC_SUCCESS;
sdio_claim_host(sd->func[2]);
sd->client_block_size[2] = sd_f2_blocksize;
+ printf("%s: set sd_f2_blocksize %d\n", __FUNCTION__, sd_f2_blocksize);
ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
if (ret) {
sd_err(("bcmsdh_sdmmc: Failed to set F2 "
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2014, Broadcom Corporation
+ * Copyright (C) 1999-2016, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc_linux.c 434777 2013-11-07 09:30:27Z $
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc_linux.c 591173 2015-10-07 06:24:22Z $
*/
#include <typedefs.h>
if (func->num != 2)
return 0;
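+	/* Flag the suspend before touching the bus; every error path below clears it again. */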
+ dhd_mmc_suspend = TRUE;
sdioh = sdio_get_drvdata(func);
err = bcmsdh_suspend(sdioh->bcmsdh);
if (err) {
printf("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err);
+ dhd_mmc_suspend = FALSE;
return err;
}
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
+ dhd_mmc_suspend = FALSE;
return -EINVAL;
}
err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (err) {
sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
+ dhd_mmc_suspend = FALSE;
return err;
}
#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
#endif
- dhd_mmc_suspend = TRUE;
smp_mb();
printf("%s Exit\n", __FUNCTION__);
--- /dev/null
+/*
+ * Broadcom SPI Host Controller Driver - Linux Per-port
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi_linux.c 514727 2014-11-12 03:02:48Z $
+ */
+
+#include <typedefs.h>
+#include <bcmutils.h>
+
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* to get msglevel bit values */
+
+#include <pcicfg.h>
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+#include <linux/sched.h> /* request_irq(), free_irq() */
+#include <bcmsdspi.h>
+#include <bcmspi.h>
+
+extern uint sd_crc;
+module_param(sd_crc, uint, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define KERNEL26
+#endif
+
+struct sdos_info {
+ sdioh_info_t *sd;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+};
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define BLOCKABLE() (!in_atomic())
+#else
+#define BLOCKABLE() (!in_interrupt())
+#endif
+
+/* Interrupt handler */
+static irqreturn_t
+sdspi_isr(int irq, void *dev_id
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
+, struct pt_regs *ptregs
+#endif
+)
+{
+ sdioh_info_t *sd;
+ struct sdos_info *sdos;
+ bool ours;
+
+ sd = (sdioh_info_t *)dev_id;
+ sd->local_intrcount++;
+
+ if (!sd->card_init_done) {
+ sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq));
+ return IRQ_RETVAL(FALSE);
+ } else {
+ ours = spi_check_client_intr(sd, NULL);
+
+ /* For local interrupts, wake the waiting process */
+ if (ours && sd->got_hcint) {
+ sdos = (struct sdos_info *)sd->sdos_info;
+ wake_up_interruptible(&sdos->intr_wait_queue);
+ }
+
+ return IRQ_RETVAL(ours);
+ }
+}
+
+
+/* Register with Linux for interrupts */
+int
+spi_register_irq(sdioh_info_t *sd, uint irq)
+{
+ sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
+ if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
+ sd_err(("%s: request_irq() failed\n", __FUNCTION__));
+ return ERROR;
+ }
+ return SUCCESS;
+}
+
+/* Free Linux irq */
+void
+spi_free_irq(uint irq, sdioh_info_t *sd)
+{
+ free_irq(irq, sd);
+}
+
+/* Map Host controller registers */
+uint32 *
+spi_reg_map(osl_t *osh, uintptr addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+spi_reg_unmap(osl_t *osh, uintptr addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+}
+
+int
+spi_osinit(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
+ sd->sdos_info = (void*)sdos;
+ if (sdos == NULL)
+ return BCME_NOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
+ init_waitqueue_head(&sdos->intr_wait_queue);
+ return BCME_OK;
+}
+
+void
+spi_osfree(sdioh_info_t *sd)
+{
+ struct sdos_info *sdos;
+ ASSERT(sd && sd->sdos_info);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ MFREE(sd->osh, sdos, sizeof(struct sdos_info));
+}
+
+/* Interrupt enable/disable */
+SDIOH_API_RC
+sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ if (!(sd->host_init_done && sd->card_init_done)) {
+ sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable && !sd->lockcount)
+ spi_devintr_on(sd);
+ else
+ spi_devintr_off(sd);
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Protect against reentrancy (disable device interrupts while executing) */
+void
+spi_lock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (sd->lockcount) {
+ sd_err(("%s: Already locked!\n", __FUNCTION__));
+ ASSERT(sd->lockcount == 0);
+ }
+ spi_devintr_off(sd);
+ sd->lockcount++;
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+/* Enable client interrupt */
+void
+spi_unlock(sdioh_info_t *sd)
+{
+ ulong flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
+ ASSERT(sd->lockcount > 0);
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ ASSERT(sdos);
+
+ spin_lock_irqsave(&sdos->lock, flags);
+ if (--sd->lockcount == 0 && sd->client_intr_enabled) {
+ spi_devintr_on(sd);
+ }
+ spin_unlock_irqrestore(&sdos->lock, flags);
+}
+
+void spi_waitbits(sdioh_info_t *sd, bool yield)
+{
+#ifndef BCMSDYIELD
+ ASSERT(!yield);
+#endif
+ sd_trace(("%s: yield %d canblock %d\n",
+ __FUNCTION__, yield, BLOCKABLE()));
+
+ /* Clear the "interrupt happened" flag and last intrstatus */
+ sd->got_hcint = FALSE;
+
+#ifdef BCMSDYIELD
+ if (yield && BLOCKABLE()) {
+ struct sdos_info *sdos;
+ sdos = (struct sdos_info *)sd->sdos_info;
+ /* Wait for the indication, the interrupt will be masked when the ISR fires. */
+ wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
+ } else
+#endif /* BCMSDYIELD */
+ {
+ spi_spinbits(sd);
+ }
+
+}
--- /dev/null
+/*
+ * Broadcom BCMSDH to gSPI Protocol Conversion Layer
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspibrcm.c 591086 2015-10-07 02:51:01Z $
+ */
+
+#define HSMODE
+
+#include <typedefs.h>
+
+#include <bcmdevs.h>
+#include <bcmendian.h>
+#include <bcmutils.h>
+#include <osl.h>
+#include <hndsoc.h>
+#include <siutils.h>
+#include <sbchipc.h>
+#include <sbsdio.h> /* SDIO device core hardware definitions. */
+#include <spid.h>
+
+#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
+#include <sdiovar.h> /* ioctl/iovars */
+#include <sdio.h> /* SDIO Device and Protocol Specs */
+
+#include <pcicfg.h>
+
+
+#include <bcmspibrcm.h>
+#include <bcmspi.h>
+
+/* these are for the older cores... for newer cores we have control for each of them */
+#define F0_RESPONSE_DELAY 16
+#define F1_RESPONSE_DELAY 16
+#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY
+
+
+#define GSPI_F0_RESP_DELAY 0
+#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY
+#define GSPI_F2_RESP_DELAY 0
+#define GSPI_F3_RESP_DELAY 0
+
+#define CMDLEN 4
+
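+/* True only for BCM4329 rev 2 with dwordmode enabled. */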
+#define DWORDMODE_ON (sd->chip == BCM4329_CHIP_ID) && (sd->chiprev == 2) && (sd->dwordmode == TRUE)
+
+/* Globals */
+#if defined(DHD_DEBUG)
+uint sd_msglevel = SDH_ERROR_VAL;
+#else
+uint sd_msglevel = 0;
+#endif
+
+uint sd_hiok = FALSE; /* Use hi-speed mode if available? */
+uint sd_sdmode = SDIOH_MODE_SPI;		/* Use SPI mode by default */
+uint sd_f2_blocksize = 64; /* Default blocksize */
+
+
+uint sd_divisor = 2;
+uint sd_power = 1; /* Default to SD Slot powered ON */
+uint sd_clock = 1; /* Default to SD Clock turned ON */
+uint sd_crc = 0; /* Default to SPI CRC Check turned OFF */
+uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */
+
+uint8 spi_outbuf[SPI_MAX_PKT_LEN];
+uint8 spi_inbuf[SPI_MAX_PKT_LEN];
+
+/* A 128-byte buffer is enough to clear data-not-available and to program the response-delay
+ * F0 bits, assuming the F0 response delay does not exceed 100 bytes at 48MHz.
+ */
+#define BUF2_PKT_LEN 128
+uint8 spi_outbuf2[BUF2_PKT_LEN];
+uint8 spi_inbuf2[BUF2_PKT_LEN];
+
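+/* Byte-swap helpers for the two gSPI word-length modes: WD4 swaps a whole 32-bit word
+ * (32-bit spid), WD2 swaps each 16-bit half independently (16-bit spid).
+ */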
+#define SPISWAP_WD4(x) bcmswap32(x);
+#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
+ (bcmswap16((x & 0xffff0000) >> 16) << 16);
+
+/* Prototypes */
+static bool bcmspi_test_card(sdioh_info_t *sd);
+static bool bcmspi_host_device_init_adapt(sdioh_info_t *sd);
+static int bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode);
+static int bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+ uint32 *data, uint32 datalen);
+static int bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 *data);
+static int bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ int regsize, uint32 data);
+static int bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr,
+ uint8 *data);
+static int bcmspi_driver_init(sdioh_info_t *sd);
+static int bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data);
+static int bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize,
+ uint32 *data);
+static void bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer);
+static int bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg);
+
+/*
+ * Public entry points & extern's
+ */
+extern sdioh_info_t *
+sdioh_attach(osl_t *osh, void *bar0, uint irq)
+{
+ sdioh_info_t *sd;
+
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
+ sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
+ return NULL;
+ }
+ bzero((char *)sd, sizeof(sdioh_info_t));
+ sd->osh = osh;
+ if (spi_osinit(sd) != 0) {
+ sd_err(("%s: spi_osinit() failed\n", __FUNCTION__));
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
+ }
+
+ sd->bar0 = bar0;
+ sd->irq = irq;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+ sd->intr_handler_valid = FALSE;
+
+ /* Set defaults */
+ sd->use_client_ints = TRUE;
+ sd->sd_use_dma = FALSE; /* DMA Not supported */
+
+	/* The SPI device defaults to 16-bit mode; wordlen is set to 4 (bytes) once the
+	 * device is switched to 32-bit mode.
+ */
+ sd->wordlen = 2;
+
+
+ if (!spi_hw_attach(sd)) {
+ sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ if (bcmspi_driver_init(sd) != SUCCESS) {
+ sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ if (spi_register_irq(sd, irq) != SUCCESS) {
+ sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return (NULL);
+ }
+
+ sd_trace(("%s: Done\n", __FUNCTION__));
+
+ return sd;
+}
+
+extern SDIOH_API_RC
+sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if (sd) {
+ sd_err(("%s: detaching from hardware\n", __FUNCTION__));
+ spi_free_irq(sd->irq, sd);
+ spi_hw_detach(sd);
+ spi_osfree(sd);
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ }
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* Configure callback to client when we receive client interrupt */
+extern SDIOH_API_RC
+sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler = fn;
+ sd->intr_handler_arg = argh;
+ sd->intr_handler_valid = TRUE;
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_deregister(sdioh_info_t *sd)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+#if !defined(OOB_INTR_ONLY)
+ sd->intr_handler_valid = FALSE;
+ sd->intr_handler = NULL;
+ sd->intr_handler_arg = NULL;
+#endif /* !defined(OOB_INTR_ONLY) */
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
+{
+ sd_trace(("%s: Entering\n", __FUNCTION__));
+ *onoff = sd->client_intr_enabled;
+ return SDIOH_API_RC_SUCCESS;
+}
+
+#if defined(DHD_DEBUG)
+extern bool
+sdioh_interrupt_pending(sdioh_info_t *sd)
+{
+ return 0;
+}
+#endif
+
+extern SDIOH_API_RC
+sdioh_query_device(sdioh_info_t *sd)
+{
+ /* Return a BRCM ID appropriate to the dongle class */
+ return (sd->num_funcs > 1) ? BCM4329_D11N_ID : BCM4318_D11G_ID;
+}
+
+/* Provide dstatus bits of spi-transaction for dhd layers. */
+extern uint32
+sdioh_get_dstatus(sdioh_info_t *sd)
+{
+ return sd->card_dstatus;
+}
+
+extern void
+sdioh_chipinfo(sdioh_info_t *sd, uint32 chip, uint32 chiprev)
+{
+ sd->chip = chip;
+ sd->chiprev = chiprev;
+}
+
+extern void
+sdioh_dwordmode(sdioh_info_t *sd, bool set)
+{
+ uint8 reg = 0;
+ int status;
+
+	if ((status = sdioh_request_byte(sd, SDIOH_READ, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+ SUCCESS) {
+ sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+ return;
+ }
+
+ if (set) {
+ reg |= DWORD_PKT_LEN_EN;
+ sd->dwordmode = TRUE;
+ sd->client_block_size[SPI_FUNC_2] = 4096; /* h2spi's limit is 4KB, we support 8KB */
+ } else {
+ reg &= ~DWORD_PKT_LEN_EN;
+ sd->dwordmode = FALSE;
+ sd->client_block_size[SPI_FUNC_2] = 2048;
+ }
+
+	if ((status = sdioh_request_byte(sd, SDIOH_WRITE, SPI_FUNC_0, SPID_STATUS_ENABLE, &reg)) !=
+ SUCCESS) {
+ sd_err(("%s: Failed to set dwordmode in gSPI\n", __FUNCTION__));
+ return;
+ }
+}
+
+
+uint
+sdioh_query_iofnum(sdioh_info_t *sd)
+{
+ return sd->num_funcs;
+}
+
+/* IOVar table */
+enum {
+ IOV_MSGLEVEL = 1,
+ IOV_BLOCKMODE,
+ IOV_BLOCKSIZE,
+ IOV_DMA,
+ IOV_USEINTS,
+ IOV_NUMINTS,
+ IOV_NUMLOCALINTS,
+ IOV_HOSTREG,
+ IOV_DEVREG,
+ IOV_DIVISOR,
+ IOV_SDMODE,
+ IOV_HISPEED,
+ IOV_HCIREGS,
+ IOV_POWER,
+ IOV_CLOCK,
+ IOV_SPIERRSTATS,
+ IOV_RESP_DELAY_ALL
+};
+
+const bcm_iovar_t sdioh_iovars[] = {
+ {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
+ {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
+ {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0 },
+ {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 },
+ {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 },
+ {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 },
+ {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 },
+ {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 },
+ {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 },
+ {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100},
+ {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0},
+ {"spi_errstats", IOV_SPIERRSTATS, 0, IOVT_BUFFER, sizeof(struct spierrstats_t) },
+ {"spi_respdelay", IOV_RESP_DELAY_ALL, 0, IOVT_BOOL, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+int
+sdioh_iovar_op(sdioh_info_t *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ int32 int_val = 0;
+ bool bool_val;
+ uint32 actionid;
+/*
+ sdioh_regs_t *regs;
+*/
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get must have return space; Set does not take qualifiers */
+ ASSERT(set || (arg && len));
+ ASSERT(!set || (!params && !plen));
+
+ sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));
+
+ if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
+ bcmerror = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
+ goto exit;
+
+ /* Set up params so get and set can share the convenience variables */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ val_size = sizeof(int);
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ switch (actionid) {
+ case IOV_GVAL(IOV_MSGLEVEL):
+ int_val = (int32)sd_msglevel;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_MSGLEVEL):
+ sd_msglevel = int_val;
+ break;
+
+ case IOV_GVAL(IOV_BLOCKSIZE):
+ if ((uint32)int_val > si->num_funcs) {
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = (int32)si->client_block_size[int_val];
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_DMA):
+ int_val = (int32)si->sd_use_dma;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DMA):
+ si->sd_use_dma = (bool)int_val;
+ break;
+
+ case IOV_GVAL(IOV_USEINTS):
+ int_val = (int32)si->use_client_ints;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_USEINTS):
+ break;
+
+ case IOV_GVAL(IOV_DIVISOR):
+ int_val = (uint32)sd_divisor;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DIVISOR):
+ sd_divisor = int_val;
+ if (!spi_start_clock(si, (uint16)sd_divisor)) {
+ sd_err(("%s: set clock failed\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_POWER):
+ int_val = (uint32)sd_power;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_POWER):
+ sd_power = int_val;
+ break;
+
+ case IOV_GVAL(IOV_CLOCK):
+ int_val = (uint32)sd_clock;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_CLOCK):
+ sd_clock = int_val;
+ break;
+
+ case IOV_GVAL(IOV_SDMODE):
+ int_val = (uint32)sd_sdmode;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SDMODE):
+ sd_sdmode = int_val;
+ break;
+
+ case IOV_GVAL(IOV_HISPEED):
+ int_val = (uint32)sd_hiok;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_HISPEED):
+ sd_hiok = int_val;
+
+ if (!bcmspi_set_highspeed_mode(si, (bool)sd_hiok)) {
+ sd_err(("%s: Failed changing highspeed mode to %d.\n",
+ __FUNCTION__, sd_hiok));
+ bcmerror = BCME_ERROR;
+ return ERROR;
+ }
+ break;
+
+ case IOV_GVAL(IOV_NUMINTS):
+ int_val = (int32)si->intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_NUMLOCALINTS):
+ int_val = (int32)si->local_intrcount;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_GVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data;
+
+ if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+
+ int_val = (int)data;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+
+ case IOV_SVAL(IOV_DEVREG):
+ {
+ sdreg_t *sd_ptr = (sdreg_t *)params;
+ uint8 data = (uint8)sd_ptr->value;
+
+ if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+ }
+
+
+ case IOV_GVAL(IOV_SPIERRSTATS):
+ {
+ bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SPIERRSTATS):
+ {
+ bzero(&si->spierrstats, sizeof(struct spierrstats_t));
+ break;
+ }
+
+ case IOV_GVAL(IOV_RESP_DELAY_ALL):
+ int_val = (int32)si->resp_delay_all;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RESP_DELAY_ALL):
+ si->resp_delay_all = (bool)int_val;
+ int_val = STATUS_ENABLE|INTR_WITH_STATUS;
+ if (si->resp_delay_all)
+ int_val |= RESP_DELAY_ALL;
+ else {
+ if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_RESPONSE_DELAY, 1,
+ F1_RESPONSE_DELAY) != SUCCESS) {
+ sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ }
+
+ if (bcmspi_card_regwrite(si, SPI_FUNC_0, SPID_STATUS_ENABLE, 1, int_val)
+ != SUCCESS) {
+ sd_err(("%s: Unable to set response delay.\n", __FUNCTION__));
+ bcmerror = BCME_SDIO_ERROR;
+ break;
+ }
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+exit:
+
+ return bcmerror;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ SDIOH_API_RC status;
+ /* No lock needed since sdioh_request_byte does locking */
+ status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
+{
+ /* No lock needed since sdioh_request_byte does locking */
+ SDIOH_API_RC status;
+
+ if ((fnc_num == SPI_FUNC_1) && (addr == SBSDIO_FUNC1_FRAMECTRL)) {
+ uint8 dummy_data;
+ status = sdioh_cfg_read(sd, fnc_num, addr, &dummy_data);
+ if (status) {
+ sd_err(("sdioh_cfg_read() failed.\n"));
+ return status;
+ }
+ }
+
+ status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ return status;
+}
+
+extern SDIOH_API_RC
+sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
+{
+ uint32 count;
+ int offset;
+ uint32 cis_byte;
+ uint16 *cis = (uint16 *)cisd;
+ uint bar0 = SI_ENUM_BASE;
+ int status;
+ uint8 data;
+
+ sd_trace(("%s: Func %d\n", __FUNCTION__, func));
+
+ spi_lock(sd);
+
+ /* Set sb window address to 0x18000000 */
+ data = (bar0 >> 8) & SBSDIO_SBADDRLOW_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, &data);
+ if (status == SUCCESS) {
+ data = (bar0 >> 16) & SBSDIO_SBADDRMID_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID, &data);
+ } else {
+ sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+ if (status == SUCCESS) {
+ data = (bar0 >> 24) & SBSDIO_SBADDRHIGH_MASK;
+ status = bcmspi_card_bytewrite(sd, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH, &data);
+ } else {
+ sd_err(("%s: Unable to set sb-addr-windows\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+
+ offset = CC_SROM_OTP; /* OTP offset in chipcommon. */
+ for (count = 0; count < length/2; count++) {
+ if (bcmspi_card_regread (sd, SDIO_FUNC_1, offset, 2, &cis_byte) < 0) {
+ sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
+ spi_unlock(sd);
+ return (BCME_ERROR);
+ }
+
+ *cis = (uint16)cis_byte;
+ cis++;
+ offset += 2;
+ }
+
+ spi_unlock(sd);
+
+ return (BCME_OK);
+}
+
+extern SDIOH_API_RC
+sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+ uint32 data = (uint32)(*byte);
+
+ spi_lock(sd);
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+ if (rw == SDIOH_READ) {
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr));
+ } else {
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, data));
+ }
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS) {
+ spi_unlock(sd);
+ return status;
+ }
+
+ if (rw == SDIOH_READ) {
+ *byte = (uint8)data;
+ sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *byte));
+ }
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus=0x%x\n", dstatus));
+
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+extern SDIOH_API_RC
+sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
+ uint32 *word, uint nbytes)
+{
+ int status;
+
+ spi_lock(sd);
+
+ if (rw == SDIOH_READ)
+ status = bcmspi_card_regread(sd, func, addr, nbytes, word);
+ else
+ status = bcmspi_card_regwrite(sd, func, addr, nbytes, *word);
+
+ spi_unlock(sd);
+ return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+}
+
+extern SDIOH_API_RC
+sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
+ uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+{
+ int len;
+ int buflen = (int)buflen_u;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+
+ spi_lock(sd);
+
+ ASSERT(reg_width == 4);
+ ASSERT(buflen_u < (1 << 30));
+ ASSERT(sd->client_block_size[func]);
+
+ sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
+ __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
+ buflen_u, sd->r_cnt, sd->t_cnt, pkt));
+
+ /* Break buffer down into blocksize chunks. */
+ while (buflen > 0) {
+ len = MIN(sd->client_block_size[func], buflen);
+ if (bcmspi_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
+ sd_err(("%s: bcmspi_card_buf %s failed\n",
+ __FUNCTION__, rw == SDIOH_READ ? "Read" : "Write"));
+ spi_unlock(sd);
+ return SDIOH_API_RC_FAIL;
+ }
+ buffer += len;
+ buflen -= len;
+ if (!fifo)
+ addr += len;
+ }
+ spi_unlock(sd);
+ return SDIOH_API_RC_SUCCESS;
+}
+
+/* This function allows write to gspi bus when another rd/wr function is deep down the call stack.
+ * Its main aim is to have simpler spi writes rather than recursive writes.
+ * e.g. When there is a need to program response delay on the fly after detecting the SPI-func
+ * this call will allow to program the response delay.
+ */
+static int
+bcmspi_card_byterewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 byte)
+{
+ uint32 cmd_arg;
+ uint32 datalen = 1;
+ uint32 hostlen;
+
+ cmd_arg = 0;
+
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, datalen);
+
+ sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+ datalen = ROUNDUP(datalen, sd->wordlen);
+
+ /* Start by copying command in the spi-outbuffer */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)spi_outbuf2 = SPISWAP_WD4(cmd_arg);
+ if (datalen & 0x3)
+ datalen += (4 - (datalen & 0x3));
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)spi_outbuf2 = SPISWAP_WD2(cmd_arg);
+ if (datalen & 0x1)
+ datalen++;
+ } else {
+ sd_err(("%s: Host is %d bit spid, could not create SPI command.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ /* for Write, put the data into the output buffer */
+ if (datalen != 0) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD4(byte);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = SPISWAP_WD2(byte);
+ }
+ }
+
+ /* +4 for cmd, +4 for dstatus */
+ hostlen = datalen + 8;
+ hostlen += (4 - (hostlen & 0x3));
+ spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, hostlen);
+
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else {
+ sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ if (sd->card_dstatus)
+ sd_trace(("dstatus after byte rewrite = 0x%x\n", sd->card_dstatus));
+
+ return (BCME_OK);
+}
+
+/* Program the response delay corresponding to the spi function */
+static int
+bcmspi_prog_resp_delay(sdioh_info_t *sd, int func, uint8 resp_delay)
+{
+ if (sd->resp_delay_all == FALSE)
+ return (BCME_OK);
+
+ if (sd->prev_fun == func)
+ return (BCME_OK);
+
+ if (F0_RESPONSE_DELAY == F1_RESPONSE_DELAY)
+ return (BCME_OK);
+
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_RESPONSE_DELAY, resp_delay);
+
+ /* Remember function for which to avoid reprogramming resp-delay in next iteration */
+ sd->prev_fun = func;
+
+ return (BCME_OK);
+
+}
+
+#define GSPI_RESYNC_PATTERN 0x0
+
+/* A resync pattern is a 32bit MOSI line with all zeros. It's a special command in gSPI.
+ * It resets the spi-bkplane logic so that all F1 related ping-pong buffer logic is
+ * synchronised and all queued requests are cancelled.
+ */
+static int
+bcmspi_resync_f1(sdioh_info_t *sd)
+{
+ uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ ASSERT(sd->wordlen == 4 || sd->wordlen == 2);
+ datalen = ROUNDUP(datalen, sd->wordlen);
+
+ /* Start by copying command in the spi-outbuffer */
+ *(uint32 *)spi_outbuf2 = cmd_arg;
+
+ /* for Write, put the data into the output buffer */
+ *(uint32 *)&spi_outbuf2[CMDLEN] = data;
+
+ /* +4 for cmd, +4 for dstatus */
+ spi_sendrecv(sd, spi_outbuf2, spi_inbuf2, datalen + 8);
+
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf2[datalen + CMDLEN ]);
+ } else {
+ sd_err(("%s: Host is %d bit machine, could not read SPI dstatus.\n",
+ __FUNCTION__, 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ if (sd->card_dstatus)
+ sd_trace(("dstatus after resync pattern write = 0x%x\n", sd->card_dstatus));
+
+ return (BCME_OK);
+}
+
+uint32 dstatus_count = 0;
+
+static int
+bcmspi_update_stats(sdioh_info_t *sd, uint32 cmd_arg)
+{
+ uint32 dstatus = sd->card_dstatus;
+ struct spierrstats_t *spierrstats = &sd->spierrstats;
+ int err = SUCCESS;
+
+ sd_trace(("cmd = 0x%x, dstatus = 0x%x\n", cmd_arg, dstatus));
+
+ /* Store dstatus of last few gSPI transactions */
+ spierrstats->dstatus[dstatus_count % NUM_PREV_TRANSACTIONS] = dstatus;
+ spierrstats->spicmd[dstatus_count % NUM_PREV_TRANSACTIONS] = cmd_arg;
+ dstatus_count++;
+
+ if (sd->card_init_done == FALSE)
+ return err;
+
+ if (dstatus & STATUS_DATA_NOT_AVAILABLE) {
+ spierrstats->dna++;
+ sd_trace(("Read data not available on F1 addr = 0x%x\n",
+ GFIELD(cmd_arg, SPI_REG_ADDR)));
+ /* Clear dna bit */
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, DATA_UNAVAILABLE);
+ }
+
+ if (dstatus & STATUS_UNDERFLOW) {
+ spierrstats->rdunderflow++;
+ sd_err(("FIFO underflow happened due to current F2 read command.\n"));
+ }
+
+ if (dstatus & STATUS_OVERFLOW) {
+ spierrstats->wroverflow++;
+ sd_err(("FIFO overflow happened due to current (F1/F2) write command.\n"));
+ bcmspi_card_byterewrite(sd, SPI_FUNC_0, SPID_INTR_REG, F1_OVERFLOW);
+ bcmspi_resync_f1(sd);
+ sd_err(("Recovering from F1 FIFO overflow.\n"));
+ }
+
+ if (dstatus & STATUS_F2_INTR) {
+ spierrstats->f2interrupt++;
+ sd_trace(("Interrupt from F2. SW should clear corresponding IntStatus bits\n"));
+ }
+
+ if (dstatus & STATUS_F3_INTR) {
+ spierrstats->f3interrupt++;
+ sd_err(("Interrupt from F3. SW should clear corresponding IntStatus bits\n"));
+ }
+
+ if (dstatus & STATUS_HOST_CMD_DATA_ERR) {
+ spierrstats->hostcmddataerr++;
+ sd_err(("Error in CMD or Host data, detected by CRC/Checksum (optional)\n"));
+ }
+
+ if (dstatus & STATUS_F2_PKT_AVAILABLE) {
+ spierrstats->f2pktavailable++;
+ sd_trace(("Packet is available/ready in F2 TX FIFO\n"));
+ sd_trace(("Packet length = %d\n", sd->dwordmode ?
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> (STATUS_F2_PKT_LEN_SHIFT - 2)) :
+ ((dstatus & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT)));
+ }
+
+ if (dstatus & STATUS_F3_PKT_AVAILABLE) {
+ spierrstats->f3pktavailable++;
+ sd_err(("Packet is available/ready in F3 TX FIFO\n"));
+ sd_err(("Packet length = %d\n",
+ (dstatus & STATUS_F3_PKT_LEN_MASK) >> STATUS_F3_PKT_LEN_SHIFT));
+ }
+
+ return err;
+}
+
+extern int
+sdioh_abort(sdioh_info_t *sd, uint func)
+{
+ return 0;
+}
+
+int
+sdioh_start(sdioh_info_t *sd, int stage)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_stop(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+int
+sdioh_waitlockfree(sdioh_info_t *sd)
+{
+ return SUCCESS;
+}
+
+
+/*
+ * Private/Static work routines
+ */
+static int
+bcmspi_host_init(sdioh_info_t *sd)
+{
+
+ /* Default power on mode */
+ sd->sd_mode = SDIOH_MODE_SPI;
+ sd->polled_mode = TRUE;
+ sd->host_init_done = TRUE;
+ sd->card_init_done = FALSE;
+ sd->adapter_slot = 1;
+
+ return (SUCCESS);
+}
+
+static int
+get_client_blocksize(sdioh_info_t *sd)
+{
+ uint32 regdata[2];
+ int status;
+
+ /* Find F1/F2/F3 max packet size */
+ if ((status = bcmspi_card_regread(sd, 0, SPID_F1_INFO_REG,
+ 8, regdata)) != SUCCESS) {
+ return status;
+ }
+
+ sd_trace(("pkt_size regdata[0] = 0x%x, regdata[1] = 0x%x\n",
+ regdata[0], regdata[1]));
+
+ sd->client_block_size[1] = (regdata[0] & F1_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func1 blocksize = %d\n", sd->client_block_size[1]));
+ ASSERT(sd->client_block_size[1] == BLOCK_SIZE_F1);
+
+ sd->client_block_size[2] = ((regdata[0] >> 16) & F2_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func2 blocksize = %d\n", sd->client_block_size[2]));
+ ASSERT(sd->client_block_size[2] == BLOCK_SIZE_F2);
+
+ sd->client_block_size[3] = (regdata[1] & F3_MAX_PKT_SIZE) >> 2;
+ sd_trace(("Func3 blocksize = %d\n", sd->client_block_size[3]));
+ ASSERT(sd->client_block_size[3] == BLOCK_SIZE_F3);
+
+ return 0;
+}
+
+static int
+bcmspi_client_init(sdioh_info_t *sd)
+{
+ uint32 status_en_reg = 0;
+ sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
+
+#ifdef HSMODE
+ if (!spi_start_clock(sd, (uint16)sd_divisor)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#else
+ /* Start at ~400KHz clock rate for initialization */
+ if (!spi_start_clock(sd, 128)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#endif /* HSMODE */
+
+ if (!bcmspi_host_device_init_adapt(sd)) {
+ sd_err(("bcmspi_host_device_init_adapt failed\n"));
+ return ERROR;
+ }
+
+ if (!bcmspi_test_card(sd)) {
+ sd_err(("bcmspi_test_card failed\n"));
+ return ERROR;
+ }
+
+ sd->num_funcs = SPI_MAX_IOFUNCS;
+
+ get_client_blocksize(sd);
+
+ /* Apply resync pattern cmd with all zeros to reset spi-bkplane F1 logic */
+ bcmspi_resync_f1(sd);
+
+ sd->dwordmode = FALSE;
+
+ bcmspi_card_regread(sd, 0, SPID_STATUS_ENABLE, 1, &status_en_reg);
+
+ sd_trace(("%s: Enabling interrupt with dstatus \n", __FUNCTION__));
+ status_en_reg |= INTR_WITH_STATUS;
+
+ if (bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_STATUS_ENABLE, 1,
+ status_en_reg & 0xff) != SUCCESS) {
+		sd_err(("%s: Unable to set response delay for all functions.\n", __FUNCTION__));
+ return ERROR;
+ }
+
+#ifndef HSMODE
+ /* After configuring for High-Speed mode, set the desired clock rate. */
+ if (!spi_start_clock(sd, 4)) {
+ sd_err(("spi_start_clock failed\n"));
+ return ERROR;
+ }
+#endif /* HSMODE */
+
+ /* check to see if the response delay needs to be programmed properly */
+ {
+ uint32 f1_respdelay = 0;
+ bcmspi_card_regread(sd, 0, SPID_RESP_DELAY_F1, 1, &f1_respdelay);
+ if ((f1_respdelay == 0) || (f1_respdelay == 0xFF)) {
+			/* older sdiodevice core has no separate resp delay for each of the functions */
+ sd_err(("older corerev < 4 so use the same resp delay for all funcs\n"));
+ sd->resp_delay_new = FALSE;
+ }
+ else {
+			/* newer sdiodevice core has a separate resp delay for each of the functions */
+ int ret_val;
+ sd->resp_delay_new = TRUE;
+ sd_err(("new corerev >= 4 so set the resp delay for each of the funcs\n"));
+ sd_trace(("resp delay for funcs f0(%d), f1(%d), f2(%d), f3(%d)\n",
+ GSPI_F0_RESP_DELAY, GSPI_F1_RESP_DELAY,
+ GSPI_F2_RESP_DELAY, GSPI_F3_RESP_DELAY));
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F0, 1,
+ GSPI_F0_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F0\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F1, 1,
+ GSPI_F1_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F1\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F2, 1,
+ GSPI_F2_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+ sd_err(("%s: Unable to set response delay for F2\n", __FUNCTION__));
+ return ERROR;
+ }
+ ret_val = bcmspi_card_regwrite(sd, SPI_FUNC_0, SPID_RESP_DELAY_F3, 1,
+ GSPI_F3_RESP_DELAY);
+ if (ret_val != SUCCESS) {
+				sd_err(("%s: Unable to set response delay for F3\n", __FUNCTION__));
+ return ERROR;
+ }
+ }
+ }
+
+
+ sd->card_init_done = TRUE;
+
+ /* get the device rev to program the prop respdelays */
+
+ return SUCCESS;
+}
+
+static int
+bcmspi_set_highspeed_mode(sdioh_info_t *sd, bool hsmode)
+{
+ uint32 regdata;
+ int status;
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_CONFIG,
+	    4, &regdata)) != SUCCESS)
+ return status;
+
+ sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
+
+ if (hsmode == TRUE) {
+ sd_trace(("Attempting to enable High-Speed mode.\n"));
+
+ if (regdata & HIGH_SPEED_MODE) {
+ sd_trace(("Device is already in High-Speed mode.\n"));
+ return status;
+ } else {
+ regdata |= HIGH_SPEED_MODE;
+ sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+ 4, regdata)) != SUCCESS) {
+ return status;
+ }
+ }
+ } else {
+ sd_trace(("Attempting to disable High-Speed mode.\n"));
+
+ if (regdata & HIGH_SPEED_MODE) {
+ regdata &= ~HIGH_SPEED_MODE;
+ sd_trace(("Writing %08x to device at %08x\n", regdata, SPID_CONFIG));
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_CONFIG,
+ 4, regdata)) != SUCCESS)
+ return status;
+ }
+ else {
+ sd_trace(("Device is already in Low-Speed mode.\n"));
+ return status;
+ }
+ }
+ spi_controller_highspeed_mode(sd, hsmode);
+
+ return TRUE;
+}
+
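+/* Probe the read-only test register first in 16-bit and then in 32-bit word mode; seeing any
+ * of the known signature bytes means host and device framing currently line up, so the
+ * surrounding adapt loop can stop.
+ */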
+#define bcmspi_find_curr_mode(sd) { \
+ sd->wordlen = 2; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+ regdata &= 0xff; \
+ if ((regdata == 0xad) || (regdata == 0x5b) || \
+ (regdata == 0x5d) || (regdata == 0x5a)) \
+ break; \
+ sd->wordlen = 4; \
+	status = bcmspi_card_regread_fixedaddr(sd, 0, SPID_TEST_READ, 4, &regdata); \
+ regdata &= 0xff; \
+ if ((regdata == 0xad) || (regdata == 0x5b) || \
+ (regdata == 0x5d) || (regdata == 0x5a)) \
+ break; \
+ sd_trace(("Silicon testability issue: regdata = 0x%x." \
+ " Expected 0xad, 0x5a, 0x5b or 0x5d.\n", regdata)); \
+ OSL_DELAY(100000); \
+}
+
+#define INIT_ADAPT_LOOP 100
+
+/* Adapt clock-phase-speed-bitwidth between host and device */
+static bool
+bcmspi_host_device_init_adapt(sdioh_info_t *sd)
+{
+ uint32 wrregdata, regdata = 0;
+ int status;
+ int i;
+
+ /* Due to a silicon testability issue, the first command from the Host
+ * to the device will get corrupted (first bit will be lost). So the
+ * Host should poll the device with a safe read request. ie: The Host
+ * should try to read F0 addr 0x14 using the Fixed address mode
+ * (This will prevent a unintended write command to be detected by device)
+ */
+ for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+ /* If device was not power-cycled it will stay in 32bit mode with
+		 * response-delay-all bit set. Alternate each iteration so that the
+		 * F0 read succeeds either with or without response delay.
+ */
+ bcmspi_find_curr_mode(sd);
+ sd->resp_delay_all = (i & 0x1) ? TRUE : FALSE;
+
+ bcmspi_find_curr_mode(sd);
+ sd->dwordmode = TRUE;
+
+ bcmspi_find_curr_mode(sd);
+ sd->dwordmode = FALSE;
+ }
+
+ /* Bail out, device not detected */
+ if (i == INIT_ADAPT_LOOP)
+ return FALSE;
+
+ /* Softreset the spid logic */
+ if ((sd->dwordmode) || (sd->wordlen == 4)) {
+ bcmspi_card_regwrite(sd, 0, SPID_RESET_BP, 1, RESET_ON_WLAN_BP_RESET|RESET_SPI);
+		bcmspi_card_regread(sd, 0, SPID_RESET_BP, 1, &regdata);
+ sd_trace(("reset reg read = 0x%x\n", regdata));
+ sd_trace(("dwordmode = %d, wordlen = %d, resp_delay_all = %d\n", sd->dwordmode,
+ sd->wordlen, sd->resp_delay_all));
+ /* Restore default state after softreset */
+ sd->wordlen = 2;
+ sd->dwordmode = FALSE;
+ }
+
+ if (sd->wordlen == 4) {
+		if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) !=
+ SUCCESS)
+ return FALSE;
+ if (regdata == TEST_RO_DATA_32BIT_LE) {
+ sd_trace(("Spid is already in 32bit LE mode. Value read = 0x%x\n",
+ regdata));
+ sd_trace(("Spid power was left on.\n"));
+ } else {
+ sd_err(("Spid power was left on but signature read failed."
+ " Value read = 0x%x\n", regdata));
+ return FALSE;
+ }
+ } else {
+ sd->wordlen = 2;
+
+#define CTRL_REG_DEFAULT 0x00010430 /* according to the host m/c */
+
+ wrregdata = (CTRL_REG_DEFAULT);
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ sd_trace(("(we are still in 16bit mode) 32bit READ LE regdata = 0x%x\n", regdata));
+
+#ifndef HSMODE
+ wrregdata |= (CLOCK_PHASE | CLOCK_POLARITY);
+ wrregdata &= ~HIGH_SPEED_MODE;
+ bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+#endif /* HSMODE */
+
+ for (i = 0; i < INIT_ADAPT_LOOP; i++) {
+ if ((regdata == 0xfdda7d5b) || (regdata == 0xfdda7d5a)) {
+ sd_trace(("0xfeedbead was leftshifted by 1-bit.\n"));
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4,
+ &regdata)) != SUCCESS)
+ return FALSE;
+ }
+ OSL_DELAY(1000);
+ }
+
+#if defined(CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH)
+ /* Change to host controller intr-polarity of active-high */
+ wrregdata |= INTR_POLARITY;
+#else
+ /* Change to host controller intr-polarity of active-low */
+ wrregdata &= ~INTR_POLARITY;
+#endif /* CHANGE_SPI_INTR_POLARITY_ACTIVE_HIGH */
+
+ sd_trace(("(we are still in 16bit mode) 32bit Write LE reg-ctrl-data = 0x%x\n",
+ wrregdata));
+ /* Change to 32bit mode */
+ wrregdata |= WORD_LENGTH_32;
+ bcmspi_card_regwrite(sd, 0, SPID_CONFIG, 4, wrregdata);
+
+ /* Change command/data packaging in 32bit LE mode */
+ sd->wordlen = 4;
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+
+ if (regdata == TEST_RO_DATA_32BIT_LE) {
+ sd_trace(("Read spid passed. Value read = 0x%x\n", regdata));
+ sd_trace(("Spid had power-on cycle OR spi was soft-resetted \n"));
+ } else {
+ sd_err(("Stale spid reg values read as it was kept powered. Value read ="
+ "0x%x\n", regdata));
+ return FALSE;
+ }
+ }
+
+
+ return TRUE;
+}
+
+static bool
+bcmspi_test_card(sdioh_info_t *sd)
+{
+ uint32 regdata;
+ int status;
+
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_READ, 4, &regdata)) != SUCCESS)
+ return FALSE;
+
+ if (regdata == (TEST_RO_DATA_32BIT_LE))
+ sd_trace(("32bit LE regdata = 0x%x\n", regdata));
+ else {
+ sd_trace(("Incorrect 32bit LE regdata = 0x%x\n", regdata));
+ return FALSE;
+ }
+
+
+#define RW_PATTERN1 0xA0A1A2A3
+#define RW_PATTERN2 0x4B5B6B7B
+
+ regdata = RW_PATTERN1;
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+ return FALSE;
+ regdata = 0;
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ if (regdata != RW_PATTERN1) {
+ sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+ RW_PATTERN1, regdata));
+ return FALSE;
+ } else
+ sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+ regdata = RW_PATTERN2;
+ if ((status = bcmspi_card_regwrite(sd, 0, SPID_TEST_RW, 4, regdata)) != SUCCESS)
+ return FALSE;
+ regdata = 0;
+ if ((status = bcmspi_card_regread(sd, 0, SPID_TEST_RW, 4, &regdata)) != SUCCESS)
+ return FALSE;
+ if (regdata != RW_PATTERN2) {
+ sd_err(("Write-Read spid failed. Value wrote = 0x%x, Value read = 0x%x\n",
+ RW_PATTERN2, regdata));
+ return FALSE;
+ } else
+ sd_trace(("R/W spid passed. Value read = 0x%x\n", regdata));
+
+ return TRUE;
+}
+
+static int
+bcmspi_driver_init(sdioh_info_t *sd)
+{
+ sd_trace(("%s\n", __FUNCTION__));
+ if ((bcmspi_host_init(sd)) != SUCCESS) {
+ return ERROR;
+ }
+
+ if (bcmspi_client_init(sd) != SUCCESS) {
+ return ERROR;
+ }
+
+ return SUCCESS;
+}
+
+/* Read device reg */
+static int
+bcmspi_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg, dstatus;
+
+ ASSERT(regsize);
+
+ if (func == 2)
+ sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus =0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+static int
+bcmspi_card_regread_fixedaddr(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
+{
+
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+
+ ASSERT(regsize);
+
+ if (func == 2)
+ sd_trace(("Reg access on F2 will generate error indication in dstatus bits.\n"));
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0); /* Fixed access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize);
+
+ sd_trace(("%s: RD cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, regsize)) != SUCCESS)
+ return status;
+
+ sd_trace(("%s: RD result=0x%x\n", __FUNCTION__, *data));
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ sd_trace(("dstatus =0x%x\n", dstatus));
+ return SUCCESS;
+}
+
+/* write a device register */
+static int
+bcmspi_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
+{
+ int status;
+ uint32 cmd_arg, dstatus;
+
+ ASSERT(regsize);
+
+ cmd_arg = 0;
+
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, regsize == BLOCK_SIZE_F2 ? 0 : regsize);
+
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x regsize=%d data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, regsize, data));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, regsize)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus=0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+/* write a device register - 1 byte */
+static int
+bcmspi_card_bytewrite(sdioh_info_t *sd, int func, uint32 regaddr, uint8 *byte)
+{
+ int status;
+ uint32 cmd_arg;
+ uint32 dstatus;
+ uint32 data = (uint32)(*byte);
+
+ cmd_arg = 0;
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1); /* Incremental access */
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, regaddr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, 1);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, 1);
+
+ sd_trace(("%s: WR cmd_arg=0x%x func=%d regaddr=0x%x data=0x%x\n",
+ __FUNCTION__, cmd_arg, func, regaddr, data));
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, &data, 1)) != SUCCESS)
+ return status;
+
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if (dstatus)
+ sd_trace(("dstatus =0x%x\n", dstatus));
+
+ return SUCCESS;
+}
+
+void
+bcmspi_cmd_getdstatus(sdioh_info_t *sd, uint32 *dstatus_buffer)
+{
+ *dstatus_buffer = sd->card_dstatus;
+}
+
+/* 'data' is of type uint32 whereas other buffers are of type uint8 */
+static int
+bcmspi_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd_arg,
+ uint32 *data, uint32 datalen)
+{
+ uint32 i, j;
+ uint8 resp_delay = 0;
+ int err = SUCCESS;
+ uint32 hostlen;
+ uint32 spilen = 0;
+ uint32 dstatus_idx = 0;
+ uint16 templen, buslen, len, *ptr = NULL;
+
+ sd_trace(("spi cmd = 0x%x\n", cmd_arg));
+
+ if (DWORDMODE_ON) {
+ spilen = GFIELD(cmd_arg, SPI_LEN);
+ if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_0) ||
+ (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_1))
+ dstatus_idx = spilen * 3;
+
+ if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+ (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+ spilen = spilen << 2;
+ dstatus_idx = (spilen % 16) ? (16 - (spilen % 16)) : 0;
+ /* convert len to mod16 size */
+ spilen = ROUNDUP(spilen, 16);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+ }
+ }
+
+ /* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
+ * according to the wordlen mode(16/32bit) the device is in.
+ */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)spi_outbuf = SPISWAP_WD4(cmd_arg);
+ if (datalen & 0x3)
+ datalen += (4 - (datalen & 0x3));
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)spi_outbuf = SPISWAP_WD2(cmd_arg);
+ if (datalen & 0x1)
+ datalen++;
+ if (datalen < 4)
+ datalen = ROUNDUP(datalen, 4);
+ } else {
+ sd_err(("Host is %d bit spid, could not create SPI command.\n",
+ 8 * sd->wordlen));
+ return ERROR;
+ }
+
+ /* for Write, put the data into the output buffer */
+ if (GFIELD(cmd_arg, SPI_RW_FLAG) == 1) {
+ /* The len field of the hw header is always sent as a mod-16 size, by both host and dongle */
+ if (DWORDMODE_ON) {
+ if (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) {
+ ptr = (uint16 *)&data[0];
+ templen = *ptr;
+ /* ASSERT(*ptr == ~*(ptr + 1)); */
+ templen = ROUNDUP(templen, 16);
+ *ptr = templen;
+ sd_trace(("actual tx len = %d\n", (uint16)(~*(ptr+1))));
+ }
+ }
+
+ if (datalen != 0) {
+ for (i = 0; i < datalen/4; i++) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+ SPISWAP_WD4(data[i]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ *(uint32 *)&spi_outbuf[i * 4 + CMDLEN] =
+ SPISWAP_WD2(data[i]);
+ }
+ }
+ }
+ }
+
+ /* Append resp-delay number of bytes and clock them out for F0/1/2 reads. */
+ if ((GFIELD(cmd_arg, SPI_RW_FLAG) == 0)) {
+ int func = GFIELD(cmd_arg, SPI_FUNCTION);
+ switch (func) {
+ case 0:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F0_RESP_DELAY;
+ else
+ resp_delay = sd->resp_delay_all ? F0_RESPONSE_DELAY : 0;
+ break;
+ case 1:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F1_RESP_DELAY;
+ else
+ resp_delay = F1_RESPONSE_DELAY;
+ break;
+ case 2:
+ if (sd->resp_delay_new)
+ resp_delay = GSPI_F2_RESP_DELAY;
+ else
+ resp_delay = sd->resp_delay_all ? F2_RESPONSE_DELAY : 0;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ /* Program response delay */
+ if (sd->resp_delay_new == FALSE)
+ bcmspi_prog_resp_delay(sd, func, resp_delay);
+ }
+
+ /* +4 for cmd and +4 for dstatus */
+ hostlen = datalen + 8 + resp_delay;
+ hostlen += dstatus_idx;
+ hostlen += (4 - (hostlen & 0x3));
+ spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
+
+ /* for Read, get the data into the input buffer */
+ if (datalen != 0) {
+ if (GFIELD(cmd_arg, SPI_RW_FLAG) == 0) { /* if read cmd */
+ for (j = 0; j < datalen/4; j++) {
+ if (sd->wordlen == 4) { /* 32bit spid */
+ data[j] = SPISWAP_WD4(*(uint32 *)&spi_inbuf[j * 4 +
+ CMDLEN + resp_delay]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ data[j] = SPISWAP_WD2(*(uint32 *)&spi_inbuf[j * 4 +
+ CMDLEN + resp_delay]);
+ }
+ }
+
+ if ((DWORDMODE_ON) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+ ptr = (uint16 *)&data[0];
+ templen = *ptr;
+ buslen = len = ~(*(ptr + 1));
+ buslen = ROUNDUP(buslen, 16);
+ /* populate actual len in hw-header */
+ if (templen == buslen)
+ *ptr = len;
+ }
+ }
+ }
+
+ /* Restore back the len field of the hw header */
+ if (DWORDMODE_ON) {
+ if ((GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2) &&
+ (GFIELD(cmd_arg, SPI_RW_FLAG) == 1)) {
+ ptr = (uint16 *)&data[0];
+ *ptr = (uint16)(~*(ptr+1));
+ }
+ }
+
+ dstatus_idx += (datalen + CMDLEN + resp_delay);
+ /* Last 4bytes are dstatus. Device is configured to return status bits. */
+ if (sd->wordlen == 4) { /* 32bit spid */
+ sd->card_dstatus = SPISWAP_WD4(*(uint32 *)&spi_inbuf[dstatus_idx]);
+ } else if (sd->wordlen == 2) { /* 16bit spid */
+ sd->card_dstatus = SPISWAP_WD2(*(uint32 *)&spi_inbuf[dstatus_idx]);
+ } else {
+ sd_err(("Host is %d bit machine, could not read SPI dstatus.\n",
+ 8 * sd->wordlen));
+ return ERROR;
+ }
+ if (sd->card_dstatus == 0xffffffff) {
+ sd_err(("looks like not a GSPI device or device is not powered.\n"));
+ }
+
+ err = bcmspi_update_stats(sd, cmd_arg);
+
+ return err;
+
+}
+
+static int
+bcmspi_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
+ uint32 addr, int nbytes, uint32 *data)
+{
+ int status;
+ uint32 cmd_arg;
+ bool write = rw == SDIOH_READ ? 0 : 1;
+ uint retries = 0;
+
+ bool enable;
+ uint32 spilen;
+
+ cmd_arg = 0;
+
+ ASSERT(nbytes);
+ ASSERT(nbytes <= sd->client_block_size[func]);
+
+ if (write) sd->t_cnt++; else sd->r_cnt++;
+
+ if (func == 2) {
+ /* Frame len check limited by gSPI. */
+ if ((nbytes > 2000) && write) {
+ sd_trace((">2KB write: F2 wr of %d bytes\n", nbytes));
+ }
+ /* ASSERT(nbytes <= 2048); Fix bigger len gspi issue and uncomment. */
+ /* If F2 fifo on device is not ready to receive data, don't do F2 transfer */
+ if (write) {
+ uint32 dstatus;
+ /* check F2 ready with cached one */
+ bcmspi_cmd_getdstatus(sd, &dstatus);
+ if ((dstatus & STATUS_F2_RX_READY) == 0) {
+ retries = WAIT_F2RXFIFORDY;
+ enable = 0;
+ while (retries-- && !enable) {
+ OSL_DELAY(WAIT_F2RXFIFORDY_DELAY * 1000);
+ bcmspi_card_regread(sd, SPI_FUNC_0, SPID_STATUS_REG, 4,
+ &dstatus);
+ if (dstatus & STATUS_F2_RX_READY)
+ enable = TRUE;
+ }
+ if (!enable) {
+ struct spierrstats_t *spierrstats = &sd->spierrstats;
+ spierrstats->f2rxnotready++;
+ sd_err(("F2 FIFO is not ready to receive data.\n"));
+ return ERROR;
+ }
+ sd_trace(("No of retries on F2 ready %d\n",
+ (WAIT_F2RXFIFORDY - retries)));
+ }
+ }
+ }
+
+ /* F2 transfers happen on 0 addr */
+ addr = (func == 2) ? 0 : addr;
+
+ /* In pio mode buffer is read using fixed address fifo in func 1 */
+ if ((func == 1) && (fifo))
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 0);
+ else
+ cmd_arg = SFIELD(cmd_arg, SPI_ACCESS, 1);
+
+ cmd_arg = SFIELD(cmd_arg, SPI_FUNCTION, func);
+ cmd_arg = SFIELD(cmd_arg, SPI_REG_ADDR, addr);
+ cmd_arg = SFIELD(cmd_arg, SPI_RW_FLAG, write);
+ spilen = sd->data_xfer_count = MIN(sd->client_block_size[func], nbytes);
+ if ((sd->dwordmode == TRUE) && (GFIELD(cmd_arg, SPI_FUNCTION) == SPI_FUNC_2)) {
+ /* convert len to mod4 size */
+ spilen = spilen + ((spilen & 0x3) ? (4 - (spilen & 0x3)): 0);
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, (spilen >> 2));
+ } else
+ cmd_arg = SFIELD(cmd_arg, SPI_LEN, spilen);
+
+ if ((func == 2) && (fifo == 1)) {
+ sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, write ? "Wr" : "Rd", func, "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+ }
+
+ sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+ sd_data(("%s: %s func %d, %s, addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
+ __FUNCTION__, write ? "Wd" : "Rd", func, "INCR",
+ addr, nbytes, sd->r_cnt, sd->t_cnt));
+
+
+ if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) {
+ sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
+ (write ? "write" : "read")));
+ return status;
+ }
+
+ /* gSPI expects that hw-header-len is equal to spi-command-len (see the sketch after this function) */
+ if ((func == 2) && (rw == SDIOH_WRITE) && (sd->dwordmode == FALSE)) {
+ ASSERT((uint16)sd->data_xfer_count == (uint16)(*data & 0xffff));
+ ASSERT((uint16)sd->data_xfer_count == (uint16)(~((*data & 0xffff0000) >> 16)));
+ }
+
+ if ((nbytes > 2000) && !write) {
+ sd_trace((">2KB read: F2 rd of %d bytes\n", nbytes));
+ }
+
+ return SUCCESS;
+}
+
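As a rough illustration of the hw-header/command-length relationship checked by the ASSERTs in bcmspi_card_buf() above, the sketch below (not part of the driver sources; the helper name is hypothetical) builds the first 32-bit word of an F2 write frame: the low 16 bits carry the frame length and the high 16 bits carry its one's complement.

/* Illustrative sketch only: first dword of a gSPI F2 write frame.
 * Low 16 bits = length, high 16 bits = one's complement of the length,
 * matching the two ASSERTs in bcmspi_card_buf() above.
 */
static uint32
example_gspi_hwhdr(uint16 len)
{
	return ((uint32)((uint16)~len) << 16) | len;
}
/* e.g. example_gspi_hwhdr(0x0040) == 0xffbf0040 */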
+/* Reset and re-initialize the device */
+int
+sdioh_sdio_reset(sdioh_info_t *si)
+{
+ si->card_init_done = FALSE;
+ return bcmspi_client_init(si);
+}
+
+SDIOH_API_RC
+sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+SDIOH_API_RC
+sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
+{
+ return SDIOH_API_RC_FAIL;
+}
+
+bool
+sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
+{
+ return FALSE;
+}
+
+SDIOH_API_RC
+sdioh_gpio_init(sdioh_info_t *sd)
+{
+ return SDIOH_API_RC_FAIL;
+}
/*
* Driver O/S-independent utility routines
*
- * $Copyright Open Broadcom Corporation$
- * $Id: bcmutils.c 496061 2014-08-11 06:14:48Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmutils.c 591286 2015-10-07 11:59:26Z $
*/
#include <bcm_cfg.h>
void *_bcmutils_dummy_fn = NULL;
-#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
-#define CUST_IPV4_TOS_PREC_MASK 0x3F
-#define DCSP_MAX_VALUE 64
-/* 0:BE,1:BK,2:RESV(BK):,3:EE,:4:CL,5:VI,6:VO,7:NC */
-int dscp2priomap[DCSP_MAX_VALUE]=
-{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */
- 2, 0, 0, 0, 0, 0, 0, 0,
- 3, 0, 0, 0, 0, 0, 0, 0,
- 4, 0, 0, 0, 0, 0, 0, 0,
- 5, 0, 0, 0, 0, 0, 0, 0,
- 6, 0, 0, 0, 0, 0, 0, 0,
- 7, 0, 0, 0, 0, 0, 0, 0
-};
-#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
#ifdef BCMDRIVER
evh->vlan_tag = hton16(vlan_tag);
rc |= PKTPRIO_UPD;
}
+#ifdef DHD_LOSSLESS_ROAMING
+ } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ priority = PRIO_8021D_NC;
+ rc = PKTPRIO_DSCP;
+#endif /* DHD_LOSSLESS_ROAMING */
} else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
(eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ether_header);
priority = PRIO_8021D_EE;
break;
default:
-#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING
priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
-#else
- priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT)
- & CUST_IPV4_TOS_PREC_MASK)];
-#endif
break;
}
return (rc | priority);
}
+/* lookup user priority for specified DSCP */
+static uint8
+dscp2up(uint8 *up_table, uint8 dscp)
+{
+ uint8 user_priority = 255;
+
+ /* lookup up from table if parameters valid */
+ if (up_table != NULL && dscp < UP_TABLE_MAX) {
+ user_priority = up_table[dscp];
+ }
+
+ /* 255 is unused value so return up from dscp */
+ if (user_priority == 255) {
+ user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
+ }
+
+ return user_priority;
+}
+
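A small sketch of the fallback path in dscp2up() above, assuming the usual header values IPV4_TOS_PREC_SHIFT == 5 and IPV4_TOS_DSCP_SHIFT == 2 (so the shift is 3 and the three IP-precedence bits become the user priority); the helper name is hypothetical and the table is filled with the "unused" marker 255 to force the fallback.

/* Illustrative sketch only: default DSCP -> UP mapping when the QoS Map Set
 * table has no usable entry (255).  dscp >> 3 keeps just the IP precedence bits.
 */
static void
example_dscp2up_fallback(void)
{
	uint8 up_table[UP_TABLE_MAX];

	memset(up_table, 255, sizeof(up_table));	/* mark all entries "unused" */

	ASSERT(dscp2up(up_table, 0) == 0);	/* BE  -> UP 0 */
	ASSERT(dscp2up(up_table, 46) == 5);	/* EF  -> UP 5 */
	ASSERT(dscp2up(up_table, 56) == 7);	/* CS7 -> UP 7 */
}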
+/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */
+uint BCMFASTPATH
+pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag)
+{
+ if (up_table) {
+ uint8 *pktdata;
+ uint pktlen;
+ uint8 dscp;
+ uint user_priority = 0;
+ uint rc = 0;
+
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ pktlen = PKTLEN(OSH_NULL, pkt);
+
+ if (pktgetdscp(pktdata, pktlen, &dscp)) {
+ rc = PKTPRIO_DSCP;
+ user_priority = dscp2up(up_table, dscp);
+ PKTSETPRIO(pkt, user_priority);
+ }
+
+ return (rc | user_priority);
+ } else {
+ return pktsetprio(pkt, update_vtag);
+ }
+}
+
/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
*/
bool BCMFASTPATH
return rc;
}
+/* Adjust the priority of 802.1x packets */
+void
+pktset8021xprio(void *pkt, int prio)
+{
+ struct ether_header *eh;
+ uint8 *pktdata;
+ if (prio == PKTPRIO(pkt))
+ return;
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+ eh = (struct ether_header *) pktdata;
+ if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ ASSERT(prio >= 0 && prio <= MAXPRIO);
+ PKTSETPRIO(pkt, prio);
+ }
+}
+
/* The 0.5KB string table is not removed by compiler even though it's unused */
static char bcm_undeferrstr[32];
#endif /* BCMDRIVER */
+#ifdef BCM_OBJECT_TRACE
+
+#define BCM_OBJECT_MERGE_SAME_OBJ 0
+
+/* Places in the Linux code that may add/remove an object to/from the trace list (a usage sketch follows): */
+/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */
+/* remove: osl_pktfree dev_kfree_skb netif_rx */
+
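A hedged usage sketch of the call pattern the hook points above rely on; the example_ helpers and the PKTGET/PKTFREE wrapping shown here are illustrative only, not the driver's actual hook code.

/* Illustrative sketch only: how an allocation/free site records a packet with
 * the trace list via bcm_object_trace_opr() (defined later in this file).
 */
static void *
example_alloc_traced(osl_t *osh, uint len)
{
	void *pkt = PKTGET(osh, len, TRUE);

	if (pkt != NULL)
		bcm_object_trace_opr(pkt, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
	return pkt;
}

static void
example_free_traced(osl_t *osh, void *pkt)
{
	bcm_object_trace_opr(pkt, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
	PKTFREE(osh, pkt, TRUE);
}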
+#define BCM_OBJDBG_COUNT (1024 * 100)
+static spinlock_t dbgobj_lock;
+#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock)
+#define BCM_OBJDBG_LOCK_DESTROY()
+#define BCM_OBJDBG_LOCK spin_lock_irqsave
+#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore
+
+#define BCM_OBJDBG_ADDTOHEAD 0
+#define BCM_OBJDBG_ADDTOTAIL 1
+
+#define BCM_OBJDBG_CALLER_LEN 32
+struct bcm_dbgobj {
+ struct bcm_dbgobj *prior;
+ struct bcm_dbgobj *next;
+ uint32 flag;
+ void *obj;
+ uint32 obj_sn;
+ uint32 obj_state;
+ uint32 line;
+ char caller[BCM_OBJDBG_CALLER_LEN];
+};
+
+static struct bcm_dbgobj *dbgobj_freehead = NULL;
+static struct bcm_dbgobj *dbgobj_freetail = NULL;
+static struct bcm_dbgobj *dbgobj_objhead = NULL;
+static struct bcm_dbgobj *dbgobj_objtail = NULL;
+
+static uint32 dbgobj_sn = 0;
+static int dbgobj_count = 0;
+static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT];
+
+void
+bcm_object_trace_init(void)
+{
+ int i = 0;
+ BCM_OBJDBG_LOCK_INIT();
+ memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT);
+ dbgobj_freehead = &bcm_dbg_objs[0];
+ dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1];
+
+ for (i = 0; i < BCM_OBJDBG_COUNT; ++i) {
+ bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ?
+ dbgobj_freehead : &bcm_dbg_objs[i + 1];
+ bcm_dbg_objs[i].prior = (i == 0) ?
+ dbgobj_freetail : &bcm_dbg_objs[i - 1];
+ }
+}
+
+void
+bcm_object_trace_deinit(void)
+{
+ if (dbgobj_objhead || dbgobj_objtail) {
+ printf("%s: not all objects are released\n", __FUNCTION__);
+ ASSERT(0);
+ }
+ BCM_OBJDBG_LOCK_DESTROY();
+}
+
+static void
+bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj)
+{
+ if ((dbgobj == *head) && (dbgobj == *tail)) {
+ *head = NULL;
+ *tail = NULL;
+ } else if (dbgobj == *head) {
+ *head = (*head)->next;
+ } else if (dbgobj == *tail) {
+ *tail = (*tail)->prior;
+ }
+ dbgobj->next->prior = dbgobj->prior;
+ dbgobj->prior->next = dbgobj->next;
+}
+
+static void
+bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int addtotail)
+{
+ if (!(*head) && !(*tail)) {
+ *head = dbgobj;
+ *tail = dbgobj;
+ dbgobj->next = dbgobj;
+ dbgobj->prior = dbgobj;
+ } else if ((*head) && (*tail)) {
+ (*tail)->next = dbgobj;
+ (*head)->prior = dbgobj;
+ dbgobj->next = *head;
+ dbgobj->prior = *tail;
+ if (addtotail == BCM_OBJDBG_ADDTOTAIL)
+ *tail = dbgobj;
+ else
+ *head = dbgobj;
+ } else {
+ ASSERT(0); /* can't be this case */
+ }
+}
+
+static INLINE void
+bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int movetotail)
+{
+ if ((*head) && (*tail)) {
+ if (movetotail == BCM_OBJDBG_ADDTOTAIL) {
+ if (dbgobj != (*tail)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
+ }
+ } else {
+ if (dbgobj != (*head)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
+ }
+ }
+ } else {
+ ASSERT(0); /* can't be this case */
+ }
+}
+
+void
+bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ if (opt == BCM_OBJDBG_ADD_PKT ||
+ opt == BCM_OBJDBG_ADD) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ printf("%s: obj %p allocated from %s(%d),"
+ " allocate again from %s(%d)\n",
+ __FUNCTION__, dbgobj->obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ ASSERT(0);
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+#if BCM_OBJECT_MERGE_SAME_OBJ
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ goto FREED_ENTRY_FOUND;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+
+ dbgobj = dbgobj_freehead;
+#if BCM_OBJECT_MERGE_SAME_OBJ
+FREED_ENTRY_FOUND:
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+ if (!dbgobj) {
+ printf("%s: already got %d objects ?????????????????????\n",
+ __FUNCTION__, BCM_OBJDBG_COUNT);
+ ASSERT(0);
+ goto EXIT;
+ }
+
+ bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj);
+ dbgobj->obj = obj;
+ strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+ dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+ dbgobj->line = line;
+ dbgobj->flag = 0;
+ if (opt == BCM_OBJDBG_ADD_PKT) {
+ dbgobj->obj_sn = dbgobj_sn++;
+ dbgobj->obj_state = 0;
+ /* first 4 bytes is pkt sn */
+ if (((unsigned long)PKTTAG(obj)) & 0x3)
+ printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj));
+ *(uint32*)PKTTAG(obj) = dbgobj->obj_sn;
+ }
+ bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
+
+ dbgobj_count++;
+
+ } else if (opt == BCM_OBJDBG_REMOVE) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (dbgobj->flag) {
+ printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n",
+ __FUNCTION__, obj, dbgobj->flag, caller, line);
+ }
+ bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+ memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN);
+ strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+ dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+ dbgobj->line = line;
+ bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
+ dbgobj_count--;
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ dbgobj = dbgobj_freetail;
+ while (dbgobj && dbgobj->obj) {
+ if (dbgobj->obj == obj) {
+ printf("%s: obj %p already freed from from %s(%d),"
+ " try free again from %s(%d)\n",
+ __FUNCTION__, obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+
+ printf("%s: ################### release none-existing obj %p from %s(%d)\n",
+ __FUNCTION__, obj, caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
+
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ dbgobj->obj = obj_new;
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+ const char *caller, int line)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+ printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n",
+ __FUNCTION__, caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+ dbgobj->caller, dbgobj->line);
+ goto EXIT;
+ }
+ else if (dbgobj->obj == NULL) {
+ break;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+
+ printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+ __FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn);
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
+ __FUNCTION__, caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+void
+bcm_object_feature_set(void *obj, uint32 type, uint32 value)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ if (value & BCM_OBJECT_FEATURE_CLEAR)
+ dbgobj->flag &= ~(value);
+ else
+ dbgobj->flag |= (value);
+ } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
+ dbgobj->obj_state = value;
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+ ASSERT(0);
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
+
+int
+bcm_object_feature_get(void *obj, uint32 type, uint32 value)
+{
+ int rtn = 0;
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
+
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+ printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+ ASSERT(0);
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return rtn;
+}
+
+#endif /* BCM_OBJECT_TRACE */
uint8 *
bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
bcm_tlv_t *elt;
int totlen;
- elt = (bcm_tlv_t*)buf;
+ if ((elt = (bcm_tlv_t*)buf) == NULL) {
+ return NULL;
+ }
totlen = buflen;
/* find tagged parameter */
/* indicate the str was too short */
if (flags != 0) {
- if (len < 2)
- p -= 2 - len; /* overwrite last char */
p += snprintf(p, 2, ">");
}
p = line;
for (i = 0; i < nbytes; i++) {
if (i % 16 == 0) {
- nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
+ nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
p += nchar;
len -= nchar;
}
"AES_CCM",
"AES_OCB_MSDU",
"AES_OCB_MPDU",
-#ifdef BCMCCX
- "CKIP",
- "CKIP_MMH",
- "WEP_MMH",
- "NALG",
-#else
"NALG",
"UNDEF",
"UNDEF",
"UNDEF",
-#endif /* BCMCCX */
- "WAPI",
+ "UNDEF"
"PMK",
"BIP",
"AES_GCM",
}
uint
-bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint buflen)
{
uint len;
return bitcount;
}
-#ifdef BCMDRIVER
+#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+
+/* triggers bcm_bprintf to print to kernel log */
+bool bcm_bprintf_bypass = FALSE;
/* Initialization of bcmstrbuf structure */
void
va_start(ap, fmt);
r = vsnprintf(b->buf, b->size, fmt, ap);
+ if (bcm_bprintf_bypass == TRUE) {
+ printf(b->buf);
+ goto exit;
+ }
/* Non Ansi C99 compliant returns -1,
* Ansi compliant return r >= b->size,
b->buf += r;
}
+exit:
va_end(ap);
return r;
}
void
-bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len)
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len)
{
int i;
}
#endif
-#endif /* BCMDRIVER */
+#endif /* BCMDRIVER || WL_UNITTEST */
/*
* ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
}
void
-bcm_bitprint32(const uint32 u32)
+bcm_bitprint32(const uint32 u32arg)
{
int i;
for (i = NBITS(uint32) - 1; i >= 0; i--) {
- isbitset(u32, i) ? printf("1") : printf("0");
+ isbitset(u32arg, i) ? printf("1") : printf("0");
if ((i % NBBY) == 0) printf(" ");
}
printf("\n");
return ((uint16)~sum);
}
-
-#ifdef BCMDRIVER
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
/*
* Hierarchical Multiword bitmap based small id allocator.
*
* with savings in not having to use an indirect access, had it been dynamically
* allocated.
*/
-#define BCM_MWBMAP_ITEMS_MAX (4 * 1024) /* May increase to 16K */
+#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */
#define BCM_MWBMAP_BITS_WORD (NBITS(uint32))
#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */
uint16 wmaps; /* Total number of words in free wd bitmap */
uint16 imaps; /* Total number of words in free id bitmap */
- int16 ifree; /* Count of free indices. Used only in audits */
+ int32 ifree; /* Count of free indices. Used only in audits */
uint16 total; /* Total indices managed by multiword bitmap */
void * magic; /* Audit handle parameter from user */
/* Initialize runtime multiword bitmap state */
mwbmap_p->imaps = (uint16)words;
- mwbmap_p->ifree = (int16)items_max;
+ mwbmap_p->ifree = (int32)items_max;
mwbmap_p->total = (uint16)items_max;
/* Setup magic, for use in audit of handle */
/* Simple 16bit Id allocator using a stack implementation. */
typedef struct id16_map {
- uint16 total; /* total number of ids managed by allocator */
- uint16 start; /* start value of 16bit ids to be managed */
uint32 failures; /* count of failures */
void *dbg; /* debug placeholder */
+ uint16 total; /* total number of ids managed by allocator */
+ uint16 start; /* start value of 16bit ids to be managed */
int stack_idx; /* index into stack of available ids */
uint16 stack[0]; /* stack of 16 bit ids */
} id16_map_t;
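A minimal, self-contained sketch of the stack-based idea behind this allocator, independent of the driver's own id16_map routines (the example_ names and the fixed capacity are hypothetical): free ids are pre-pushed onto the stack, alloc pops and free pushes.

/* Illustrative sketch only. */
typedef struct example_id_stack {
	int stack_idx;		/* index of last valid entry, -1 when empty */
	uint16 stack[16];	/* example capacity */
} example_id_stack_t;

static uint16
example_id_alloc(example_id_stack_t *s)
{
	return (s->stack_idx < 0) ? 0xffff /* invalid */ : s->stack[s->stack_idx--];
}

static void
example_id_free(example_id_stack_t *s, uint16 id)
{
	s->stack[++s->stack_idx] = id;
}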
id16_map_t * id16_map;
ASSERT(total_ids > 0);
- ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+ /* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
if (id16_map == NULL) {
id16_map->failures = 0;
id16_map->dbg = NULL;
- /* Populate stack with 16bit id values, commencing with start_val16 */
- id16_map->stack_idx = 0;
- val16 = start_val16;
+ /*
+ * Populate stack with 16bit id values, commencing with start_val16.
+ * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
+ */
+ id16_map->stack_idx = -1;
+
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
- for (idx = 0; idx < total_ids; idx++, val16++) {
- id16_map->stack_idx = idx;
- id16_map->stack[id16_map->stack_idx] = val16;
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
}
#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+ if (id16_map->start != ID16_UNDEFINED) {
+ id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
- if (id16_map->dbg) {
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
- id16_map_dbg->total = total_ids;
- for (idx = 0; idx < total_ids; idx++) {
- id16_map_dbg->avail[idx] = TRUE;
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
}
}
#endif /* BCM_DBG && BCM_DBG_ID16 */
id16_map_t * id16_map;
ASSERT(total_ids > 0);
- ASSERT((start_val16 + total_ids) < ID16_INVALID);
+ /* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
id16_map = (id16_map_t *)id16_map_hndl;
if (id16_map == NULL) {
id16_map->failures = 0;
/* Populate stack with 16bit id values, commencing with start_val16 */
- id16_map->stack_idx = 0;
- val16 = start_val16;
+ id16_map->stack_idx = -1;
- for (idx = 0; idx < total_ids; idx++, val16++) {
- id16_map->stack_idx = idx;
- id16_map->stack[id16_map->stack_idx] = val16;
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
+
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
}
#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->dbg) {
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+ if (id16_map->start != ID16_UNDEFINED) {
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
- id16_map_dbg->total = total_ids;
- for (idx = 0; idx < total_ids; idx++) {
- id16_map_dbg->avail[idx] = TRUE;
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
}
}
#endif /* BCM_DBG && BCM_DBG_ID16 */
id16_map->stack_idx--;
#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
-
- ASSERT(val16 < (id16_map->start + id16_map->total));
+ ASSERT((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
if (id16_map->dbg) { /* Validate val16 */
id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
id16_map = (id16_map_t *)id16_map_hndl;
#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
-
- ASSERT(val16 < (id16_map->start + id16_map->total));
+ ASSERT((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
if (id16_map->dbg) { /* Validate val16 */
id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
id16_map = (id16_map_t *)id16_map_hndl;
- ASSERT((id16_map->stack_idx > 0) && (id16_map->stack_idx < id16_map->total));
+ ASSERT(id16_map->stack_idx >= -1);
+ ASSERT(id16_map->stack_idx < (int)id16_map->total);
+
+ if (id16_map->start == ID16_UNDEFINED)
+ goto done;
+
for (idx = 0; idx <= id16_map->stack_idx; idx++) {
ASSERT(id16_map->stack[idx] >= id16_map->start);
ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) {
insane |= 1;
ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
- id16_map_hndl, idx, val16));
+ id16_map_hndl, idx, val16));
}
}
#endif /* BCM_DBG && BCM_DBG_ID16 */
if (avail && (avail != (id16_map->stack_idx + 1))) {
insane |= 1;
ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
- id16_map_hndl, avail, id16_map->stack_idx));
+ id16_map_hndl, avail, id16_map->stack_idx));
}
}
#endif /* BCM_DBG && BCM_DBG_ID16 */
+done:
+ /* invoke any other system audits */
return (!!insane);
}
/* END: Simple id16 allocator */
-#endif /* BCMDRIVER */
+#endif
/* calculate a >> b; and returns only lower 32 bits */
void
#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
#endif /* DEBUG_COUNTER */
-#ifdef BCMDRIVER
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
void
dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
{
mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
- if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) {
+ if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
elems_max, elem_size);
ASSERT(0);
return dll_pool_p;
}
- bzero(dll_pool_p, mem_size);
-
dll_init(&dll_pool_p->free_list);
dll_pool_p->elems_max = elems_max;
dll_pool_p->elem_size = elem_size;
dll_pool_p->free_count += 1;
}
-#endif /* BCMDRIVER */
+#endif
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * $Copyright Open Broadcom Corporation$
- * $Id: bcmwifi_channels.c 309193 2012-01-19 00:03:57Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_channels.c 591285 2015-10-07 11:56:29Z $
*/
#include <bcm_cfg.h>
"80",
"160",
"80+80",
+#ifdef WL11ULB
+ "2.5"
+#else /* WL11ULB */
"na"
+#endif /* WL11ULB */
};
static const uint8 wf_chspec_bw_mhz[] =
/* parse channel num or band */
if (!read_uint(&a, &num))
return 0;
-
/* if we are looking at a 'g', then the first number was a band */
c = tolower((int)a[0]);
if (c == 'g') {
- a ++; /* consume the char */
+ a++; /* consume the char */
/* band must be "2" or "5" */
if (num == 2)
return 0;
/* convert to chspec value */
- if (bw == 20) {
+ if (bw == 2) {
+ chspec_bw = WL_CHANSPEC_BW_2P5;
+ } else if (bw == 5) {
+ chspec_bw = WL_CHANSPEC_BW_5;
+ } else if (bw == 10) {
+ chspec_bw = WL_CHANSPEC_BW_10;
+ } else if (bw == 20) {
chspec_bw = WL_CHANSPEC_BW_20;
} else if (bw == 40) {
chspec_bw = WL_CHANSPEC_BW_40;
/* So far we have <band>g<chan>/<bw>
* Can now be followed by u/l if bw = 40,
- * or '+80' if bw = 80, to make '80+80' bw.
+ * or '+80' if bw = 80, to make '80+80' bw,
+ * or '.5' if bw = 2 to make '2.5' bw.
*/
c = tolower((int)a[0]);
/* check for 80+80 */
if (c == '+') {
/* 80+80 */
- static const char *plus80 = "80/";
+ const char plus80[] = "80/";
/* must be looking at '+80/'
* check and consume this string.
/* consume the '80/' string */
for (i = 0; i < 3; i++) {
- if (*a++ != *plus80++) {
+ if (*a++ != plus80[i]) {
return 0;
}
}
/* read secondary 80MHz channel */
if (!read_uint(&a, &ch2))
return 0;
+ } else if (c == '.') {
+ /* 2.5 */
+ /* must be looking at '.5'
+ * check and consume this string.
+ */
+ chspec_bw = WL_CHANSPEC_BW_2P5;
+
+ a ++; /* consume the char '.' */
+
+ /* consume the '5' string */
+ if (*a++ != '5') {
+ return 0;
+ }
}
done_read:
}
}
/* if the bw is 20, center and sideband are trivial */
- else if (chspec_bw == WL_CHANSPEC_BW_20) {
+ else if (BW_LE20(chspec_bw)) {
chspec_ch = ctl_ch;
chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
}
/* must be 2G or 5G band */
if (CHSPEC_IS2G(chanspec)) {
/* must be valid bandwidth */
- if (chspec_bw != WL_CHANSPEC_BW_20 &&
- chspec_bw != WL_CHANSPEC_BW_40) {
+ if (!BW_LE40(chspec_bw)) {
return TRUE;
}
} else if (CHSPEC_IS5G(chanspec)) {
if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
return TRUE;
- } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
- chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
-
+ } else if (BW_LE160(chspec_bw)) {
if (chspec_ch > MAXCHANNEL) {
return TRUE;
}
}
/* side band needs to be consistent with bandwidth */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_40) {
if (CHSPEC_IS2G(chanspec)) {
/* must be valid bandwidth and channel range */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
if (chspec_ch >= 1 && chspec_ch <= 14)
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_40) {
const uint8 *center_ch;
uint num_ch, i;
- if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+ if (BW_LE40(chspec_bw)) {
center_ch = wf_5g_40m_chans;
num_ch = WF_NUM_5G_40M_CHANS;
} else if (chspec_bw == WL_CHANSPEC_BW_80) {
}
/* check for a valid center channel */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
/* We don't have an array of legal 20MHz 5G channels, but they are
* each side of the legal 40MHz channels. Check the chanspec
* channel against either side of the 40MHz channels.
ASSERT(!wf_chspec_malformed(chspec));
/* Is there a sideband ? */
- if (CHSPEC_IS20(chspec)) {
+ if (CHSPEC_BW_LE20(chspec)) {
return CHSPEC_CHANNEL(chspec);
} else {
sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
char *
wf_chspec_to_bw_str(chanspec_t chspec)
{
- return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+ return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
}
/*
ASSERT(!wf_chspec_malformed(chspec));
/* Is there a sideband ? */
- if (!CHSPEC_IS20(chspec)) {
+ if (!CHSPEC_BW_LE20(chspec)) {
ctl_chan = wf_chspec_ctlchan(chspec);
ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
ctl_chspec |= CHSPEC_BAND(chspec);
center_ch = wf_5g_160m_chans;
num_ch = WF_NUM_5G_160M_CHANS;
bw = 160;
- } else if (bw == WL_CHANSPEC_BW_20) {
+ } else if (BW_LE20(bw)) {
chspec |= ctl_ch;
return chspec;
} else {
--- /dev/null
+/*
+ * Misc utility routines for WL and Apps
+ * This header file housing the define and function prototype use by
+ * both the wl driver, tools & Apps.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_channels.h 591285 2015-10-07 11:56:29Z $
+ */
+
+#ifndef _bcmwifi_channels_h_
+#define _bcmwifi_channels_h_
+
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
+typedef uint16 chanspec_t;
+
+/* channel defines */
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
+#define CH_80MHZ_APART 16
+#define CH_40MHZ_APART 8
+#define CH_20MHZ_APART 4
+#define CH_10MHZ_APART 2
+#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+
+/* maximum # channels the s/w supports */
+#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above,
+ * this is that + 1 rounded up to a multiple of NBBY (8).
+ * DO NOT MAKE it > 255: channels are uint8's all over
+ */
+#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */
+
+/* channel bitvec */
+typedef struct {
+ uint8 vec[MAXCHANNEL/8]; /* bitvec of channels */
+} chanvec_t;
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
+#define CHSPEC_CTLOVLP(sp1, sp2, sep) \
+ (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep))
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+#define WL_CHANSPEC_CHAN_MASK 0x00ff
+#define WL_CHANSPEC_CHAN_SHIFT 0
+#define WL_CHANSPEC_CHAN1_MASK 0x000f
+#define WL_CHANSPEC_CHAN1_SHIFT 0
+#define WL_CHANSPEC_CHAN2_MASK 0x00f0
+#define WL_CHANSPEC_CHAN2_SHIFT 4
+
+#define WL_CHANSPEC_CTL_SB_MASK 0x0700
+#define WL_CHANSPEC_CTL_SB_SHIFT 8
+#define WL_CHANSPEC_CTL_SB_LLL 0x0000
+#define WL_CHANSPEC_CTL_SB_LLU 0x0100
+#define WL_CHANSPEC_CTL_SB_LUL 0x0200
+#define WL_CHANSPEC_CTL_SB_LUU 0x0300
+#define WL_CHANSPEC_CTL_SB_ULL 0x0400
+#define WL_CHANSPEC_CTL_SB_ULU 0x0500
+#define WL_CHANSPEC_CTL_SB_UUL 0x0600
+#define WL_CHANSPEC_CTL_SB_UUU 0x0700
+#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL
+#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU
+#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
+
+#define WL_CHANSPEC_BW_MASK 0x3800
+#define WL_CHANSPEC_BW_SHIFT 11
+#define WL_CHANSPEC_BW_5 0x0000
+#define WL_CHANSPEC_BW_10 0x0800
+#define WL_CHANSPEC_BW_20 0x1000
+#define WL_CHANSPEC_BW_40 0x1800
+#define WL_CHANSPEC_BW_80 0x2000
+#define WL_CHANSPEC_BW_160 0x2800
+#define WL_CHANSPEC_BW_8080 0x3000
+#define WL_CHANSPEC_BW_2P5 0x3800
+
+#define WL_CHANSPEC_BAND_MASK 0xc000
+#define WL_CHANSPEC_BAND_SHIFT 14
+#define WL_CHANSPEC_BAND_2G 0x0000
+#define WL_CHANSPEC_BAND_3G 0x4000
+#define WL_CHANSPEC_BAND_4G 0x8000
+#define WL_CHANSPEC_BAND_5G 0xc000
+#define INVCHANSPEC 255
+#define MAX_CHANSPEC 0xFFFF
+
+/* channel defines */
+#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \
+ ((channel) - CH_10MHZ_APART) : 0)
+#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
+ ((channel) + CH_10MHZ_APART) : 0)
+
+#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
+#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+ ((channel) + 3 * CH_10MHZ_APART) : 0)
+#define LU_20_SB(channel) LOWER_20_SB(channel)
+#define UL_20_SB(channel) UPPER_20_SB(channel)
+
+#define LOWER_40_SB(channel) ((channel) - CH_20MHZ_APART)
+#define UPPER_40_SB(channel) ((channel) + CH_20MHZ_APART)
+#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH2P5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH10MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
+ ((channel) + CH_20MHZ_APART) : 0)
+#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
+ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
+ WL_CHANSPEC_BAND_5G))
+#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_80 | WL_CHANSPEC_BAND_5G)
+#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
+ ((channel) | (ctlsb) | \
+ WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+#define CHBW_CHSPEC(bw, channel) (chanspec_t)((chanspec_t)(channel) | (bw) | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+
+/* simple MACROs to get different fields of chanspec */
+#ifdef WL11AC_80P80
+#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec)
+#else
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#endif
+#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT
+#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT
+#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
+
+#ifdef WL11N_20MHZONLY
+
+#define CHSPEC_IS2P5(chspec) 0
+#define CHSPEC_IS5(chspec) 0
+#define CHSPEC_IS10(chspec) 0
+#define CHSPEC_IS20(chspec) 1
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) 0
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) 0
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) 0
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) 0
+#endif
+#define BW_LE20(bw) TRUE
+#define CHSPEC_ISLE20(chspec) TRUE
+#else /* !WL11N_20MHZONLY */
+
+#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5)
+#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5)
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
+#ifndef CHSPEC_IS40
+#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
+#endif
+#ifndef CHSPEC_IS80
+#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
+#endif
+#ifndef CHSPEC_IS160
+#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
+#endif
+#ifndef CHSPEC_IS8080
+#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
+#endif
+
+#ifdef WL11ULB
+#define BW_LT20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \
+ ((bw) == WL_CHANSPEC_BW_5) || \
+ ((bw) == WL_CHANSPEC_BW_10))
+#define CHSPEC_BW_LT20(chspec) (BW_LT20(CHSPEC_BW(chspec)))
+/* This MACRO is strictly to avoid abandons in existing code with the ULB feature and is in
+ * no way optimal to use. It should be replaced with CHSPEC_BW_LE() instead.
+ */
+#define BW_LE20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \
+ ((bw) == WL_CHANSPEC_BW_5) || \
+ ((bw) == WL_CHANSPEC_BW_10) || \
+ ((bw) == WL_CHANSPEC_BW_20))
+#define CHSPEC_ISLE20(chspec) (BW_LE20(CHSPEC_BW(chspec)))
+
+#else /* WL11ULB */
+#define BW_LE20(bw) ((bw) == WL_CHANSPEC_BW_20)
+#define CHSPEC_ISLE20(chspec) (CHSPEC_IS20(chspec))
+#endif /* WL11ULB */
+#endif /* !WL11N_20MHZONLY */
+
+#define BW_LE40(bw) (BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
+#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
+#define CHSPEC_BW_LE20(chspec) (BW_LE20(CHSPEC_BW(chspec)))
+#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
+#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
+#define CHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
+#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+
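As a hedged illustration of the constructor and accessor macros above (the helper name is hypothetical), building a 20 MHz chanspec for 2.4 GHz channel 6 yields 6 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_BAND_2G == 0x1006, and the field accessors recover each part:

/* Illustrative sketch only. */
static void
example_chanspec_roundtrip(void)
{
	chanspec_t cs = CH20MHZ_CHSPEC(6);	/* == 0x1006 */

	ASSERT(CHSPEC_CHANNEL(cs) == 6);
	ASSERT(CHSPEC_BW(cs) == WL_CHANSPEC_BW_20);
	ASSERT(CHSPEC_IS2G(cs));
}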
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
+#define CHANSPEC_STR_LEN 20
+
+
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+ CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>), can be made as simple
+* numeric comparisons, with the exception that 160 is the same BW as 80+80 even though
+* the two have different numeric values (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+*
+* The LT/LE/GT/GE macros therefore first check whether both the chspec bandwidth and bw
+* are 160-wide; only when they are not both 160-wide is the plain numeric comparison
+* made (an illustrative sketch follows the definitions below).
+*/
+#ifdef WL11ULB
+#define CHSPEC_BW_GE(chspec, bw) \
+ (((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) >= (bw))) && \
+ (!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
+#define CHSPEC_BW_GE(chspec, bw) \
+ ((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) >= (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_LE(chspec, bw) \
+ (((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) <= (bw))) || \
+ (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
+#define CHSPEC_BW_LE(chspec, bw) \
+ ((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) <= (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_GT(chspec, bw) \
+ ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) > (bw))) && \
+ (CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
+#define CHSPEC_BW_GT(chspec, bw) \
+ (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) > (bw)))
+#endif /* WL11ULB */
+
+#ifdef WL11ULB
+#define CHSPEC_BW_LT(chspec, bw) \
+ ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) < (bw))) || \
+ ((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
+#define CHSPEC_BW_LT(chspec, bw) \
+ (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) < (bw)))
+#endif /* WL11ULB */
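+
+/* Worked example (editor's illustration, not part of the original header): for an 80+80
+ * chanspec, CHSPEC_BW_GE(chspec, WL_CHANSPEC_BW_160) evaluates TRUE, and for a 160 MHz
+ * chanspec, CHSPEC_BW_GT(chspec, WL_CHANSPEC_BW_8080) evaluates FALSE, even though the raw
+ * WL_CHANSPEC_BW_* codes differ numerically.
+ */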
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
+#define WL_LCHANSPEC_CHAN_MASK 0x00ff
+#define WL_LCHANSPEC_CHAN_SHIFT 0
+
+#define WL_LCHANSPEC_CTL_SB_MASK 0x0300
+#define WL_LCHANSPEC_CTL_SB_SHIFT 8
+#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100
+#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200
+#define WL_LCHANSPEC_CTL_SB_NONE 0x0300
+
+#define WL_LCHANSPEC_BW_MASK 0x0C00
+#define WL_LCHANSPEC_BW_SHIFT 10
+#define WL_LCHANSPEC_BW_10 0x0400
+#define WL_LCHANSPEC_BW_20 0x0800
+#define WL_LCHANSPEC_BW_40 0x0C00
+
+#define WL_LCHANSPEC_BAND_MASK 0xf000
+#define WL_LCHANSPEC_BAND_SHIFT 12
+#define WL_LCHANSPEC_BAND_5G 0x1000
+#define WL_LCHANSPEC_BAND_2G 0x2000
+
+#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK))
+#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK)
+#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK)
+#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK)
+#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10)
+#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20)
+#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40)
+#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
+#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+
+#define LCHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
+#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
+
+#define CH20MHZ_LCHSPEC(channel) \
+ (chanspec_t)((chanspec_t)(channel) | WL_LCHANSPEC_BW_20 | \
+ WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
+
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency
+ * given a channel number.
+ * chan_freq (MHz) = chan_factor * 0.5 + chan_number * 5
+ * (the factor is expressed in units of 500 kHz)
+ */
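+
+/* Worked example (editor's illustration): with WF_CHAN_FACTOR_5_G (10000), channel 36 maps to
+ * 10000 * 0.5 MHz + 36 * 5 MHz = 5180 MHz; with WF_CHAN_FACTOR_2_4_G (4814), channel 1 maps to
+ * 2407 MHz + 1 * 5 MHz = 2412 MHz.
+ */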
+
+/**
+ * Channel factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
+
+/**
+ * Channel factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
+
+/**
+ * Channel factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
+
+#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
+
+/**
+ * Number of sub-bands for the specified MHz chanspec
+ */
+#define WF_NUM_SIDEBANDS_40MHZ 2
+#define WF_NUM_SIDEBANDS_80MHZ 4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ 8
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param chspec chanspec format
+ * @param buf ascii string of chanspec
+ *
+ * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ *         (on error the original chanspec value is formatted into buf)
+ *
+ * @see CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param chspec chanspec format
+ * @param buf ascii string of chanspec
+ *
+ * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ * NULL in case of error
+ *
+ * @see CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
+
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param a pointer to input string
+ *
+ * @return a valid chanspec on success, 0 otherwise
+ */
+extern chanspec_t wf_chspec_aton(const char *a);
+
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set field values, i.e. that the chanspec
+ * specified a band, bw, ctl_sb and channel and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param chanspec input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
+extern bool wf_chspec_malformed(chanspec_t chanspec);
+
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param chanspec input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
+extern bool wf_chspec_valid(chanspec_t chanspec);
+
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+
+/*
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the bandwidth string
+ */
+extern char * wf_chspec_to_bw_str(chanspec_t chspec);
+
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
+
+/**
+ * Return the primary 40MHz chanspec of an 80MHz chanspec.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
+extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency up to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param freq frequency in MHz
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ */
+extern int wf_mhz2channel(uint freq, uint start_factor);
+
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param channel input channel number
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ */
+extern int wf_channel2mhz(uint channel, uint start_factor);
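+
+/* Illustrative round trip (editor's sketch, not part of the original header):
+ *
+ *	int freq = wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);         // 5180
+ *	int chan = wf_mhz2channel((uint)freq, WF_CHAN_FACTOR_5_G); // 36
+ */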
+
+/**
+ * Returns the 80 MHz chanspec corresponding to the following input
+ * parameters
+ *
+ * primary_channel - primary 20 MHz channel
+ * center_channel - center channel number of the 80 MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
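+
+/* Illustrative call (editor's sketch): wf_chspec_80(42, 36) builds the 80 MHz chanspec
+ * centered on channel 42 with channel 36 as its primary 20 MHz channel.
+ */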
+
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param ctl_ch channel
+ * @param bw bandwidth
+ *
+ * @return > 0 if successful or 0 otherwise
+ *
+ */
+extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
+
+extern uint wf_channel2freq(uint channel);
+extern uint wf_freq2channel(uint freq);
+
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ * primary_20mhz - Primary 20 MHz channel
+ * chan0_80MHz - center channel number of one frequency segment
+ * chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+ uint8 chan0_80Mhz, uint8 chan1_80Mhz);
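+
+/* Illustrative call (editor's sketch): wf_chspec_get8080_chspec(36, 42, 155) builds an 80+80
+ * chanspec whose primary 20 MHz channel 36 lies in the segment centered on channel 42.
+ */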
+
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80 MHz primary channel has to be retrieved
+ *
+ * returns -1 in case the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80 MHz secondary channel has to be retrieved
+ *
+ * returns -1 in case the provided chanspec is a 20/40 MHz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80 MHz of a 160 MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+#ifdef WL11AC_80P80
+/*
+ * This function returns the centre channel for the given chanspec.
+ * In the case of an 80+80 chanspec it returns the primary 80 MHz centre channel.
+ */
+extern uint8 wf_chspec_channel(chanspec_t chspec);
+#endif
+#endif /* _bcmwifi_channels_h_ */
--- /dev/null
+/*
+ * Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmwifi_rates.h 591285 2015-10-07 11:56:29Z $
+ */
+
+#ifndef _bcmwifi_rates_h_
+#define _bcmwifi_rates_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#define WL_RATESET_SZ_DSSS 4
+#define WL_RATESET_SZ_OFDM 8
+#define WL_RATESET_SZ_VHT_MCS 10
+#define WL_RATESET_SZ_VHT_MCS_P 12
+
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS
+#else
+#define WL_RATESET_SZ_HT_MCS 8
+#endif
+
+#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */
+
+#define WL_TX_CHAINS_MAX 4
+
+#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */
+
+/* Transmit channel bandwidths */
+typedef enum wl_tx_bw {
+ WL_TX_BW_20,
+ WL_TX_BW_40,
+ WL_TX_BW_80,
+ WL_TX_BW_20IN40,
+ WL_TX_BW_20IN80,
+ WL_TX_BW_40IN80,
+ WL_TX_BW_160,
+ WL_TX_BW_20IN160,
+ WL_TX_BW_40IN160,
+ WL_TX_BW_80IN160,
+ WL_TX_BW_ALL,
+ WL_TX_BW_8080,
+ WL_TX_BW_8080CHAN2,
+ WL_TX_BW_20IN8080,
+ WL_TX_BW_40IN8080,
+ WL_TX_BW_80IN8080,
+ WL_TX_BW_2P5,
+ WL_TX_BW_5,
+ WL_TX_BW_10
+} wl_tx_bw_t;
+
+
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
+typedef enum wl_tx_mode {
+ WL_TX_MODE_NONE,
+ WL_TX_MODE_STBC,
+ WL_TX_MODE_CDD,
+ WL_TX_MODE_TXBF,
+ WL_NUM_TX_MODES
+} wl_tx_mode_t;
+
+
+/* Number of transmit chains */
+typedef enum wl_tx_chains {
+ WL_TX_CHAINS_1 = 1,
+ WL_TX_CHAINS_2,
+ WL_TX_CHAINS_3,
+ WL_TX_CHAINS_4
+} wl_tx_chains_t;
+
+
+/* Number of transmit streams */
+typedef enum wl_tx_nss {
+ WL_TX_NSS_1 = 1,
+ WL_TX_NSS_2,
+ WL_TX_NSS_3,
+ WL_TX_NSS_4
+} wl_tx_nss_t;
+
+
+/* This enum maps each rate to a CLM index */
+
+typedef enum clm_rates {
+ /************
+ * 1 chain *
+ ************
+ */
+
+ /* 1 Stream */
+ WL_RATE_1X1_DSSS_1 = 0,
+ WL_RATE_1X1_DSSS_2 = 1,
+ WL_RATE_1X1_DSSS_5_5 = 2,
+ WL_RATE_1X1_DSSS_11 = 3,
+
+ WL_RATE_1X1_OFDM_6 = 4,
+ WL_RATE_1X1_OFDM_9 = 5,
+ WL_RATE_1X1_OFDM_12 = 6,
+ WL_RATE_1X1_OFDM_18 = 7,
+ WL_RATE_1X1_OFDM_24 = 8,
+ WL_RATE_1X1_OFDM_36 = 9,
+ WL_RATE_1X1_OFDM_48 = 10,
+ WL_RATE_1X1_OFDM_54 = 11,
+
+ WL_RATE_1X1_MCS0 = 12,
+ WL_RATE_1X1_MCS1 = 13,
+ WL_RATE_1X1_MCS2 = 14,
+ WL_RATE_1X1_MCS3 = 15,
+ WL_RATE_1X1_MCS4 = 16,
+ WL_RATE_1X1_MCS5 = 17,
+ WL_RATE_1X1_MCS6 = 18,
+ WL_RATE_1X1_MCS7 = 19,
+ WL_RATE_P_1X1_MCS87 = 20,
+ WL_RATE_P_1X1_MCS88 = 21,
+
+ WL_RATE_1X1_VHT0SS1 = 12,
+ WL_RATE_1X1_VHT1SS1 = 13,
+ WL_RATE_1X1_VHT2SS1 = 14,
+ WL_RATE_1X1_VHT3SS1 = 15,
+ WL_RATE_1X1_VHT4SS1 = 16,
+ WL_RATE_1X1_VHT5SS1 = 17,
+ WL_RATE_1X1_VHT6SS1 = 18,
+ WL_RATE_1X1_VHT7SS1 = 19,
+ WL_RATE_1X1_VHT8SS1 = 20,
+ WL_RATE_1X1_VHT9SS1 = 21,
+ WL_RATE_P_1X1_VHT10SS1 = 22,
+ WL_RATE_P_1X1_VHT11SS1 = 23,
+
+
+ /************
+ * 2 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 1 */
+ WL_RATE_1X2_DSSS_1 = 24,
+ WL_RATE_1X2_DSSS_2 = 25,
+ WL_RATE_1X2_DSSS_5_5 = 26,
+ WL_RATE_1X2_DSSS_11 = 27,
+
+ WL_RATE_1X2_CDD_OFDM_6 = 28,
+ WL_RATE_1X2_CDD_OFDM_9 = 29,
+ WL_RATE_1X2_CDD_OFDM_12 = 30,
+ WL_RATE_1X2_CDD_OFDM_18 = 31,
+ WL_RATE_1X2_CDD_OFDM_24 = 32,
+ WL_RATE_1X2_CDD_OFDM_36 = 33,
+ WL_RATE_1X2_CDD_OFDM_48 = 34,
+ WL_RATE_1X2_CDD_OFDM_54 = 35,
+
+ WL_RATE_1X2_CDD_MCS0 = 36,
+ WL_RATE_1X2_CDD_MCS1 = 37,
+ WL_RATE_1X2_CDD_MCS2 = 38,
+ WL_RATE_1X2_CDD_MCS3 = 39,
+ WL_RATE_1X2_CDD_MCS4 = 40,
+ WL_RATE_1X2_CDD_MCS5 = 41,
+ WL_RATE_1X2_CDD_MCS6 = 42,
+ WL_RATE_1X2_CDD_MCS7 = 43,
+ WL_RATE_P_1X2_CDD_MCS87 = 44,
+ WL_RATE_P_1X2_CDD_MCS88 = 45,
+
+ WL_RATE_1X2_VHT0SS1 = 36,
+ WL_RATE_1X2_VHT1SS1 = 37,
+ WL_RATE_1X2_VHT2SS1 = 38,
+ WL_RATE_1X2_VHT3SS1 = 39,
+ WL_RATE_1X2_VHT4SS1 = 40,
+ WL_RATE_1X2_VHT5SS1 = 41,
+ WL_RATE_1X2_VHT6SS1 = 42,
+ WL_RATE_1X2_VHT7SS1 = 43,
+ WL_RATE_1X2_VHT8SS1 = 44,
+ WL_RATE_1X2_VHT9SS1 = 45,
+ WL_RATE_P_1X2_VHT10SS1 = 46,
+ WL_RATE_P_1X2_VHT11SS1 = 47,
+
+ /* 2 Streams */
+ WL_RATE_2X2_STBC_MCS0 = 48,
+ WL_RATE_2X2_STBC_MCS1 = 49,
+ WL_RATE_2X2_STBC_MCS2 = 50,
+ WL_RATE_2X2_STBC_MCS3 = 51,
+ WL_RATE_2X2_STBC_MCS4 = 52,
+ WL_RATE_2X2_STBC_MCS5 = 53,
+ WL_RATE_2X2_STBC_MCS6 = 54,
+ WL_RATE_2X2_STBC_MCS7 = 55,
+ WL_RATE_P_2X2_STBC_MCS87 = 56,
+ WL_RATE_P_2X2_STBC_MCS88 = 57,
+
+ WL_RATE_2X2_STBC_VHT0SS1 = 48,
+ WL_RATE_2X2_STBC_VHT1SS1 = 49,
+ WL_RATE_2X2_STBC_VHT2SS1 = 50,
+ WL_RATE_2X2_STBC_VHT3SS1 = 51,
+ WL_RATE_2X2_STBC_VHT4SS1 = 52,
+ WL_RATE_2X2_STBC_VHT5SS1 = 53,
+ WL_RATE_2X2_STBC_VHT6SS1 = 54,
+ WL_RATE_2X2_STBC_VHT7SS1 = 55,
+ WL_RATE_2X2_STBC_VHT8SS1 = 56,
+ WL_RATE_2X2_STBC_VHT9SS1 = 57,
+ WL_RATE_P_2X2_STBC_VHT10SS1 = 58,
+ WL_RATE_P_2X2_STBC_VHT11SS1 = 59,
+
+ WL_RATE_2X2_SDM_MCS8 = 60,
+ WL_RATE_2X2_SDM_MCS9 = 61,
+ WL_RATE_2X2_SDM_MCS10 = 62,
+ WL_RATE_2X2_SDM_MCS11 = 63,
+ WL_RATE_2X2_SDM_MCS12 = 64,
+ WL_RATE_2X2_SDM_MCS13 = 65,
+ WL_RATE_2X2_SDM_MCS14 = 66,
+ WL_RATE_2X2_SDM_MCS15 = 67,
+ WL_RATE_P_2X2_SDM_MCS99 = 68,
+ WL_RATE_P_2X2_SDM_MCS100 = 69,
+
+ WL_RATE_2X2_VHT0SS2 = 60,
+ WL_RATE_2X2_VHT1SS2 = 61,
+ WL_RATE_2X2_VHT2SS2 = 62,
+ WL_RATE_2X2_VHT3SS2 = 63,
+ WL_RATE_2X2_VHT4SS2 = 64,
+ WL_RATE_2X2_VHT5SS2 = 65,
+ WL_RATE_2X2_VHT6SS2 = 66,
+ WL_RATE_2X2_VHT7SS2 = 67,
+ WL_RATE_2X2_VHT8SS2 = 68,
+ WL_RATE_2X2_VHT9SS2 = 69,
+ WL_RATE_P_2X2_VHT10SS2 = 70,
+ WL_RATE_P_2X2_VHT11SS2 = 71,
+
+ /****************************
+ * TX Beamforming, 2 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 1 */
+ WL_RATE_1X2_TXBF_OFDM_6 = 72,
+ WL_RATE_1X2_TXBF_OFDM_9 = 73,
+ WL_RATE_1X2_TXBF_OFDM_12 = 74,
+ WL_RATE_1X2_TXBF_OFDM_18 = 75,
+ WL_RATE_1X2_TXBF_OFDM_24 = 76,
+ WL_RATE_1X2_TXBF_OFDM_36 = 77,
+ WL_RATE_1X2_TXBF_OFDM_48 = 78,
+ WL_RATE_1X2_TXBF_OFDM_54 = 79,
+
+ WL_RATE_1X2_TXBF_MCS0 = 80,
+ WL_RATE_1X2_TXBF_MCS1 = 81,
+ WL_RATE_1X2_TXBF_MCS2 = 82,
+ WL_RATE_1X2_TXBF_MCS3 = 83,
+ WL_RATE_1X2_TXBF_MCS4 = 84,
+ WL_RATE_1X2_TXBF_MCS5 = 85,
+ WL_RATE_1X2_TXBF_MCS6 = 86,
+ WL_RATE_1X2_TXBF_MCS7 = 87,
+ WL_RATE_P_1X2_TXBF_MCS87 = 88,
+ WL_RATE_P_1X2_TXBF_MCS88 = 89,
+
+ WL_RATE_1X2_TXBF_VHT0SS1 = 80,
+ WL_RATE_1X2_TXBF_VHT1SS1 = 81,
+ WL_RATE_1X2_TXBF_VHT2SS1 = 82,
+ WL_RATE_1X2_TXBF_VHT3SS1 = 83,
+ WL_RATE_1X2_TXBF_VHT4SS1 = 84,
+ WL_RATE_1X2_TXBF_VHT5SS1 = 85,
+ WL_RATE_1X2_TXBF_VHT6SS1 = 86,
+ WL_RATE_1X2_TXBF_VHT7SS1 = 87,
+ WL_RATE_1X2_TXBF_VHT8SS1 = 88,
+ WL_RATE_1X2_TXBF_VHT9SS1 = 89,
+ WL_RATE_P_1X2_TXBF_VHT10SS1 = 90,
+ WL_RATE_P_1X2_TXBF_VHT11SS1 = 91,
+
+ /* 2 Streams */
+ WL_RATE_2X2_TXBF_SDM_MCS8 = 92,
+ WL_RATE_2X2_TXBF_SDM_MCS9 = 93,
+ WL_RATE_2X2_TXBF_SDM_MCS10 = 94,
+ WL_RATE_2X2_TXBF_SDM_MCS11 = 95,
+ WL_RATE_2X2_TXBF_SDM_MCS12 = 96,
+ WL_RATE_2X2_TXBF_SDM_MCS13 = 97,
+ WL_RATE_2X2_TXBF_SDM_MCS14 = 98,
+ WL_RATE_2X2_TXBF_SDM_MCS15 = 99,
+ WL_RATE_P_2X2_TXBF_SDM_MCS99 = 100,
+ WL_RATE_P_2X2_TXBF_SDM_MCS100 = 101,
+
+ WL_RATE_2X2_TXBF_VHT0SS2 = 92,
+ WL_RATE_2X2_TXBF_VHT1SS2 = 93,
+ WL_RATE_2X2_TXBF_VHT2SS2 = 94,
+ WL_RATE_2X2_TXBF_VHT3SS2 = 95,
+ WL_RATE_2X2_TXBF_VHT4SS2 = 96,
+ WL_RATE_2X2_TXBF_VHT5SS2 = 97,
+ WL_RATE_2X2_TXBF_VHT6SS2 = 98,
+ WL_RATE_2X2_TXBF_VHT7SS2 = 99,
+ WL_RATE_2X2_TXBF_VHT8SS2 = 100,
+ WL_RATE_2X2_TXBF_VHT9SS2 = 101,
+ WL_RATE_P_2X2_TXBF_VHT10SS2 = 102,
+ WL_RATE_P_2X2_TXBF_VHT11SS2 = 103,
+
+
+ /************
+ * 3 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 2 */
+ WL_RATE_1X3_DSSS_1 = 104,
+ WL_RATE_1X3_DSSS_2 = 105,
+ WL_RATE_1X3_DSSS_5_5 = 106,
+ WL_RATE_1X3_DSSS_11 = 107,
+
+ WL_RATE_1X3_CDD_OFDM_6 = 108,
+ WL_RATE_1X3_CDD_OFDM_9 = 109,
+ WL_RATE_1X3_CDD_OFDM_12 = 110,
+ WL_RATE_1X3_CDD_OFDM_18 = 111,
+ WL_RATE_1X3_CDD_OFDM_24 = 112,
+ WL_RATE_1X3_CDD_OFDM_36 = 113,
+ WL_RATE_1X3_CDD_OFDM_48 = 114,
+ WL_RATE_1X3_CDD_OFDM_54 = 115,
+
+ WL_RATE_1X3_CDD_MCS0 = 116,
+ WL_RATE_1X3_CDD_MCS1 = 117,
+ WL_RATE_1X3_CDD_MCS2 = 118,
+ WL_RATE_1X3_CDD_MCS3 = 119,
+ WL_RATE_1X3_CDD_MCS4 = 120,
+ WL_RATE_1X3_CDD_MCS5 = 121,
+ WL_RATE_1X3_CDD_MCS6 = 122,
+ WL_RATE_1X3_CDD_MCS7 = 123,
+ WL_RATE_P_1X3_CDD_MCS87 = 124,
+ WL_RATE_P_1X3_CDD_MCS88 = 125,
+
+ WL_RATE_1X3_VHT0SS1 = 116,
+ WL_RATE_1X3_VHT1SS1 = 117,
+ WL_RATE_1X3_VHT2SS1 = 118,
+ WL_RATE_1X3_VHT3SS1 = 119,
+ WL_RATE_1X3_VHT4SS1 = 120,
+ WL_RATE_1X3_VHT5SS1 = 121,
+ WL_RATE_1X3_VHT6SS1 = 122,
+ WL_RATE_1X3_VHT7SS1 = 123,
+ WL_RATE_1X3_VHT8SS1 = 124,
+ WL_RATE_1X3_VHT9SS1 = 125,
+ WL_RATE_P_1X3_VHT10SS1 = 126,
+ WL_RATE_P_1X3_VHT11SS1 = 127,
+
+ /* 2 Streams expanded + 1 */
+ WL_RATE_2X3_STBC_MCS0 = 128,
+ WL_RATE_2X3_STBC_MCS1 = 129,
+ WL_RATE_2X3_STBC_MCS2 = 130,
+ WL_RATE_2X3_STBC_MCS3 = 131,
+ WL_RATE_2X3_STBC_MCS4 = 132,
+ WL_RATE_2X3_STBC_MCS5 = 133,
+ WL_RATE_2X3_STBC_MCS6 = 134,
+ WL_RATE_2X3_STBC_MCS7 = 135,
+ WL_RATE_P_2X3_STBC_MCS87 = 136,
+ WL_RATE_P_2X3_STBC_MCS88 = 137,
+
+ WL_RATE_2X3_STBC_VHT0SS1 = 128,
+ WL_RATE_2X3_STBC_VHT1SS1 = 129,
+ WL_RATE_2X3_STBC_VHT2SS1 = 130,
+ WL_RATE_2X3_STBC_VHT3SS1 = 131,
+ WL_RATE_2X3_STBC_VHT4SS1 = 132,
+ WL_RATE_2X3_STBC_VHT5SS1 = 133,
+ WL_RATE_2X3_STBC_VHT6SS1 = 134,
+ WL_RATE_2X3_STBC_VHT7SS1 = 135,
+ WL_RATE_2X3_STBC_VHT8SS1 = 136,
+ WL_RATE_2X3_STBC_VHT9SS1 = 137,
+ WL_RATE_P_2X3_STBC_VHT10SS1 = 138,
+ WL_RATE_P_2X3_STBC_VHT11SS1 = 139,
+
+ WL_RATE_2X3_SDM_MCS8 = 140,
+ WL_RATE_2X3_SDM_MCS9 = 141,
+ WL_RATE_2X3_SDM_MCS10 = 142,
+ WL_RATE_2X3_SDM_MCS11 = 143,
+ WL_RATE_2X3_SDM_MCS12 = 144,
+ WL_RATE_2X3_SDM_MCS13 = 145,
+ WL_RATE_2X3_SDM_MCS14 = 146,
+ WL_RATE_2X3_SDM_MCS15 = 147,
+ WL_RATE_P_2X3_SDM_MCS99 = 148,
+ WL_RATE_P_2X3_SDM_MCS100 = 149,
+
+ WL_RATE_2X3_VHT0SS2 = 140,
+ WL_RATE_2X3_VHT1SS2 = 141,
+ WL_RATE_2X3_VHT2SS2 = 142,
+ WL_RATE_2X3_VHT3SS2 = 143,
+ WL_RATE_2X3_VHT4SS2 = 144,
+ WL_RATE_2X3_VHT5SS2 = 145,
+ WL_RATE_2X3_VHT6SS2 = 146,
+ WL_RATE_2X3_VHT7SS2 = 147,
+ WL_RATE_2X3_VHT8SS2 = 148,
+ WL_RATE_2X3_VHT9SS2 = 149,
+ WL_RATE_P_2X3_VHT10SS2 = 150,
+ WL_RATE_P_2X3_VHT11SS2 = 151,
+
+ /* 3 Streams */
+ WL_RATE_3X3_SDM_MCS16 = 152,
+ WL_RATE_3X3_SDM_MCS17 = 153,
+ WL_RATE_3X3_SDM_MCS18 = 154,
+ WL_RATE_3X3_SDM_MCS19 = 155,
+ WL_RATE_3X3_SDM_MCS20 = 156,
+ WL_RATE_3X3_SDM_MCS21 = 157,
+ WL_RATE_3X3_SDM_MCS22 = 158,
+ WL_RATE_3X3_SDM_MCS23 = 159,
+ WL_RATE_P_3X3_SDM_MCS101 = 160,
+ WL_RATE_P_3X3_SDM_MCS102 = 161,
+
+ WL_RATE_3X3_VHT0SS3 = 152,
+ WL_RATE_3X3_VHT1SS3 = 153,
+ WL_RATE_3X3_VHT2SS3 = 154,
+ WL_RATE_3X3_VHT3SS3 = 155,
+ WL_RATE_3X3_VHT4SS3 = 156,
+ WL_RATE_3X3_VHT5SS3 = 157,
+ WL_RATE_3X3_VHT6SS3 = 158,
+ WL_RATE_3X3_VHT7SS3 = 159,
+ WL_RATE_3X3_VHT8SS3 = 160,
+ WL_RATE_3X3_VHT9SS3 = 161,
+ WL_RATE_P_3X3_VHT10SS3 = 162,
+ WL_RATE_P_3X3_VHT11SS3 = 163,
+
+
+ /****************************
+ * TX Beamforming, 3 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 2 */
+ WL_RATE_1X3_TXBF_OFDM_6 = 164,
+ WL_RATE_1X3_TXBF_OFDM_9 = 165,
+ WL_RATE_1X3_TXBF_OFDM_12 = 166,
+ WL_RATE_1X3_TXBF_OFDM_18 = 167,
+ WL_RATE_1X3_TXBF_OFDM_24 = 168,
+ WL_RATE_1X3_TXBF_OFDM_36 = 169,
+ WL_RATE_1X3_TXBF_OFDM_48 = 170,
+ WL_RATE_1X3_TXBF_OFDM_54 = 171,
+
+ WL_RATE_1X3_TXBF_MCS0 = 172,
+ WL_RATE_1X3_TXBF_MCS1 = 173,
+ WL_RATE_1X3_TXBF_MCS2 = 174,
+ WL_RATE_1X3_TXBF_MCS3 = 175,
+ WL_RATE_1X3_TXBF_MCS4 = 176,
+ WL_RATE_1X3_TXBF_MCS5 = 177,
+ WL_RATE_1X3_TXBF_MCS6 = 178,
+ WL_RATE_1X3_TXBF_MCS7 = 179,
+ WL_RATE_P_1X3_TXBF_MCS87 = 180,
+ WL_RATE_P_1X3_TXBF_MCS88 = 181,
+
+ WL_RATE_1X3_TXBF_VHT0SS1 = 172,
+ WL_RATE_1X3_TXBF_VHT1SS1 = 173,
+ WL_RATE_1X3_TXBF_VHT2SS1 = 174,
+ WL_RATE_1X3_TXBF_VHT3SS1 = 175,
+ WL_RATE_1X3_TXBF_VHT4SS1 = 176,
+ WL_RATE_1X3_TXBF_VHT5SS1 = 177,
+ WL_RATE_1X3_TXBF_VHT6SS1 = 178,
+ WL_RATE_1X3_TXBF_VHT7SS1 = 179,
+ WL_RATE_1X3_TXBF_VHT8SS1 = 180,
+ WL_RATE_1X3_TXBF_VHT9SS1 = 181,
+ WL_RATE_P_1X3_TXBF_VHT10SS1 = 182,
+ WL_RATE_P_1X3_TXBF_VHT11SS1 = 183,
+
+ /* 2 Streams expanded + 1 */
+ WL_RATE_2X3_TXBF_SDM_MCS8 = 184,
+ WL_RATE_2X3_TXBF_SDM_MCS9 = 185,
+ WL_RATE_2X3_TXBF_SDM_MCS10 = 186,
+ WL_RATE_2X3_TXBF_SDM_MCS11 = 187,
+ WL_RATE_2X3_TXBF_SDM_MCS12 = 188,
+ WL_RATE_2X3_TXBF_SDM_MCS13 = 189,
+ WL_RATE_2X3_TXBF_SDM_MCS14 = 190,
+ WL_RATE_2X3_TXBF_SDM_MCS15 = 191,
+ WL_RATE_P_2X3_TXBF_SDM_MCS99 = 192,
+ WL_RATE_P_2X3_TXBF_SDM_MCS100 = 193,
+
+ WL_RATE_2X3_TXBF_VHT0SS2 = 184,
+ WL_RATE_2X3_TXBF_VHT1SS2 = 185,
+ WL_RATE_2X3_TXBF_VHT2SS2 = 186,
+ WL_RATE_2X3_TXBF_VHT3SS2 = 187,
+ WL_RATE_2X3_TXBF_VHT4SS2 = 188,
+ WL_RATE_2X3_TXBF_VHT5SS2 = 189,
+ WL_RATE_2X3_TXBF_VHT6SS2 = 190,
+ WL_RATE_2X3_TXBF_VHT7SS2 = 191,
+ WL_RATE_2X3_TXBF_VHT8SS2 = 192,
+ WL_RATE_2X3_TXBF_VHT9SS2 = 193,
+ WL_RATE_P_2X3_TXBF_VHT10SS2 = 194,
+ WL_RATE_P_2X3_TXBF_VHT11SS2 = 195,
+
+ /* 3 Streams */
+ WL_RATE_3X3_TXBF_SDM_MCS16 = 196,
+ WL_RATE_3X3_TXBF_SDM_MCS17 = 197,
+ WL_RATE_3X3_TXBF_SDM_MCS18 = 198,
+ WL_RATE_3X3_TXBF_SDM_MCS19 = 199,
+ WL_RATE_3X3_TXBF_SDM_MCS20 = 200,
+ WL_RATE_3X3_TXBF_SDM_MCS21 = 201,
+ WL_RATE_3X3_TXBF_SDM_MCS22 = 202,
+ WL_RATE_3X3_TXBF_SDM_MCS23 = 203,
+ WL_RATE_P_3X3_TXBF_SDM_MCS101 = 204,
+ WL_RATE_P_3X3_TXBF_SDM_MCS102 = 205,
+
+ WL_RATE_3X3_TXBF_VHT0SS3 = 196,
+ WL_RATE_3X3_TXBF_VHT1SS3 = 197,
+ WL_RATE_3X3_TXBF_VHT2SS3 = 198,
+ WL_RATE_3X3_TXBF_VHT3SS3 = 199,
+ WL_RATE_3X3_TXBF_VHT4SS3 = 200,
+ WL_RATE_3X3_TXBF_VHT5SS3 = 201,
+ WL_RATE_3X3_TXBF_VHT6SS3 = 202,
+ WL_RATE_3X3_TXBF_VHT7SS3 = 203,
+ WL_RATE_3X3_TXBF_VHT8SS3 = 204,
+ WL_RATE_3X3_TXBF_VHT9SS3 = 205,
+ WL_RATE_P_3X3_TXBF_VHT10SS3 = 206,
+ WL_RATE_P_3X3_TXBF_VHT11SS3 = 207,
+
+
+ /************
+ * 4 chains *
+ ************
+ */
+
+ /* 1 Stream expanded + 3 */
+ WL_RATE_1X4_DSSS_1 = 208,
+ WL_RATE_1X4_DSSS_2 = 209,
+ WL_RATE_1X4_DSSS_5_5 = 210,
+ WL_RATE_1X4_DSSS_11 = 211,
+
+ WL_RATE_1X4_CDD_OFDM_6 = 212,
+ WL_RATE_1X4_CDD_OFDM_9 = 213,
+ WL_RATE_1X4_CDD_OFDM_12 = 214,
+ WL_RATE_1X4_CDD_OFDM_18 = 215,
+ WL_RATE_1X4_CDD_OFDM_24 = 216,
+ WL_RATE_1X4_CDD_OFDM_36 = 217,
+ WL_RATE_1X4_CDD_OFDM_48 = 218,
+ WL_RATE_1X4_CDD_OFDM_54 = 219,
+
+ WL_RATE_1X4_CDD_MCS0 = 220,
+ WL_RATE_1X4_CDD_MCS1 = 221,
+ WL_RATE_1X4_CDD_MCS2 = 222,
+ WL_RATE_1X4_CDD_MCS3 = 223,
+ WL_RATE_1X4_CDD_MCS4 = 224,
+ WL_RATE_1X4_CDD_MCS5 = 225,
+ WL_RATE_1X4_CDD_MCS6 = 226,
+ WL_RATE_1X4_CDD_MCS7 = 227,
+ WL_RATE_P_1X4_CDD_MCS87 = 228,
+ WL_RATE_P_1X4_CDD_MCS88 = 229,
+
+ WL_RATE_1X4_VHT0SS1 = 220,
+ WL_RATE_1X4_VHT1SS1 = 221,
+ WL_RATE_1X4_VHT2SS1 = 222,
+ WL_RATE_1X4_VHT3SS1 = 223,
+ WL_RATE_1X4_VHT4SS1 = 224,
+ WL_RATE_1X4_VHT5SS1 = 225,
+ WL_RATE_1X4_VHT6SS1 = 226,
+ WL_RATE_1X4_VHT7SS1 = 227,
+ WL_RATE_1X4_VHT8SS1 = 228,
+ WL_RATE_1X4_VHT9SS1 = 229,
+ WL_RATE_P_1X4_VHT10SS1 = 230,
+ WL_RATE_P_1X4_VHT11SS1 = 231,
+
+ /* 2 Streams expanded + 2 */
+ WL_RATE_2X4_STBC_MCS0 = 232,
+ WL_RATE_2X4_STBC_MCS1 = 233,
+ WL_RATE_2X4_STBC_MCS2 = 234,
+ WL_RATE_2X4_STBC_MCS3 = 235,
+ WL_RATE_2X4_STBC_MCS4 = 236,
+ WL_RATE_2X4_STBC_MCS5 = 237,
+ WL_RATE_2X4_STBC_MCS6 = 238,
+ WL_RATE_2X4_STBC_MCS7 = 239,
+ WL_RATE_P_2X4_STBC_MCS87 = 240,
+ WL_RATE_P_2X4_STBC_MCS88 = 241,
+
+ WL_RATE_2X4_STBC_VHT0SS1 = 232,
+ WL_RATE_2X4_STBC_VHT1SS1 = 233,
+ WL_RATE_2X4_STBC_VHT2SS1 = 234,
+ WL_RATE_2X4_STBC_VHT3SS1 = 235,
+ WL_RATE_2X4_STBC_VHT4SS1 = 236,
+ WL_RATE_2X4_STBC_VHT5SS1 = 237,
+ WL_RATE_2X4_STBC_VHT6SS1 = 238,
+ WL_RATE_2X4_STBC_VHT7SS1 = 239,
+ WL_RATE_2X4_STBC_VHT8SS1 = 240,
+ WL_RATE_2X4_STBC_VHT9SS1 = 241,
+ WL_RATE_P_2X4_STBC_VHT10SS1 = 242,
+ WL_RATE_P_2X4_STBC_VHT11SS1 = 243,
+
+ WL_RATE_2X4_SDM_MCS8 = 244,
+ WL_RATE_2X4_SDM_MCS9 = 245,
+ WL_RATE_2X4_SDM_MCS10 = 246,
+ WL_RATE_2X4_SDM_MCS11 = 247,
+ WL_RATE_2X4_SDM_MCS12 = 248,
+ WL_RATE_2X4_SDM_MCS13 = 249,
+ WL_RATE_2X4_SDM_MCS14 = 250,
+ WL_RATE_2X4_SDM_MCS15 = 251,
+ WL_RATE_P_2X4_SDM_MCS99 = 252,
+ WL_RATE_P_2X4_SDM_MCS100 = 253,
+
+ WL_RATE_2X4_VHT0SS2 = 244,
+ WL_RATE_2X4_VHT1SS2 = 245,
+ WL_RATE_2X4_VHT2SS2 = 246,
+ WL_RATE_2X4_VHT3SS2 = 247,
+ WL_RATE_2X4_VHT4SS2 = 248,
+ WL_RATE_2X4_VHT5SS2 = 249,
+ WL_RATE_2X4_VHT6SS2 = 250,
+ WL_RATE_2X4_VHT7SS2 = 251,
+ WL_RATE_2X4_VHT8SS2 = 252,
+ WL_RATE_2X4_VHT9SS2 = 253,
+ WL_RATE_P_2X4_VHT10SS2 = 254,
+ WL_RATE_P_2X4_VHT11SS2 = 255,
+
+ /* 3 Streams expanded + 1 */
+ WL_RATE_3X4_SDM_MCS16 = 256,
+ WL_RATE_3X4_SDM_MCS17 = 257,
+ WL_RATE_3X4_SDM_MCS18 = 258,
+ WL_RATE_3X4_SDM_MCS19 = 259,
+ WL_RATE_3X4_SDM_MCS20 = 260,
+ WL_RATE_3X4_SDM_MCS21 = 261,
+ WL_RATE_3X4_SDM_MCS22 = 262,
+ WL_RATE_3X4_SDM_MCS23 = 263,
+ WL_RATE_P_3X4_SDM_MCS101 = 264,
+ WL_RATE_P_3X4_SDM_MCS102 = 265,
+
+ WL_RATE_3X4_VHT0SS3 = 256,
+ WL_RATE_3X4_VHT1SS3 = 257,
+ WL_RATE_3X4_VHT2SS3 = 258,
+ WL_RATE_3X4_VHT3SS3 = 259,
+ WL_RATE_3X4_VHT4SS3 = 260,
+ WL_RATE_3X4_VHT5SS3 = 261,
+ WL_RATE_3X4_VHT6SS3 = 262,
+ WL_RATE_3X4_VHT7SS3 = 263,
+ WL_RATE_3X4_VHT8SS3 = 264,
+ WL_RATE_3X4_VHT9SS3 = 265,
+ WL_RATE_P_3X4_VHT10SS3 = 266,
+ WL_RATE_P_3X4_VHT11SS3 = 267,
+
+
+ /* 4 Streams */
+ WL_RATE_4X4_SDM_MCS24 = 268,
+ WL_RATE_4X4_SDM_MCS25 = 269,
+ WL_RATE_4X4_SDM_MCS26 = 270,
+ WL_RATE_4X4_SDM_MCS27 = 271,
+ WL_RATE_4X4_SDM_MCS28 = 272,
+ WL_RATE_4X4_SDM_MCS29 = 273,
+ WL_RATE_4X4_SDM_MCS30 = 274,
+ WL_RATE_4X4_SDM_MCS31 = 275,
+ WL_RATE_P_4X4_SDM_MCS103 = 276,
+ WL_RATE_P_4X4_SDM_MCS104 = 277,
+
+ WL_RATE_4X4_VHT0SS4 = 268,
+ WL_RATE_4X4_VHT1SS4 = 269,
+ WL_RATE_4X4_VHT2SS4 = 270,
+ WL_RATE_4X4_VHT3SS4 = 271,
+ WL_RATE_4X4_VHT4SS4 = 272,
+ WL_RATE_4X4_VHT5SS4 = 273,
+ WL_RATE_4X4_VHT6SS4 = 274,
+ WL_RATE_4X4_VHT7SS4 = 275,
+ WL_RATE_4X4_VHT8SS4 = 276,
+ WL_RATE_4X4_VHT9SS4 = 277,
+ WL_RATE_P_4X4_VHT10SS4 = 278,
+ WL_RATE_P_4X4_VHT11SS4 = 279,
+
+
+ /****************************
+ * TX Beamforming, 4 chains *
+ ****************************
+ */
+
+ /* 1 Stream expanded + 3 */
+ WL_RATE_1X4_TXBF_OFDM_6 = 280,
+ WL_RATE_1X4_TXBF_OFDM_9 = 281,
+ WL_RATE_1X4_TXBF_OFDM_12 = 282,
+ WL_RATE_1X4_TXBF_OFDM_18 = 283,
+ WL_RATE_1X4_TXBF_OFDM_24 = 284,
+ WL_RATE_1X4_TXBF_OFDM_36 = 285,
+ WL_RATE_1X4_TXBF_OFDM_48 = 286,
+ WL_RATE_1X4_TXBF_OFDM_54 = 287,
+
+ WL_RATE_1X4_TXBF_MCS0 = 288,
+ WL_RATE_1X4_TXBF_MCS1 = 289,
+ WL_RATE_1X4_TXBF_MCS2 = 290,
+ WL_RATE_1X4_TXBF_MCS3 = 291,
+ WL_RATE_1X4_TXBF_MCS4 = 292,
+ WL_RATE_1X4_TXBF_MCS5 = 293,
+ WL_RATE_1X4_TXBF_MCS6 = 294,
+ WL_RATE_1X4_TXBF_MCS7 = 295,
+ WL_RATE_P_1X4_TXBF_MCS87 = 296,
+ WL_RATE_P_1X4_TXBF_MCS88 = 297,
+
+ WL_RATE_1X4_TXBF_VHT0SS1 = 288,
+ WL_RATE_1X4_TXBF_VHT1SS1 = 289,
+ WL_RATE_1X4_TXBF_VHT2SS1 = 290,
+ WL_RATE_1X4_TXBF_VHT3SS1 = 291,
+ WL_RATE_1X4_TXBF_VHT4SS1 = 292,
+ WL_RATE_1X4_TXBF_VHT5SS1 = 293,
+ WL_RATE_1X4_TXBF_VHT6SS1 = 294,
+ WL_RATE_1X4_TXBF_VHT7SS1 = 295,
+ WL_RATE_1X4_TXBF_VHT8SS1 = 296,
+ WL_RATE_1X4_TXBF_VHT9SS1 = 297,
+ WL_RATE_P_1X4_TXBF_VHT10SS1 = 298,
+ WL_RATE_P_1X4_TXBF_VHT11SS1 = 299,
+
+ /* 2 Streams expanded + 2 */
+ WL_RATE_2X4_TXBF_SDM_MCS8 = 300,
+ WL_RATE_2X4_TXBF_SDM_MCS9 = 301,
+ WL_RATE_2X4_TXBF_SDM_MCS10 = 302,
+ WL_RATE_2X4_TXBF_SDM_MCS11 = 303,
+ WL_RATE_2X4_TXBF_SDM_MCS12 = 304,
+ WL_RATE_2X4_TXBF_SDM_MCS13 = 305,
+ WL_RATE_2X4_TXBF_SDM_MCS14 = 306,
+ WL_RATE_2X4_TXBF_SDM_MCS15 = 307,
+ WL_RATE_P_2X4_TXBF_SDM_MCS99 = 308,
+ WL_RATE_P_2X4_TXBF_SDM_MCS100 = 309,
+
+ WL_RATE_2X4_TXBF_VHT0SS2 = 300,
+ WL_RATE_2X4_TXBF_VHT1SS2 = 301,
+ WL_RATE_2X4_TXBF_VHT2SS2 = 302,
+ WL_RATE_2X4_TXBF_VHT3SS2 = 303,
+ WL_RATE_2X4_TXBF_VHT4SS2 = 304,
+ WL_RATE_2X4_TXBF_VHT5SS2 = 305,
+ WL_RATE_2X4_TXBF_VHT6SS2 = 306,
+ WL_RATE_2X4_TXBF_VHT7SS2 = 307,
+ WL_RATE_2X4_TXBF_VHT8SS2 = 308,
+ WL_RATE_2X4_TXBF_VHT9SS2 = 309,
+ WL_RATE_P_2X4_TXBF_VHT10SS2 = 310,
+ WL_RATE_P_2X4_TXBF_VHT11SS2 = 311,
+
+ /* 3 Streams expanded + 1 */
+ WL_RATE_3X4_TXBF_SDM_MCS16 = 312,
+ WL_RATE_3X4_TXBF_SDM_MCS17 = 313,
+ WL_RATE_3X4_TXBF_SDM_MCS18 = 314,
+ WL_RATE_3X4_TXBF_SDM_MCS19 = 315,
+ WL_RATE_3X4_TXBF_SDM_MCS20 = 316,
+ WL_RATE_3X4_TXBF_SDM_MCS21 = 317,
+ WL_RATE_3X4_TXBF_SDM_MCS22 = 318,
+ WL_RATE_3X4_TXBF_SDM_MCS23 = 319,
+ WL_RATE_P_3X4_TXBF_SDM_MCS101 = 320,
+ WL_RATE_P_3X4_TXBF_SDM_MCS102 = 321,
+
+ WL_RATE_3X4_TXBF_VHT0SS3 = 312,
+ WL_RATE_3X4_TXBF_VHT1SS3 = 313,
+ WL_RATE_3X4_TXBF_VHT2SS3 = 314,
+ WL_RATE_3X4_TXBF_VHT3SS3 = 315,
+ WL_RATE_3X4_TXBF_VHT4SS3 = 316,
+ WL_RATE_3X4_TXBF_VHT5SS3 = 317,
+ WL_RATE_3X4_TXBF_VHT6SS3 = 318,
+ WL_RATE_3X4_TXBF_VHT7SS3 = 319,
+ WL_RATE_P_3X4_TXBF_VHT8SS3 = 320,
+ WL_RATE_P_3X4_TXBF_VHT9SS3 = 321,
+ WL_RATE_P_3X4_TXBF_VHT10SS3 = 322,
+ WL_RATE_P_3X4_TXBF_VHT11SS3 = 323,
+
+ /* 4 Streams */
+ WL_RATE_4X4_TXBF_SDM_MCS24 = 324,
+ WL_RATE_4X4_TXBF_SDM_MCS25 = 325,
+ WL_RATE_4X4_TXBF_SDM_MCS26 = 326,
+ WL_RATE_4X4_TXBF_SDM_MCS27 = 327,
+ WL_RATE_4X4_TXBF_SDM_MCS28 = 328,
+ WL_RATE_4X4_TXBF_SDM_MCS29 = 329,
+ WL_RATE_4X4_TXBF_SDM_MCS30 = 330,
+ WL_RATE_4X4_TXBF_SDM_MCS31 = 331,
+ WL_RATE_P_4X4_TXBF_SDM_MCS103 = 332,
+ WL_RATE_P_4X4_TXBF_SDM_MCS104 = 333,
+
+ WL_RATE_4X4_TXBF_VHT0SS4 = 324,
+ WL_RATE_4X4_TXBF_VHT1SS4 = 325,
+ WL_RATE_4X4_TXBF_VHT2SS4 = 326,
+ WL_RATE_4X4_TXBF_VHT3SS4 = 327,
+ WL_RATE_4X4_TXBF_VHT4SS4 = 328,
+ WL_RATE_4X4_TXBF_VHT5SS4 = 329,
+ WL_RATE_4X4_TXBF_VHT6SS4 = 330,
+ WL_RATE_4X4_TXBF_VHT7SS4 = 331,
+ WL_RATE_P_4X4_TXBF_VHT8SS4 = 332,
+ WL_RATE_P_4X4_TXBF_VHT9SS4 = 333,
+ WL_RATE_P_4X4_TXBF_VHT10SS4 = 334,
+ WL_RATE_P_4X4_TXBF_VHT11SS4 = 335
+
+} clm_rates_t;
+
+/* Number of rate codes */
+#define WL_NUMRATES 336
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _bcmwifi_rates_h_ */
--- /dev/null
+/*
+ * Driver O/S-independent utility routines
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmxtlv.c 527361 2015-01-17 01:48:34Z $
+ */
+
+#include <bcm_cfg.h>
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+#include <stdarg.h>
+
+#ifdef BCMDRIVER
+#include <osl.h>
+#else /* !BCMDRIVER */
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
+INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); }
+INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); }
+#endif /* !BCMDRIVER */
+
+#include <bcmendian.h>
+#include <bcmutils.h>
+
+static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts)
+{
+ return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4)
+ : (dlen + BCM_XTLV_HDR_SIZE));
+}
+
+bcm_xtlv_t *
+bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts)
+{
+ int sz;
+ /* advance to next elt */
+ sz = BCM_XTLV_SIZE(elt, opts);
+ elt = (bcm_xtlv_t*)((uint8 *)elt + sz);
+ *buflen -= sz;
+
+ /* validate next elt */
+ if (!bcm_valid_xtlv(elt, *buflen, opts))
+ return NULL;
+
+ return elt;
+}
+
+int
+bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len, bcm_xtlv_opts_t opts)
+{
+ if (!tlv_buf || !buf || !len)
+ return BCME_BADARG;
+
+ tlv_buf->opts = opts;
+ tlv_buf->size = len;
+ tlv_buf->head = buf;
+ tlv_buf->buf = buf;
+ return BCME_OK;
+}
+
+uint16
+bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf)
+{
+ if (tbuf == NULL) return 0;
+ return (uint16)(tbuf->buf - tbuf->head);
+}
+uint16
+bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf)
+{
+ if (tbuf == NULL) return 0;
+ return tbuf->size - bcm_xtlv_buf_len(tbuf);
+}
+uint8 *
+bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf)
+{
+ if (tbuf == NULL) return NULL;
+ return tbuf->buf;
+}
+uint8 *
+bcm_xtlv_head(bcm_xtlvbuf_t *tbuf)
+{
+ if (tbuf == NULL) return NULL;
+ return tbuf->head;
+}
+int
+bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen)
+{
+ bcm_xtlv_t *xtlv;
+ int size;
+
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(dlen, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(dlen);
+ memcpy(xtlv->data, data, dlen);
+ tbuf->buf += size;
+ return BCME_OK;
+}
+int
+bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data)
+{
+ bcm_xtlv_t *xtlv;
+ int size;
+
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(1, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ xtlv->data[0] = data;
+ tbuf->buf += size;
+ return BCME_OK;
+}
+int
+bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data)
+{
+ bcm_xtlv_t *xtlv;
+ int size;
+
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(2, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ htol16_ua_store(data, xtlv->data);
+ tbuf->buf += size;
+ return BCME_OK;
+}
+int
+bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data)
+{
+ bcm_xtlv_t *xtlv;
+ int size;
+
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(4, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ htol32_ua_store(data, xtlv->data);
+ tbuf->buf += size;
+ return BCME_OK;
+}
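+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the original source): building a
+ * small xTLV buffer with the helpers above. MY_XTLV_FOO is a hypothetical record id.
+ *
+ *	uint8 buf[64];
+ *	bcm_xtlvbuf_t tbuf;
+ *
+ *	if (bcm_xtlv_buf_init(&tbuf, buf, (uint16)sizeof(buf), BCM_XTLV_OPTION_ALIGN32) == BCME_OK) {
+ *		(void)bcm_xtlv_put_32(&tbuf, MY_XTLV_FOO, 42);
+ *		// bcm_xtlv_buf_len(&tbuf) now reports the bytes consumed in buf
+ *	}
+ */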
+
+/*
+ * unpacks an xtlv record from buf and checks the type,
+ * copies the data to the caller's buffer,
+ * advances the tlv pointer to the next record;
+ * the caller is responsible for the dst space check
+ */
+int
+bcm_unpack_xtlv_entry(uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst,
+ bcm_xtlv_opts_t opts)
+{
+ bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+ uint16 len;
+ uint16 type;
+
+ ASSERT(ptlv);
+	/* tlv header is always packed in LE order */
+ len = ltoh16(ptlv->len);
+ type = ltoh16(ptlv->id);
+ if (len == 0) {
+ /* z-len tlv headers: allow, but don't process */
+ printf("z-len, skip unpack\n");
+ } else {
+ if ((type != xpct_type) ||
+ (len > xpct_len)) {
+ printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n",
+ type, len, xpct_type, xpct_len);
+ return BCME_BADARG;
+ }
+ /* copy tlv record to caller's buffer */
+		memcpy(dst, ptlv->data, len);
+ }
+ *tlv_buf += BCM_XTLV_SIZE(ptlv, opts);
+ return BCME_OK;
+}
+
+/*
+ * packs user data into tlv record
+ * advances tlv pointer to next xtlv slot
+ * buflen is used for tlv_buf space check
+ */
+int
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src,
+ bcm_xtlv_opts_t opts)
+{
+ bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
+ int size;
+
+ ASSERT(ptlv);
+ ASSERT(src);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+
+	/* check that the destination tlv buffer has room for this entry */
+ if (size > *buflen) {
+ printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n",
+ size, *buflen);
+ return BCME_BADLEN;
+ }
+ ptlv->id = htol16(type);
+ ptlv->len = htol16(len);
+
+ /* copy callers data */
+ memcpy(ptlv->data, src, len);
+
+ /* advance callers pointer to tlv buff */
+ *tlv_buf += size;
+ /* decrement the len */
+ *buflen -= (uint16)size;
+ return BCME_OK;
+}
+
+/*
+ * unpack all xtlv records from the buffer and issue a callback
+ * to the set function, one call per found tlv record
+ */
+int
+bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+ bcm_xtlv_unpack_cbfn_t *cbfn)
+{
+ uint16 len;
+ uint16 type;
+ int res = BCME_OK;
+ int size;
+ bcm_xtlv_t *ptlv;
+ int sbuflen = buflen;
+
+ ASSERT(!buflen || tlv_buf);
+ ASSERT(!buflen || cbfn);
+
+ while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+ ptlv = (bcm_xtlv_t *)tlv_buf;
+
+ /* tlv header is always packed in LE order */
+ len = ltoh16(ptlv->len);
+ type = ltoh16(ptlv->id);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ sbuflen -= size;
+ /* check for possible buffer overrun */
+ if (sbuflen < 0)
+ break;
+
+ if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK)
+ break;
+ tlv_buf += size;
+ }
+ return res;
+}
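+
+/*
+ * Illustrative callback sketch (editor's addition): one call per record from
+ * bcm_unpack_xtlv_buf(). The parameter order follows the invocation above
+ * (ctx, data, type, len); the exact bcm_xtlv_unpack_cbfn_t typedef lives in bcmutils.h.
+ *
+ *	static int
+ *	my_xtlv_cb(void *ctx, uint8 *data, uint16 type, uint16 len)
+ *	{
+ *		// dispatch on 'type' and consume at most 'len' bytes of 'data'
+ *		return BCME_OK;
+ *	}
+ */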
+
+int
+bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+ bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next,
+ int *outlen)
+{
+ int res = BCME_OK;
+ uint16 tlv_id;
+ uint16 tlv_len;
+ uint8 *startp;
+ uint8 *endp;
+ uint8 *buf;
+ bool more;
+ int size;
+
+ ASSERT(get_next && pack_next);
+
+ buf = (uint8 *)tlv_buf;
+ startp = buf;
+ endp = (uint8 *)buf + buflen;
+ more = TRUE;
+ while (more && (buf < endp)) {
+ more = get_next(ctx, &tlv_id, &tlv_len);
+ size = bcm_xtlv_size_for_data(tlv_len, opts);
+ if ((buf + size) >= endp) {
+ res = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
+ htol16_ua_store(tlv_id, buf);
+ htol16_ua_store(tlv_len, buf + sizeof(tlv_id));
+ pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE);
+ buf += size;
+ }
+
+ if (more)
+ res = BCME_BUFTOOSHORT;
+
+done:
+ if (outlen) {
+ *outlen = (int)(buf - startp);
+ }
+ return res;
+}
+
+/*
+ * pack xtlv buffer from memory according to xtlv_desc_t
+ */
+int
+bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts)
+{
+ int res = BCME_OK;
+ uint8 *ptlv = (uint8 *)*tlv_buf;
+
+ while (items->type != 0) {
+		if ((res = bcm_pack_xtlv_entry(&ptlv,
+			buflen, items->type,
+			items->len, items->ptr, opts)) != BCME_OK) {
+ break;
+ }
+ items++;
+ }
+ *tlv_buf = ptlv; /* update the external pointer */
+ return res;
+}
+
+/*
+ * unpack xtlv buffer to memory according to xtlv_desc_t
+ *
+ */
+int
+bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts)
+{
+ int res = BCME_OK;
+ bcm_xtlv_t *elt;
+
+ elt = bcm_valid_xtlv((bcm_xtlv_t *)tlv_buf, *buflen, opts) ? (bcm_xtlv_t *)tlv_buf : NULL;
+ if (!elt || !items) {
+ res = BCME_BADARG;
+ return res;
+ }
+
+ for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) {
+ /* find matches in desc_t items */
+ xtlv_desc_t *dst_desc = items;
+ uint16 len = ltoh16(elt->len);
+
+ while (dst_desc->type != 0) {
+ if (ltoh16(elt->id) == dst_desc->type) {
+ if (len != dst_desc->len) {
+ res = BCME_BADLEN;
+ } else {
+ memcpy(dst_desc->ptr, elt->data, len);
+ }
+ break;
+ }
+ dst_desc++;
+ }
+ }
+
+ if (res == BCME_OK && *buflen != 0)
+ res = BCME_BUFTOOSHORT;
+
+ return res;
+}
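+
+/*
+ * Illustrative descriptor table (editor's sketch) for bcm_pack_xtlv_buf_from_mem() /
+ * bcm_unpack_xtlv_buf_to_mem(); the field names follow the accesses above, while MY_XTLV_A/B
+ * and the variables are hypothetical. A type of 0 terminates the table.
+ *
+ *	uint32 a; uint16 b;
+ *	xtlv_desc_t items[] = {
+ *		{ .type = MY_XTLV_A, .len = sizeof(a), .ptr = &a },
+ *		{ .type = MY_XTLV_B, .len = sizeof(b), .ptr = &b },
+ *		{ .type = 0, .len = 0, .ptr = NULL }
+ *	};
+ */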
+
+/*
+ * return the data pointer of a given ID from an xtlv buffer.
+ * If the specified xTLV ID is found, on return *datalen_out will contain
+ * the data length of that xTLV ID.
+ */
+void *
+bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+ uint16 *datalen_out, bcm_xtlv_opts_t opts)
+{
+ void *retptr = NULL;
+ uint16 type, len;
+ int size;
+ bcm_xtlv_t *ptlv;
+ int sbuflen = buflen;
+
+ while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+ ptlv = (bcm_xtlv_t *)tlv_buf;
+
+ /* tlv header is always packed in LE order */
+ type = ltoh16(ptlv->id);
+ len = ltoh16(ptlv->len);
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ sbuflen -= size;
+ /* check for possible buffer overrun */
+ if (sbuflen < 0) {
+ printf("%s %d: Invalid sbuflen %d\n",
+ __FUNCTION__, __LINE__, sbuflen);
+ break;
+ }
+
+ if (id == type) {
+ retptr = ptlv->data;
+ if (datalen_out) {
+ *datalen_out = len;
+ }
+ break;
+ }
+ tlv_buf += size;
+ }
+
+ return retptr;
+}
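+
+/*
+ * Illustrative lookup (editor's sketch): fetching a single record by id. MY_XTLV_A and the
+ * resp_buf/resp_len variables are hypothetical.
+ *
+ *	uint16 dlen = 0;
+ *	uint8 *val = bcm_get_data_from_xtlv_buf(resp_buf, resp_len, MY_XTLV_A,
+ *		&dlen, BCM_XTLV_OPTION_ALIGN32);
+ *	// val points into resp_buf (or is NULL); dlen holds that record's data length
+ */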
+
+int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
+{
+ int size; /* entire size of the XTLV including header, data, and optional padding */
+	int len;	/* real length of the XTLV's value, without padding */
+
+ len = BCM_XTLV_LEN(elt);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ return size;
+}
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd.h 504503 2014-09-24 11:28:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd.h 610267 2016-01-06 16:03:53Z $
*/
/****************
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#include <dhd_buzzz.h>
/* The kernel threading is sdio-specific */
struct task_struct;
struct sched_param;
#include <WdfMiniport.h>
#endif /* (BCMWDF) */
-#if defined(WL11U) && !defined(MFP)
-#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */
-#endif /* WL11U */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+#define MAX_RESCHED_CNT 600
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) && LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* 3.13.0 <= LINUX_KERNEL_VERSION < 3.18.0 || CONFIG_BCMDHD_VENDOR_EXT */
+#if defined(CONFIG_ANDROID) && defined(WL_VENDOR_EXT_SUPPORT)
+#if !defined(GSCAN_SUPPORT)
+#define GSCAN_SUPPORT
+#endif
+#endif /* CONFIG_ANDROID && WL_VENDOR_EXT_SUPPORT */
#if defined(KEEP_ALIVE)
/* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending Keep Alive probe frames */
#define KEEP_ALIVE_PERIOD 55000
#define NULL_PKT_STR "null_pkt"
#endif /* KEEP_ALIVE */
+
/* Forward decls */
struct dhd_bus;
struct dhd_prot;
DHD_BUS_LOAD, /* Download access only (CPU reset) */
DHD_BUS_DATA, /* Ready for frame transfers */
DHD_BUS_SUSPEND, /* Bus has been suspended */
+ DHD_BUS_DOWN_IN_PROGRESS, /* Bus going Down */
};
-#if defined(NDISVER)
-#if (NDISVER >= 0x0600)
-/* Firmware requested operation mode */
-#define STA_MASK 0x0001
-#define HOSTAPD_MASK 0x0002
-#define WFD_MASK 0x0004
-#define SOFTAP_FW_MASK 0x0008
-#define P2P_GO_ENABLED 0x0010
-#define P2P_GC_ENABLED 0x0020
-#define CONCURENT_MASK 0x00F0
-#endif /* (NDISVER >= 0x0600) */
-#endif /* #if defined(NDISVER) */
-
-#define DHD_IF_ROLE_STA(role) (role == WLC_E_IF_ROLE_STA ||\
- role == WLC_E_IF_ROLE_P2P_CLIENT)
+/*
+ * Bit fields to indicate clean-up processes that must be waited on until they finish.
+ * Future synchronizable processes can add their bit field below and update
+ * their functionality accordingly.
+ */
+#define DHD_BUS_BUSY_IN_TX 0x01
+#define DHD_BUS_BUSY_IN_SEND_PKT 0x02
+#define DHD_BUS_BUSY_IN_DPC 0x04
+#define DHD_BUS_BUSY_IN_WD 0x08
+#define DHD_BUS_BUSY_IN_IOVAR 0x10
+#define DHD_BUS_BUSY_IN_DHD_IOVAR 0x20
+#define DHD_BUS_BUSY_IN_SUSPEND 0x40
+#define DHD_BUS_BUSY_IN_RESUME 0x80
+#define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100
+#define DHD_BUS_BUSY_RPM_SUSPEND_DONE 0x200
+#define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS 0x400
+#define DHD_BUS_BUSY_RPM_ALL (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \
+ DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \
+ DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
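+
+/* Illustrative use of the busy bits (editor's sketch; dhdp is a hypothetical dhd_pub_t pointer
+ * and the caller is assumed to hold the usual dhd locking while touching dhd_bus_busy_state):
+ *
+ *	dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC;
+ *	// ... run the DPC work ...
+ *	dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC;
+ */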
+
+/* Download Types */
+typedef enum download_type {
+ FW,
+ NVRAM
+} download_type_t;
+
/* For supporting multiple interfaces */
#define DHD_MAX_IFS 16
DHD_FLAG_P2P_GO_MODE = (1 << (6)),
DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */
DHD_FLAG_IBSS_MODE = (1 << (8)),
- DHD_FLAG_MFG_MODE = (1 << (9))
+ DHD_FLAG_MFG_MODE = (1 << (9)),
+ DHD_FLAG_RSDB_MODE = (1 << (10)),
+ DHD_FLAG_MP2P_MODE = (1 << (11))
};
+#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \
+ (dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1)
+
/* Max sequential TX/RX Control timeouts to set HANG event */
#ifndef MAX_CNTL_TX_TIMEOUT
#define MAX_CNTL_TX_TIMEOUT 2
#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
#ifndef POWERUP_MAX_RETRY
-#define POWERUP_MAX_RETRY 0//3 /* how many times we retry to power up the chip */
+#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
#endif
#ifndef POWERUP_WAIT_MS
#define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */
#endif
+#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */
+#ifdef DHD_DEBUG
+#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
+#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
+#endif
+
+#define FW_VER_STR_LEN 128
enum dhd_bus_wake_state {
WAKE_LOCK_OFF,
DHD_PREALLOC_DHD_INFO = 7,
DHD_PREALLOC_DHD_WLFC_INFO = 8,
DHD_PREALLOC_IF_FLOW_LKUP = 9,
- DHD_PREALLOC_FLOWRING = 10
+ DHD_PREALLOC_MEMDUMP_BUF = 10,
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15
+};
+
+enum dhd_dongledump_mode {
+ DUMP_DISABLED = 0,
+ DUMP_MEMONLY,
+ DUMP_MEMFILE,
+ DUMP_MEMFILE_BUGON,
+ DUMP_MEMFILE_MAX
+};
+
+enum dhd_dongledump_type {
+ DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+ DUMP_TYPE_D3_ACK_TIMEOUT,
+ DUMP_TYPE_DONGLE_TRAP,
+ DUMP_TYPE_MEMORY_CORRUPTION,
+ DUMP_TYPE_PKTID_AUDIT_FAILURE,
+ DUMP_TYPE_SCAN_TIMEOUT,
+ DUMP_TYPE_SCAN_BUSY,
+ DUMP_TYPE_BY_SYSDUMP,
+ DUMP_TYPE_BY_LIVELOCK,
+ DUMP_TYPE_AP_LINKUP_FAILURE
+};
+
+enum dhd_hang_reason {
+ HANG_REASON_MASK = 0x8000,
+ HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+ HANG_REASON_DONGLE_TRAP = 0x8002,
+ HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+ HANG_REASON_BUS_DOWN = 0x8004,
+ HANG_REASON_PCIE_LINK_DOWN = 0x8005,
+ HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+ HANG_REASON_P2P_IFACE_DEL_FAILURE = 0x8007,
+ HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+ HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+ HANG_REASON_MAX = 0x800a
+};
+
+enum dhd_rsdb_scan_features {
+ /* Downgraded scan feature for AP active */
+ RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01,
+ /* Downgraded scan feature for P2P Discovery */
+ RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02,
+ /* Enable channel pruning for ROAM SCAN */
+ RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10,
+ /* Enable channel pruning for any SCAN */
+ RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20
};
/* Packet alignment for most efficient SDIO (can change based on platform) */
#define DHD_SDALIGN 32
#endif
+/**
+ * DMA-able buffer parameters
+ * - dmaaddr_t is 32 bits on a 32-bit host.
+ * dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr
+ * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer.
+ */
+typedef struct dhd_dma_buf {
+ void *va; /* virtual address of buffer */
+ uint32 len; /* user requested buffer length */
+ dmaaddr_t pa; /* physical address of buffer */
+ void *dmah; /* dma mapper handle */
+ void *secdma; /* secure dma sec_cma_info handle */
+ uint32 _alloced; /* actual size of buffer allocated with align and pad */
+} dhd_dma_buf_t;
+
/* host packet-reordering logic */
/* followed the structure to hold the reorder buffers (void **p) */
typedef struct reorder_info {
#endif /* DHDTCPACK_SUPPRESS */
+/*
+ * Accumulate the queue lengths of all flowring queues in a parent object, in
+ * order to assert flow control when the cumulative queue length crosses an
+ * upper threshold defined on the parent object. The upper threshold may be
+ * maintained at a station level, at an interface level, or per dhd instance.
+ *
+ * cumm_ctr_t abstraction:
+ * The cumm_ctr_t abstraction may be enhanced to use an object with a hysteresis
+ * pause on/off threshold callback.
+ * All macros use the address of the cumulative length in the parent object.
+ *
+ * BCM_GMAC3 builds use a single perimeter lock, as opposed to a per-queue lock,
+ * so cumulative counters in parent objects may be updated without spinlocks.
+ *
+ * In non-BCM_GMAC3 builds, if a cumulative queue length is desired across all
+ * flows belonging to either a station, an interface, or a dhd instance, then an
+ * atomic operation is required, using atomic_t cumulative counters or a
+ * spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct.
+ */
+
+/* Cumulative length not supported. */
+typedef uint32 cumm_ctr_t;
+#define DHD_CUMM_CTR_PTR(clen) ((cumm_ctr_t*)(clen))
+#define DHD_CUMM_CTR(clen) *(DHD_CUMM_CTR_PTR(clen)) /* accessor */
+#define DHD_CUMM_CTR_READ(clen) DHD_CUMM_CTR(clen) /* read access */
+#define DHD_CUMM_CTR_INIT(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_INCR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+#define DHD_CUMM_CTR_DECR(clen) \
+ ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
+
/* DMA'ing r/w indices for rings supported */
#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */
#define DMA_INDX_ENAB(dma_indxsup) 0
} tdls_peer_tbl_t;
#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+#ifdef DHD_LOG_DUMP
+/* The structure below describes the ring buffer. */
+struct dhd_log_dump_buf
+{
+ spinlock_t lock;
+ unsigned int wraparound;
+ unsigned long max;
+ unsigned int remain;
+ char* present;
+ char* front;
+ char* buffer;
+};
+
+#define DHD_LOG_DUMP_BUFFER_SIZE (1024 * 1024)
+#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256
+
+extern void dhd_log_dump_print(const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#endif /* DHD_LOG_DUMP */
+#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/log/"
+
/* Common structure for module and instance linkage */
typedef struct dhd_pub {
	/* Linkage pointers */
bool txoff; /* Transmit flow-controlled */
bool dongle_reset; /* TRUE = DEVRESET put dongle into reset */
enum dhd_bus_state busstate;
+ uint dhd_bus_busy_state; /* Bus busy state */
uint hdrlen; /* Total DHD header length (proto + bus) */
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
ulong rx_dropped; /* Packets dropped locally (no memory) */
ulong rx_flushed; /* Packets flushed due to unscheduled sendup thread */
ulong wd_dpc_sched; /* Number of times dhd dpc scheduled by watchdog timer */
-
+ ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */
+ ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */
ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
ulong fc_packets; /* Number of flow control pkts recvd */
int pktfilter_count;
wl_country_t dhd_cspec; /* Current Locale info */
+#ifdef CUSTOM_COUNTRY_CODE
+ u32 dhd_cflags;
+#endif /* CUSTOM_COUNTRY_CODE */
+ bool force_country_change;
char eventmask[WL_EVENTING_MASK_LEN];
int op_mode; /* STA, HostAPD, WFD, SoftAP */
*/
/* #define WL_ENABLE_P2P_IF 1 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
#endif
-#ifdef WLBTAMP
- uint16 maxdatablks;
-#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
bool wlfc_enabled;
int wlfc_mode;
/* platform specific function for wlfc_enable and wlfc_deinit */
void (*plat_init)(void *dhd);
void (*plat_deinit)(void *dhd);
+#ifdef DHD_WLFC_THREAD
+ bool wlfc_thread_go;
+ struct task_struct* wlfc_thread;
+ wait_queue_head_t wlfc_wqhead;
+#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
void *pno_state;
+#endif
+#ifdef RTT_SUPPORT
+ void *rtt_state;
#endif
bool dongle_isolation;
bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
int hang_was_sent;
int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
int txcnt_timeout; /* counter txcnt timeout to send HANG */
+#ifdef BCMPCIE
+ int d3ackcnt_timeout; /* counter d3ack timeout to send HANG */
+#endif /* BCMPCIE */
bool hang_report; /* enable hang report by default */
+ uint16 hang_reason; /* reason codes for HANG event */
#ifdef WLMEDIA_HTSF
uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
#endif
bool tdls_enable;
#endif
struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
- char fw_capabilities[WLC_IOCTL_SMLEN];
+ #define WLC_IOCTL_MAXBUF_FWCAP 512
+ char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
#define MAXSKBPEND 1024
void *skbbuf[MAXSKBPEND];
uint32 store_idx;
#if defined(ARP_OFFLOAD_SUPPORT)
uint32 arp_version;
#endif
-#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */
-#endif
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#ifdef PKT_FILTER_SUPPORT
- uint pkt_filter_mode;
- uint pkt_filter_ports_count;
- uint16 pkt_filter_ports[WL_PKT_FILTER_PORTS_MAX];
-#endif /* PKT_FILTER_SUPPORT */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ bool dhd_bug_on;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
#ifdef CUSTOM_SET_CPUCORE
struct task_struct * current_dpc;
struct task_struct * current_rxf;
int chan_isvht80;
#endif /* CUSTOM_SET_CPUCORE */
+
void *sta_pool; /* pre-allocated pool of sta objects */
void *staid_allocator; /* allocator of sta indexes */
-
+#ifdef PCIE_FULL_DONGLE
+ bool flow_rings_inited; /* set this flag after initializing flow rings */
+#endif /* PCIE_FULL_DONGLE */
void *flowid_allocator; /* unique flowid allocator */
void *flow_ring_table; /* flow ring table, include prot and bus info */
void *if_flow_lkup; /* per interface flowid lkup hash table */
void *flowid_lock; /* per os lock for flowid info protection */
+ void *flowring_list_lock; /* per os lock for flowring list protection */
uint32 num_flow_rings;
-
- uint32 d2h_sync_mode; /* D2H DMA completion sync mode */
-
+ cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
+ uint32 d2h_sync_mode; /* D2H DMA completion sync mode */
uint8 flow_prio_map[NUMPRIO];
uint8 flow_prio_map_type;
char enable_log[MAX_EVENT];
bool dma_d2h_ring_upd_support;
bool dma_h2d_ring_upd_support;
+
#ifdef DHD_WMF
bool wmf_ucast_igmp;
#ifdef DHD_IGMP_UCQUERY
bool wmf_ucast_upnp;
#endif
#endif /* DHD_WMF */
-#ifdef DHD_UNICAST_DHCP
- bool dhcp_unicast;
-#endif /* DHD_UNICAST_DHCP */
#ifdef DHD_L2_FILTER
- bool block_ping;
-#endif
-#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+ unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */
+#endif /* DHD_L2_FILTER */
+ uint8 *soc_ram;
+ uint32 soc_ram_length;
+ uint32 memdump_type;
+#ifdef DHD_FW_COREDUMP
+ uint32 memdump_enabled;
+#endif /* DHD_FW_COREDUMP */
+#ifdef PCIE_FULL_DONGLE
+#ifdef WLTDLS
tdls_peer_tbl_t peer_tbl;
-#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+#endif /* WLTDLS */
+#endif /* PCIE_FULL_DONGLE */
+#ifdef CACHE_FW_IMAGES
+ char *cached_fw;
+ int cached_fw_length;
+ char *cached_nvram;
+ int cached_nvram_length;
+#endif
+#ifdef WLTDLS
+ uint32 tdls_mode;
+#endif
+#ifdef DHD_LOSSLESS_ROAMING
+ uint8 dequeue_prec_map;
+#endif
+ struct mutex wl_up_lock;
+ bool is_fw_download_done;
+#ifdef DHD_LOG_DUMP
+ struct dhd_log_dump_buf dld_buf;
+ unsigned int dld_enable;
+#endif /* DHD_LOG_DUMP */
char *conf_path; /* module_param: path to config vars file */
struct dhd_conf *conf; /* Bus module handle */
} dhd_pub_t;
+#if defined(PCIE_FULL_DONGLE)
+
+/* Packet Tag for PCIE Full Dongle DHD */
+typedef struct dhd_pkttag_fd {
+ uint16 flowid; /* Flowring Id */
+ uint16 dataoff; /* start of packet */
+ uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */
+ dmaaddr_t pa; /* physical address */
+ void *dmah; /* dma mapper handle */
+ void *secdma; /* secure dma sec_cma_info handle */
+} dhd_pkttag_fd_t;
+
+/* Packet Tag for DHD PCIE Full Dongle */
+#define DHD_PKTTAG_FD(pkt) ((dhd_pkttag_fd_t *)(PKTTAG(pkt)))
+
+#define DHD_PKT_GET_FLOWID(pkt) ((DHD_PKTTAG_FD(pkt))->flowid)
+#define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \
+ DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid)
+
+#define DHD_PKT_GET_DATAOFF(pkt) ((DHD_PKTTAG_FD(pkt))->dataoff)
+#define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \
+ DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff)
+
+#define DHD_PKT_GET_DMA_LEN(pkt) ((DHD_PKTTAG_FD(pkt))->dma_len)
+#define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \
+ DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len)
+
+#define DHD_PKT_GET_PA(pkt) ((DHD_PKTTAG_FD(pkt))->pa)
+#define DHD_PKT_SET_PA(pkt, pkt_pa) \
+ DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa)
+
+#define DHD_PKT_GET_DMAH(pkt) ((DHD_PKTTAG_FD(pkt))->dmah)
+#define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \
+ DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah)
+
+#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma)
+#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
+ DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
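+
+/* Example (illustrative sketch only): stamping a TX packet with its flow ring
+ * id and DMA length before hand-off to the bus layer. The packet pointer and
+ * values are hypothetical; only the DHD_PKT_* accessors come from this file.
+ */
+static inline void
+dhd_pkttag_fd_usage_sketch(void *pkt, uint16 flowid, uint16 dma_len)
+{
+	DHD_PKT_SET_FLOWID(pkt, flowid);
+	DHD_PKT_SET_DMA_LEN(pkt, dma_len);
+	ASSERT(DHD_PKT_GET_FLOWID(pkt) == flowid);
+}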
+#endif /* PCIE_FULL_DONGLE */
+
#if defined(BCMWDF)
typedef struct {
dhd_pub_t *dhd_pub;
} while (0)
#define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
#define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
- #ifdef CUSTOMER_HW4
- #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
- if (dhd_mmc_suspend) { \
- printf("%s[%d]: mmc is still in suspend state!!!\n", \
- __FUNCTION__, __LINE__); \
- return a; \
- } \
- } while (0)
- #else
- #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
- if (dhd_mmc_suspend) return a; } while (0)
- #endif
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
+ if (dhd_mmc_suspend) { \
+ printf("%s[%d]: mmc is still in suspend state!!!\n", \
+ __FUNCTION__, __LINE__); \
+ return a; \
+ } \
+ } while (0)
#define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
*/
extern int dhd_os_wake_lock(dhd_pub_t *pub);
extern int dhd_os_wake_unlock(dhd_pub_t *pub);
+extern int dhd_event_wake_lock(dhd_pub_t *pub);
+extern int dhd_event_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern void dhd_os_wake_lock_init(struct dhd_info *dhd);
+extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd);
#ifdef BCMPCIE_OOB_HOST_WAKE
-extern int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
-extern int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
+extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
#endif /* BCMPCIE_OOB_HOST_WAKE */
-extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
-extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
+#ifdef DHD_USE_SCAN_WAKELOCK
+extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val);
+extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub);
+#endif /* DHD_USE_SCAN_WAKELOCK */
inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhdp->wl_softap_lock);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_lock(&dhdp->wl_softap_lock);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhdp->wl_softap_lock);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
+#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub)
+#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub)
#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
dhd_os_wake_lock_rx_timeout_enable(pub, val)
dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
dhd_os_wake_lock_ctrl_timeout_cancel(pub)
-#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub)
-#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub)
+#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd);
+#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd);
#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
+
#ifdef BCMPCIE_OOB_HOST_WAKE
#define OOB_WAKE_LOCK_TIMEOUT 500
-#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
-#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
+#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
+#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+#ifdef DHD_DEBUG_SCAN_WAKELOCK
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
+ do { \
+ printf("call wake_lock_scan: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_scan_wake_lock_timeout(pub, val); \
+ } while (0)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
+ do { \
+ printf("call wake_unlock_scan: %s %d\n", \
+ __FUNCTION__, __LINE__); \
+ dhd_os_scan_wake_unlock(pub); \
+ } while (0)
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub)
+#endif /* DHD_DEBUG_SCAN_WAKELOCK */
+#else
+#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
+#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
+#endif /* DHD_USE_SCAN_WAKELOCK */
#define DHD_PACKET_TIMEOUT_MS 500
#define DHD_EVENT_TIMEOUT_MS 1500
+#define SCAN_WAKE_LOCK_TIMEOUT 10000
+/* Enum for IOCTL received status */
+typedef enum dhd_ioctl_recieved_status
+{
+ IOCTL_WAIT = 0,
+ IOCTL_RETURN_ON_SUCCESS,
+ IOCTL_RETURN_ON_TRAP,
+ IOCTL_RETURN_ON_BUS_STOP
+} dhd_ioctl_recieved_status_t;
/* interface operations (register, remove) should be atomic, use this lock to prevent race
* condition among wifi on/off and interface operation functions
/* Notify tx completion */
extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
-
+extern void dhd_dpc_enable(dhd_pub_t *dhdp);
+
+#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */
+#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */
+#define WIFI_FEATURE_HOTSPOT 0x0004 /* Support for GAS/ANQP */
+#define WIFI_FEATURE_P2P 0x0008 /* Wifi-Direct */
+#define WIFI_FEATURE_SOFT_AP 0x0010 /* Soft AP */
+#define WIFI_FEATURE_GSCAN 0x0020 /* Google-Scan APIs */
+#define WIFI_FEATURE_NAN 0x0040 /* Neighbor Awareness Networking */
+#define WIFI_FEATURE_D2D_RTT 0x0080 /* Device-to-device RTT */
+#define WIFI_FEATURE_D2AP_RTT 0x0100 /* Device-to-AP RTT */
+#define WIFI_FEATURE_BATCH_SCAN 0x0200 /* Batched Scan (legacy) */
+#define WIFI_FEATURE_PNO 0x0400 /* Preferred network offload */
+#define WIFI_FEATURE_ADDITIONAL_STA 0x0800 /* Support for two STAs */
+#define WIFI_FEATURE_TDLS 0x1000 /* Tunneled Direct Link Setup */
+#define WIFI_FEATURE_TDLS_OFFCHANNEL 0x2000 /* Support for TDLS off channel */
+#define WIFI_FEATURE_EPR 0x4000 /* Enhanced power reporting */
+#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */
+#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */
+
+#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3
+
+extern int dhd_dev_get_feature_set(struct net_device *dev);
+extern int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs);
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
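+
+/* Example (illustrative sketch only): testing bits in the Android feature-set
+ * mask returned by dhd_dev_get_feature_set(). The helper name is hypothetical;
+ * the WIFI_FEATURE_* flags and the accessor come from this header.
+ */
+static inline bool
+dhd_dev_feature_supported_sketch(struct net_device *dev, int feature_bit)
+{
+	int feature_set = dhd_dev_get_feature_set(dev);
+	return (feature_set & feature_bit) != 0;
+}
+
+/* e.g. dhd_dev_feature_supported_sketch(ndev, WIFI_FEATURE_P2P) */
+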
/* OS independent layer functions */
+extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub);
+extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub);
extern int dhd_os_proto_block(dhd_pub_t * pub);
extern int dhd_os_proto_unblock(dhd_pub_t * pub);
-extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool * pending);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition);
extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub);
+extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub);
+extern int dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason);
+
+#define DHD_OS_IOCTL_RESP_LOCK(x)
+#define DHD_OS_IOCTL_RESP_UNLOCK(x)
+
extern int dhd_os_get_image_block(char * buf, int len, void * image);
extern void * dhd_os_open_image(char * filename);
extern void dhd_os_close_image(void * image);
extern void dhd_os_wd_timer(void *bus, uint wdtick);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern void dhd_os_runtimepm_timer(void *bus, uint tick);
+#endif /* DHD_PCIE_RUNTIMEPM */
extern void dhd_os_sdlock(dhd_pub_t * pub);
extern void dhd_os_sdunlock(dhd_pub_t * pub);
extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
#ifdef DHDTCPACK_SUPPRESS
-extern void dhd_os_tcpacklock(dhd_pub_t *pub);
-extern void dhd_os_tcpackunlock(dhd_pub_t *pub);
+extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub);
+extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags);
#endif /* DHDTCPACK_SUPPRESS */
extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+#ifdef CUSTOM_COUNTRY_CODE
+extern void get_customized_country_code(void *adapter, char *country_iso_code,
+wl_country_t *cspec, u32 flags);
+#else
extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
#endif
+#if defined(DHD_FW_COREDUMP)
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef SUPPORT_AP_POWERSAVE
+extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
+#endif /* SUPPORT_AP_POWERSAVE */
+
#ifdef PKT_FILTER_SUPPORT
#define DHD_UNICAST_FILTER_NUM 0
#define DHD_MULTICAST6_FILTER_NUM 3
#define DHD_MDNS_FILTER_NUM 4
#define DHD_ARP_FILTER_NUM 5
-
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-/* Port based packet filtering command actions */
-#define PKT_FILTER_PORTS_CLEAR 0
-#define PKT_FILTER_PORTS_ADD 1
-#define PKT_FILTER_PORTS_DEL 2
-#define PKT_FILTER_PORTS_LOOPBACK 3
-#define PKT_FILTER_PORTS_MAX PKT_FILTER_PORTS_LOOPBACK
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
-extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
+extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
extern int net_os_enable_packet_filter(struct net_device *dev, int val);
extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-extern void dhd_set_packet_filter_mode(struct net_device *dev, char *command);
-extern int dhd_set_packet_filter_ports(struct net_device *dev, char *command);
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
#endif /* PKT_FILTER_SUPPORT */
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
-#ifdef DHD_DEBUG
extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
-#endif /* DHD_DEBUG */
typedef struct {
uint32 limit; /* Expiration time (usec) */
int num_fmts;
char **fmts;
char *raw_fmts;
+ char *raw_sstr;
+ uint32 ramstart;
+ uint32 rodata_start;
+ uint32 rodata_end;
+ char *rom_raw_sstr;
+ uint32 rom_ramstart;
+ uint32 rom_rodata_start;
+ uint32 rom_rodata_end;
} dhd_event_log_t;
#endif /* SHOW_LOGTRACE */
extern int dhd_timeout_expired(dhd_timeout_t *tmo);
extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
-extern int dhd_ifidx2hostidx(struct dhd_info *dhd, int ifidx);
extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
extern struct net_device * dhd_idx2net(void *pub, int ifidx);
extern int net_os_send_hang_message(struct net_device *dev);
+extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num);
+extern bool dhd_wowl_cap(void *bus);
+
extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
wl_event_msg_t *, void **data_ptr, void *);
extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern int wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr);
extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
int ifindex);
+extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+ int cmd, uint8 set, int ifidx);
+extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+ int cmd, uint8 set, int ifidx);
extern void dhd_common_init(osl_t *osh);
extern int dhd_do_driver_init(struct net_device *net);
extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
char *name, uint8 *mac);
extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
- uint8 *mac, uint8 bssidx, bool need_rtnl_lock);
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name);
extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
-extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval);
+extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval);
#if defined(BCMSDIO) || defined(BCMPCIE)
extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
extern void dhd_del_sta(void *pub, int ifidx, void *ea);
extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+#if defined(BCM_GMAC3)
+extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
+extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set);
/* Watchdog timer interval */
extern uint dhd_watchdog_ms;
+extern bool dhd_os_wd_timer_enabled(void *bus);
+
+#ifdef PKT_STATICS
+typedef struct pkt_statics {
+ uint16 event_count;
+ uint32 event_size;
+ uint16 ctrl_count;
+ uint32 ctrl_size;
+ uint32 data_count;
+ uint32 data_size;
+ uint16 glom_1_count;
+ uint16 glom_3_count;
+ uint16 glom_3_8_count;
+ uint16 glom_8_count;
+ uint16 glom_count;
+ uint32 glom_size;
+ uint16 test_count;
+ uint32 test_size;
+} pkt_statics_t;
+#endif
+
+#ifdef DHD_PCIE_RUNTIMEPM
+extern uint dhd_runtimepm_ms;
+#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console output poll interval */
extern uint wl_dbg_level;
#endif
-#ifdef CUSTOMER_HW
-struct wifi_platform_data {
- int (*set_power)(bool val);
- int (*set_carddetect)(bool val);
- void *(*mem_prealloc)(int section, unsigned long size);
- int (*get_mac_addr)(unsigned char *buf);
- void *(*get_country_code)(char *ccode);
-};
-#endif
-
extern uint dhd_slpauto;
/* Use interrupts */
/* SDIO Drive Strength */
extern uint dhd_sdiod_drive_strength;
+/* triggers bcm_bprintf to print to kernel log */
+extern bool bcm_bprintf_bypass;
+
/* Override to force tx queueing all the time */
extern uint dhd_force_tx_queueing;
/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
#endif
#define DEFAULT_WIFI_TURNOFF_DELAY 0
-#ifndef WIFI_TURNOFF_DELAY
#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY
-#endif /* WIFI_TURNOFF_DELAY */
#define DEFAULT_WIFI_TURNON_DELAY 200
#ifndef WIFI_TURNON_DELAY
#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY
#endif /* WIFI_TURNON_DELAY */
-#ifdef BCMSDIO
#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */
-#else
-#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 0 /* msec */
-#endif
#ifndef CUSTOM_DHD_WATCHDOG_MS
#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS
#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
+#define DEFAULT_ASSOC_RETRY_MAX 3
+#ifndef CUSTOM_ASSOC_RETRY_MAX
+#define CUSTOM_ASSOC_RETRY_MAX DEFAULT_ASSOC_RETRY_MAX
+#endif /* DEFAULT_ASSOC_RETRY_MAX */
+
+
#ifdef WLTDLS
#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60 sec to tear down TDLS if not active */
/* Flag to indicate if we should download firmware on driver load */
extern uint dhd_download_fw_on_driverload;
+extern int allow_delay_fwdl;
extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
NdisStallExecution(1);
#define IFUNLOCK(lock) InterlockedExchange((lock), 0)
#define IFLOCK_FREE(lock)
-#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, #capa) != NULL))
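+/* Note: the capability name is matched as a whole, space-delimited token so
+ * that one capability name cannot falsely match inside another, longer
+ * capability word in the firmware capability string.
+ */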
+#define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))
#ifdef ARP_OFFLOAD_SUPPORT
#define MAX_IPV4_ENTRIES 8
void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef WLTDLS
int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode);
#ifdef PCIE_FULL_DONGLE
void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr);
#endif /* PCIE_FULL_DONGLE */
#endif /* WLTDLS */
/* Neighbor Discovery Offload Support */
-int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
+extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
/* ioctl processing for nl80211 */
#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+#ifdef USE_WFA_CERT_CONF
+enum {
+ SET_PARAM_BUS_TXGLOM_MODE,
+ SET_PARAM_ROAMOFF,
+#ifdef USE_WL_FRAMEBURST
+ SET_PARAM_FRAMEBURST,
+#endif /* USE_WL_FRAMEBURST */
+#ifdef USE_WL_TXBF
+ SET_PARAM_TXBF,
+#endif /* USE_WL_TXBF */
+#ifdef PROP_TXSTATUS
+ SET_PARAM_PROPTX,
+ SET_PARAM_PROPTXMODE,
+#endif /* PROP_TXSTATUS */
+ PARAM_LAST_VALUE
+};
+extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
+#endif /* USE_WFA_CERT_CONF */
#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0)
/* Disable router 3GMAC bypass path perimeter lock */
#define DHD_PERIM_LOCK(dhdp) do {} while (0)
#define DHD_PERIM_UNLOCK(dhdp) do {} while (0)
+#define DHD_PERIM_LOCK_ALL(processor_id) do {} while (0)
+#define DHD_PERIM_UNLOCK_ALL(processor_id) do {} while (0)
/* Enable DHD general spin lock/unlock */
#define DHD_GENERAL_LOCK(dhdp, flags) \
#define DHD_FLOWID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_FLOWID_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+/* Enable DHD common flowring list spin lock/unlock */
+#define DHD_FLOWRING_LIST_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_FLOWRING_LIST_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
+
+
+#ifdef DHD_L2_FILTER
+extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif /* DHD_L2_FILTER */
typedef struct wl_io_pport {
dhd_pub_t *dhd_pub;
uint ifidx;
} wl_io_pport_t;
-extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub);
-#ifdef CONFIG_MACH_UNIVERSAL5433
-extern int check_rev(void);
+typedef struct wl_evt_pport {
+ dhd_pub_t *dhd_pub;
+ int *ifidx;
+ void *pktdata;
+ void **data_ptr;
+ void *raw_event;
+} wl_evt_pport_t;
+
+extern void *dhd_pub_shim(dhd_pub_t *dhd_pub);
+#ifdef DHD_FW_COREDUMP
+void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length);
+#endif /* DHD_FW_COREDUMP */
+
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable);
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
+void custom_rps_map_clear(struct netdev_rx_queue *queue);
+#define PRIMARY_INF 0
+#define VIRTUAL_INF 1
+#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \
+ defined(CONFIG_SOC_EXYNOS8890)
+#define RPS_CPUS_MASK "10"
+#define RPS_CPUS_MASK_P2P "10"
+#define RPS_CPUS_MASK_IBSS "10"
+#define RPS_CPUS_WLAN_CORE_ID 4
+#else
+#define RPS_CPUS_MASK "6"
+#define RPS_CPUS_MASK_P2P "6"
+#define RPS_CPUS_MASK_IBSS "6"
+#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
+#endif
+
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+ char ** buffer, int *length);
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length);
+
+#define dhd_is_device_removed(x) FALSE
+#define dhd_os_ind_firmware_stall(x)
+
+#ifdef DHD_FW_COREDUMP
+extern void dhd_get_memdump_info(dhd_pub_t *dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+extern void dhd_get_assert_info(dhd_pub_t *dhd);
+#endif /* BCMASSERT_LOG */
+
+
+#if defined(DHD_LB_STATS)
+#include <bcmutils.h>
+extern void dhd_lb_stats_init(dhd_pub_t *dhd);
+extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count);
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp);
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp);
+#define DHD_LB_STATS_INIT(dhdp) dhd_lb_stats_init(dhdp)
+/* Reset is called from common layer so it takes dhd_pub_t as argument */
+#define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_init(dhdp)
+#define DHD_LB_STATS_CLR(x) (x) = 0U
+#define DHD_LB_STATS_INCR(x) (x) = (x) + 1
+#define DHD_LB_STATS_ADD(x, c) (x) = (x) + (c)
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x) \
+ { \
+ int cpu = get_cpu(); put_cpu(); \
+ DHD_LB_STATS_INCR(x[cpu]); \
+ }
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x) dhd_lb_stats_update_napi_histo(dhdp, x)
+#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhdp, x) dhd_lb_stats_update_txc_histo(dhdp, x)
+#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhdp, x) dhd_lb_stats_update_rxc_histo(dhdp, x)
+#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_txc_percpu_cnt_incr(dhdp)
+#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) dhd_lb_stats_rxc_percpu_cnt_incr(dhdp)
+#else /* !DHD_LB_STATS */
+#define DHD_LB_STATS_NOOP do { /* noop */ } while (0)
+#define DHD_LB_STATS_INIT(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_CLR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_INCR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_ADD(x, c) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_PERCPU_ARR_INCR(x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, x) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
+#define DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhdp) DHD_LB_STATS_NOOP
+#endif /* !DHD_LB_STATS */
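+
+/* Example (illustrative sketch only): bumping load-balancer statistics from a
+ * hypothetical TX-completion path. The counter argument is made up; the
+ * DHD_LB_STATS_* macros come from this header and compile to no-ops when
+ * DHD_LB_STATS is not defined.
+ */
+static inline void
+dhd_lb_stats_usage_sketch(dhd_pub_t *dhdp, uint32 *txc_counter)
+{
+	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhdp); /* per-CPU TX-completion count */
+	DHD_LB_STATS_INCR(*txc_counter);        /* caller-owned counter */
+}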
+
+#ifdef DHD_PCIE_RUNTIMEPM
+extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
+extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
+extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
+extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
+extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp);
+/* Disable the Runtime PM and wake up if the bus is already in suspend */
+#define DHD_DISABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_disable(dhdp); \
+} while (0);
+
+/* Enable the Runtime PM */
+#define DHD_ENABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_enable(dhdp); \
+} while (0);
+#else
+#define DHD_DISABLE_RUNTIME_PM(dhdp)
+#define DHD_ENABLE_RUNTIME_PM(dhdp)
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+extern void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs);
+
+/*
+ * Enable this macro if you want to track calls to the wake lock.
+ * The records can be printed with the following command:
+ * cat /sys/bcm-dhd/wklock_trace
+ * DHD_TRACE_WAKE_LOCK is supported on Linux kernel 2.6.0 and later.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#undef DHD_TRACE_WAKE_LOCK
+#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
#endif
+
+extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
+
+#ifdef ENABLE_TEMP_THROTTLING
+#define TEMP_THROTTLE_CONTROL_BIT 0xf /* Enable all features. */
+#endif /* ENABLE_TEMP_THROTTLING */
+
+#ifdef DHD_PKTID_AUDIT_ENABLED
+void dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
#endif /* _dhd_h_ */
/*
* BT-AMP support routines
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_bta.c 434434 2013-11-06 07:16:02Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_bta.c 514727 2014-11-12 03:02:48Z $
*/
-#ifndef WLBTAMP
#error "WLBTAMP is not defined"
-#endif /* WLBTAMP */
#include <typedefs.h>
#include <osl.h>
/*
* BT-AMP support routines
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_bta.h 291086 2011-10-21 01:17:24Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_bta.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef __dhd_bta_h__
#define __dhd_bta_h__
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_bus.h 497466 2014-08-19 15:41:01Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_bus.h 602721 2015-11-27 10:32:48Z $
*/
#ifndef _dhd_bus_h_
extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
#ifdef BCMSDIO
extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+/* return sdio io status */
+extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus);
#else
#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
#endif
#ifdef BCMPCIE
enum {
- DNGL_TO_HOST_BUF_IOCT,
- DNGL_TO_HOST_DMA_SCRATCH_BUFFER,
- DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN,
- HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
- HOST_TO_DNGL_DMA_READINDX_BUFFER,
- DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
- DNGL_TO_HOST_DMA_READINDX_BUFFER,
+	/* Scratch buffer configuration update */
+ D2H_DMA_SCRATCH_BUF,
+ D2H_DMA_SCRATCH_BUF_LEN,
+
+ /* DMA Indices array buffers for: H2D WR and RD, and D2H WR and RD */
+ H2D_DMA_INDX_WR_BUF, /* update H2D WR dma indices buf base addr to dongle */
+ H2D_DMA_INDX_RD_BUF, /* update H2D RD dma indices buf base addr to dongle */
+ D2H_DMA_INDX_WR_BUF, /* update D2H WR dma indices buf base addr to dongle */
+ D2H_DMA_INDX_RD_BUF, /* update D2H RD dma indices buf base addr to dongle */
+
+ /* DHD sets/gets WR or RD index, in host's H2D and D2H DMA indices buffer */
+ H2D_DMA_INDX_WR_UPD, /* update H2D WR index in H2D WR dma indices buf */
+ H2D_DMA_INDX_RD_UPD, /* update H2D RD index in H2D RD dma indices buf */
+ D2H_DMA_INDX_WR_UPD, /* update D2H WR index in D2H WR dma indices buf */
+ D2H_DMA_INDX_RD_UPD, /* update D2H RD index in D2H RD dma indices buf */
+
+ /* H2D and D2H Mailbox data update */
+ H2D_MB_DATA,
+ D2H_MB_DATA,
+
+ /* (Common) MsgBuf Ring configuration update */
+ RING_BUF_ADDR, /* update ring base address to dongle */
+ RING_ITEM_LEN, /* update ring item size to dongle */
+ RING_MAX_ITEMS, /* update ring max items to dongle */
+
+ /* Update of WR or RD index, for a MsgBuf Ring */
+ RING_RD_UPD, /* update ring read index from/to dongle */
+ RING_WR_UPD, /* update ring write index from/to dongle */
+
TOTAL_LFRAG_PACKET_CNT,
- HTOD_MB_DATA,
- DTOH_MB_DATA,
- RING_BUF_ADDR,
- H2D_DMA_WRITEINDX,
- H2D_DMA_READINDX,
- D2H_DMA_WRITEINDX,
- D2H_DMA_READINDX,
- RING_READ_PTR,
- RING_WRITE_PTR,
- RING_LEN_ITEMS,
- RING_MAX_ITEM,
MAX_HOST_RXBUFS
};
+
typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
uint16 ringid);
extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
extern void dhd_bus_start_queue(struct dhd_bus *bus);
extern void dhd_bus_stop_queue(struct dhd_bus *bus);
-extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint16 status,
- uint32 resp_len);
+
extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
void * data, uint16 flowid);
extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
-extern uint8 dhd_bus_is_txmode_push(struct dhd_bus *bus);
-extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
+
+
extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
extern int dhd_bus_request_irq(struct dhd_bus *bus);
+#ifdef DHD_FW_COREDUMP
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#endif /* DHD_FW_COREDUMP */
+
#endif /* BCMPCIE */
#endif /* _dhd_bus_h_ */
--- /dev/null
+#ifndef _DHD_BUZZZ_H_INCLUDED_
+#define _DHD_BUZZZ_H_INCLUDED_
+
+/*
+ * Broadcom logging system - Empty implementation
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_buzzz.h 591283 2015-10-07 11:52:00Z $
+ */
+
+#define dhd_buzzz_attach() do { /* noop */ } while (0)
+#define dhd_buzzz_detach() do { /* noop */ } while (0)
+#define dhd_buzzz_panic(x) do { /* noop */ } while (0)
+#define BUZZZ_LOG(ID, N, ARG...) do { /* noop */ } while (0)
+
+#endif /* _DHD_BUZZZ_H_INCLUDED_ */
/*
* DHD Protocol Module for CDC and BDC.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_cdc.c 492377 2014-07-21 19:54:06Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cdc.c 596022 2015-10-29 11:02:47Z $
*
* BDC is like CDC, except it includes a header for data packets to convey
* packet priority over the bus, and flags (e.g. to indicate checksum status
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
do {
ret = dhd_bus_rxctl(dhd->bus, (uchar*)&prot->msg, cdc_len);
if (ret < 0)
break;
} while (CDC_IOC_ID(ltoh32(prot->msg.flags)) != id);
-
return ret;
}
return -EIO;
}
+ if (cmd == WLC_SET_PM) {
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
+ }
+
memset(msg, 0, sizeof(cdc_ioctl_t));
msg->cmd = htol32(cmd);
void
dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
+ if (!dhdp || !dhdp->prot)
+ return;
bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
#ifdef PROP_TXSTATUS
dhd_wlfc_dump(dhdp, strbuf);
goto exit;
}
- if ((*ifidx = BDC_GET_IF_IDX(h)) >= DHD_MAX_IFS) {
- DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
- __FUNCTION__, *ifidx));
- return BCME_ERROR;
- }
+ *ifidx = BDC_GET_IF_IDX(h);
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != BDC_PROTO_VER) {
DHD_ERROR(("%s: non-BDC packet received, flags = 0x%x\n",
PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
#endif /* BDC */
-#if defined(NDISVER)
-#if (NDISVER < 0x0630)
- if (PKTLEN(dhd->osh, pktbuf) < (uint32) (data_offset << 2)) {
- DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
- PKTLEN(dhd->osh, pktbuf), (data_offset * 4)));
- return BCME_ERROR;
- }
-#endif /* #if defined(NDISVER) */
-#endif /* (NDISVER < 0x0630) */
#ifdef PROP_TXSTATUS
if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
wlc_rev_info_t revinfo;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+#ifdef BCMASSERT_LOG
+ dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
/* Get the device rev info */
memset(&revinfo, 0, sizeof(revinfo));
int dhd_prot_init(dhd_pub_t *dhd)
{
- return TRUE;
+ return BCME_OK;
}
void
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cfg80211.c 591285 2015-10-07 11:56:29Z $
*/
#include <linux/vmalloc.h>
}
struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
- uint8 *mac, uint8 bssidx)
+ uint8 *mac, uint8 bssidx, char *dngl_name)
{
- return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE);
+ return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE, dngl_name);
}
int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
return err;
}
+
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
+ const struct bcm_nlmsg_hdr *nlioc, void *buf)
+{
+ struct net_device *ndev = NULL;
+ dhd_pub_t *dhd;
+ dhd_ioctl_t ioc = { 0 };
+ int ret = 0;
+ int8 index;
+
+ WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+
+ dhd = cfg->pub;
+ DHD_OS_WAKE_LOCK(dhd);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->hang_was_sent) {
+ WL_ERR(("HANG was sent up earlier\n"));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ index = dhd_net2idx(dhd->info, ndev);
+ if (index == DHD_BAD_IF) {
+ WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ ioc.cmd = nlioc->cmd;
+ ioc.len = nlioc->len;
+ ioc.set = nlioc->set;
+ ioc.driver = nlioc->magic;
+ ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+ if (ret) {
+ WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+ ret = OSL_ERROR(ret);
+ goto done;
+ }
+
+done:
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return ret;
+}
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cfg80211.h 591285 2015-10-07 11:56:29Z $
*/
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
+#include <brcm_nl80211.h>
#ifndef WL_ERR
#define WL_ERR CFG80211_ERR
s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
+int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data);
#endif /* __DHD_CFG80211__ */
/*
* Linux cfg80211 vendor command/event handlers of DHD
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_cfg_vendor.c 495605 2014-08-07 18:41:34Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_cfg_vendor.c 525516 2015-01-09 23:12:53Z $
*/
#include <linux/vmalloc.h>
/*
* Broadcom Dongle Host Driver (DHD), common DHD core.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_common.c 492215 2014-07-20 16:44:15Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_common.c 609263 2015-12-31 16:21:33Z $
*/
#include <typedefs.h>
#include <osl.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
-#ifdef WLBTAMP
-#include <proto/bt_amp_hci.h>
-#include <dhd_bta.h>
-#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+
#ifdef WLMEDIA_HTSF
extern void htsf_update(struct dhd_info *dhd, void *data);
#endif
-int dhd_msg_level = DHD_ERROR_VAL;
+#ifdef DHD_LOG_DUMP
+int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL;
+#else
+int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL;
+#endif /* DHD_LOG_DUMP */
+
+
+#if defined(WL_WLC_SHIM)
+#include <wl_shim.h>
+#else
+#endif /* WL_WLC_SHIM */
#include <wl_iw.h>
#if !defined(AP) && defined(WLP2P)
extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
#endif
+
+extern int dhd_socram_dump(struct dhd_bus *bus);
+
bool ap_cfg_running = FALSE;
bool ap_fw_loaded = FALSE;
#endif /* DHD_DEBUG */
#if defined(DHD_DEBUG)
-const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
- DHD_COMPILED " on " __DATE__ " at " __TIME__;
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
#else
const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
#endif
+char fw_version[FW_VER_STR_LEN] = "\0";
void dhd_set_timer(void *bus, uint wdtick);
IOV_LOGSTAMP,
IOV_GPIOOB,
IOV_IOCTLTIMEOUT,
-#ifdef WLBTAMP
- IOV_HCI_CMD, /* HCI command */
- IOV_HCI_ACL_DATA, /* HCI data packet */
-#endif
#if defined(DHD_DEBUG)
IOV_CONS,
IOV_DCONSOLE_POLL,
+ IOV_DHD_JOIN_TIMEOUT_DBG,
+ IOV_SCAN_TIMEOUT,
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
IOV_PROPTXSTATUS_ENABLE,
IOV_PROPTXSTATUS_MODE,
IOV_PROPTXSTATUS_OPT,
-#ifdef QMONITOR
- IOV_QMON_TIME_THRES,
- IOV_QMON_TIME_PERCENT,
-#endif /* QMONITOR */
IOV_PROPTXSTATUS_MODULE_IGNORE,
IOV_PROPTXSTATUS_CREDIT_IGNORE,
IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
#endif /* DHD_UCAST_UPNP */
#endif /* DHD_WMF */
IOV_AP_ISOLATE,
-#ifdef DHD_UNICAST_DHCP
- IOV_DHCP_UNICAST,
-#endif /* DHD_UNICAST_DHCP */
#ifdef DHD_L2_FILTER
+ IOV_DHCP_UNICAST,
IOV_BLOCK_PING,
-#endif
+ IOV_PROXY_ARP,
+ IOV_GRAT_ARP,
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_PSTA
+ IOV_PSTA,
+#endif /* DHD_PSTA */
+ IOV_CFG80211_OPMODE,
+ IOV_ASSERT_TYPE,
+ IOV_LMTEST,
IOV_LAST
};
{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 },
{"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 },
{"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 },
-#ifdef WLBTAMP
- {"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0},
- {"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0},
-#endif
#ifdef PROP_TXSTATUS
{"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 },
/*
*/
{"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 },
{"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 },
-#ifdef QMONITOR
- {"qtime_thres", IOV_QMON_TIME_THRES, 0, IOVT_UINT32, 0 },
- {"qtime_percent", IOV_QMON_TIME_PERCENT, 0, IOVT_UINT32, 0 },
-#endif /* QMONITOR */
{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 },
{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 },
{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 },
{"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 },
#endif /* DHD_UCAST_UPNP */
#endif /* DHD_WMF */
-#ifdef DHD_UNICAST_DHCP
+#ifdef DHD_L2_FILTER
{"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 },
-#endif /* DHD_UNICAST_DHCP */
+#endif /* DHD_L2_FILTER */
{"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0},
#ifdef DHD_L2_FILTER
{"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0},
-#endif
+ {"proxy_arp", IOV_PROXY_ARP, (0), IOVT_BOOL, 0},
+ {"grat_arp", IOV_GRAT_ARP, (0), IOVT_BOOL, 0},
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_PSTA
+	/* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
+	{"psta", IOV_PSTA, 0, IOVT_UINT32, 0},
+#endif /* DHD_PSTA */
+ {"op_mode", IOV_CFG80211_OPMODE, 0, IOVT_UINT32, 0 },
+ {"assert_type", IOV_ASSERT_TYPE, (0), IOVT_UINT32, 0},
+ {"lmtest", IOV_LMTEST, 0, IOVT_UINT32, 0 },
{NULL, 0, 0, 0, 0 }
};
#define DHD_IOVAR_BUF_SIZE 128
+#ifdef DHD_FW_COREDUMP
+void dhd_save_fwdump(dhd_pub_t *dhd_pub, void * buffer, uint32 length)
+{
+ if (dhd_pub->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhd_pub, dhd_pub->soc_ram, dhd_pub->soc_ram_length);
+#else
+ MFREE(dhd_pub->osh, dhd_pub->soc_ram, dhd_pub->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhd_pub->soc_ram = NULL;
+ dhd_pub->soc_ram_length = 0;
+ }
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
+ DHD_PREALLOC_MEMDUMP_RAM, length);
+ memset(dhd_pub->soc_ram, 0, length);
+#else
+ dhd_pub->soc_ram = (uint8*) MALLOCZ(dhd_pub->osh, length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ if (dhd_pub->soc_ram == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory for FW crash snapshot.\n",
+ __FUNCTION__));
+ return;
+ }
+
+ dhd_pub->soc_ram_length = length;
+ memcpy(dhd_pub->soc_ram, buffer, length);
+}
+#endif /* DHD_FW_COREDUMP */
+
/* to NDIS developer, the structure dhd_common is redundant,
* please do NOT merge it back from other branches !!!
*/
struct bcmstrbuf b;
struct bcmstrbuf *strbuf = &b;
+ if (!dhdp || !dhdp->prot || !buf)
+ return BCME_ERROR;
bcm_binit(strbuf, buf, buflen);
dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
dhdp->rx_readahead_cnt, dhdp->tx_realloc);
+ bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
+ dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
bcm_bprintf(strbuf, "\n");
/* Add any prot info */
dhd_bus_dump(dhdp, strbuf);
+#if defined(DHD_LB_STATS)
+ dhd_lb_stats_dump(dhdp, strbuf);
+#endif /* DHD_LB_STATS */
+
return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
}
+void
+dhd_dump_to_kernelog(dhd_pub_t *dhdp)
+{
+ char buf[512];
+
+ DHD_ERROR(("F/W version: %s\n", fw_version));
+ bcm_bprintf_bypass = TRUE;
+ dhd_dump(dhdp, buf, sizeof(buf));
+ bcm_bprintf_bypass = FALSE;
+}
+
int
dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
{
return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
}
+int
+dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
+ int cmd, uint8 set, int ifidx)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+
+ /* memset(iovbuf, 0, sizeof(iovbuf)); */
+ if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
+ ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
+ if (!ret) {
+ *pval = ltoh32(*((uint*)iovbuf));
+ } else {
+ DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
+ __FUNCTION__, name, ret));
+ }
+ } else {
+ DHD_ERROR(("%s: mkiovar %s failed\n",
+ __FUNCTION__, name));
+ }
+
+ return ret;
+}
+
+int
+dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
+ int cmd, uint8 set, int ifidx)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+ int lval = htol32(val);
+
+ /* memset(iovbuf, 0, sizeof(iovbuf)); */
+ if (bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf))) {
+ ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
+ if (ret) {
+ DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
+ __FUNCTION__, name, ret));
+ }
+ } else {
+ DHD_ERROR(("%s: mkiovar %s failed\n",
+ __FUNCTION__, name));
+ }
+
+ return ret;
+}
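+
+/* Usage sketch (illustrative, not part of the original change): these helpers
+ * wrap the bcm_mkiovar()/dhd_wl_ioctl_cmd() pairs that are open-coded further
+ * down in this file, e.g.
+ *
+ *	uint arp_ver = 0;
+ *	dhd_wl_ioctl_set_intiovar(dhd, "arpoe", 1, WLC_SET_VAR, TRUE, 0);
+ *	dhd_wl_ioctl_get_intiovar(dhd, "arp_version", &arp_ver, WLC_GET_VAR, FALSE, 0);
+ *
+ * The iovar names are taken from existing callers below; whether a given iovar
+ * is supported depends on the dongle firmware build.
+ */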
+
int
dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
{
int ret = BCME_ERROR;
+ unsigned long flags;
if (dhd_os_proto_block(dhd_pub))
{
+#ifdef DHD_LOG_DUMP
+ int slen, i, val, rem;
+ long int lval;
+ char *pval, *pos, *msg;
+ char tmp[64];
+#endif /* DHD_LOG_DUMP */
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ if (dhd_pub->busstate == DHD_BUS_DOWN ||
+ dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhd_pub->busstate));
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_proto_unblock(dhd_pub);
+ return -ENODEV;
+ }
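+ /* Mark the bus busy with an iovar; the flag is cleared after the ioctl
+ * completes, and dhd_os_busbusy_wake() then releases anyone waiting on the
+ * busy state.
+ */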
+ dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR;
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+
+#ifdef DHD_LOG_DUMP
+ /* WLC_GET_VAR */
+ if (ioc->cmd == WLC_GET_VAR) {
+ memset(tmp, 0, sizeof(tmp));
+ bcopy(ioc->buf, tmp, strlen(ioc->buf) + 1);
+ }
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(WL_WLC_SHIM)
- wl_info_t *wl = dhd_pub_wlinfo(dhd_pub);
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
- wl_io_pport_t io_pport;
- io_pport.dhd_pub = dhd_pub;
- io_pport.ifidx = ifidx;
+ wl_io_pport_t io_pport;
+ io_pport.dhd_pub = dhd_pub;
+ io_pport.ifidx = ifidx;
- ret = wl_shim_ioctl(wl->shim, ioc, &io_pport);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: wl_shim_ioctl(%d) ERR %d\n", __FUNCTION__, ioc->cmd, ret));
+ ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
+ if (ret != BCME_OK) {
+ DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
+ __FUNCTION__, ioc->cmd, ret));
+ }
}
#else
ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
dhd_pub->busstate = DHD_BUS_DOWN;
}
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR;
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+
dhd_os_proto_unblock(dhd_pub);
+#ifdef DHD_LOG_DUMP
+ if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) {
+ lval = 0;
+ slen = strlen(ioc->buf) + 1;
+ msg = (char*)ioc->buf;
+ if (ioc->cmd == WLC_GET_VAR) {
+ bcopy(msg, &lval, sizeof(long int));
+ msg = tmp;
+ } else {
+ bcopy((msg + slen), &lval, sizeof(long int));
+ }
+ DHD_ERROR_EX(("%s: cmd: %d, msg: %s, val: 0x%lx, len: %d, set: %d\n",
+ ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
+ ioc->cmd, msg, lval, ioc->len, ioc->set));
+ } else {
+ slen = ioc->len;
+ if (ioc->buf != NULL) {
+ val = *(int*)ioc->buf;
+ pval = (char*)ioc->buf;
+ pos = tmp;
+ rem = sizeof(tmp);
+ memset(tmp, 0, sizeof(tmp));
+ for (i = 0; i < slen; i++) {
+ pos += snprintf(pos, rem, "%02x ", pval[i]);
+ rem = sizeof(tmp) - (int)(pos - tmp);
+ if (rem <= 0) {
+ break;
+ }
+ }
+ DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, val: %d(%s), len: %d, set: %d\n",
+ ioc->cmd, val, tmp, ioc->len, ioc->set));
+ } else {
+ DHD_ERROR_EX(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
+ }
+ }
+#endif /* DHD_LOG_DUMP */
}
return ret;
p = p + 1;
bcopy(p, &bssidx, sizeof(uint32));
/* Get corresponding dhd index */
- bssidx = dhd_bssidx2idx(dhd_pub, bssidx);
+ bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
if (bssidx >= DHD_MAX_IFS) {
DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
return BCME_OK;
}
+#if defined(DHD_DEBUG) && defined(BCMDHDUSB)
+/* USB Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+ DHD_TRACE(("%s \n", __FUNCTION__));
+
+ return dhd_iovar(dhd, 0, "cons", msg, msglen, 1);
+
+}
+#endif /* DHD_DEBUG && BCMDHDUSB */
+
+#ifdef PKT_STATICS
+extern pkt_statics_t tx_statics;
+extern void dhdsdio_txpktstatics(void);
+#endif
static int
dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
case IOV_GVAL(IOV_VERSION):
/* Need to have checked buffer length */
bcm_strncpy_s((char*)arg, len, dhd_version, len);
+#ifdef PKT_STATICS
+ memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
break;
case IOV_GVAL(IOV_WLMSGLEVEL):
int_val = (int32)wl_dbg_level;
bcopy(&int_val, arg, val_size);
printf("cfg_msg_level=0x%x\n", wl_dbg_level);
+#endif
+#ifdef PKT_STATICS
+ dhdsdio_txpktstatics();
#endif
break;
bcmerror = BCME_NOTUP;
break;
}
+
+ if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) {
+ dhd_watchdog_ms = (uint)int_val;
+ }
+
dhd_os_wd_timer(dhd_pub, (uint)int_val);
break;
dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
dhd_pub->tx_dropped = 0;
dhd_pub->rx_dropped = 0;
+ dhd_pub->tx_pktgetfail = 0;
+ dhd_pub->rx_pktgetfail = 0;
dhd_pub->rx_readahead_cnt = 0;
dhd_pub->tx_realloc = 0;
dhd_pub->wd_dpc_sched = 0;
/* clear proptxstatus related counters */
dhd_wlfc_clear_counts(dhd_pub);
#endif /* PROP_TXSTATUS */
+ DHD_LB_STATS_RESET(dhd_pub);
break;
break;
}
-#ifdef WLBTAMP
- case IOV_SVAL(IOV_HCI_CMD): {
- amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
-
- /* sanity check: command preamble present */
- if (len < HCI_CMD_PREAMBLE_SIZE)
- return BCME_BUFTOOSHORT;
-
- /* sanity check: command parameters are present */
- if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
- return BCME_BUFTOOSHORT;
-
- dhd_bta_docmd(dhd_pub, cmd, len);
- break;
- }
-
- case IOV_SVAL(IOV_HCI_ACL_DATA): {
- amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
-
- /* sanity check: HCI header present */
- if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
- return BCME_BUFTOOSHORT;
-
- /* sanity check: ACL data is present */
- if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
- return BCME_BUFTOOSHORT;
-
- dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
- break;
- }
-#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
dhd_wlfc_set_mode(dhd_pub, int_val);
break;
-#ifdef QMONITOR
- case IOV_GVAL(IOV_QMON_TIME_THRES): {
- int_val = dhd_qmon_thres(dhd_pub, FALSE, 0);
- bcopy(&int_val, arg, val_size);
- break;
- }
-
- case IOV_SVAL(IOV_QMON_TIME_THRES): {
- dhd_qmon_thres(dhd_pub, TRUE, int_val);
- break;
- }
-
- case IOV_GVAL(IOV_QMON_TIME_PERCENT): {
- int_val = dhd_qmon_getpercent(dhd_pub);
- bcopy(&int_val, arg, val_size);
- break;
- }
-#endif /* QMONITOR */
case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
#endif /* DHD_WMF */
-#ifdef DHD_UNICAST_DHCP
- case IOV_GVAL(IOV_DHCP_UNICAST):
- int_val = dhd_pub->dhcp_unicast;
- bcopy(&int_val, arg, val_size);
+#ifdef DHD_L2_FILTER
+ case IOV_GVAL(IOV_DHCP_UNICAST): {
+ uint32 bssidx;
+ char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+ __FUNCTION__, name));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
break;
- case IOV_SVAL(IOV_DHCP_UNICAST):
- if (dhd_pub->dhcp_unicast == int_val)
+ }
+ case IOV_SVAL(IOV_DHCP_UNICAST): {
+ uint32 bssidx;
+ char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
+ __FUNCTION__, name));
+ bcmerror = BCME_BADARG;
break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+ case IOV_GVAL(IOV_BLOCK_PING): {
+ uint32 bssidx;
+ char *val;
- if (int_val >= OFF || int_val <= ON) {
- dhd_pub->dhcp_unicast = int_val;
- } else {
- bcmerror = BCME_RANGE;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
}
+ int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
break;
-#endif /* DHD_UNICAST_DHCP */
-#ifdef DHD_L2_FILTER
- case IOV_GVAL(IOV_BLOCK_PING):
- int_val = dhd_pub->block_ping;
+ }
+ case IOV_SVAL(IOV_BLOCK_PING): {
+ uint32 bssidx;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+ case IOV_GVAL(IOV_PROXY_ARP): {
+ uint32 bssidx;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_parp_status(dhd_pub, bssidx);
bcopy(&int_val, arg, val_size);
break;
- case IOV_SVAL(IOV_BLOCK_PING):
- if (dhd_pub->block_ping == int_val)
+ }
+ case IOV_SVAL(IOV_PROXY_ARP): {
+ uint32 bssidx;
+ char *val;
+ char iobuf[32];
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
break;
- if (int_val >= OFF || int_val <= ON) {
- dhd_pub->block_ping = int_val;
- } else {
- bcmerror = BCME_RANGE;
+ }
+ bcopy(val, &int_val, sizeof(int_val));
+
+ /* Issue an iovar request to WL to update the proxy arp capability bit
+ * in the Extended Capability IE of beacons/probe responses.
+ */
+ bcm_mkiovar("proxy_arp_advertise", val, sizeof(int_val), iobuf,
+ sizeof(iobuf));
+ bcmerror = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, iobuf,
+ sizeof(iobuf), TRUE, bssidx);
+
+ if (bcmerror == BCME_OK) {
+ dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
}
break;
-#endif
+ }
+ case IOV_GVAL(IOV_GRAT_ARP): {
+ uint32 bssidx;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_GRAT_ARP): {
+ uint32 bssidx;
+ char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ memcpy(&int_val, val, sizeof(int_val));
+ bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
+ break;
+ }
+#endif /* DHD_L2_FILTER */
case IOV_GVAL(IOV_AP_ISOLATE): {
uint32 bssidx;
char *val;
dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
break;
}
+#ifdef DHD_PSTA
+ case IOV_GVAL(IOV_PSTA): {
+ int_val = dhd_get_psta_mode(dhd_pub);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_PSTA): {
+ if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
+ dhd_set_psta_mode(dhd_pub, int_val);
+ } else {
+ bcmerror = BCME_RANGE;
+ }
+ break;
+ }
+#endif /* DHD_PSTA */
+ case IOV_GVAL(IOV_CFG80211_OPMODE): {
+ int_val = (int32)dhd_pub->op_mode;
+ bcopy(&int_val, arg, sizeof(int_val));
+ break;
+ }
+ case IOV_SVAL(IOV_CFG80211_OPMODE): {
+ if (int_val <= 0)
+ bcmerror = BCME_BADARG;
+ else
+ dhd_pub->op_mode = int_val;
+ break;
+ }
+
+ case IOV_GVAL(IOV_ASSERT_TYPE):
+ int_val = g_assert_type;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_ASSERT_TYPE):
+ g_assert_type = (uint32)int_val;
+ break;
+
+
+ case IOV_GVAL(IOV_LMTEST): {
+ *(uint32 *)arg = (uint32)lmtest;
+ break;
+ }
+
+ case IOV_SVAL(IOV_LMTEST): {
+ uint32 val = *(uint32 *)arg;
+ if (val > 50)
+ bcmerror = BCME_BADARG;
+ else {
+ lmtest = (uint)val;
+ DHD_ERROR(("%s: lmtest %s\n",
+ __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
+ }
+ break;
+ }
default:
bcmerror = BCME_UNSUPPORTED;
dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
{
int bcmerror = 0;
+ unsigned long flags;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
return BCME_BADARG;
}
+ dhd_os_dhdiovar_lock(dhd_pub);
switch (ioc->cmd) {
- case DHD_GET_MAGIC:
- if (buflen < sizeof(int))
- bcmerror = BCME_BUFTOOSHORT;
- else
- *(int*)buf = DHD_IOCTL_MAGIC;
- break;
+ case DHD_GET_MAGIC:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_MAGIC;
+ break;
- case DHD_GET_VERSION:
- if (buflen < sizeof(int))
- bcmerror = BCME_BUFTOOSHORT;
- else
- *(int*)buf = DHD_IOCTL_VERSION;
- break;
+ case DHD_GET_VERSION:
+ if (buflen < sizeof(int))
+ bcmerror = BCME_BUFTOOSHORT;
+ else
+ *(int*)buf = DHD_IOCTL_VERSION;
+ break;
- case DHD_GET_VAR:
- case DHD_SET_VAR: {
- char *arg;
- uint arglen;
+ case DHD_GET_VAR:
+ case DHD_SET_VAR:
+ {
+ char *arg;
+ uint arglen;
+
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ if (dhd_pub->busstate == DHD_BUS_DOWN ||
+ dhd_pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
+ /* In platforms like FC19, the FW download is done via IOCTL
+ * and should not return error for IOCTLs fired before FW
+ * Download is done
+ */
+ if (dhd_pub->is_fw_download_done) {
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhd_pub->busstate));
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return -ENODEV;
+ }
+ }
+ dhd_pub->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR;
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ /* scan past the name to any arguments */
+ for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
+ ;
+
+ if (*arg) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto unlock_exit;
+ }
- /* scan past the name to any arguments */
- for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
- ;
+ /* account for the NUL terminator */
+ arg++, arglen--;
- if (*arg) {
- bcmerror = BCME_BUFTOOSHORT;
- break;
- }
+ /* call with the appropriate arguments */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
+ buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
+ arg, arglen, IOV_SET);
+ }
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto unlock_exit;
+ }
- /* account for the NUL terminator */
- arg++, arglen--;
+ /* not in generic table, try protocol module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+ if (bcmerror != BCME_UNSUPPORTED) {
+ goto unlock_exit;
+ }
- /* call with the appropriate arguments */
- if (ioc->cmd == DHD_GET_VAR)
- bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
- buf, buflen, IOV_GET);
- else
- bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
- if (bcmerror != BCME_UNSUPPORTED)
+ /* if still not found, try bus module */
+ if (ioc->cmd == DHD_GET_VAR) {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ arg, arglen, buf, buflen, IOV_GET);
+ } else {
+ bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+ }
+ }
+ goto unlock_exit;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ }
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return bcmerror;
+
+unlock_exit:
+ DHD_GENERAL_LOCK(dhd_pub, flags);
+ dhd_pub->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR;
+ dhd_os_busbusy_wake(dhd_pub);
+ DHD_GENERAL_UNLOCK(dhd_pub, flags);
+ dhd_os_dhdiovar_unlock(dhd_pub);
+ return bcmerror;
+}
+
+#ifdef SHOW_EVENTS
+#ifdef SHOW_LOGTRACE
+
+#define MAX_NO_OF_ARG 16
+
+#define FMTSTR_SIZE 132
+#define SIZE_LOC_STR 50
+#define MIN_DLEN 4
+#define TAG_BYTES 12
+#define TAG_WORDS 3
+#define ROMSTR_SIZE 200
+
+
+static int
+check_event_log_sequence_number(uint32 seq_no)
+{
+ int32 diff;
+ uint32 ret;
+ static uint32 logtrace_seqnum_prev = 0;
+
+ diff = ntoh32(seq_no)-logtrace_seqnum_prev;
+ switch (diff)
+ {
+ case 0:
+ ret = -1; /* duplicate packet, drop it */
break;
- /* not in generic table, try protocol module */
- if (ioc->cmd == DHD_GET_VAR)
- bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
- arglen, buf, buflen, IOV_GET);
- else
- bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
- NULL, 0, arg, arglen, IOV_SET);
- if (bcmerror != BCME_UNSUPPORTED)
+ case 1:
+ ret = 0; /* in order */
break;
- /* if still not found, try bus module */
- if (ioc->cmd == DHD_GET_VAR) {
- bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
- arg, arglen, buf, buflen, IOV_GET);
- } else {
- bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
- NULL, 0, arg, arglen, IOV_SET);
- }
+ default:
+ if ((ntoh32(seq_no) == 0) &&
+ (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */
+ ret = 0;
+ } else {
- break;
+ if (diff > 0) {
+ DHD_EVENT(("WLC_E_TRACE:"
+ "Event lost (log) seqnum %d nblost %d\n",
+ ntoh32(seq_no), (diff-1)));
+ } else {
+ DHD_EVENT(("WLC_E_TRACE:"
+ "Event Packets coming out of order!!\n"));
+ }
+ ret = 0;
+ }
+ }
+
+ logtrace_seqnum_prev = ntoh32(seq_no);
+
+ return ret;
+}
+
+static void
+dhd_eventmsg_print(dhd_pub_t *dhd_pub, void *event_data, void *raw_event_ptr,
+ uint datalen, const char *event_name)
+{
+ msgtrace_hdr_t hdr;
+ uint32 nblost;
+ uint8 count;
+ char *s, *p;
+ static uint32 seqnum_prev = 0;
+ uint32 *log_ptr = NULL;
+ uchar *buf;
+ event_log_hdr_t event_hdr;
+ uint32 i;
+ int32 j;
+
+ dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr;
+
+ char fmtstr_loc_buf[FMTSTR_SIZE] = {0};
+ char (*str_buf)[SIZE_LOC_STR] = NULL;
+ char * str_tmpptr = NULL;
+ uint32 addr = 0;
+ uint32 **hdr_ptr = NULL;
+ uint32 h_i = 0;
+ uint32 hdr_ptr_len = 0;
+
+ typedef union {
+ uint32 val;
+ char * addr;
+ } u_arg;
+ u_arg arg[MAX_NO_OF_ARG] = {{0}};
+ char *c_ptr = NULL;
+ char rom_log_str[ROMSTR_SIZE] = {0};
+ uint32 rom_str_len = 0;
+
+ BCM_REFERENCE(arg);
+
+ if (!DHD_FWLOG_ON())
+ return;
+
+ buf = (uchar *) event_data;
+ memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+
+ if (hdr.version != MSGTRACE_VERSION) {
+ DHD_EVENT(("\nMACEVENT: %s [unsupported version --> "
+ "dhd version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version));
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+ return;
}
- default:
- bcmerror = BCME_UNSUPPORTED;
- }
+ if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) {
+ /* There are 2 bytes available at the end of data */
+ buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
+
+ if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
+ DHD_FWLOG(("WLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr.discarded_bytes),
+ ntoh32(hdr.discarded_printf)));
+ }
+
+ nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
+ if (nblost > 0) {
+ DHD_FWLOG(("WLC_E_TRACE:"
+ "[Event lost (msg) --> seqnum %d nblost %d\n",
+ ntoh32(hdr.seqnum), nblost));
+ }
+ seqnum_prev = ntoh32(hdr.seqnum);
+
+ /* Display the trace buffer. Advance from
+ * \n to \n to avoid display big
+ * printf (issue with Linux printk )
+ */
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
+ *s = '\0';
+ DHD_FWLOG(("[FWLOG] %s\n", p));
+ p = s+1;
+ }
+ if (*p)
+ DHD_FWLOG(("[FWLOG] %s", p));
+
+ /* Reset datalen to avoid display below */
+ datalen = 0;
+
+ } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
+ /* Let the standard event printing work for now */
+ uint32 timestamp, seq, pktlen;
+
+ if (check_event_log_sequence_number(hdr.seqnum)) {
+
+ DHD_EVENT(("%s: WLC_E_TRACE:"
+ "[Event duplicate (log) %d] dropping!!\n",
+ __FUNCTION__, hdr.seqnum));
+ return; /* drop duplicate events */
+ }
+
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ datalen -= MSGTRACE_HDRLEN;
+ pktlen = ltoh16(*((uint16 *)p));
+ seq = ltoh16(*((uint16 *)(p + 2)));
+ p += MIN_DLEN;
+ datalen -= MIN_DLEN;
+ timestamp = ltoh32(*((uint32 *)p));
+ BCM_REFERENCE(pktlen);
+ BCM_REFERENCE(seq);
+ BCM_REFERENCE(timestamp);
+
+ /*
+ * Allocate room for the maximum possible number of event TAG headers in the
+ * received buffer, given that each event consumes at least TAG_BYTES.
+ */
+ hdr_ptr_len = ((datalen/TAG_BYTES)+1) * sizeof(uint32*);
+
+ if ((raw_event->fmts)) {
+ if (!(str_buf = MALLOCZ(dhd_pub->osh, (MAX_NO_OF_ARG * SIZE_LOC_STR)))) {
+ DHD_ERROR(("%s: malloc failed str_buf \n", __FUNCTION__));
+ }
+ }
+
+ if (!(hdr_ptr = MALLOCZ(dhd_pub->osh, hdr_ptr_len))) {
+ DHD_ERROR(("%s: malloc failed hdr_ptr \n", __FUNCTION__));
+ }
+
+
+ DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[No.%d]: timestamp 0x%08x length = %d\n",
+ seq, timestamp, pktlen));
+
+ /* (raw_event->fmts) has value */
+
+ log_ptr = (uint32 *) (p + datalen);
+
+ /* Store each entry's header pointer while parsing backwards from the end of
+ * the log buffer. Sample EL data:
+ * 001d3c54 00000064 00000064 001d3c54 001dba08 035d6ce1 0c540639
+ * 001d3c54 00000064 00000064 035d6d89 0c580439
+ * In the example above, 0c580439 decodes as tag 0x39, count 0x04 and
+ * format number 0x580c. The uint32 values arrive grouped in reverse order,
+ * so decoding has to walk from the last entry to the first.
+ */
+
+ while (datalen > MIN_DLEN) {
+ log_ptr--;
+ datalen -= MIN_DLEN;
+ event_hdr.t = *log_ptr;
+ /*
+ * Check for partially overwritten entries
+ */
+ if (log_ptr - (uint32 *) p < event_hdr.count) {
+ break;
+ }
+ /*
+ * Check argument count (only when format is valid)
+ */
+ if ((event_hdr.count > MAX_NO_OF_ARG) &&
+ (event_hdr.fmt_num != 0xffff)) {
+ break;
+ }
+ /*
+ * Check for end of the Frame.
+ */
+ if (event_hdr.tag == EVENT_LOG_TAG_NULL) {
+ continue;
+ }
+ log_ptr[0] = event_hdr.t;
+ if (h_i < (hdr_ptr_len / sizeof(uint32*))) {
+ hdr_ptr[h_i++] = log_ptr;
+ }
+
+ /* Now place the header at the front
+ * and copy back.
+ */
+ log_ptr -= event_hdr.count;
+
+ c_ptr = NULL;
+ datalen = datalen - (event_hdr.count * MIN_DLEN);
+ }
+ datalen = 0;
+
+ /* Print the logs via the stored header pointers in reverse order of the EL
+ * data, i.e. the oldest entry first and the rest in order.
+ */
+
+ for (j = (h_i-1); j >= 0; j--) {
+ if (!(hdr_ptr[j])) {
+ break;
+ }
+
+ event_hdr.t = *hdr_ptr[j];
+
+ log_ptr = hdr_ptr[j];
+
+ /* Now place the header at the front
+ * and copy back.
+ */
+ log_ptr -= event_hdr.count;
+
+ if (event_hdr.tag == EVENT_LOG_TAG_ROM_PRINTF) {
+
+ rom_str_len = ((event_hdr.count)-1) * sizeof(uint32);
+
+ if (rom_str_len >= (ROMSTR_SIZE -1)) {
+ rom_str_len = ROMSTR_SIZE - 1;
+ }
- return bcmerror;
-}
+ /* copy all ascii data for ROM printf to local string */
+ memcpy(rom_log_str, log_ptr, rom_str_len);
+ /* NUL-terminate the copied string */
+ rom_log_str[rom_str_len] = '\0';
-#ifdef SHOW_EVENTS
-#ifdef SHOW_LOGTRACE
+ DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
+ log_ptr[event_hdr.count - 1], rom_log_str));
-#define AVOID_BYTE 64
-#define MAX_NO_OF_ARG 16
+ /* Add newline if missing */
+ if (rom_log_str[strlen(rom_log_str) - 1] != '\n') {
+ DHD_EVENT(("\n"));
+ }
-static int
-check_event_log_sequence_number(uint32 seq_no)
-{
- int32 diff;
- uint32 ret;
- static uint32 logtrace_seqnum_prev = 0;
+ memset(rom_log_str, 0, ROMSTR_SIZE);
- diff = ntoh32(seq_no)-logtrace_seqnum_prev;
- switch (diff)
- {
- case 0:
- ret = -1; /* duplicate packet . drop */
- break;
+ continue;
+ }
- case 1:
- ret =0; /* in order */
- break;
+ /*
+ * Check For Special Time Stamp Packet
+ */
+ if (event_hdr.tag == EVENT_LOG_TAG_TS) {
+ DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
+ log_ptr[event_hdr.count-1], log_ptr[0], log_ptr[1]));
+ continue;
+ }
- default:
- if ((ntoh32(seq_no) == 0) &&
- (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */
- ret = 0;
+ /* Simply print out event dump buffer (fmt_num = 0xffff) */
+ if (!str_buf || event_hdr.fmt_num == 0xffff) {
+ /*
+ * Print out raw value if unable to interpret
+ */
+#ifdef DHD_LOG_DUMP
+ char buf[256];
+ char *pos = buf;
+ memset(buf, 0, sizeof(buf));
+ pos += snprintf(pos, 256,
+#else
+ DHD_MSGTRACE_LOG((
+#endif /* DHD_LOG_DUMP */
+ "EVENT_LOG_BUF[0x%08x]: tag=%d len=%d fmt=%04x",
+ log_ptr[event_hdr.count-1], event_hdr.tag,
+ event_hdr.count, event_hdr.fmt_num
+#ifdef DHD_LOG_DUMP
+);
+#else
+));
+#endif /* DHD_LOG_DUMP */
+
+ for (count = 0; count < (event_hdr.count-1); count++) {
+#ifdef DHD_LOG_DUMP
+ if (strlen(buf) >= (256 - 1)) {
+ DHD_MSGTRACE_LOG(("%s\n", buf));
+ memset(buf, 0, sizeof(buf));
+ pos = buf;
+ }
+ pos += snprintf(pos, (256 - (int)(pos-buf)),
+ " %08x", log_ptr[count]);
+#else
+ if (count % 8 == 0)
+ DHD_MSGTRACE_LOG(("\n\t%08x", log_ptr[count]));
+ else
+ DHD_MSGTRACE_LOG((" %08x", log_ptr[count]));
+#endif /* DHD_LOG_DUMP */
+ }
+#ifdef DHD_LOG_DUMP
+ DHD_MSGTRACE_LOG(("%s\n", buf));
+#else
+ DHD_MSGTRACE_LOG(("\n"));
+#endif /* DHD_LOG_DUMP */
+ continue;
+ }
+
+ /* Copy the format string, prefixed with "EVENT_LOG[...]", so %s arguments can be resolved below */
+ if ((event_hdr.fmt_num >> 2) < raw_event->num_fmts) {
+ snprintf(fmtstr_loc_buf, FMTSTR_SIZE,
+ "EVENT_LOG[0x%08x]: %s", log_ptr[event_hdr.count-1],
+ raw_event->fmts[event_hdr.fmt_num >> 2]);
+ c_ptr = fmtstr_loc_buf;
} else {
+ DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__));
+ continue;
+ }
- if (diff > 0) {
- DHD_EVENT(("WLC_E_TRACE:"
- "Event lost (log) seqnum %d nblost %d\n",
- ntoh32(seq_no), (diff-1)));
+ for (count = 0; count < (event_hdr.count-1); count++) {
+ if (c_ptr != NULL) {
+ if ((c_ptr = strstr(c_ptr, "%")) != NULL) {
+ c_ptr++;
+ }
+ }
+
+ if ((c_ptr != NULL) && (*c_ptr == 's')) {
+ if ((raw_event->raw_sstr) &&
+ ((log_ptr[count] > raw_event->rodata_start) &&
+ (log_ptr[count] < raw_event->rodata_end))) {
+ /* ram static string */
+ addr = log_ptr[count] - raw_event->rodata_start;
+ str_tmpptr = raw_event->raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else if ((raw_event->rom_raw_sstr) &&
+ ((log_ptr[count] >
+ raw_event->rom_rodata_start) &&
+ (log_ptr[count] <
+ raw_event->rom_rodata_end))) {
+ /* rom static string */
+ addr = log_ptr[count] - raw_event->rom_rodata_start;
+ str_tmpptr = raw_event->rom_raw_sstr + addr;
+ memcpy(str_buf[count], str_tmpptr, SIZE_LOC_STR);
+ str_buf[count][SIZE_LOC_STR-1] = '\0';
+ arg[count].addr = str_buf[count];
+ } else {
+ /*
+ * Dynamic string OR
+ * No data for static string.
+ * So store all string's address as string.
+ */
+ snprintf(str_buf[count], SIZE_LOC_STR, "(s)0x%x",
+ log_ptr[count]);
+ arg[count].addr = str_buf[count];
+ }
} else {
- DHD_EVENT(("WLC_E_TRACE:"
- "Event Packets coming out of order!!\n"));
+ /* Other than string */
+ arg[count].val = log_ptr[count];
}
- ret = 0;
}
- }
- logtrace_seqnum_prev = ntoh32(seq_no);
+ DHD_MSGTRACE_LOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]));
- return ret;
+ if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n') {
+ /* Add newline if missing */
+ DHD_MSGTRACE_LOG(("\n"));
+ }
+
+ memset(fmtstr_loc_buf, 0, FMTSTR_SIZE);
+
+ for (i = 0; i < MAX_NO_OF_ARG; i++) {
+ arg[i].addr = 0;
+ }
+ for (i = 0; i < MAX_NO_OF_ARG; i++) {
+ memset(str_buf[i], 0, SIZE_LOC_STR);
+ }
+
+ }
+ DHD_MSGTRACE_LOG(("\n"));
+
+ if (str_buf) {
+ MFREE(dhd_pub->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
+ }
+
+ if (hdr_ptr) {
+ MFREE(dhd_pub->osh, hdr_ptr, hdr_ptr_len);
+ }
+ }
}
+
#endif /* SHOW_LOGTRACE */
static void
{
uint i, status, reason;
bool group = FALSE, flush_txq = FALSE, link = FALSE;
+ bool host_data = FALSE; /* prints event data after the case when set */
const char *auth_str;
const char *event_name;
uchar *buf;
break;
case WLC_E_TXFAIL:
- DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+ DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
break;
- case WLC_E_SCAN_COMPLETE:
case WLC_E_ASSOC_REQ_IE:
case WLC_E_ASSOC_RESP_IE:
case WLC_E_PMKID_CACHE:
+ case WLC_E_SCAN_COMPLETE:
DHD_EVENT(("MACEVENT: %s\n", event_name));
break;
case WLC_E_PFN_NET_FOUND:
case WLC_E_PFN_NET_LOST:
- case WLC_E_PFN_SCAN_COMPLETE:
case WLC_E_PFN_SCAN_NONE:
case WLC_E_PFN_SCAN_ALLGONE:
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ case WLC_E_PFN_SWC:
DHD_EVENT(("PNOEVENT: %s\n", event_name));
break;
#ifdef SHOW_LOGTRACE
case WLC_E_TRACE:
{
- msgtrace_hdr_t hdr;
- uint32 nblost;
- uint8 count;
- char *s, *p;
- static uint32 seqnum_prev = 0;
- uint32 *record = NULL;
- uint32 *log_ptr = NULL;
- uint32 writeindex = 0;
- event_log_hdr_t event_hdr;
- int no_of_fmts = 0;
- char *fmt = NULL;
- dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr;
-
- buf = (uchar *) event_data;
- memcpy(&hdr, buf, MSGTRACE_HDRLEN);
-
- if (hdr.version != MSGTRACE_VERSION) {
- DHD_EVENT(("\nMACEVENT: %s [unsupported version --> "
- "dhd version:%d dongle version:%d]\n",
- event_name, MSGTRACE_VERSION, hdr.version));
- /* Reset datalen to avoid display below */
- datalen = 0;
- break;
- }
-
- if (hdr.trace_type == MSGTRACE_HDR_TYPE_MSG) {
- /* There are 2 bytes available at the end of data */
- buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
-
- if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
- DHD_EVENT(("WLC_E_TRACE: [Discarded traces in dongle -->"
- "discarded_bytes %d discarded_printf %d]\n",
- ntoh32(hdr.discarded_bytes),
- ntoh32(hdr.discarded_printf)));
- }
-
- nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
- if (nblost > 0) {
- DHD_EVENT(("WLC_E_TRACE:"
- "[Event lost (msg) --> seqnum %d nblost %d\n",
- ntoh32(hdr.seqnum), nblost));
- }
- seqnum_prev = ntoh32(hdr.seqnum);
-
- /* Display the trace buffer. Advance from
- * \n to \n to avoid display big
- * printf (issue with Linux printk )
- */
- p = (char *)&buf[MSGTRACE_HDRLEN];
- while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
- *s = '\0';
- DHD_EVENT(("%s\n", p));
- p = s+1;
- }
- if (*p)
- DHD_EVENT(("%s", p));
-
- /* Reset datalen to avoid display below */
- datalen = 0;
-
- } else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
- /* Let the standard event printing work for now */
- uint32 timestamp, w, malloc_len;
-
- if (check_event_log_sequence_number(hdr.seqnum)) {
-
- DHD_EVENT(("%s: WLC_E_TRACE:"
- "[Event duplicate (log) %d] dropping!!\n",
- __FUNCTION__, hdr.seqnum));
- return; /* drop duplicate events */
- }
-
- p = (char *)&buf[MSGTRACE_HDRLEN];
- datalen -= MSGTRACE_HDRLEN;
- w = ntoh32((uint32)*p);
- p += 4;
- datalen -= 4;
- timestamp = ntoh32((uint32)*p);
- BCM_REFERENCE(timestamp);
- BCM_REFERENCE(w);
-
- DHD_EVENT(("timestamp %x%x\n", timestamp, w));
-
- if (raw_event->fmts) {
- malloc_len = datalen+ AVOID_BYTE;
- record = (uint32 *)MALLOC(dhd_pub->osh, malloc_len);
- if (record == NULL) {
- DHD_EVENT(("MSGTRACE_HDR_TYPE_LOG:"
- "malloc failed\n"));
- return;
- }
- log_ptr = (uint32 *) (p + datalen);
- writeindex = datalen/4;
-
- if (record) {
- while (datalen > 4) {
- log_ptr--;
- datalen -= 4;
- event_hdr.t = *log_ptr;
- /*
- * Check for partially overriten entries
- */
- if (log_ptr - (uint32 *) p < event_hdr.count) {
- break;
- }
- /*
- * Check for end of the Frame.
- */
- if (event_hdr.tag == EVENT_LOG_TAG_NULL) {
- continue;
- }
- /*
- * Check For Special Time Stamp Packet
- */
- if (event_hdr.tag == EVENT_LOG_TAG_TS) {
- datalen -= 12;
- log_ptr = log_ptr - 3;
- continue;
- }
-
- log_ptr[0] = event_hdr.t;
- if (event_hdr.count > MAX_NO_OF_ARG) {
- break;
- }
- /* Now place the header at the front
- * and copy back.
- */
- log_ptr -= event_hdr.count;
-
- writeindex = writeindex - event_hdr.count;
- record[writeindex++] = event_hdr.t;
- for (count = 0; count < (event_hdr.count-1);
- count++) {
- record[writeindex++] = log_ptr[count];
- }
- writeindex = writeindex - event_hdr.count;
- datalen = datalen - (event_hdr.count * 4);
- no_of_fmts++;
- }
- }
-
- while (no_of_fmts--)
- {
- event_log_hdr_t event_hdr;
- event_hdr.t = record[writeindex];
-
- if ((event_hdr.fmt_num>>2) < raw_event->num_fmts) {
- fmt = raw_event->fmts[event_hdr.fmt_num>>2];
- DHD_EVENT((fmt,
- record[writeindex + 1],
- record[writeindex + 2],
- record[writeindex + 3],
- record[writeindex + 4],
- record[writeindex + 5],
- record[writeindex + 6],
- record[writeindex + 7],
- record[writeindex + 8],
- record[writeindex + 9],
- record[writeindex + 10],
- record[writeindex + 11],
- record[writeindex + 12],
- record[writeindex + 13],
- record[writeindex + 14],
- record[writeindex + 15],
- record[writeindex + 16]));
-
- if (fmt[strlen(fmt) - 1] != '\n') {
- /* Add newline if missing */
- DHD_EVENT(("\n"));
- }
- }
-
- writeindex = writeindex + event_hdr.count;
- }
-
- if (record) {
- MFREE(dhd_pub->osh, record, malloc_len);
- record = NULL;
- }
- } else {
- while (datalen > 4) {
- p += 4;
- datalen -= 4;
- /* Print each word. DO NOT ntoh it. */
- DHD_EVENT((" %8.8x", *((uint32 *) p)));
- }
- DHD_EVENT(("\n"));
- }
- datalen = 0;
- }
+ dhd_eventmsg_print(dhd_pub, event_data, raw_event_ptr, datalen, event_name);
break;
}
#endif /* SHOW_LOGTRACE */
break;
#endif
+ case WLC_E_CCA_CHAN_QUAL:
+ if (datalen) {
+ buf = (uchar *) event_data;
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, "
+ "channel 0x%02x \n", event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, *(buf + 4)));
+ }
+ break;
+ case WLC_E_ESCAN_RESULT:
+ {
+#ifndef DHD_IFDEBUG
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
+ event_name, event_type, eabuf, (int)status));
+#endif
+ }
+ break;
default:
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
break;
}
- /* show any appended data */
- if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) {
+ /* show any appended data if message level is set to bytes or host_data is set */
+ if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
buf = (uchar *) event_data;
BCM_REFERENCE(buf);
DHD_EVENT((" data (%d) : ", datalen));
}
#endif /* SHOW_EVENTS */
+/* Stub for now. Will become a real function once the shim
+ * is integrated into Android, Linux, etc.
+ */
int
-wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
- wl_event_msg_t *event, void **data_ptr, void *raw_event)
+wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
+{
+ return BCME_OK;
+}
+
+int
+wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, void **data_ptr, void *raw_event)
+{
+ wl_evt_pport_t evt_pport;
+ wl_event_msg_t event;
+
+ /* make sure it is a BRCM event pkt and record event data */
+ int ret = wl_host_event_get_data(pktdata, &event, data_ptr);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+
+ /* convert event from network order to host order */
+ wl_event_to_host_order(&event);
+
+ /* record event params to evt_pport */
+ evt_pport.dhd_pub = dhd_pub;
+ evt_pport.ifidx = ifidx;
+ evt_pport.pktdata = pktdata;
+ evt_pport.data_ptr = data_ptr;
+ evt_pport.raw_event = raw_event;
+
+#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+ ASSERT(shim);
+ ret = wl_shim_event_process(shim, &event, &evt_pport);
+ }
+#else
+ ret = wl_event_process_default(&event, &evt_pport);
+#endif
+
+ return ret;
+}
+
+/* Check whether packet is a BRCM event pkt. If it is, record event data. */
+int
+wl_host_event_get_data(void *pktdata, wl_event_msg_t *event, void **data_ptr)
{
- /* check whether packet is a BRCM event pkt */
bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
- uint8 *event_data;
- uint32 type, status, datalen;
- uint16 flags;
- int evlen;
- int hostidx;
if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
- return (BCME_ERROR);
+ return BCME_ERROR;
}
/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
- return (BCME_ERROR);
+ return BCME_ERROR;
}
*data_ptr = &pvt_data[1];
- event_data = *data_ptr;
-
/* memcpy since BRCM event pkt may be unaligned. */
memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+ return BCME_OK;
+}
+
+int
+wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
+ wl_event_msg_t *event, void **data_ptr, void *raw_event)
+{
+ bcm_event_t *pvt_data;
+ uint8 *event_data;
+ uint32 type, status, datalen;
+ uint16 flags;
+ int evlen;
+
+ /* make sure it is a BRCM event pkt and record event data */
+ int ret = wl_host_event_get_data(pktdata, event, data_ptr);
+ if (ret != BCME_OK) {
+ return ret;
+ }
+
+ pvt_data = (bcm_event_t *)pktdata;
+ event_data = *data_ptr;
+
type = ntoh32_ua((void *)&event->event_type);
flags = ntoh16_ua((void *)&event->flags);
status = ntoh32_ua((void *)&event->status);
datalen = ntoh32_ua((void *)&event->datalen);
evlen = datalen + sizeof(bcm_event_t);
- /* find equivalent host index for event ifidx */
- hostidx = dhd_ifidx2hostidx(dhd_pub->info, event->ifidx);
-
switch (type) {
#ifdef PROP_TXSTATUS
case WLC_E_FIFO_CREDIT_MAP:
#endif /* WL_CFG80211 */
}
} else {
-#if !defined(PROP_TXSTATUS) || !defined(PCIE_FULL_DONGLE)
+#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
__FUNCTION__, ifevent->ifidx, event->ifname));
-#endif /* !PROP_TXSTATUS */
+#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
}
/* send up the if event: btamp user needs it */
- *ifidx = hostidx;
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
break;
htsf_update(dhd_pub->info, event_data);
break;
#endif /* WLMEDIA_HTSF */
-#if defined(NDISVER) && (NDISVER >= 0x0630)
case WLC_E_NDIS_LINK:
break;
-#else
- case WLC_E_NDIS_LINK: {
- uint32 temp = hton32(WLC_E_LINK);
-
- memcpy((void *)(&pvt_data->event.event_type), &temp,
- sizeof(pvt_data->event.event_type));
- break;
- }
-#endif /* NDISVER >= 0x0630 */
case WLC_E_PFN_NET_FOUND:
+ case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
case WLC_E_PFN_NET_LOST:
break;
#if defined(PNO_SUPPORT)
case WLC_E_PFN_BSSID_NET_FOUND:
- case WLC_E_PFN_BSSID_NET_LOST:
case WLC_E_PFN_BEST_BATCHING:
dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
break;
case WLC_E_ASSOC_IND:
case WLC_E_AUTH_IND:
case WLC_E_REASSOC_IND:
- dhd_findadd_sta(dhd_pub, hostidx, &event->addr.octet);
- break;
+ dhd_findadd_sta(dhd_pub,
+ dhd_ifname2idx(dhd_pub->info, event->ifname),
+ &event->addr.octet);
+ break;
+#if defined(DHD_FW_COREDUMP)
+ case WLC_E_PSM_WATCHDOG:
+ DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
+ if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
+ DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
+ }
+ break;
+#endif
case WLC_E_LINK:
#ifdef PCIE_FULL_DONGLE
- if (dhd_update_interface_link_status(dhd_pub, (uint8)hostidx,
- (uint8)flags) != BCME_OK)
+ if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+ event->ifname), (uint8)flags) != BCME_OK)
break;
if (!flags) {
- dhd_flow_rings_delete(dhd_pub, hostidx);
+ dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
+ event->ifname));
}
/* fall through */
#endif
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC:
case WLC_E_DISASSOC_IND:
- if (type != WLC_E_LINK) {
- dhd_del_sta(dhd_pub, hostidx, &event->addr.octet);
- }
DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
__FUNCTION__, type, flags, status));
#ifdef PCIE_FULL_DONGLE
if (type != WLC_E_LINK) {
- uint8 ifindex = (uint8)hostidx;
+ uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
- if (DHD_IF_ROLE_STA(role)) {
- dhd_flow_rings_delete(dhd_pub, ifindex);
- } else {
- dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
- &event->addr.octet[0]);
+ uint8 del_sta = TRUE;
+#ifdef WL_CFG80211
+ if (role == WLC_E_IF_ROLE_STA && !wl_cfg80211_is_roam_offload() &&
+ !wl_cfg80211_is_event_from_connected_bssid(event, *ifidx)) {
+ del_sta = FALSE;
+ }
+#endif /* WL_CFG80211 */
+
+ if (del_sta) {
+ dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
+ event->ifname), &event->addr.octet);
+ if (role == WLC_E_IF_ROLE_STA) {
+ dhd_flow_rings_delete(dhd_pub, ifindex);
+ } else {
+ dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+ &event->addr.octet[0]);
+ }
}
}
-#endif
+#endif /* PCIE_FULL_DONGLE */
/* fall through */
default:
- *ifidx = hostidx;
+ *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
}
#ifdef SHOW_EVENTS
- wl_show_host_event(dhd_pub, event,
- (void *)event_data, raw_event, dhd_pub->enable_log);
+ if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
+ wl_show_host_event(dhd_pub, event,
+ (void *)event_data, raw_event, dhd_pub->enable_log);
+ }
#endif /* SHOW_EVENTS */
return (BCME_OK);
}
-void
-wl_event_to_host_order(wl_event_msg_t * evt)
-{
- /* Event struct members passed from dongle to host are stored in network
- * byte order. Convert all members to host-order.
- */
- evt->event_type = ntoh32(evt->event_type);
- evt->flags = ntoh16(evt->flags);
- evt->status = ntoh32(evt->status);
- evt->reason = ntoh32(evt->reason);
- evt->auth_type = ntoh32(evt->auth_type);
- evt->datalen = ntoh32(evt->datalen);
- evt->version = ntoh16(evt->version);
-}
-
void
dhd_print_buf(void *pbuf, int len, int bytes_per_line)
{
__FUNCTION__, enable?"enable":"disable", arg));
 /* Control the master mode */
- bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
- rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
+ master_mode, WLC_SET_VAR, TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
- __FUNCTION__, master_mode, rc));
+ __FUNCTION__, master_mode, rc));
fail:
if (arg_org)
void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
{
- char iovbuf[32];
int ret;
- bcm_mkiovar("pkt_filter_delete", (char *)&id, 4, iovbuf, sizeof(iovbuf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
+ id, WLC_SET_VAR, TRUE, 0);
if (ret < 0) {
DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
__FUNCTION__, id, ret));
void
dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
{
- char iovbuf[DHD_IOVAR_BUF_SIZE];
- int iovar_len;
int retcode;
- iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
- if (!iovar_len) {
- DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
- __FUNCTION__, sizeof(iovbuf)));
- return;
- }
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
+ arp_mode, WLC_SET_VAR, TRUE, 0);
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
void
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
- char iovbuf[DHD_IOVAR_BUF_SIZE];
- int iovar_len;
int retcode;
- iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
- if (!iovar_len) {
- DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
- __FUNCTION__, sizeof(iovbuf)));
- return;
- }
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
+ arp_enable, WLC_SET_VAR, TRUE, 0);
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
 DHD_ERROR(("%s: failed to enable ARP offload to %d, retcode = %d\n",
__FUNCTION__, arp_enable));
if (arp_enable) {
uint32 version;
- bcm_mkiovar("arp_version", 0, 0, iovbuf, sizeof(iovbuf));
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
+ &version, WLC_GET_VAR, FALSE, 0);
if (retcode) {
DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
__FUNCTION__, retcode));
dhd->arp_version = 1;
}
else {
- memcpy(&version, iovbuf, sizeof(version));
DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
dhd->arp_version = version;
}
int
dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
{
- char iovbuf[DHD_IOVAR_BUF_SIZE];
- int iov_len;
int retcode;
if (dhd == NULL)
return -1;
- iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
- if (!iov_len) {
- DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
- __FUNCTION__, sizeof(iovbuf)));
- return -1;
- }
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
+ retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
+ ndo_enable, WLC_SET_VAR, TRUE, 0);
if (retcode)
 DHD_ERROR(("%s: failed to enable NDO to %d, retcode = %d\n",
__FUNCTION__, ndo_enable, retcode));
return retcode;
}
-/* send up locally generated event */
-void
-dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
-{
- switch (ntoh32(event->event_type)) {
-#ifdef WLBTAMP
- case WLC_E_BTA_HCI_EVENT:
- break;
-#endif /* WLBTAMP */
- default:
- break;
- }
-
- /* Call per-port handler. */
- dhd_sendup_event(dhdp, event, data);
-}
/*
* returns = TRUE if associated, FALSE if not associated
*/
-bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval)
+bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
{
char bssid[6], zbuf[6];
int ret = -1;
bzero(bssid, 6);
bzero(zbuf, 6);
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
+ ETHER_ADDR_LEN, FALSE, ifidx);
DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
if (ret == BCME_NOTASSOCIATED) {
if (ret < 0)
return FALSE;
- if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
- /* STA is assocoated BSSID is non zero */
-
- if (bss_buf) {
- /* return bss if caller provided buf */
- memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
- }
- return TRUE;
- } else {
+ if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
return FALSE;
}
+ return TRUE;
}
/* Function to estimate possible DTIM_SKIP value */
int ret = -1;
int dtim_period = 0;
int ap_beacon = 0;
+#ifndef ENABLE_MAX_DTIM_IN_SUSPEND
int allowed_skip_dtim_cnt = 0;
+#endif /* !ENABLE_MAX_DTIM_IN_SUSPEND */
/* Check if associated */
- if (dhd_is_associated(dhd, NULL, NULL) == FALSE) {
+ if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
goto exit;
}
goto exit;
}
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+ bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
+ if (bcn_li_dtim == 0) {
+ bcn_li_dtim = 1;
+ }
+#else /* ENABLE_MAX_DTIM_IN_SUSPEND */
 /* attempt to use the platform-defined DTIM skip interval */
bcn_li_dtim = dhd->suspend_bcn_li_dtim;
bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
 DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim));
}
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
* SSIDs list parsing from cscan tlv list
*/
int
-wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
+wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
{
char* str;
int idx = 0;
*bytes_left -= ssid[idx].SSID_len;
str += ssid[idx].SSID_len;
+ ssid[idx].hidden = TRUE;
DHD_TRACE(("%s :size=%d left=%d\n",
(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
*list_str = str;
return num;
}
+
+
+/* Given filename and download type, returns a buffer pointer and length
+ * for download to f/w. Type can be FW or NVRAM.
+ *
+ */
+int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
+ char ** buffer, int *length)
+{
+ int ret = BCME_ERROR;
+ int len = 0;
+ int file_len;
+ void *image = NULL;
+ uint8 *buf = NULL;
+
+ /* Point to cache if available. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (dhd->cached_fw_length) {
+ len = dhd->cached_fw_length;
+ buf = dhd->cached_fw;
+ }
+ } else if (component == NVRAM) {
+ if (dhd->cached_nvram_length) {
+ len = dhd->cached_nvram_length;
+ buf = dhd->cached_nvram;
+ }
+ } else {
+ return ret;
+ }
+#endif
+ /* No Valid cache found on this call */
+ if (!len) {
+ file_len = *length;
+ *length = 0;
+
+ if (file_path) {
+ image = dhd_os_open_image(file_path);
+ if (image == NULL) {
+ printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
+ goto err;
+ }
+ }
+
+ buf = MALLOCZ(dhd->osh, file_len);
+ if (buf == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, file_len));
+ goto err;
+ }
+
+ /* Read the image into the buffer */
+ len = dhd_os_get_image_block(buf, file_len, image);
+ if ((len <= 0 || len > file_len)) {
+ MFREE(dhd->osh, buf, file_len);
+ goto err;
+ }
+ }
+
+ ret = BCME_OK;
+ *length = len;
+ *buffer = buf;
+
+ /* Cache if first call. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (!dhd->cached_fw_length) {
+ dhd->cached_fw = buf;
+ dhd->cached_fw_length = len;
+ }
+ } else if (component == NVRAM) {
+ if (!dhd->cached_nvram_length) {
+ dhd->cached_nvram = buf;
+ dhd->cached_nvram_length = len;
+ }
+ }
+#endif
+
+err:
+ if (image)
+ dhd_os_close_image(image);
+
+ return ret;
+}
+
+void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
+{
+#ifdef CACHE_FW_IMAGES
+ return;
+#endif
+ MFREE(dhd->osh, buffer, length);
+}
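+
+/* Hypothetical caller sketch (illustrative, not part of the patch): *length
+ * carries the maximum buffer size in and receives the image length out; the
+ * nv_path variable and the 64 KB bound here are assumptions.
+ *
+ *	char *nvbuf = NULL;
+ *	int nvlen = 64 * 1024;
+ *	if (dhd_get_download_buffer(dhd, nv_path, NVRAM, &nvbuf, &nvlen) == BCME_OK) {
+ *		... hand nvbuf/nvlen to the bus download routine ...
+ *		dhd_free_download_buffer(dhd, nvbuf, nvlen);
+ *	}
+ */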
+/* Parse EAPOL 4 way handshake messages */
+void
+dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction)
+{
+ unsigned char type;
+ int pair, ack, mic, kerr, req, sec, install;
+ unsigned short us_tmp;
+ type = dump_data[18];
+ if (type == 2 || type == 254) {
+ us_tmp = (dump_data[19] << 8) | dump_data[20];
+ pair = 0 != (us_tmp & 0x08);
+ ack = 0 != (us_tmp & 0x80);
+ mic = 0 != (us_tmp & 0x100);
+ kerr = 0 != (us_tmp & 0x400);
+ req = 0 != (us_tmp & 0x800);
+ sec = 0 != (us_tmp & 0x200);
+ install = 0 != (us_tmp & 0x40);
+ if (!sec && !mic && ack && !install && pair && !kerr && !req) {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M1 of 4way\n",
+ ifname, direction ? "TX" : "RX"));
+ } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M2 of 4way\n",
+ ifname, direction ? "TX" : "RX"));
+ } else if (pair && ack && mic && sec && !kerr && !req) {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M3 of 4way\n",
+ ifname, direction ? "TX" : "RX"));
+ } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s] : M4 of 4way\n",
+ ifname, direction ? "TX" : "RX"));
+ } else {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+ ifname, direction ? "TX" : "RX",
+ dump_data[14], dump_data[15], dump_data[30]));
+ }
+ } else {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+ ifname, direction ? "TX" : "RX",
+ dump_data[14], dump_data[15], dump_data[30]));
+ }
+}
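+
+/* Note on the offsets and bits above (descriptive only): with an Ethernet II
+ * frame, byte 14 is the 802.1X version, 15 the packet type, 18 the EAPOL-Key
+ * descriptor type (2 = RSN, 254 = WPA), and 19-20 the big-endian Key
+ * Information field, whose bits are tested as: 0x08 key type (pairwise),
+ * 0x40 install, 0x80 ACK, 0x100 MIC, 0x200 secure, 0x400 error, 0x800 request.
+ */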
\r
#include <bcmutils.h>\r
#include <hndsoc.h>\r
-#if defined(HW_OOB)\r
+#include <bcmsdbus.h>\r
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
#include <bcmdefs.h>\r
#include <bcmsdh.h>\r
#include <sdio.h>\r
#define FW_TYPE_STA 0\r
#define FW_TYPE_APSTA 1\r
#define FW_TYPE_P2P 2\r
-#define FW_TYPE_MFG 3\r
+#define FW_TYPE_ES 3\r
+#define FW_TYPE_MFG 4\r
#define FW_TYPE_G 0\r
#define FW_TYPE_AG 1\r
\r
+#ifdef CONFIG_PATH_AUTO_SELECT\r
+#define BCM4330B2_CONF_NAME "config_40183b2.txt"\r
+#define BCM43362A0_CONF_NAME "config_40181a0.txt"\r
+#define BCM43362A2_CONF_NAME "config_40181a2.txt"\r
+#define BCM43438A0_CONF_NAME "config_43438a0.txt"\r
+#define BCM43438A1_CONF_NAME "config_43438a1.txt"\r
+#define BCM4334B1_CONF_NAME "config_4334b1.txt"\r
+#define BCM43341B0_CONF_NAME "config_43341b0.txt"\r
+#define BCM43241B4_CONF_NAME "config_43241b4.txt"\r
+#define BCM4339A0_CONF_NAME "config_4339a0.txt"\r
+#define BCM43455C0_CONF_NAME "config_43455c0.txt"\r
+#define BCM4354A1_CONF_NAME "config_4354a1.txt"\r
+#define BCM4356A2_CONF_NAME "config_4356a2.txt"\r
+#define BCM4359B1_CONF_NAME "config_4359b1.txt"\r
+#endif\r
+\r
#ifdef BCMSDIO\r
#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */\r
\r
-#define BCM43362A0_CHIP_REV 0\r
-#define BCM43362A2_CHIP_REV 1\r
-#define BCM43430A0_CHIP_REV 0\r
-#define BCM43430A1_CHIP_REV 1\r
-#define BCM4330B2_CHIP_REV 4\r
-#define BCM43340B0_CHIP_REV 2\r
-#define BCM43341B0_CHIP_REV 2\r
-#define BCM43241B4_CHIP_REV 5\r
-#define BCM4335A0_CHIP_REV 2\r
-#define BCM4339A0_CHIP_REV 1\r
-#define BCM43455C0_CHIP_REV 6\r
-#define BCM4354A1_CHIP_REV 1\r
-#define BCM4356A2_CHIP_REV 2\r
-\r
const static char *bcm4330b2_fw_name[] = {\r
- "fw_RK903b2.bin",\r
- "fw_RK903b2_apsta.bin",\r
- "fw_RK903b2_p2p.bin",\r
- "fw_RK903b2_mfg.bin"\r
+ "fw_bcm40183b2.bin",\r
+ "fw_bcm40183b2_apsta.bin",\r
+ "fw_bcm40183b2_p2p.bin",\r
+ "fw_bcm40183b2_es.bin",\r
+ "fw_bcm40183b2_mfg.bin"\r
};\r
\r
const static char *bcm4330b2_ag_fw_name[] = {\r
- "fw_RK903_ag.bin",\r
- "fw_RK903_ag_apsta.bin",\r
- "fw_RK903_ag_p2p.bin",\r
- "fw_RK903_ag_mfg.bin"\r
+ "fw_bcm40183b2_ag.bin",\r
+ "fw_bcm40183b2_ag_apsta.bin",\r
+ "fw_bcm40183b2_ag_p2p.bin",\r
+ "fw_bcm40183b2_ag_es.bin",\r
+ "fw_bcm40183b2_ag_mfg.bin"\r
};\r
\r
const static char *bcm43362a0_fw_name[] = {\r
- "fw_RK901a0.bin",\r
- "fw_RK901a0_apsta.bin",\r
- "fw_RK901a0_p2p.bin",\r
- "fw_RK901a0_mfg.bin"\r
+ "fw_bcm40181a0.bin",\r
+ "fw_bcm40181a0_apsta.bin",\r
+ "fw_bcm40181a0_p2p.bin",\r
+ "fw_bcm40181a0_es.bin",\r
+ "fw_bcm40181a0_mfg.bin"\r
};\r
\r
const static char *bcm43362a2_fw_name[] = {\r
- "fw_RK901a2.bin",\r
- "fw_RK901a2_apsta.bin",\r
- "fw_RK901a2_p2p.bin",\r
- "fw_RK901a2_mfg.bin"\r
+ "fw_bcm40181a2.bin",\r
+ "fw_bcm40181a2_apsta.bin",\r
+ "fw_bcm40181a2_p2p.bin",\r
+ "fw_bcm40181a2_es.bin",\r
+ "fw_bcm40181a2_mfg.bin"\r
+};\r
+\r
+const static char *bcm4334b1_ag_fw_name[] = {\r
+ "fw_bcm4334b1_ag.bin",\r
+ "fw_bcm4334b1_ag_apsta.bin",\r
+ "fw_bcm4334b1_ag_p2p.bin",\r
+ "fw_bcm4334b1_ag_es.bin",\r
+ "fw_bcm4334b1_ag_mfg.bin"\r
};\r
\r
const static char *bcm43438a0_fw_name[] = {\r
"fw_bcm43438a0.bin",\r
"fw_bcm43438a0_apsta.bin",\r
"fw_bcm43438a0_p2p.bin",\r
+ "fw_bcm43438a0_es.bin",\r
"fw_bcm43438a0_mfg.bin"\r
};\r
\r
"fw_bcm43438a1.bin",\r
"fw_bcm43438a1_apsta.bin",\r
"fw_bcm43438a1_p2p.bin",\r
+ "fw_bcm43438a1_es.bin",\r
"fw_bcm43438a1_mfg.bin"\r
};\r
\r
"fw_bcm43341b0_ag.bin",\r
"fw_bcm43341b0_ag_apsta.bin",\r
"fw_bcm43341b0_ag_p2p.bin",\r
+ "fw_bcm43341b0_ag_es.bin",\r
"fw_bcm43341b0_ag_mfg.bin"\r
};\r
\r
"fw_bcm43241b4_ag.bin",\r
"fw_bcm43241b4_ag_apsta.bin",\r
"fw_bcm43241b4_ag_p2p.bin",\r
+ "fw_bcm43241b4_ag_es.bin",\r
"fw_bcm43241b4_ag_mfg.bin"\r
};\r
\r
"fw_bcm4339a0_ag.bin",\r
"fw_bcm4339a0_ag_apsta.bin",\r
"fw_bcm4339a0_ag_p2p.bin",\r
+ "fw_bcm4339a0_ag_es.bin",\r
"fw_bcm4339a0_ag_mfg.bin"\r
};\r
\r
"fw_bcm43455c0_ag.bin",\r
"fw_bcm43455c0_ag_apsta.bin",\r
"fw_bcm43455c0_ag_p2p.bin",\r
+ "fw_bcm43455c0_ag_es.bin",\r
"fw_bcm43455c0_ag_mfg.bin"\r
};\r
\r
"fw_bcm4354a1_ag.bin",\r
"fw_bcm4354a1_ag_apsta.bin",\r
"fw_bcm4354a1_ag_p2p.bin",\r
+ "fw_bcm4354a1_ag_es.bin",\r
"fw_bcm4354a1_ag_mfg.bin"\r
};\r
\r
"fw_bcm4356a2_ag.bin",\r
"fw_bcm4356a2_ag_apsta.bin",\r
"fw_bcm4356a2_ag_p2p.bin",\r
+ "fw_bcm4356a2_ag_es.bin",\r
"fw_bcm4356a2_ag_mfg.bin"\r
};\r
+\r
+const static char *bcm4359b1_ag_fw_name[] = {\r
+ "fw_bcm4359b1_ag.bin",\r
+ "fw_bcm4359b1_ag_apsta.bin",\r
+ "fw_bcm4359b1_ag_p2p.bin",\r
+ "fw_bcm4359b1_ag_es.bin",\r
+ "fw_bcm4359b1_ag_mfg.bin"\r
+};\r
#endif\r
#ifdef BCMPCIE\r
-#define BCM4356A2_CHIP_REV 2\r
-\r
const static char *bcm4356a2_pcie_ag_fw_name[] = {\r
"fw_bcm4356a2_pcie_ag.bin",\r
"fw_bcm4356a2_pcie_ag_apsta.bin",\r
"fw_bcm4356a2_pcie_ag_p2p.bin",\r
+ "fw_bcm4356a2_pcie_ag_es.bin",\r
"fw_bcm4356a2_pcie_ag_mfg.bin"\r
};\r
#endif\r
chip_nv_list->count = 0;\r
}\r
\r
-#if defined(HW_OOB)\r
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)\r
void\r
dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)\r
{\r
#endif\r
\r
void\r
-dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path)\r
+dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)\r
{\r
int fw_type, ag_type;\r
uint chip, chiprev;\r
- int i, j;\r
+ int i;\r
\r
chip = dhd->conf->chip;\r
chiprev = dhd->conf->chiprev;\r
if (fw_path[i] == '/') break;\r
i--;\r
}\r
- j = strlen(nv_path);\r
- while (j>0){\r
- if (nv_path[j] == '/') break;\r
- j--;\r
- }\r
#ifdef BAND_AG\r
ag_type = FW_TYPE_AG;\r
#else\r
ag_type = strstr(&fw_path[i], "_ag") ? FW_TYPE_AG : FW_TYPE_G;\r
#endif\r
- fw_type = (strstr(&fw_path[i], "_mfg") ?\r
- FW_TYPE_MFG : (strstr(&fw_path[i], "_apsta") ?\r
- FW_TYPE_APSTA : (strstr(&fw_path[i], "_p2p") ?\r
- FW_TYPE_P2P : FW_TYPE_STA)));\r
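+	/* Derive the firmware type from the file name suffix (_mfg/_apsta/_p2p/_es); a plain name selects the STA image */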
+ fw_type = (strstr(&fw_path[i], "_mfg") ? FW_TYPE_MFG :\r
+ (strstr(&fw_path[i], "_apsta") ? FW_TYPE_APSTA :\r
+ (strstr(&fw_path[i], "_p2p") ? FW_TYPE_P2P :\r
+ (strstr(&fw_path[i], "_es") ? FW_TYPE_ES :\r
+ FW_TYPE_STA))));\r
\r
switch (chip) {\r
#ifdef BCMSDIO\r
} else {\r
if (chiprev == BCM4330B2_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4330b2_ag_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_AP6330.txt");\r
break;\r
}\r
case BCM43362_CHIP_ID:\r
strcpy(&fw_path[i+1], bcm43362a0_fw_name[fw_type]);\r
else\r
strcpy(&fw_path[i+1], bcm43362a2_fw_name[fw_type]);\r
- if (strstr(nv_path, "6476") == NULL)\r
- strcpy(&nv_path[j+1], "nvram_AP6210.txt");\r
break;\r
case BCM43430_CHIP_ID:\r
if (chiprev == BCM43430A0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43438a0_fw_name[fw_type]);\r
else if (chiprev == BCM43430A1_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43438a1_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_ap6212.txt");\r
break;\r
- case BCM43340_CHIP_ID:\r
- if (chiprev == BCM43340B0_CHIP_REV)\r
- strcpy(&fw_path[i+1], bcm43341b0_ag_fw_name[fw_type]);\r
+ case BCM4334_CHIP_ID:\r
+ if (chiprev == BCM4334B1_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4334b1_ag_fw_name[fw_type]);\r
break;\r
+ case BCM43340_CHIP_ID:\r
case BCM43341_CHIP_ID:\r
if (chiprev == BCM43341B0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43341b0_ag_fw_name[fw_type]);\r
case BCM4324_CHIP_ID:\r
if (chiprev == BCM43241B4_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43241b4_ag_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_ap62x2.txt");\r
break;\r
case BCM4335_CHIP_ID:\r
if (chiprev == BCM4335A0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]);\r
break;\r
case BCM4345_CHIP_ID:\r
+ case BCM43454_CHIP_ID:\r
if (chiprev == BCM43455C0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm43455c0_ag_fw_name[fw_type]);\r
break;\r
case BCM4339_CHIP_ID:\r
if (chiprev == BCM4339A0_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_AP6335.txt");\r
break;\r
case BCM4354_CHIP_ID:\r
- if (chiprev == BCM4354A1_CHIP_REV) {\r
+ if (chiprev == BCM4354A1_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4354a1_ag_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_ap6354.txt");\r
- }\r
- else if (chiprev == BCM4356A2_CHIP_REV) {\r
+ else if (chiprev == BCM4356A2_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]);\r
- strcpy(&nv_path[j+1], "nvram_ap6356.txt"); \r
- }\r
break;\r
case BCM4356_CHIP_ID:\r
- if (chiprev == BCM4356A2_CHIP_REV)\r
- strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]);\r
- break;\r
case BCM4371_CHIP_ID:\r
if (chiprev == BCM4356A2_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]);\r
break;\r
+ case BCM4359_CHIP_ID:\r
+ if (chiprev == BCM4359B1_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4359b1_ag_fw_name[fw_type]);\r
+ break;\r
#endif\r
#ifdef BCMPCIE\r
+ case BCM4354_CHIP_ID:\r
+ if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4356a2_pcie_ag_fw_name[fw_type]);\r
+ break;\r
case BCM4356_CHIP_ID:\r
if (chiprev == BCM4356A2_CHIP_REV)\r
strcpy(&fw_path[i+1], bcm4356a2_pcie_ag_fw_name[fw_type]);\r
\r
/* find out the last '/' */\r
i = strlen(nv_path);\r
- while (i > 0) {\r
+ while (i > 0) {
if (nv_path[i] == '/') break;\r
i--;\r
}\r
\r
/* find out the last '/' */\r
i = strlen(conf_path);\r
- while (i > 0) {\r
+ while (i > 0) {
if (conf_path[i] == '/') break;\r
i--;\r
}\r
printf("%s: config_path=%s\n", __FUNCTION__, conf_path);\r
}\r
\r
+#ifdef CONFIG_PATH_AUTO_SELECT\r
+void\r
+dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path)\r
+{\r
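+	/* Overwrite the file name part of conf_path with the config_xxxx.txt matching this chip and revision */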
+ uint chip, chiprev;\r
+ int i;\r
+\r
+ chip = dhd->conf->chip;\r
+ chiprev = dhd->conf->chiprev;\r
+\r
+ if (conf_path[0] == '\0') {\r
+ printf("config path is null\n");\r
+ return;\r
+ }\r
+\r
+ /* find out the last '/' */\r
+ i = strlen(conf_path);\r
+ while (i > 0) {\r
+ if (conf_path[i] == '/') break;\r
+ i--;\r
+ }\r
+\r
+ switch (chip) {\r
+#ifdef BCMSDIO\r
+ case BCM4330_CHIP_ID:\r
+ if (chiprev == BCM4330B2_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4330B2_CONF_NAME);\r
+ break;\r
+ case BCM43362_CHIP_ID:\r
+ if (chiprev == BCM43362A0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43362A0_CONF_NAME);\r
+ else\r
+ strcpy(&conf_path[i+1], BCM43362A2_CONF_NAME);\r
+ break;\r
+ case BCM43430_CHIP_ID:\r
+ if (chiprev == BCM43430A0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43438A0_CONF_NAME);\r
+ else if (chiprev == BCM43430A1_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43438A1_CONF_NAME);\r
+ break;\r
+ case BCM4334_CHIP_ID:\r
+ if (chiprev == BCM4334B1_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4334B1_CONF_NAME);\r
+ break;\r
+ case BCM43340_CHIP_ID:\r
+ case BCM43341_CHIP_ID:\r
+ if (chiprev == BCM43341B0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43341B0_CONF_NAME);\r
+ break;\r
+ case BCM4324_CHIP_ID:\r
+ if (chiprev == BCM43241B4_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43241B4_CONF_NAME);\r
+ break;\r
+ case BCM4335_CHIP_ID:\r
+ if (chiprev == BCM4335A0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4339A0_CONF_NAME);\r
+ break;\r
+ case BCM4345_CHIP_ID:\r
+ case BCM43454_CHIP_ID:\r
+ if (chiprev == BCM43455C0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM43455C0_CONF_NAME);\r
+ break;\r
+ case BCM4339_CHIP_ID:\r
+ if (chiprev == BCM4339A0_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4339A0_CONF_NAME);\r
+ break;\r
+ case BCM4354_CHIP_ID:\r
+ if (chiprev == BCM4354A1_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4354A1_CONF_NAME);\r
+ else if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME);\r
+ break;\r
+ case BCM4356_CHIP_ID:\r
+ case BCM4371_CHIP_ID:\r
+ if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME);\r
+ break;\r
+ case BCM4359_CHIP_ID:\r
+ if (chiprev == BCM4359B1_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4359B1_CONF_NAME);\r
+ break;\r
+#endif\r
+#ifdef BCMPCIE\r
+ case BCM4356_CHIP_ID:\r
+ if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&conf_path[i+1], BCM4356A2_CONF_NAME);\r
+ break;\r
+#endif\r
+ }\r
+\r
+ printf("%s: config_path=%s\n", __FUNCTION__, conf_path);\r
+}\r
+#endif\r
+\r
+int\r
+dhd_conf_set_fw_int_cmd(dhd_pub_t *dhd, char *name, uint cmd, int val,\r
+ int def, bool down)\r
+{\r
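+	/* Generic helper: issue an integer ioctl (cmd) to the firmware when val >= def; bring the interface down first if requested */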
+ int bcmerror = -1;\r
+\r
+ if (val >= def) {\r
+ if (down) {\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+ printf("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, bcmerror));\r
+ }\r
+ return bcmerror;\r
+}\r
+\r
+int\r
+dhd_conf_set_fw_int_struct_cmd(dhd_pub_t *dhd, char *name, uint cmd,\r
+ int *val, int len, bool down)\r
+{\r
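+	/* Generic helper: issue a buffer-based ioctl (cmd) to the firmware; bring the interface down first if requested */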
+ int bcmerror = -1;\r
+\r
+ if (down) {\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, cmd, val, len, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, bcmerror));\r
+\r
+ return bcmerror;\r
+}\r
+\r
int\r
-dhd_conf_set_band(dhd_pub_t *dhd)\r
+dhd_conf_set_fw_string_cmd(dhd_pub_t *dhd, char *cmd, int val, int def,\r
+ bool down)\r
{\r
int bcmerror = -1;\r
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+\r
+ if (val >= def) {\r
+ if (down) {\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+ printf("%s: set %s %d\n", __FUNCTION__, cmd, val);\r
+ bcm_mkiovar(cmd, (char *)&val, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, cmd, bcmerror));\r
+ }\r
+ return bcmerror;\r
+}\r
\r
- printf("%s: Set band %d\n", __FUNCTION__, dhd->conf->band);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &dhd->conf->band,\r
- sizeof(dhd->conf->band), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_SET_BAND setting failed %d\n", __FUNCTION__, bcmerror));\r
+int\r
+dhd_conf_set_fw_string_struct_cmd(dhd_pub_t *dhd, char *cmd, char *val,\r
+ int len, bool down)\r
+{\r
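+	/* Generic helper: set a named iovar with a buffer value via bcm_mkiovar + WLC_SET_VAR; bring the interface down first if requested */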
+ int bcmerror = -1;\r
+ char iovbuf[WLC_IOCTL_SMLEN];\r
+ \r
+ if (down) {\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+ printf("%s: set %s\n", __FUNCTION__, cmd);\r
+ bcm_mkiovar(cmd, val, len, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, cmd, bcmerror));\r
\r
return bcmerror;\r
}\r
dhd_conf_set_country(dhd_pub_t *dhd)\r
{\r
int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
\r
memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));\r
- printf("%s: Set country %s, revision %d\n", __FUNCTION__,\r
+ printf("%s: set country %s, revision %d\n", __FUNCTION__,\r
dhd->conf->cspec.ccode, dhd->conf->cspec.rev);\r
- bcm_mkiovar("country", (char *)&dhd->conf->cspec,\r
- sizeof(wl_country_t), iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- printf("%s: country code setting failed %d\n", __FUNCTION__, bcmerror);\r
+ dhd_conf_set_fw_string_struct_cmd(dhd, "country", (char *)&dhd->conf->cspec, sizeof(wl_country_t), FALSE);\r
\r
return bcmerror;\r
}\r
memset(cspec, 0, sizeof(wl_country_t));\r
bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0)\r
- printf("%s: country code getting failed %d\n", __FUNCTION__, bcmerror);\r
+ CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror));\r
else\r
printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev);\r
\r
return bcmerror;\r
}\r
\r
+int\r
+dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec)\r
+{\r
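+	/* Look up cspec->country_abbrev in the country_list parsed from config.txt and return its ccode/regrev */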
+ int bcmerror = -1, i;\r
+ struct dhd_conf *conf = dhd->conf;\r
+\r
+ for (i = 0; i < conf->country_list.count; i++) {\r
+ if (strcmp(cspec->country_abbrev, conf->country_list.cspec[i].country_abbrev) == 0) {\r
+ memcpy(cspec->ccode,\r
+ conf->country_list.cspec[i].ccode, WLC_CNTRY_BUF_SZ);\r
+ cspec->rev = conf->country_list.cspec[i].rev;\r
+ printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev);\r
+ return 0;\r
+ }\r
+ }\r
+\r
+ return bcmerror;\r
+}\r
+\r
int\r
dhd_conf_fix_country(dhd_pub_t *dhd)\r
{\r
dhd_conf_set_roam(dhd_pub_t *dhd)\r
{\r
int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
struct dhd_conf *conf = dhd->conf;\r
\r
- printf("%s: Set roam_off %d\n", __FUNCTION__, conf->roam_off);\r
dhd_roam_disable = conf->roam_off;\r
- bcm_mkiovar("roam_off", (char *)&conf->roam_off, 4, iovbuf, sizeof(iovbuf));\r
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);\r
+ dhd_conf_set_fw_string_cmd(dhd, "roam_off", dhd->conf->roam_off, 0, FALSE);\r
\r
if (!conf->roam_off || !conf->roam_off_suspend) {\r
- printf("%s: Set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, conf->roam_trigger,\r
- sizeof(conf->roam_trigger), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: roam trigger setting failed %d\n", __FUNCTION__, bcmerror));\r
-\r
- printf("%s: Set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, conf->roam_scan_period,\r
- sizeof(conf->roam_scan_period), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: roam scan period setting failed %d\n", __FUNCTION__, bcmerror));\r
-\r
- printf("%s: Set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, conf->roam_delta,\r
- sizeof(conf->roam_delta), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: roam delta setting failed %d\n", __FUNCTION__, bcmerror));\r
-\r
- printf("%s: Set fullroamperiod %d\n", __FUNCTION__, conf->fullroamperiod);\r
- bcm_mkiovar("fullroamperiod", (char *)&conf->fullroamperiod, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: roam fullscan period setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
+ printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);\r
+ dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_TRIGGER", WLC_SET_ROAM_TRIGGER,\r
+ conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);\r
\r
- return bcmerror;\r
-}\r
-\r
-void\r
-dhd_conf_set_mimo_bw_cap(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint32 mimo_bw_cap;\r
+ printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);\r
+ dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_SCAN_PERIOD", WLC_SET_ROAM_SCAN_PERIOD,\r
+ conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);\r
\r
- if (dhd->conf->mimo_bw_cap >= 0) {\r
- mimo_bw_cap = (uint)dhd->conf->mimo_bw_cap;\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
- /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */\r
- printf("%s: Set mimo_bw_cap %d\n", __FUNCTION__, mimo_bw_cap);\r
- bcm_mkiovar("mimo_bw_cap", (char *)&mimo_bw_cap, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: mimo_bw_cap setting failed %d\n", __FUNCTION__, bcmerror));\r
+ printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);\r
+ dhd_conf_set_fw_int_struct_cmd(dhd, "WLC_SET_ROAM_DELTA", WLC_SET_ROAM_DELTA,\r
+ conf->roam_delta, sizeof(conf->roam_delta), FALSE);\r
+ \r
+ dhd_conf_set_fw_string_cmd(dhd, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE);\r
}\r
-}\r
-\r
-void\r
-dhd_conf_force_wme(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
\r
- if (dhd->conf->force_wme_ac) {\r
- bcm_mkiovar("force_wme_ac", (char *)&dhd->conf->force_wme_ac, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: force_wme_ac setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
+ return bcmerror;\r
}\r
\r
void\r
void\r
dhd_conf_update_wme(dhd_pub_t *dhd, edcf_acparam_t *acparam_cur, int aci)\r
{\r
- int bcmerror = -1;\r
int aifsn, ecwmin, ecwmax;\r
edcf_acparam_t *acp;\r
- char iovbuf[WLC_IOCTL_SMLEN];\r
struct dhd_conf *conf = dhd->conf;\r
\r
/* Default value */\r
* Put WME acparams after "wme_ac\0" in buf.\r
* NOTE: only one of the four ACs can be set at a time.\r
*/\r
- bcm_mkiovar("wme_ac_sta", (char*)acp, sizeof(edcf_acparam_t), iovbuf,\r
- sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {\r
- CONFIG_ERROR(("%s: wme_ac_sta setting failed %d\n", __FUNCTION__, bcmerror));\r
- return;\r
- }\r
+ dhd_conf_set_fw_string_struct_cmd(dhd, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE);\r
+\r
}\r
\r
void\r
return;\r
}\r
\r
-void\r
-dhd_conf_set_stbc(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint stbc = 0;\r
-\r
- if (dhd->conf->stbc >= 0) {\r
- stbc = (uint)dhd->conf->stbc;\r
- printf("%s: set stbc_tx %d\n", __FUNCTION__, stbc);\r
- bcm_mkiovar("stbc_tx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_tx setting failed %d\n", __FUNCTION__, bcmerror));\r
-\r
- printf("%s: set stbc_rx %d\n", __FUNCTION__, stbc);\r
- bcm_mkiovar("stbc_rx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_rx setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_phyoclscdenable(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint phy_oclscdenable = 0;\r
-\r
- if (dhd->conf->chip == BCM4324_CHIP_ID && dhd->conf->phy_oclscdenable >= 0) {\r
- phy_oclscdenable = (uint)dhd->conf->phy_oclscdenable;\r
- printf("%s: set stbc_tx %d\n", __FUNCTION__, phy_oclscdenable);\r
- bcm_mkiovar("phy_oclscdenable", (char *)&phy_oclscdenable, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_tx setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
#ifdef PKT_FILTER_SUPPORT\r
void\r
dhd_conf_add_pkt_filter(dhd_pub_t *dhd)\r
{\r
int i;\r
+	char str[16]; /* 12 hex chars + terminating NUL for the MAC pattern */
+#define MACS "%02x%02x%02x%02x%02x%02x"\r
\r
/*\r
* All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000\r
printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]);\r
}\r
dhd->pktfilter_count += i;\r
+\r
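+	/* pkt_filter_magic: add one more filter (id 256) whose pattern is 16 back-to-back copies of the device MAC (magic-packet style) */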
+ if (dhd->conf->pkt_filter_magic) {\r
+ strcpy(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], "256 0 1 0 0x");\r
+ for (i=0; i<16; i++)\r
+ strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], "FFFFFFFFFFFF");\r
+ strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], " 0x");\r
+ sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet));\r
+ for (i=0; i<16; i++)\r
+ strcat(&dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count][0], str);\r
+ dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[dhd->conf->pkt_filter_add.count];\r
+ dhd->pktfilter_count += 1;\r
+ }\r
}\r
\r
bool\r
\r
void\r
dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)\r
-{\r
+{
+ dhd->pktfilter_count = 6;\r
dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;\r
dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";\r
dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";\r
}\r
#endif /* PKT_FILTER_SUPPORT */\r
\r
-void\r
-dhd_conf_set_srl(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- uint srl = 0;\r
-\r
- if (dhd->conf->srl >= 0) {\r
- srl = (uint)dhd->conf->srl;\r
- printf("%s: set srl %d\n", __FUNCTION__, srl);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_SRL, &srl , sizeof(srl), true, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_SET_SRL setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_lrl(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- uint lrl = 0;\r
-\r
- if (dhd->conf->lrl >= 0) {\r
- lrl = (uint)dhd->conf->lrl;\r
- printf("%s: set lrl %d\n", __FUNCTION__, lrl);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_LRL, &lrl , sizeof(lrl), true, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_SET_LRL setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_bus_txglom(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint32 bus_txglom = 0;\r
-\r
- if (dhd->conf->bus_txglom) {\r
- bus_txglom = (uint)dhd->conf->bus_txglom;\r
- printf("%s: set bus:txglom %d\n", __FUNCTION__, bus_txglom);\r
- bcm_mkiovar("bus:txglom", (char *)&bus_txglom, 4, iovbuf, sizeof(iovbuf));\r
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: bus:txglom setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_ampdu_ba_wsize(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint32 ampdu_ba_wsize = dhd->conf->ampdu_ba_wsize;\r
-\r
- /* Set ampdu_ba_wsize */\r
- if (ampdu_ba_wsize > 0) {\r
- printf("%s: set ampdu_ba_wsize %d\n", __FUNCTION__, ampdu_ba_wsize);\r
-		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
- sizeof(iovbuf), TRUE, 0)) < 0) {\r
- DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",\r
- __FUNCTION__, ampdu_ba_wsize, bcmerror));\r
- }\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_spect(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- uint spect = 0;\r
-\r
- if (dhd->conf->spect >= 0) {\r
- spect = (uint)dhd->conf->spect;\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
- printf("%s: set spect %d\n", __FUNCTION__, spect);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_SPECT_MANAGMENT, &spect , sizeof(spect), true, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_SET_SPECT_MANAGMENT setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_txbf(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- int txbf = dhd->conf->txbf;\r
-\r
- /* Set txbf */\r
- if (txbf >= 0) {\r
- printf("%s: set txbf %d\n", __FUNCTION__, txbf);\r
- bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
- sizeof(iovbuf), TRUE, 0)) < 0) {\r
- DHD_ERROR(("%s Set txbf to %d failed %d\n",\r
- __FUNCTION__, txbf, bcmerror));\r
- }\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_frameburst(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- int frameburst = dhd->conf->frameburst;\r
-\r
- /* Set txbframeburstf */\r
- if (frameburst >= 0) {\r
- printf("%s: set frameburst %d\n", __FUNCTION__, frameburst);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, &frameburst , sizeof(frameburst), true, 0)) < 0)\r
- CONFIG_ERROR(("%s: WLC_SET_FAKEFRAG setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_lpc(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- int lpc = dhd->conf->lpc;\r
-\r
- /* Set lpc */\r
- if (lpc >= 0) {\r
- printf("%s: set lpc %d\n", __FUNCTION__, lpc);\r
- bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
- sizeof(iovbuf), TRUE, 0)) < 0) {\r
- DHD_ERROR(("%s Set lpc to %d failed %d\n",\r
- __FUNCTION__, lpc, bcmerror));\r
- }\r
- }\r
-}\r
-\r
void\r
dhd_conf_set_disable_proptx(dhd_pub_t *dhd)\r
{\r
dhd_conf_read_log_level(dhd_pub_t *dhd, char *bufp, uint len)\r
{\r
uint len_val;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
+\r
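+	/* The parse buffer is now heap-allocated (MAXSZ_BUF) instead of living on the stack; it is freed with MFREE below */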
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
\r
/* Process dhd_msglevel */\r
memset(pick, 0, MAXSZ_BUF);\r
}\r
#endif\r
\r
+#if defined(DHD_DEBUG)\r
/* Process dhd_console_ms */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "dhd_console_ms=");\r
dhd_console_ms = (int)simple_strtol(pick, NULL, 0);\r
printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms);\r
}\r
+#endif\r
+\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
}
void\r
dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *bufp, uint len)\r
{\r
uint len_val;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
struct dhd_conf *conf = dhd->conf;\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
/* Process WMM parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "force_wme_ac=");\r
}\r
}\r
\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
+\r
}\r
\r
void\r
{\r
uint len_val;\r
int i, j;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
char *pch, *pick_tmp;\r
wl_mac_list_t *mac_list;\r
wl_mac_range_t *mac_range;\r
struct dhd_conf *conf = dhd->conf;\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
/* Process fw_by_mac:\r
* fw_by_mac=[fw_mac_num] \\r
* [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \\r
}\r
}\r
}\r
+\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
}\r
\r
void\r
{\r
uint len_val;\r
int i, j;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
char *pch, *pick_tmp;\r
wl_mac_list_t *mac_list;\r
wl_mac_range_t *mac_range;\r
struct dhd_conf *conf = dhd->conf;\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
/* Process nv_by_mac:\r
* [nv_by_mac]: The same format as fw_by_mac\r
*/\r
}\r
}\r
}\r
+\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
}\r
\r
void\r
{\r
uint len_val;\r
int i;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
char *pch, *pick_tmp;\r
wl_chip_nv_path_t *chip_nv_path;\r
struct dhd_conf *conf = dhd->conf;\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
/* Process nv_by_chip:\r
* nv_by_chip=[nv_chip_num] \\r
* [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \\r
chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);\r
}\r
}\r
+\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
}\r
\r
void\r
dhd_conf_read_roam_params(dhd_pub_t *dhd, char *bufp, uint len)\r
{\r
uint len_val;\r
- char pick[MAXSZ_BUF];\r
+ char *pick;\r
struct dhd_conf *conf = dhd->conf;\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
/* Process roam */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "roam_off=");\r
conf->fullroamperiod);\r
}\r
\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
+\r
+}\r
+\r
+void\r
+dhd_conf_read_country_list(dhd_pub_t *dhd, char *bufp, uint len)\r
+{\r
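+	/* Entries parsed here are consumed by dhd_conf_get_country_from_config() */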
+ uint len_val;\r
+ int i;\r
+ char *pick, *pch, *pick_tmp;\r
+ struct dhd_conf *conf = dhd->conf;\r
+\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ return;\r
+ }\r
+\r
+ /* Process country_list:\r
+ * country_list=[country1]:[ccode1]/[regrev1],\r
+ * [country2]:[ccode2]/[regrev2] \\r
+ * Ex: country_list=US:US/0, TW:TW/1\r
+ */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "country_list=");\r
+ if (len_val) {\r
+ pick_tmp = pick;\r
+ for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {\r
+ /* Process country code */\r
+ pch = bcmstrtok(&pick_tmp, ":", 0);\r
+ if (!pch)\r
+ break;\r
+ strcpy(conf->country_list.cspec[i].country_abbrev, pch);\r
+ pch = bcmstrtok(&pick_tmp, "/", 0);\r
+ if (!pch)\r
+ break;\r
+ memcpy(conf->country_list.cspec[i].ccode, pch, 2);\r
+ pch = bcmstrtok(&pick_tmp, ", ", 0);\r
+ if (!pch)\r
+ break;\r
+ conf->country_list.cspec[i].rev = (int32)simple_strtol(pch, NULL, 10);\r
+ conf->country_list.count ++;\r
+ CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__,\r
+ conf->country_list.cspec[i].country_abbrev,\r
+ conf->country_list.cspec[i].ccode,\r
+ conf->country_list.cspec[i].rev));\r
+ }\r
+		printf("%s: %d countries in list\n", __FUNCTION__, conf->country_list.count);
+ }\r
+\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
}\r
\r
int\r
uint len, len_val;\r
void * image = NULL;\r
char * memblock = NULL;\r
- char *bufp, pick[MAXSZ_BUF], *pch, *pick_tmp;\r
+ char *bufp, *pick = NULL, *pch, *pick_tmp;\r
bool conf_file_exists;\r
struct dhd_conf *conf = dhd->conf;\r
\r
conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));\r
if (!conf_file_exists) {\r
- printk("%s: config path %s\n", __FUNCTION__, conf_path);\r
+ printf("%s: config path %s\n", __FUNCTION__, conf_path);\r
return (0);\r
}\r
\r
if (conf_file_exists) {\r
image = dhd_os_open_image(conf_path);\r
if (image == NULL) {\r
- printk("%s: Ignore config file %s\n", __FUNCTION__, conf_path);\r
+ printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path);\r
goto err;\r
}\r
}\r
goto err;\r
}\r
\r
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
+ if (!pick) {\r
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
+ __FUNCTION__, MAXSZ_BUF));\r
+ goto err;\r
+ }\r
+\r
/* Read variables */\r
if (conf_file_exists) {\r
len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);\r
dhd_conf_read_fw_by_mac(dhd, bufp, len);\r
dhd_conf_read_nv_by_mac(dhd, bufp, len);\r
dhd_conf_read_nv_by_chip(dhd, bufp, len);\r
+ dhd_conf_read_country_list(dhd, bufp, len);\r
\r
/* Process band:\r
* band=a for 5GHz only and band=b for 2.4GHz only\r
printf("%s: band = %d\n", __FUNCTION__, conf->band);\r
}\r
\r
- /* Process bandwidth */\r
+ /* Process mimo_bw_cap */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "mimo_bw_cap=");\r
if (len_val) {\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "keep_alive_period=");\r
if (len_val) {\r
- conf->keep_alive_period = (int)simple_strtol(pick, NULL, 10);\r
+ conf->keep_alive_period = (uint)simple_strtol(pick, NULL, 10);\r
printf("%s: keep_alive_period = %d\n", __FUNCTION__,\r
conf->keep_alive_period);\r
}\r
dhd_doflow = TRUE;\r
printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);\r
}\r
+\r
+ /* Process dhd_slpauto parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dhd_slpauto=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ dhd_slpauto = FALSE;\r
+ else\r
+ dhd_slpauto = TRUE;\r
+ printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto);\r
+ }\r
#endif\r
\r
/* Process dhd_master_mode parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "bcn_timeout=");\r
if (len_val) {\r
- conf->bcn_timeout= (int)simple_strtol(pick, NULL, 10);\r
+ conf->bcn_timeout= (uint)simple_strtol(pick, NULL, 10);\r
printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);\r
}\r
\r
printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain);\r
}\r
\r
+#if defined(BCMSDIOH_TXGLOM)
/* Process txglomsize parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "txglomsize=");\r
if (len_val) {\r
- conf->txglomsize = (int)simple_strtol(pick, NULL, 10);\r
+ conf->txglomsize = (uint)simple_strtol(pick, NULL, 10);\r
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)\r
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;\r
printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize);\r
}\r
+
+ /* Process swtxglom parameters */\r
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "swtxglom=");\r
+ if (len_val) {
+ if (!strncmp(pick, "0", len_val))
+ conf->swtxglom = FALSE;\r
+ else
+ conf->swtxglom = TRUE;\r
+ printf("%s: swtxglom = %d\n", __FUNCTION__, conf->swtxglom);\r
+ }\r
+\r
+ /* Process txglom_ext parameters */
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "txglom_ext=");
+ if (len_val) {
+ if (!strncmp(pick, "0", len_val))
+ conf->txglom_ext = FALSE;
+ else
+ conf->txglom_ext = TRUE;
+ printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext);
+ if (conf->txglom_ext) {
+ if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))
+ conf->txglom_bucket_size = 1680;
+ else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)\r
+ conf->txglom_bucket_size = 1684;
+ }
+ printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size);
+ }
+#endif\r
\r
/* Process disable_proptx parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "disable_proptx=");\r
if (len_val) {\r
- dhd->conf->disable_proptx = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: disable_proptx = %d\n", __FUNCTION__, dhd->conf->disable_proptx);\r
+ conf->disable_proptx = (int)simple_strtol(pick, NULL, 10);
+ printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx);
}\r
\r
/* Process dpc_cpucore parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "tcpack_sup_mode=");\r
if (len_val) {\r
- conf->tcpack_sup_mode = (int)simple_strtol(pick, NULL, 10);\r
+ conf->tcpack_sup_mode = (uint)simple_strtol(pick, NULL, 10);\r
printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode);\r
}\r
\r
+ /* Process dhd_poll parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dhd_poll=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ conf->dhd_poll = 0;\r
+ else\r
+ conf->dhd_poll = 1;\r
+ printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll);\r
+ }\r
+\r
+ /* Process deferred_tx_len parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "deferred_tx_len=");\r
+ if (len_val) {\r
+ conf->deferred_tx_len = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len);\r
+ }\r
+\r
+ /* Process pktprio8021x parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "pktprio8021x=");\r
+ if (len_val) {\r
+ conf->pktprio8021x = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x);\r
+ }
+
+ /* Process txctl_tmo_fix parameters */
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "txctl_tmo_fix=");
+ if (len_val) {
+ if (!strncmp(pick, "0", len_val))
+ conf->txctl_tmo_fix = FALSE;
+ else
+ conf->txctl_tmo_fix = TRUE;
+ printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix);
+ }
+
+ /* Process tx_in_rx parameters */
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "tx_in_rx=");
+ if (len_val) {
+ if (!strncmp(pick, "0", len_val))
+ conf->tx_in_rx = FALSE;
+ else
+ conf->tx_in_rx = TRUE;
+ printf("%s: tx_in_rx = %d\n", __FUNCTION__, conf->tx_in_rx);
+ }
+
+ /* Process dhd_txbound parameters */
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "dhd_txbound=");
+ if (len_val) {
+ dhd_txbound = (uint)simple_strtol(pick, NULL, 10);
+ printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound);
+ }
+
+ /* Process dhd_rxbound parameters */
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "dhd_rxbound=");
+ if (len_val) {
+ dhd_rxbound = (uint)simple_strtol(pick, NULL, 10);
+ printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound);
+ }\r
+\r
+ /* Process tx_max_offset parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "tx_max_offset=");\r
+ if (len_val) {\r
+ conf->tx_max_offset = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset);\r
+ }\r
+\r
+ /* Process rsdb_mode parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "rsdb_mode=");\r
+ if (len_val) {\r
+ conf->rsdb_mode = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: rsdb_mode = %d\n", __FUNCTION__, conf->rsdb_mode);\r
+ }\r
+
+ /* Process txglom_mode parameters */\r
+ memset(pick, 0, MAXSZ_BUF);
+ len_val = process_config_vars(bufp, len, pick, "txglom_mode=");\r
+ if (len_val) {
+ if (!strncmp(pick, "0", len_val))
+ conf->txglom_mode = FALSE;\r
+ else
+ conf->txglom_mode = TRUE;\r
+ printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode);\r
+ }\r
+\r
bcmerror = 0;\r
} else {\r
CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));\r
}\r
\r
err:\r
+ if (pick)\r
+ MFREE(dhd->osh, pick, MAXSZ_BUF);\r
+\r
if (memblock)\r
MFREE(dhd->osh, memblock, MAXSZ_CONFIG);\r
\r
return 0;\r
}\r
\r
+void\r
+dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable)\r
+{\r
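+	/* Enable: apply chip-specific SW tx glom defaults; disable: reset the tx glom related parameters (swtxglom itself is left untouched) */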
+ struct dhd_conf *conf = dhd->conf;\r
+\r
+ if (enable) {\r
+#if defined(SWTXGLOM)\r
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||\r
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
+			// 43362/4330/4334/43340/43341/43241 must use the 1.88.45.x swtxglom path if txglom_ext is true, since 1.201.59.x does not support swtxglom
+ conf->swtxglom = TRUE;\r
+ conf->txglom_ext = TRUE;\r
+ }\r
+ if (conf->chip == BCM43362_CHIP_ID && conf->bus_txglom == 0) {\r
+			conf->bus_txglom = 1; // improves TCP TX throughput and CPU idle time; 43362 only
+ }\r
+#endif\r
+ // other parameters set in preinit or config.txt\r
+ } else {\r
+		// clear txglom parameters, but don't change swtxglom since it may have been enabled in config.txt
+ conf->txglom_ext = FALSE;\r
+ conf->txglom_bucket_size = 0;\r
+ conf->tx_in_rx = TRUE;\r
+ conf->tx_max_offset = 0;\r
+ conf->txglomsize = 0;\r
+ conf->deferred_tx_len = 0;\r
+ }\r
+ printf("%s: swtxglom=%d, txglom_ext=%d\n", __FUNCTION__,\r
+ conf->swtxglom, conf->txglom_ext);\r
+ printf("%s: txglom_bucket_size=%d\n", __FUNCTION__, conf->txglom_bucket_size);\r
+ printf("%s: txglomsize=%d, deferred_tx_len=%d, bus_txglom=%d\n", __FUNCTION__,\r
+ conf->txglomsize, conf->deferred_tx_len, conf->bus_txglom);\r
+ printf("%s: tx_in_rx=%d, tx_max_offset=%d\n", __FUNCTION__,\r
+ conf->tx_in_rx, conf->tx_max_offset);\r
+\r
+}\r
+\r
int\r
dhd_conf_preinit(dhd_pub_t *dhd)\r
{\r
dhd_conf_free_mac_list(&conf->nv_by_mac);\r
dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);\r
#endif\r
+ memset(&conf->country_list, 0, sizeof(conf_country_list_t));\r
conf->band = WLC_BAND_AUTO;\r
conf->mimo_bw_cap = -1;\r
if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
conf->cspec.rev = 0;\r
} else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
- conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID) {\r
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
+ conf->chip == BCM4359_CHIP_ID) {\r
strcpy(conf->cspec.country_abbrev, "CN");\r
strcpy(conf->cspec.ccode, "CN");\r
conf->cspec.rev = 38;\r
#ifdef PKT_FILTER_SUPPORT\r
memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));\r
memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));\r
+ conf->pkt_filter_magic = FALSE;\r
#endif\r
conf->srl = -1;\r
conf->lrl = -1;\r
conf->lpc = -1;\r
conf->disable_proptx = 0;\r
conf->bus_txglom = 0;\r
- conf->use_rxchain = 1;\r
+ conf->use_rxchain = 0;\r
conf->bus_rxglom = TRUE;\r
- conf->txglomsize = -1;\r
+ conf->txglom_ext = FALSE;\r
+ conf->tx_max_offset = 0;\r
+ conf->deferred_tx_len = 0;\r
+ conf->txglomsize = SDPCM_DEFGLOM_SIZE;\r
conf->ampdu_ba_wsize = 0;\r
conf->dpc_cpucore = 0;\r
conf->frameburst = -1;\r
conf->deepsleep = FALSE;\r
conf->pm = -1;\r
+#ifdef DHDTCPACK_SUPPRESS
conf->tcpack_sup_mode = TCPACK_SUP_OFF;\r
+#endif\r
+ conf->dhd_poll = -1;\r
+ conf->pktprio8021x = -1;
+ conf->txctl_tmo_fix = FALSE;
+ conf->tx_in_rx = TRUE;\r
+ conf->rsdb_mode = -2;\r
+ conf->txglom_mode = SDPCM_TXGLOM_MDESC;\r
if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID)) {\r
conf->disable_proptx = 1;\r
conf->use_rxchain = 0;\r
- }\r
+ }
if (conf->chip == BCM43430_CHIP_ID) {\r
conf->bus_rxglom = FALSE;\r
conf->use_rxchain = 0;\r
if (conf->chip == BCM4371_CHIP_ID) {\r
conf->txbf = 1;\r
}\r
+ if (conf->chip == BCM4359_CHIP_ID) {\r
+ conf->txbf = 1;\r
+ conf->rsdb_mode = 0;\r
+ }\r
#ifdef BCMSDIO\r
if (conf->chip == BCM4356_CHIP_ID) {\r
conf->txbf = 1;\r
}\r
#endif\r
\r
+#if defined(SWTXGLOM)\r
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||\r
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
+ conf->swtxglom = FALSE; // disabled by default\r
+ conf->txglom_ext = TRUE; // enabled by default\r
+		conf->use_rxchain = 0; // use_rxchain must be disabled when swtxglom is enabled
+ conf->txglomsize = 16;\r
+ } else {\r
+ conf->swtxglom = FALSE; // use 1.201.59.x txglom by default\r
+ conf->txglom_ext = FALSE;\r
+ }\r
+\r
+ if (conf->chip == BCM43362_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1680; // fixed value, don't change\r
+ conf->tx_in_rx = FALSE;\r
+ conf->tx_max_offset = 1;\r
+ }\r
+ if (conf->chip == BCM4330_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1680; // fixed value, don't change\r
+ conf->tx_in_rx = FALSE;\r
+ conf->tx_max_offset = 0;\r
+ }\r
+ if (conf->chip == BCM4334_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1684; // fixed value, don't change\r
+		conf->tx_in_rx = TRUE; // improves TCP TX throughput and CPU idle time
+ conf->tx_max_offset = 0; // reduce udp tx: dhdsdio_readframes: got unlikely tx max 109 with tx_seq 110\r
+ }\r
+ if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1684; // fixed value, don't change\r
+		conf->tx_in_rx = TRUE; // improves TCP TX throughput and CPU idle time
+ conf->tx_max_offset = 1;\r
+ }\r
+ if (conf->chip == BCM4324_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1684; // fixed value, don't change\r
+		conf->tx_in_rx = TRUE; // improves TCP TX throughput and CPU idle time
+ conf->tx_max_offset = 0;\r
+ }\r
+#endif\r
+#if defined(BCMSDIOH_TXGLOM_EXT)\r
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;\r
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||\r
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
+ conf->txglom_ext = TRUE;\r
+ conf->use_rxchain = 0;\r
+ conf->tx_in_rx = TRUE;\r
+ conf->tx_max_offset = 1;\r
+ } else {\r
+ conf->txglom_ext = FALSE;\r
+ }\r
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1680; // fixed value, don't change\r
+ conf->txglomsize = 6;\r
+ }\r
+ if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||\r
+ conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
+ conf->txglom_bucket_size = 1684; // fixed value, don't change\r
+ conf->txglomsize = 16;\r
+ }\r
+#endif\r
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)\r
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;\r
+ conf->deferred_tx_len = conf->txglomsize;\r
+\r
return 0;\r
}\r
\r
#include <proto/802.11.h>\r
\r
#define FW_PATH_AUTO_SELECT 1\r
+//#define CONFIG_PATH_AUTO_SELECT\r
extern char firmware_path[MOD_PARAM_PATHLEN];\r
extern int disable_proptx;\r
+extern uint dhd_rxbound;
+extern uint dhd_txbound;
+#define TXGLOM_RECV_OFFSET 8
#ifdef BCMSDIO\r
extern uint dhd_doflow;\r
+extern uint dhd_slpauto;\r
+\r
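+/* Chip revision numbers used when auto-selecting firmware, nvram and config file names */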
+#define BCM43362A0_CHIP_REV 0\r
+#define BCM43362A2_CHIP_REV 1\r
+#define BCM43430A0_CHIP_REV 0\r
+#define BCM43430A1_CHIP_REV 1\r
+#define BCM4330B2_CHIP_REV 4\r
+#define BCM4334B1_CHIP_REV 3\r
+#define BCM43341B0_CHIP_REV 2\r
+#define BCM43241B4_CHIP_REV 5\r
+#define BCM4335A0_CHIP_REV 2\r
+#define BCM4339A0_CHIP_REV 1\r
+#define BCM43455C0_CHIP_REV 6\r
+#define BCM4354A1_CHIP_REV 1\r
+#define BCM4359B1_CHIP_REV 5\r
#endif\r
+#define BCM4356A2_CHIP_REV 2\r
\r
/* mac range */\r
typedef struct wl_mac_range {\r
} conf_pkt_filter_del_t;\r
#endif\r
\r
+#define CONFIG_COUNTRY_LIST_SIZE 100\r
+/* country list */\r
+typedef struct conf_country_list {\r
+ uint32 count;\r
+ wl_country_t cspec[CONFIG_COUNTRY_LIST_SIZE];\r
+} conf_country_list_t;\r
+\r
typedef struct dhd_conf {\r
uint chip; /* chip number */
uint chiprev; /* chip revision */\r
wl_mac_list_ctrl_t fw_by_mac; /* Firmware auto selection by MAC */\r
wl_mac_list_ctrl_t nv_by_mac; /* NVRAM auto selection by MAC */\r
wl_chip_nv_path_list_ctrl_t nv_by_chip; /* NVRAM auto selection by chip */\r
- uint band; /* Band, b:2.4G only, otherwise for auto */\r
+ conf_country_list_t country_list; /* Country list */\r
+ int band; /* Band, b:2.4G only, otherwise for auto */\r
int mimo_bw_cap; /* Bandwidth, 0:HT20ALL, 1: HT40ALL, 2:HT20IN2G_HT40PIN5G */\r
wl_country_t cspec; /* Country */\r
wl_channel_list_t channels; /* Support channels */\r
int roam_delta[2]; /* Roaming candidate qualification delta */\r
int fullroamperiod; /* Full Roaming period */\r
uint keep_alive_period; /* The perioid in ms to send keep alive packet */\r
- uint force_wme_ac;\r
+ int force_wme_ac;\r
wme_param_t wme; /* WME parameters */\r
int stbc; /* STBC for Tx/Rx */\r
int phy_oclscdenable; /* phy_oclscdenable */\r
#ifdef PKT_FILTER_SUPPORT\r
conf_pkt_filter_add_t pkt_filter_add; /* Packet filter add */\r
conf_pkt_filter_del_t pkt_filter_del; /* Packet filter del */
+ bool pkt_filter_magic;\r
#endif\r
int srl; /* short retry limit */\r
int lrl; /* long retry limit */\r
int txbf;\r
int lpc;\r
int disable_proptx;\r
- uint32 bus_txglom; /* bus:txglom */\r
+ int bus_txglom; /* bus:txglom */\r
int use_rxchain;\r
bool bus_rxglom; /* bus:rxglom */\r
- int txglomsize;\r
- uint32 ampdu_ba_wsize;\r
+ uint txglomsize;\r
+ int ampdu_ba_wsize;\r
int dpc_cpucore;\r
int frameburst;\r
bool deepsleep;\r
int pm;\r
uint8 tcpack_sup_mode;\r
+ int dhd_poll;\r
+ uint deferred_tx_len;\r
+ int pktprio8021x;\r
+ bool txctl_tmo_fix;\r
+ bool swtxglom; /* SW TXGLOM */\r
+	bool txglom_ext; /* Only for 43362/4330/4334/43340/43341/43241 */
+	/* txglom_bucket_size:
+	 * 43362/4330: 1680
+	 * 4334/43340/43341/43241: 1684
+	 */
+	int txglom_bucket_size;
+ int tx_max_offset;\r
+	bool tx_in_rx; // Skip tx before rx, so that more frames can be glommed into one tx
+ int rsdb_mode;\r
+ bool txglom_mode;\r
} dhd_conf_t;\r
\r
#ifdef BCMSDIO\r
int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac);\r
void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path);\r
void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path);\r
-#if defined(HW_OOB)\r
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)\r
void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip);\r
#endif\r
#endif\r
-void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path, char *nv_path);\r
+void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);\r
void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path);\r
void dhd_conf_set_conf_path_by_nv_path(dhd_pub_t *dhd, char *conf_path, char *nv_path);\r
-int dhd_conf_set_band(dhd_pub_t *dhd);\r
+#ifdef CONFIG_PATH_AUTO_SELECT\r
+void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path);\r
+#endif\r
+int dhd_conf_set_fw_int_cmd(dhd_pub_t *dhd, char *name, uint cmd, int val, int def, bool down);\r
+int dhd_conf_set_fw_string_cmd(dhd_pub_t *dhd, char *cmd, int val, int def, bool down);\r
uint dhd_conf_get_band(dhd_pub_t *dhd);\r
int dhd_conf_set_country(dhd_pub_t *dhd);\r
int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);\r
+int dhd_conf_get_country_from_config(dhd_pub_t *dhd, wl_country_t *cspec);\r
int dhd_conf_fix_country(dhd_pub_t *dhd);\r
bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);\r
int dhd_conf_set_roam(dhd_pub_t *dhd);\r
-void dhd_conf_set_mimo_bw_cap(dhd_pub_t *dhd);\r
-void dhd_conf_force_wme(dhd_pub_t *dhd);\r
void dhd_conf_get_wme(dhd_pub_t *dhd, edcf_acparam_t *acp);\r
void dhd_conf_set_wme(dhd_pub_t *dhd);\r
-void dhd_conf_set_stbc(dhd_pub_t *dhd);\r
-void dhd_conf_set_phyoclscdenable(dhd_pub_t *dhd);\r
void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);\r
bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);\r
void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd);\r
-void dhd_conf_set_srl(dhd_pub_t *dhd);\r
-void dhd_conf_set_lrl(dhd_pub_t *dhd);\r
-void dhd_conf_set_bus_txglom(dhd_pub_t *dhd);\r
-void dhd_conf_set_ampdu_ba_wsize(dhd_pub_t *dhd);\r
-void dhd_conf_set_spect(dhd_pub_t *dhd);\r
-void dhd_conf_set_txbf(dhd_pub_t *dhd);\r
-void dhd_conf_set_frameburst(dhd_pub_t *dhd);\r
-void dhd_conf_set_lpc(dhd_pub_t *dhd);\r
void dhd_conf_set_disable_proptx(dhd_pub_t *dhd);\r
int dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path);\r
int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev);\r
uint dhd_conf_get_chip(void *context);\r
uint dhd_conf_get_chiprev(void *context);\r
+void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable);\r
int dhd_conf_get_pm(dhd_pub_t *dhd);\r
int dhd_conf_get_tcpack_sup_mode(dhd_pub_t *dhd);\r
int dhd_conf_preinit(dhd_pub_t *dhd);\r
/*
-* Customer code to add GPIO control during WLAN start/stop
-* $Copyright Open Broadcom Corporation$
-*
-* $Id: dhd_custom_gpio.c 493822 2014-07-29 13:20:26Z $
-*/
+ * Customer code to add GPIO control during WLAN start/stop
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_gpio.c 591129 2015-10-07 05:22:14Z $
+ */
#include <typedefs.h>
#include <linuxver.h>
#include <dhd_linux.h>
#include <wlioctl.h>
+#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
+#endif
#define WL_ERROR(x) printf x
#define WL_TRACE(x)
-#if defined(CUSTOMER_HW2)
-
-#if defined(PLATFORM_MPS)
-int __attribute__ ((weak)) wifi_get_fw_nv_path(char *fw, char *nv) { return 0;};
-#endif
-
-#endif
-
#if defined(OOB_INTR_ONLY)
#if defined(BCMLXSDMMC)
extern int sdioh_mmc_irq(int irq);
#endif /* (BCMLXSDMMC) */
-#if defined(CUSTOMER_HW3) || defined(PLATFORM_MPS)
-#include <mach/gpio.h>
-#endif
-
/* Customer specific Host GPIO defintion */
static int dhd_oob_gpio_num = -1;
{
int host_oob_irq = 0;
-#if defined(CUSTOMER_HW2) && !defined(PLATFORM_MPS)
+#if defined(CUSTOMER_HW2)
host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
#else
WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
__FUNCTION__, dhd_oob_gpio_num));
-#if defined CUSTOMER_HW3 || defined(PLATFORM_MPS)
- gpio_request(dhd_oob_gpio_num, "oob irq");
- host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
- gpio_direction_input(dhd_oob_gpio_num);
-#endif /* defined CUSTOMER_HW3 || defined(PLATFORM_MPS) */
#endif
return (host_oob_irq);
return -EINVAL;
/* Customer access to MAC address stored outside of DHD driver */
-#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10)) && (LINUX_VERSION_CODE >= \
- KERNEL_VERSION(2, 6, 35))
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
ret = wifi_platform_get_mac_addr(adapter, buf);
#endif
}
#endif /* GET_CUSTOM_MAC_ENABLE */
+#if !defined(WL_WIRELESS_EXT)
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */
+ char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */
+	int32 custom_locale_rev; /* Custom locale revision, default -1 */
+};
+#endif /* WL_WIRELESS_EXT */
+
/* Customized Locale table : OPTIONAL feature */
const struct cntry_locales_custom translate_custom_table[] = {
/* Table should be filled out based on custom platform regulatory requirement */
{"TR", "TR", 0},
{"NO", "NO", 0},
#endif /* EXMAPLE_TABLE */
-#if defined(CUSTOMER_HW2) && !defined(CUSTOMER_HW5)
+#if defined(CUSTOMER_HW2)
#if defined(BCM4335_CHIP)
{"", "XZ", 11}, /* Universal if Country code is unknown or empty */
#endif
{"RU", "RU", 1},
{"US", "US", 5}
#endif
-
-#elif defined(CUSTOMER_HW5)
- {"", "XZ", 11},
- {"AE", "AE", 212},
- {"AG", "AG", 2},
- {"AI", "AI", 2},
- {"AL", "AL", 2},
- {"AN", "AN", 3},
- {"AR", "AR", 212},
- {"AS", "AS", 15},
- {"AT", "AT", 4},
- {"AU", "AU", 212},
- {"AW", "AW", 2},
- {"AZ", "AZ", 2},
- {"BA", "BA", 2},
- {"BD", "BD", 2},
- {"BE", "BE", 4},
- {"BG", "BG", 4},
- {"BH", "BH", 4},
- {"BM", "BM", 15},
- {"BN", "BN", 4},
- {"BR", "BR", 212},
- {"BS", "BS", 2},
- {"BY", "BY", 3},
- {"BW", "BW", 1},
- {"CA", "CA", 212},
- {"CH", "CH", 212},
- {"CL", "CL", 212},
- {"CN", "CN", 212},
- {"CO", "CO", 212},
- {"CR", "CR", 21},
- {"CY", "CY", 212},
- {"CZ", "CZ", 212},
- {"DE", "DE", 212},
- {"DK", "DK", 4},
- {"DZ", "DZ", 1},
- {"EC", "EC", 23},
- {"EE", "EE", 4},
- {"EG", "EG", 212},
- {"ES", "ES", 212},
- {"ET", "ET", 2},
- {"FI", "FI", 4},
- {"FR", "FR", 212},
- {"GB", "GB", 212},
- {"GD", "GD", 2},
- {"GF", "GF", 2},
- {"GP", "GP", 2},
- {"GR", "GR", 212},
- {"GT", "GT", 0},
- {"GU", "GU", 17},
- {"HK", "HK", 212},
- {"HR", "HR", 4},
- {"HU", "HU", 4},
- {"IN", "IN", 212},
- {"ID", "ID", 212},
- {"IE", "IE", 5},
- {"IL", "IL", 7},
- {"IN", "IN", 212},
- {"IS", "IS", 4},
- {"IT", "IT", 212},
- {"JO", "JO", 3},
- {"JP", "JP", 212},
- {"KH", "KH", 4},
- {"KI", "KI", 1},
- {"KR", "KR", 212},
- {"KW", "KW", 5},
- {"KY", "KY", 4},
- {"KZ", "KZ", 212},
- {"LA", "LA", 4},
- {"LB", "LB", 6},
- {"LI", "LI", 4},
- {"LK", "LK", 3},
- {"LS", "LS", 2},
- {"LT", "LT", 4},
- {"LR", "LR", 2},
- {"LU", "LU", 3},
- {"LV", "LV", 4},
- {"MA", "MA", 2},
- {"MC", "MC", 1},
- {"MD", "MD", 2},
- {"ME", "ME", 2},
- {"MK", "MK", 2},
- {"MN", "MN", 0},
- {"MO", "MO", 2},
- {"MR", "MR", 2},
- {"MT", "MT", 4},
- {"MQ", "MQ", 2},
- {"MU", "MU", 2},
- {"MV", "MV", 3},
- {"MX", "MX", 212},
- {"MY", "MY", 212},
- {"NI", "NI", 0},
- {"NL", "NL", 212},
- {"NO", "NO", 4},
- {"NP", "NP", 3},
- {"NZ", "NZ", 9},
- {"OM", "OM", 4},
- {"PA", "PA", 17},
- {"PE", "PE", 212},
- {"PG", "PG", 2},
- {"PH", "PH", 212},
- {"PL", "PL", 212},
- {"PR", "PR", 25},
- {"PT", "PT", 212},
- {"PY", "PY", 4},
- {"RE", "RE", 2},
- {"RO", "RO", 212},
- {"RS", "RS", 2},
- {"RU", "RU", 212},
- {"SA", "SA", 212},
- {"SE", "SE", 212},
- {"SG", "SG", 212},
- {"SI", "SI", 4},
- {"SK", "SK", 212},
- {"SN", "SN", 2},
- {"SV", "SV", 25},
- {"TH", "TH", 212},
- {"TR", "TR", 212},
- {"TT", "TT", 5},
- {"TW", "TW", 212},
- {"UA", "UA", 212},
- {"UG", "UG", 2},
- {"US", "US", 212},
- {"UY", "UY", 5},
- {"VA", "VA", 2},
- {"VE", "VE", 3},
- {"VG", "VG", 2},
- {"VI", "VI", 18},
- {"VN", "VN", 4},
- {"YT", "YT", 2},
- {"ZA", "ZA", 212},
- {"ZM", "ZM", 2},
- {"XT", "XT", 212},
- {"XZ", "XZ", 11},
- {"XV", "XV", 17},
- {"Q1", "Q1", 77},
-#endif /* CUSTOMER_HW2 and CUSTOMER_HW5 */
+#endif
};
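The rows above are consumed as simple ISO-code to (locale, revision) translations. A minimal, hypothetical sketch of that interpretation follows; it is for illustration only and is not the driver's actual lookup path, which goes through the platform get_country_code callback, and it assumes ARRAY_SIZE and strcmp are available in this file.

/* Hypothetical helper, illustration only. */
static const struct cntry_locales_custom *
lookup_custom_locale(const char *iso_code)
{
	int i;
	for (i = 0; i < (int)ARRAY_SIZE(translate_custom_table); i++) {
		if (strcmp(iso_code, translate_custom_table[i].iso_abbrev) == 0)
			return &translate_custom_table[i]; /* e.g. "RU" -> locale "RU", rev 1 on CUSTOMER_HW2 builds */
	}
	return NULL; /* no custom mapping: caller keeps the requested code */
}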
* input : ISO 3166-1 country abbreviation
* output: customized cspec
*/
+#ifdef CUSTOM_COUNTRY_CODE
+void get_customized_country_code(void *adapter, char *country_iso_code,
+ wl_country_t *cspec, u32 flags)
+#else
void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
+#endif /* CUSTOM_COUNTRY_CODE */
{
-#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+#if (defined(CUSTOMER_HW) || defined(CUSTOMER_HW2)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
struct cntry_locales_custom *cloc_ptr;
if (!cspec)
return;
-
+#ifdef CUSTOM_COUNTRY_CODE
+ cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code,
+ flags);
+#else
cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code);
+#endif /* CUSTOM_COUNTRY_CODE */
if (cloc_ptr) {
strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
cspec->rev = cloc_ptr->custom_locale_rev;
/*
* Debug/trace/assert driver definitions for Dongle Host Driver.
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_dbg.h 491225 2014-07-15 11:58:29Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_dbg.h 598059 2015-11-07 07:31:52Z $
*/
#ifndef _dhd_dbg_
#define USE_NET_RATELIMIT 1
#if defined(DHD_DEBUG)
-
-#define DHD_ERROR(args) do {if ((dhd_msg_level & DHD_ERROR_VAL) && USE_NET_RATELIMIT) \
- printf args;} while (0)
+#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_print(const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
+#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
+#ifdef DHD_LOG_DUMP
+#define DHD_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ printf args; \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
-
+#ifdef DHD_LOG_DUMP
+#define DHD_MSGTRACE_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ printf args; \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
+#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
+#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
+#define DHD_IOV_INFO(args) do {if (dhd_msg_level & DHD_IOV_INFO_VAL) printf args;} while (0)
+
+#ifdef DHD_LOG_DUMP
+#define DHD_ERROR_EX(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#endif /* DHD_LOG_DUMP */
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
+#else
#define DHD_TRACE_HW4 DHD_TRACE
#define DHD_INFO_HW4 DHD_INFO
+#endif /* CUSTOMER_HW4_DEBUG */
#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL)
#define DHD_NOCHECKDIED_ON() (dhd_msg_level & DHD_NOCHECKDIED_VAL)
#define DHD_PNO_ON() (dhd_msg_level & DHD_PNO_VAL)
+#define DHD_FWLOG_ON() (dhd_msg_level & DHD_FWLOG_VAL)
+#define DHD_IOV_INFO_ON() (dhd_msg_level & DHD_IOV_INFO_VAL)
#else /* defined(BCMDBG) || defined(DHD_DEBUG) */
-#define DHD_ERROR(args) do {if (USE_NET_RATELIMIT) printf args;} while (0)
+#define DHD_ERROR(args) do {printf args;} while (0)
#define DHD_TRACE(args)
#define DHD_INFO(args)
#define DHD_DATA(args)
#define DHD_ARPOE(args)
#define DHD_REORDER(args)
#define DHD_PNO(args)
-
+#define DHD_MSGTRACE_LOG(args)
+#define DHD_FWLOG(args)
+#define DHD_IOV_INFO(args)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
+#else
#define DHD_TRACE_HW4 DHD_TRACE
#define DHD_INFO_HW4 DHD_INFO
+#endif /* CUSTOMER_HW4_DEBUG */
#define DHD_ERROR_ON() 0
#define DHD_TRACE_ON() 0
#define DHD_REORDER_ON() 0
#define DHD_NOCHECKDIED_ON() 0
#define DHD_PNO_ON() 0
+#define DHD_FWLOG_ON() 0
+#define DHD_IOV_INFO_ON() 0
#endif
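Because every macro above expands its argument list directly into a printf (or dhd_log_dump_print) call, callers must wrap the whole argument list in an extra pair of parentheses. A minimal usage sketch, with a hypothetical error value:

int err = -1; /* hypothetical failure code from a lower layer */
DHD_ERROR(("%s: iovar failed, err %d\n", __FUNCTION__, err));
DHD_TRACE(("%s: enter\n", __FUNCTION__));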
/*
- * Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
- * $Copyright Open Broadcom Corporation$
+ * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
*
- * $Id: dhd_flowrings.c jaganlv $
+ * Flow rings are transmit traffic (=propagating towards antenna) related entities
+ *
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_flowring.c 591285 2015-10-07 11:56:29Z $
*/
+
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>
+
+static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);
+
+static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
+ uint8 prio, char *sa, char *da);
+
static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
uint8 prio, char *sa, char *da);
#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
+#ifdef DHD_LOSSLESS_ROAMING
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
+#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+#endif
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
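As a worked reading of the two maps: under prio2ac, 802.1D priority 5 lands in access category 2 (video), while prio2tid leaves every priority on its own TID; the DHD_LOSSLESS_ROAMING variant only differs for priority 7, remapping it to 7 instead of 3. The helper below is an illustrative sketch, not part of the driver.

/* Illustrative only: map an 802.1D priority to its access category. */
static INLINE uint8 prio_to_ac(uint8 prio)
{
	return prio2ac[prio & 0x7]; /* e.g. prio_to_ac(5) == 2 */
}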
+/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
+static INLINE int
+dhd_flow_queue_throttle(flow_queue_t *queue)
+{
+ return DHD_FLOW_QUEUE_FULL(queue);
+}
+
int BCMFASTPATH
dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt)
{
return BCME_NORESOURCE;
}
+/** Returns flow ring given a flowid */
+flow_ring_node_t *
+dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
+{
+ flow_ring_node_t * flow_ring_node;
+
+ ASSERT(dhdp != (dhd_pub_t*)NULL);
+ ASSERT(flowid < dhdp->num_flow_rings);
+
+ flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
+
+ ASSERT(flow_ring_node->flowid == flowid);
+ return flow_ring_node;
+}
+
+/** Returns 'backup' queue given a flowid */
+flow_queue_t *
+dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
+{
+ flow_ring_node_t * flow_ring_node;
+
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ return &flow_ring_node->queue;
+}
+
/* Flow ring's queue management functions */
-void /* Initialize a flow ring's queue */
+/** Initialize a flow ring's queue, called on driver initialization. */
+void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
ASSERT((queue != NULL) && (max > 0));
dll_init(&queue->list);
queue->head = queue->tail = NULL;
queue->len = 0;
- queue->max = max - 1;
+
+ /* Set queue's threshold and queue's parent cummulative length counter */
+ ASSERT(max > 1);
+ DHD_FLOW_QUEUE_SET_MAX(queue, max);
+ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
+ DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
+
queue->failures = 0U;
queue->cb = &dhd_flow_queue_overflow;
}
-void /* Register an enqueue overflow callback handler */
+/** Register an enqueue overflow callback handler */
+void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
ASSERT(queue != NULL);
queue->cb = cb;
}
-
-int BCMFASTPATH /* Enqueue a packet in a flow ring's queue */
+/**
+ * Enqueue an 802.3 packet at the back of a flow ring's queue. From there it is later
+ * transferred to the flow ring itself.
+ */
+int BCMFASTPATH
dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
int ret = BCME_OK;
ASSERT(queue != NULL);
- if (queue->len >= queue->max) {
+ if (dhd_flow_queue_throttle(queue)) {
queue->failures++;
ret = (*queue->cb)(queue, pkt);
goto done;
queue->tail = pkt; /* at tail */
queue->len++;
+ /* increment parent's cummulative length */
+ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
done:
return ret;
}
-void * BCMFASTPATH /* Dequeue a packet from a flow ring's queue, from head */
+/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
+void * BCMFASTPATH
dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue)
{
void * pkt;
queue->tail = NULL;
queue->len--;
+ /* decrement parent's cummulative length */
+ DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
 FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */
return pkt;
}
-void BCMFASTPATH /* Reinsert a dequeued packet back at the head */
+/** Reinsert a dequeued 802.3 packet back at the head */
+void BCMFASTPATH
dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
if (queue->head == NULL) {
FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
queue->head = pkt;
queue->len++;
+ /* increment parent's cummulative length */
+ DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
}
+/** Fetch the backup queue for a flowring, and assign flow control thresholds */
+void
+dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+ int queue_budget, int cumm_threshold, void *cumm_ctr)
+{
+ flow_queue_t * queue;
-/* Init Flow Ring specific data structures */
+ ASSERT(dhdp != (dhd_pub_t*)NULL);
+ ASSERT(queue_budget > 1);
+ ASSERT(cumm_threshold > 1);
+ ASSERT(cumm_ctr != (void*)NULL);
+
+ queue = dhd_flow_queue(dhdp, flowid);
+
+ DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
+
+ /* Set the queue's parent threshold and cummulative counter */
+ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+ DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+}
+
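A hedged sketch of how a caller could use the helper above; the 512-packet budget and 2048-packet cumulative threshold are illustrative values, not ones taken from this driver.

/* Cap this ring's backup queue at 512 packets, and throttle earlier if the
 * device-wide cumulative counter crosses 2048 queued packets. */
dhd_flow_ring_config_thresholds(dhdp, flowid, 512, 2048, &dhdp->cumm_ctr);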
+/** Initializes data structures of multiple flow rings */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
{
uint32 idx;
uint32 flow_ring_table_sz;
- uint32 if_flow_lkup_sz;
+ uint32 if_flow_lkup_sz = 0;
void * flowid_allocator;
- flow_ring_table_t *flow_ring_table;
+ flow_ring_table_t *flow_ring_table = NULL;
if_flow_lkup_t *if_flow_lkup = NULL;
-#ifdef PCIE_TX_DEFERRAL
- uint32 count;
-#endif
void *lock = NULL;
+ void *list_lock = NULL;
unsigned long flags;
DHD_INFO(("%s\n", __FUNCTION__));
- /* Construct a 16bit flow1d allocator */
+ /* Construct a 16bit flowid allocator */
flowid_allocator = id16_map_init(dhdp->osh,
num_flow_rings - FLOW_RING_COMMON, FLOWID_RESERVED);
if (flowid_allocator == NULL) {
/* Allocate a flow ring table, comprising of requested number of rings */
flow_ring_table_sz = (num_flow_rings * sizeof(flow_ring_node_t));
- flow_ring_table = (flow_ring_table_t *)MALLOC(dhdp->osh, flow_ring_table_sz);
+ flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
if (flow_ring_table == NULL) {
DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
goto fail;
}
/* Initialize flow ring table state */
+ DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
bzero((uchar *)flow_ring_table, flow_ring_table_sz);
for (idx = 0; idx < num_flow_rings; idx++) {
flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
FLOW_RING_QUEUE_THRESHOLD);
}
- /* Allocate per interface hash table */
+ /* Allocate per interface hash table (for fast lookup from interface to flow ring) */
if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
}
/* Initialize per interface hash table */
- bzero((uchar *)if_flow_lkup, if_flow_lkup_sz);
for (idx = 0; idx < DHD_MAX_IFS; idx++) {
int hash_ix;
if_flow_lkup[idx].status = 0;
if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
}
-#ifdef PCIE_TX_DEFERRAL
- count = BITS_TO_LONGS(num_flow_rings);
- dhdp->bus->delete_flow_map = kzalloc(count, GFP_ATOMIC);
- if (!dhdp->bus->delete_flow_map) {
- DHD_ERROR(("%s: delete_flow_map alloc failure\n", __FUNCTION__));
- goto fail;
- }
-#endif
-
lock = dhd_os_spin_lock_init(dhdp->osh);
if (lock == NULL)
goto fail;
+ list_lock = dhd_os_spin_lock_init(dhdp->osh);
+ if (list_lock == NULL)
+ goto lock_fail;
+
dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
-
+#ifdef DHD_LOSSLESS_ROAMING
+ dhdp->dequeue_prec_map = ALLPRIO;
+#endif
/* Now populate into dhd pub */
DHD_FLOWID_LOCK(lock, flags);
dhdp->num_flow_rings = num_flow_rings;
dhdp->flow_ring_table = (void *)flow_ring_table;
dhdp->if_flow_lkup = (void *)if_flow_lkup;
dhdp->flowid_lock = lock;
+ dhdp->flow_rings_inited = TRUE;
+ dhdp->flowring_list_lock = list_lock;
DHD_FLOWID_UNLOCK(lock, flags);
DHD_INFO(("%s done\n", __FUNCTION__));
return BCME_OK;
-fail:
+lock_fail:
+ /* deinit the spinlock */
+ dhd_os_spin_lock_deinit(dhdp->osh, lock);
-#ifdef PCIE_TX_DEFERRAL
- if (dhdp->bus->delete_flow_map)
- kfree(dhdp->bus->delete_flow_map);
-#endif
+fail:
/* Destruct the per interface flow lkup table */
- if (dhdp->if_flow_lkup != NULL) {
+ if (if_flow_lkup != NULL) {
DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
}
if (flow_ring_table != NULL) {
return BCME_NOMEM;
}
-/* Deinit Flow Ring specific data structures */
+/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
uint16 idx;
DHD_INFO(("dhd_flow_rings_deinit\n"));
+ if (!(dhdp->flow_rings_inited)) {
+ DHD_ERROR(("dhd_flow_rings not initialized!\n"));
+ return;
+ }
+
if (dhdp->flow_ring_table != NULL) {
ASSERT(dhdp->num_flow_rings > 0);
if (flow_ring_table[idx].active) {
dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
}
- ASSERT(flow_queue_empty(&flow_ring_table[idx].queue));
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));
/* Deinit flow ring queue locks before destroying flow ring table */
dhd_os_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
flow_ring_table[idx].lock = NULL;
+
}
/* Destruct the flow ring table */
/* Destruct the per interface flow lkup table */
if (dhdp->if_flow_lkup != NULL) {
if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
- bzero(dhdp->if_flow_lkup, sizeof(if_flow_lkup_sz));
+ bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
dhdp->if_flow_lkup = NULL;
}
-#ifdef PCIE_TX_DEFERRAL
- if (dhdp->bus->delete_flow_map)
- kfree(dhdp->bus->delete_flow_map);
-#endif
-
/* Destruct the flowid allocator */
if (dhdp->flowid_allocator != NULL)
dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
dhdp->num_flow_rings = 0U;
+ bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
+
lock = dhdp->flowid_lock;
dhdp->flowid_lock = NULL;
DHD_FLOWID_UNLOCK(lock, flags);
dhd_os_spin_lock_deinit(dhdp->osh, lock);
+
+ dhd_os_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
+ dhdp->flowring_list_lock = NULL;
+
+ ASSERT(dhdp->if_flow_lkup == NULL);
+ ASSERT(dhdp->flowid_allocator == NULL);
+ ASSERT(dhdp->flow_ring_table == NULL);
+ dhdp->flow_rings_inited = FALSE;
}
+/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
}
#endif /* WLTDLS */
-/* For a given interface, search the hash table for a matching flow */
-uint16
+/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
+static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
int hash;
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+ ASSERT(if_flow_lkup);
+
+ if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
is_tdls_destination(dhdp, da)) {
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
return cur->flowid;
}
-
} else {
if (ETHER_ISMULTI(da)) {
}
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ DHD_INFO(("%s: cannot find flowid\n", __FUNCTION__));
return FLOWID_INVALID;
-}
+} /* dhd_flowid_find */
-/* Allocate Flow ID */
+/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+
+ if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
/* For STA non TDLS dest we allocate entry based on prio only */
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count &&
DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
return fl_hash_node->flowid;
-}
+} /* dhd_flowid_alloc */
-/* Get flow ring ID, if not present try to create one */
+/** Get flow ring ID, if not present try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
uint8 prio, char *sa, char *da, uint16 *flowid)
flow_ring_node_t *flow_ring_node;
flow_ring_table_t *flow_ring_table;
unsigned long flags;
+ int ret;
DHD_INFO(("%s\n", __FUNCTION__));
- if (!dhdp->flow_ring_table)
+ if (!dhdp->flow_ring_table) {
return BCME_ERROR;
+ }
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
if (!if_flow_lkup[ifindex].status)
return BCME_ERROR;
+
id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
/* register this flowid in dhd_pub */
dhd_add_flowid(dhdp, ifindex, prio, da, id);
- }
- ASSERT(id < dhdp->num_flow_rings);
+ ASSERT(id < dhdp->num_flow_rings);
+
+ flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
- flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- if (flow_ring_node->active) {
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ /* Init Flow info */
+ memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
+ memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
+ flow_ring_node->flow_info.tid = prio;
+ flow_ring_node->flow_info.ifindex = ifindex;
+ flow_ring_node->active = TRUE;
+ flow_ring_node->status = FLOW_RING_STATUS_PENDING;
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+
+ /* Create and inform device about the new flow */
+ if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
+ != BCME_OK) {
+ DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
+ return BCME_ERROR;
+ }
+
*flowid = id;
return BCME_OK;
- }
- /* Init Flow info */
- memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
- memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
- flow_ring_node->flow_info.tid = prio;
- flow_ring_node->flow_info.ifindex = ifindex;
- flow_ring_node->active = TRUE;
- flow_ring_node->status = FLOW_RING_STATUS_PENDING;
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
- dll_prepend(&dhdp->bus->const_flowring, &flow_ring_node->list);
- DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
+ } else {
+ /* if the Flow id was found in the hash */
+ ASSERT(id < dhdp->num_flow_rings);
+
+ flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ /*
+ * If the flow_ring_node is in the OPEN or PENDING state, we can return
+ * the flow id to the caller. FLOW_RING_STATUS_PENDING means the ring
+ * creation is still in progress, so packets should simply be queued.
+ *
+ * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
+ * FLOW_RING_STATUS_CLOSED, we should return an error.
+ * Note that a flow ring being deleted is marked FLOW_RING_STATUS_DELETE_PENDING;
+ * before the dongle responds and the ring is marked FLOW_RING_STATUS_CLOSED,
+ * tx packets can still arrive, and those packets should be dropped.
+ * The decision to return OK should NOT be based on the 'active' flag, because
+ * 'active' is set TRUE when a flow_ring_node is allocated and FALSE only when
+ * the flow ring is removed, so it does not reflect the true state of the
+ * flow ring.
+ */
+ if (flow_ring_node->status == FLOW_RING_STATUS_OPEN ||
+ flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
+ *flowid = id;
+ ret = BCME_OK;
+ } else {
+ *flowid = FLOWID_INVALID;
+ ret = BCME_ERROR;
+ }
- /* Create and inform device about the new flow */
- if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
- != BCME_OK) {
- DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
- return BCME_ERROR;
- }
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ return ret;
- *flowid = id;
- return BCME_OK;
-}
+ } /* Flow Id found in the hash */
+} /* dhd_flowid_lookup */
-/* Update flowid information on the packet */
+/**
+ * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
+ * select the flowring to send the packet to the dongle.
+ */
int BCMFASTPATH
dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
struct ether_header *eh = (struct ether_header *)pktdata;
uint16 flowid;
- if (dhd_bus_is_txmode_push(dhdp->bus))
- return BCME_OK;
-
ASSERT(ifindex < DHD_MAX_IFS);
+
if (ifindex >= DHD_MAX_IFS) {
return BCME_BADARG;
}
DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
return BCME_ERROR;
}
+
if (dhd_flowid_lookup(dhdp, ifindex, prio, eh->ether_shost, eh->ether_dhost,
&flowid) != BCME_OK) {
return BCME_ERROR;
DHD_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));
/* Tag the packet with flowid */
- DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), flowid);
+ DHD_PKT_SET_FLOWID(pktbuf, flowid);
return BCME_OK;
}
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
__FUNCTION__, flowid));
-}
+} /* dhd_flowid_free */
-
-/* Delete all Flow rings assocaited with the given Interface */
+/**
+ * Delete all Flow rings associated with the given interface. Is called when e.g. the dongle
+ * indicates that a wireless link has gone down.
+ */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
for (id = 0; id < dhdp->num_flow_rings; id++) {
if (flow_ring_table[id].active &&
- (flow_ring_table[id].flow_info.ifindex == ifindex) &&
- (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
- DHD_INFO(("%s: deleting flowid %d\n",
- __FUNCTION__, flow_ring_table[id].flowid));
+ (flow_ring_table[id].flow_info.ifindex == ifindex)) {
dhd_bus_flow_ring_delete_request(dhdp->bus,
(void *) &flow_ring_table[id]);
}
}
}
-/* Delete flow/s for given peer address */
+/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
for (id = 0; id < dhdp->num_flow_rings; id++) {
if (flow_ring_table[id].active &&
- (flow_ring_table[id].flow_info.ifindex == ifindex) &&
- (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
- (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
+ (flow_ring_table[id].flow_info.ifindex == ifindex) &&
+ (!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
+ (flow_ring_table[id].status != FLOW_RING_STATUS_DELETE_PENDING)) {
DHD_INFO(("%s: deleting flowid %d\n",
- __FUNCTION__, flow_ring_table[id].flowid));
+ __FUNCTION__, flow_ring_table[id].flowid));
dhd_bus_flow_ring_delete_request(dhdp->bus,
- (void *) &flow_ring_table[id]);
+ (void *) &flow_ring_table[id]);
}
}
}
-/* Handle Interface ADD, DEL operations */
+/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void
dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
uint8 op, uint8 role)
if_flow_lkup[ifindex].role = role;
- if (!(DHD_IF_ROLE_STA(role))) {
+ if (role != WLC_E_IF_ROLE_STA) {
if_flow_lkup[ifindex].status = TRUE;
DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d \n",
__FUNCTION__, ifindex, role));
DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}
-/* Handle a STA interface link status update */
+/** Handles a STA 'link' indication from the dongle */
int
dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
{
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if (DHD_IF_ROLE_STA(if_flow_lkup[ifindex].role)) {
+ if (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) {
if (status)
if_flow_lkup[ifindex].status = TRUE;
else
return BCME_OK;
}
-/* Update flow priority mapping */
+
+/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
uint16 flowid;
flow_ring_node_t *flow_ring_node;
- if (map > DHD_FLOW_PRIO_TID_MAP)
+ if (map > DHD_FLOW_PRIO_LLR_MAP)
return BCME_BADOPTION;
/* Check if we need to change prio map */
if (flow_ring_node->active)
return BCME_EPERM;
}
- /* Infor firmware about new mapping type */
+
+ /* Inform firmware about new mapping type */
if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
return BCME_ERROR;
return BCME_OK;
}
-/* Set/Get flwo ring priority map */
+/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
uint8 iovbuf[24];
/*
- * Header file describing the flow rings DHD interfaces.
+ * @file Header file describing the flow rings DHD interfaces.
*
- * Provides type definitions and function prototypes used to create, delete and manage
+ * Flow rings are transmit traffic (=propagating towards antenna) related entities.
*
- * flow rings at high level
+ * Provides type definitions and function prototypes used to create, delete and manage flow rings at
+ * high level.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_flowrings.h jaganlv $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_flowring.h 591285 2015-10-07 11:56:29Z $
*/
+
/****************
* Common types *
*/
/* Max pkts held in a flow ring's backup queue */
#define FLOW_RING_QUEUE_THRESHOLD (2048)
-/* Number of H2D common rings : PCIE Spec Rev? */
-#define FLOW_RING_COMMON 2
+/* Number of H2D common rings */
+#define FLOW_RING_COMMON BCMPCIE_H2D_COMMON_MSGRINGS
#define FLOWID_INVALID (ID16_INVALID)
#define FLOWID_RESERVED (FLOW_RING_COMMON)
#define FLOW_RING_STATUS_CLOSED 2
#define FLOW_RING_STATUS_DELETE_PENDING 3
#define FLOW_RING_STATUS_FLUSH_PENDING 4
+#define FLOW_RING_STATUS_STA_FREEING 5
#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
#define DHD_FLOW_PRIO_AC_MAP 0
#define DHD_FLOW_PRIO_TID_MAP 1
-
+#define DHD_FLOW_PRIO_LLR_MAP 2
/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
typedef struct dhd_pkttag_fr {
uint16 flowid;
+ uint16 ifid;
int dataoff;
+ dmaaddr_t physaddr;
+ uint32 pa_len;
+
} dhd_pkttag_fr_t;
#define DHD_PKTTAG_SET_FLOWID(tag, flow) ((tag)->flowid = (uint16)(flow))
+#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx))
#define DHD_PKTTAG_SET_DATAOFF(tag, offset) ((tag)->dataoff = (int)(offset))
+#define DHD_PKTTAG_SET_PA(tag, pa) ((tag)->physaddr = (pa))
+#define DHD_PKTTAG_SET_PA_LEN(tag, palen) ((tag)->pa_len = (palen))
#define DHD_PKTTAG_FLOWID(tag) ((tag)->flowid)
+#define DHD_PKTTAG_IFID(tag) ((tag)->ifid)
#define DHD_PKTTAG_DATAOFF(tag) ((tag)->dataoff)
+#define DHD_PKTTAG_PA(tag) ((tag)->physaddr)
+#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len)
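A small sketch of how the tag accessors above are typically used on a tx packet; PKTTAG() is assumed to be the OSL accessor for the per-packet tag area, and pkt, flowid and ifindex are hypothetical locals.

dhd_pkttag_fr_t *tag = (dhd_pkttag_fr_t *)PKTTAG(pkt);
DHD_PKTTAG_SET_FLOWID(tag, flowid);  /* flow ring this packet will be queued on */
DHD_PKTTAG_SET_IFID(tag, ifindex);   /* originating host interface */
ASSERT(DHD_PKTTAG_FLOWID(tag) == flowid);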
/* Hashing a MacAddress for lkup into a per interface flow hash table */
#define DHD_FLOWRING_HASH_SIZE 256
#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
+#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
#define DHD_FLOW_RING(dhdp, flowid) \
(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
/* Flow Ring Queue Enqueue overflow callback */
typedef int (*flow_queue_cb_t)(struct flow_queue * queue, void * pkt);
+/**
+ * Each flow ring has an associated (tx flow controlled) queue. 802.3 packets are transferred
+ * between queue and ring. A packet from the host stack is first added to the queue, and in a later
+ * stage transferred to the flow ring. Packets in the queue are dhd owned, whereas packets in the
+ * flow ring are device owned.
+ */
typedef struct flow_queue {
- dll_t list; /* manage a flowring queue in a dll */
+ dll_t list; /* manage a flowring queue in a double linked list */
void * head; /* first packet in the queue */
void * tail; /* last packet in the queue */
uint16 len; /* number of packets in the queue */
- uint16 max; /* maximum number of packets, queue may hold */
+ uint16 max; /* maximum or min budget (used in cumm) */
+ uint32 threshold; /* parent's cummulative length threshold */
+ void * clen_ptr; /* parent's cummulative length counter */
uint32 failures; /* enqueue failures due to queue overflow */
flow_queue_cb_t cb; /* callback invoked on threshold crossing */
} flow_queue_t;
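A minimal sketch of the enqueue path implied by the comment above; locking and the real error policy are elided, and freeing the packet on overflow is illustrative rather than the driver's mandated behaviour.

flow_queue_t *queue = dhd_flow_queue(dhdp, flowid);

if (dhd_flow_queue_enqueue(dhdp, queue, pkt) != BCME_OK) {
	/* Per-queue budget or the parent's cumulative threshold was exceeded;
	 * the registered overflow callback has already run. */
	PKTFREE(dhdp->osh, pkt, TRUE);
}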
-#define flow_queue_len(queue) ((int)(queue)->len)
-#define flow_queue_max(queue) ((int)(queue)->max)
-#define flow_queue_avail(queue) ((int)((queue)->max - (queue)->len))
-#define flow_queue_full(queue) ((queue)->len >= (queue)->max)
-#define flow_queue_empty(queue) ((queue)->len == 0)
+#define DHD_FLOW_QUEUE_LEN(queue) ((int)(queue)->len)
+#define DHD_FLOW_QUEUE_MAX(queue) ((int)(queue)->max)
+#define DHD_FLOW_QUEUE_THRESHOLD(queue) ((int)(queue)->threshold)
+#define DHD_FLOW_QUEUE_EMPTY(queue) ((queue)->len == 0)
+#define DHD_FLOW_QUEUE_FAILURES(queue) ((queue)->failures)
+
+#define DHD_FLOW_QUEUE_AVAIL(queue) ((int)((queue)->max - (queue)->len))
+#define DHD_FLOW_QUEUE_FULL(queue) ((queue)->len >= (queue)->max)
+
+#define DHD_FLOW_QUEUE_OVFL(queue, budget) \
+ (((queue)->len) > budget)
+#define DHD_FLOW_QUEUE_SET_MAX(queue, budget) \
+ ((queue)->max) = ((budget) - 1)
+
+/* Queue's cummulative threshold. */
+#define DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold) \
+ ((queue)->threshold) = ((cumm_threshold) - 1)
+
+/* Queue's cummulative length object accessor. */
+#define DHD_FLOW_QUEUE_CLEN_PTR(queue) ((queue)->clen_ptr)
+
+/* Set a queue's clen_ptr to point to a parent's cumm_ctr_t cummulative length counter */
+#define DHD_FLOW_QUEUE_SET_CLEN(queue, parent_clen_ptr) \
+ ((queue)->clen_ptr) = (void *)(parent_clen_ptr)
+
+/* see wlfc_proto.h for tx status details */
+#define DHD_FLOWRING_MAXSTATUS_MSGS 5
+#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus)
+/** each flow ring is dedicated to a tid/sa/da combination */
typedef struct flow_info {
uint8 tid;
uint8 ifindex;
char da[ETHER_ADDR_LEN];
} flow_info_t;
+/** a flow ring is used for outbound (towards antenna) 802.3 packets */
typedef struct flow_ring_node {
- dll_t list; /* manage a constructed flowring in a dll, must be at first place */
- flow_queue_t queue;
+ dll_t list; /* manage a constructed flowring in a dll, must be at first place */
+ flow_queue_t queue; /* queues packets before they enter the flow ring, flow control */
bool active;
uint8 status;
+ /*
+ * flowid: unique ID of a flow ring, which can either be unicast or broadcast/multicast. For
+ * unicast flow rings, the flow id accelerates ARM 802.3->802.11 header translation.
+ */
uint16 flowid;
flow_info_t flow_info;
void *prot_info;
void *lock; /* lock for flowring access protection */
} flow_ring_node_t;
+
typedef flow_ring_node_t flow_ring_table_t;
typedef struct flow_hash_info {
/* Exported API */
/* Flow ring's queue management functions */
+extern flow_ring_node_t * dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid);
+extern flow_queue_t * dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid);
+
extern void dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max);
extern void dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb);
extern int dhd_flow_queue_enqueue(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
extern void * dhd_flow_queue_dequeue(dhd_pub_t *dhdp, flow_queue_t *queue);
extern void dhd_flow_queue_reinsert(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt);
+extern void dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
+ int queue_budget, int cumm_threshold, void *cumm_ctr);
extern int dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings);
extern void dhd_flow_rings_deinit(dhd_pub_t *dhdp);
-extern uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da);
-
extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
void *pktbuf);
#include <osl.h>
-#include <dngl_stats.h>
-#include <dhd.h>
+#include <dhd_linux.h>
#include <linux/rfkill-wlan.h>
#ifdef CONFIG_MACH_ODROID_4210
host_oob_irq = rockchip_wifi_get_oob_irq();
- printf("host_oob_irq: %d \r\n", host_oob_irq);
+ printk("host_oob_irq: %d\n", host_oob_irq);
return host_oob_irq;
}
uint host_oob_irq_flags = 0;
host_oob_irq_flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE) & IRQF_TRIGGER_MASK;
- printf("host_oob_irq_flags=%d\n", host_oob_irq_flags);
+ printk("host_oob_irq_flags=0x%X\n", host_oob_irq_flags);
return host_oob_irq_flags;
}
int err = 0;
if (on) {
- printf("======== PULL WL_REG_ON HIGH! ========\n");
+ printk("======== PULL WL_REG_ON HIGH! ========\n");
rockchip_wifi_power(1);
} else {
- printf("======== PULL WL_REG_ON LOW! ========\n");
+ printk("======== PULL WL_REG_ON LOW! ========\n");
rockchip_wifi_power(0);
}
int err = 0;
if (present) {
- printf("======== Card detection to detect SDIO card! ========\n");
+ printk("======== Card detection to detect SDIO card! ========\n");
rockchip_wifi_set_carddetect(1);
} else {
- printf("======== Card detection to remove SDIO card! ========\n");
+ printk("======== Card detection to remove SDIO card! ========\n");
rockchip_wifi_set_carddetect(0);
}
{
int err = 0;
- printf("======== %s ========\n", __FUNCTION__);
+ printk("======== %s ========\n", __FUNCTION__);
#ifdef EXAMPLE_GET_MAC
/* EXAMPLE code */
{
}
#endif
+#if !defined(WL_WIRELESS_EXT)
+struct cntry_locales_custom {
+ char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */
+ char custom_locale[WLC_CNTRY_BUF_SZ]; /* Custom firmware locale */
+ int32 custom_locale_rev; /* Custom locale revision, default -1 */
+};
+#endif
+
+static struct cntry_locales_custom brcm_wlan_translate_custom_table[] = {
+ /* Table should be filled out based on custom platform regulatory requirement */
+ {"", "XT", 49}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 0},
+};
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+struct cntry_locales_custom brcm_wlan_translate_nodfs_table[] = {
+ {"", "XT", 50}, /* Universal if Country code is unknown or empty */
+ {"US", "US", 0},
+};
+#endif
+
+static void *bcm_wlan_get_country_code(char *ccode
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ , u32 flags
+#endif
+)
+{
+ struct cntry_locales_custom *locales;
+ int size;
+ int i;
+
+ if (!ccode)
+ return NULL;
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ if (flags & WLAN_PLAT_NODFS_FLAG) {
+ locales = brcm_wlan_translate_nodfs_table;
+ size = ARRAY_SIZE(brcm_wlan_translate_nodfs_table);
+ } else {
+#endif
+ locales = brcm_wlan_translate_custom_table;
+ size = ARRAY_SIZE(brcm_wlan_translate_custom_table);
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ }
+#endif
+
+ for (i = 0; i < size; i++)
+ if (strcmp(ccode, locales[i].iso_abbrev) == 0)
+ return &locales[i];
+ return NULL;
+}
+
int bcm_wlan_set_plat_data(void) {
printf("======== %s ========\n", __FUNCTION__);
dhd_wlan_control.set_power = bcm_wlan_set_power;
#ifdef CONFIG_DHD_USE_STATIC_BUF
dhd_wlan_control.mem_prealloc = bcm_wlan_prealloc;
#endif
+ dhd_wlan_control.get_country_code = bcm_wlan_get_country_code;
return 0;
}
/*
* IP Packet Parser Module.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_ip.c 502735 2014-09-16 00:53:02Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_ip.c 569132 2015-07-07 09:09:33Z $
*/
#include <typedefs.h>
#include <osl.h>
}
}
-bool pkt_is_dhcp(osl_t *osh, void *p)
-{
- uint8 *frame;
- int length;
- uint8 *pt; /* Pointer to type field */
- uint16 ethertype;
- struct ipv4_hdr *iph; /* IP frame pointer */
- int ipl; /* IP frame length */
- uint16 src_port;
-
- ASSERT(osh && p);
-
- frame = PKTDATA(osh, p);
- length = PKTLEN(osh, p);
-
- /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
- if (length < ETHER_HDR_LEN) {
- DHD_INFO(("%s: short eth frame (%d)\n", __FUNCTION__, length));
- return FALSE;
- } else if (ntoh16(*(uint16 *)(frame + ETHER_TYPE_OFFSET)) >= ETHER_TYPE_MIN) {
- /* Frame is Ethernet II */
- pt = frame + ETHER_TYPE_OFFSET;
- } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
- !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
- pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
- } else {
- DHD_INFO(("%s: non-SNAP 802.3 frame\n", __FUNCTION__));
- return FALSE;
- }
-
- ethertype = ntoh16(*(uint16 *)pt);
-
- /* Skip VLAN tag, if any */
- if (ethertype == ETHER_TYPE_8021Q) {
- pt += VLAN_TAG_LEN;
-
- if (pt + ETHER_TYPE_LEN > frame + length) {
- DHD_INFO(("%s: short VLAN frame (%d)\n", __FUNCTION__, length));
- return FALSE;
- }
-
- ethertype = ntoh16(*(uint16 *)pt);
- }
-
- if (ethertype != ETHER_TYPE_IP) {
- DHD_INFO(("%s: non-IP frame (ethertype 0x%x, length %d)\n",
- __FUNCTION__, ethertype, length));
- return FALSE;
- }
-
- iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
- ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
-
- /* We support IPv4 only */
- if ((ipl < (IPV4_OPTIONS_OFFSET + 2)) || (IP_VER(iph) != IP_VER_4)) {
- DHD_INFO(("%s: short frame (%d) or non-IPv4\n", __FUNCTION__, ipl));
- return FALSE;
- }
-
- src_port = ntoh16(*(uint16 *)(pt + ETHER_TYPE_LEN + IPV4_OPTIONS_OFFSET));
-
- return (src_port == 0x43 || src_port == 0x44);
-}
-
#ifdef DHDTCPACK_SUPPRESS
typedef struct {
} tdata_psh_info_t;
typedef struct {
- uint8 src_ip_addr[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */
- uint8 dst_ip_addr[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */
- uint8 src_tcp_port[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */
- uint8 dst_tcp_port[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */
+ struct {
+ uint8 src[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */
+ uint8 dst[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */
+ } ip_addr;
+ struct {
+ uint8 src[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */
+ uint8 dst[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */
+ } tcp_port;
tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */
tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */
uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */
return tdata_psh_info;
}
+#ifdef BCMSDIO
static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
tcpack_sup_module_t *tcpack_sup_mod)
{
return;
}
+#endif /* BCMSDIO */
static void dhd_tcpack_send(ulong data)
{
dhd_pub_t *dhdp;
int ifidx;
void* pkt;
+ unsigned long flags;
if (!cur_tbl) {
return;
return;
}
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
tcpack_sup_mod = dhdp->tcpack_sup_module;
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+ __FUNCTION__, __LINE__));
+ dhd_os_tcpackunlock(dhdp, flags);
+ return;
+ }
pkt = cur_tbl->pkt_in_q;
ifidx = cur_tbl->ifidx;
if (!pkt) {
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
return;
}
cur_tbl->pkt_in_q = NULL;
__FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
dhd_sendpkt(dhdp, ifidx, pkt);
}
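Throughout this file the tcpack lock is moved to spin_lock_irqsave-style semantics: dhd_os_tcpacklock() now returns the saved IRQ flags, which must be handed back to dhd_os_tcpackunlock(). A sketch of the new pairing, as used by the functions below:

unsigned long flags;

flags = dhd_os_tcpacklock(dhdp);
/* ... inspect or modify dhdp->tcpack_sup_module under the lock ... */
dhd_os_tcpackunlock(dhdp, flags);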
int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
{
int ret = BCME_OK;
+ unsigned long flags;
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
if (dhdp->tcpack_sup_mode == mode) {
DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
if (mode >= TCPACK_SUP_LAST_MODE ||
#ifndef BCMSDIO
mode == TCPACK_SUP_DELAYTX ||
-#endif
+#endif /* !BCMSDIO */
FALSE) {
DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode));
ret = BCME_BADARG;
DHD_TRACE(("%s: %d -> %d\n",
__FUNCTION__, dhdp->tcpack_sup_mode, mode));
+#ifdef BCMSDIO
/* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */
if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) {
tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
if (dhdp->bus)
dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
}
-
+#endif /* BCMSDIO */
dhdp->tcpack_sup_mode = mode;
if (mode == TCPACK_SUP_OFF) {
ASSERT(dhdp->tcpack_sup_module != NULL);
+ /* Clean up timer/data structure for any remaining/pending packet or timer. */
+ dhd_tcpack_info_tbl_clean(dhdp);
MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t));
dhdp->tcpack_sup_module = NULL;
goto exit;
dhdp->tcpack_sup_module = tcpack_sup_mod;
}
+#ifdef BCMSDIO
if (mode == TCPACK_SUP_DELAYTX) {
ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module);
if (ret != BCME_OK)
else if (dhdp->bus)
dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
}
+#endif /* BCMSDIO */
if (mode == TCPACK_SUP_HOLD) {
int i;
tcpack_sup_module_t *tcpack_sup_mod =
(tcpack_sup_module_t *)dhdp->tcpack_sup_module;
- dhdp->tcpack_sup_ratio = TCPACK_SUPP_RATIO;
- dhdp->tcpack_sup_delay = TCPACK_DELAY_TIME;
+ dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO;
+ dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME;
for (i = 0; i < TCPACK_INFO_MAXNUM; i++)
{
tcpack_sup_mod->tcpack_info_tbl[i].dhdp = dhdp;
}
exit:
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
return ret;
}
{
tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
int i;
+ unsigned long flags;
if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
goto exit;
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
__FUNCTION__, __LINE__));
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
int ret = BCME_OK;
void *pdata;
uint32 pktlen;
+ unsigned long flags;
if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
goto exit;
goto exit;
}
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
tcpack_sup_mod = dhdp->tcpack_sup_module;
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
ret = BCME_ERROR;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
break;
}
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
exit:
return ret;
tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
- IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)),
- IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)),
- ntoh16_ua(tcpdata_info_tmp->src_tcp_port),
- ntoh16_ua(tcpdata_info_tmp->dst_tcp_port)));
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.src)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->ip_addr.dst)),
+ ntoh16_ua(tcpdata_info_tmp->tcp_port.src),
+ ntoh16_ua(tcpdata_info_tmp->tcp_port.dst)));
/* If either IP address or TCP port number does not match, skip. */
if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
- tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 &&
+ tcpdata_info_tmp->ip_addr.dst, IPV4_ADDR_LEN) == 0 &&
memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
- tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 &&
+ tcpdata_info_tmp->ip_addr.src, IPV4_ADDR_LEN) == 0 &&
memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
- tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 &&
+ tcpdata_info_tmp->tcp_port.dst, TCP_PORT_LEN) == 0 &&
memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
- tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) {
+ tcpdata_info_tmp->tcp_port.src, TCP_PORT_LEN) == 0) {
tcpdata_info = tcpdata_info_tmp;
break;
}
int i;
bool ret = FALSE;
bool set_dotxinrx = TRUE;
+ unsigned long flags;
+
if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
goto exit;
ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
counter_printlog(&tack_tbl);
tack_tbl.cnt[0]++;
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
ret = BCME_ERROR;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
- /* If either of IP address or TCP port number does not match, skip. */
+ /* If either the IP addresses or the TCP port numbers do not match, skip.
+ * Note that the src/dst address fields in the IP header are contiguous (8 bytes in total),
+ * and the src/dst port fields in the TCP header are contiguous (4 bytes in total).
+ */
if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
&old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
__FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
new_tcp_ack_num, pkt));
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
__FUNCTION__, __LINE__));
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
exit:
 /* Unless TCPACK_SUP_DELAYTX, dotxinrx is always TRUE, so no need to set here */
int i;
bool ret = FALSE;
+ unsigned long flags;
if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
goto exit;
ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
tcp_hdr[TCP_FLAGS_OFFSET]));
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
tcpack_sup_mod = dhdp->tcpack_sup_module;
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
ret = BCME_ERROR;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
uint32 now_in_ms = OSL_SYSUPTIME();
DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
" TCP port %d %d\n", __FUNCTION__, __LINE__, i,
- IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)),
- IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)),
- ntoh16_ua(tdata_info_tmp->src_tcp_port),
- ntoh16_ua(tdata_info_tmp->dst_tcp_port)));
-
- /* If both IP address and TCP port number match, we found it so break. */
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.src)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->ip_addr.dst)),
+ ntoh16_ua(tdata_info_tmp->tcp_port.src),
+ ntoh16_ua(tdata_info_tmp->tcp_port.dst)));
+
+ /* If both the IP addresses and the TCP port numbers match, we found the flow, so break.
+ * Note that the src/dst address fields in the IP header are contiguous (8 bytes in total),
+ * and the src/dst port fields in the TCP header are contiguous (4 bytes in total).
+ */
if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
- tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+ (void *)&tdata_info_tmp->ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
- tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) {
+ (void *)&tdata_info_tmp->tcp_port, TCP_PORT_LEN * 2) == 0) {
tcpdata_info = tdata_info_tmp;
tcpdata_info->last_used_time = now_in_ms;
break;
bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
}
bzero(last_tdata_info, sizeof(tcpdata_info_t));
- DHD_ERROR(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+ DHD_INFO(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
__FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
/* Don't increase "i" here, so that the prev last tcpdata_info is checked */
} else
IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
/* No TCP flow with the same IP addr and TCP port is found
* in tcp_data_info_tbl. So add this flow to the table.
*/
- DHD_ERROR(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ DHD_INFO(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
" TCP port %d %d\n",
__FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
-
- bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr,
+ /* Note that src/dst addr fields in ip header are contiguous being 8 bytes in total.
+ * Also, src/dst port fields in TCP header are contiguous being 4 bytes in total.
+ */
+ bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], (void *)&tcpdata_info->ip_addr,
IPV4_ADDR_LEN * 2);
- bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port,
+ bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], (void *)&tcpdata_info->tcp_port,
TCP_PORT_LEN * 2);
tcpdata_info->last_used_time = OSL_SYSUPTIME();
if (tdata_psh_info == NULL) {
DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
ret = BCME_ERROR;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
tdata_psh_info->end_seq = end_tcp_seq_num;
}
tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
exit:
return ret;
tcpack_info_t *tcpack_info_tbl;
int i, free_slot = TCPACK_INFO_MAXNUM;
bool hold = FALSE;
+ unsigned long flags;
if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
goto exit;
ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
/* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
- dhd_os_tcpacklock(dhdp);
+ flags = dhd_os_tcpacklock(dhdp);
tcpack_sup_mod = dhdp->tcpack_sup_module;
tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d\n",
__FUNCTION__, __LINE__, i));
hold = FALSE;
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
goto exit;
}
} else {
PKTFREE(dhdp->osh, pkt, TRUE);
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
if (!hold) {
del_timer_sync(&tcpack_info_tbl[i].timer);
DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
__FUNCTION__, __LINE__));
}
- dhd_os_tcpackunlock(dhdp);
+ dhd_os_tcpackunlock(dhdp, flags);
exit:
return hold;
*
* Provides type definitions and function prototypes used to parse ip packet.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_ip.h 502735 2014-09-16 00:53:02Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_ip.h 537119 2015-02-25 04:24:14Z $
*/
#ifndef _dhd_ip_h_
} pkt_frag_t;
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
-extern bool pkt_is_dhcp(osl_t *osh, void *p);
#ifdef DHDTCPACK_SUPPRESS
#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */
-#define TCPACK_SUPP_RATIO 3
-#define TCPACK_DELAY_TIME 10 /* ms */
+#define DEFAULT_TCPACK_SUPP_RATIO 3
+#ifndef CUSTOM_TCPACK_SUPP_RATIO
+#define CUSTOM_TCPACK_SUPP_RATIO DEFAULT_TCPACK_SUPP_RATIO
+#endif /* CUSTOM_TCPACK_SUPP_RATIO */
+
+#define DEFAULT_TCPACK_DELAY_TIME 10 /* ms */
+#ifndef CUSTOM_TCPACK_DELAY_TIME
+#define CUSTOM_TCPACK_DELAY_TIME DEFAULT_TCPACK_DELAY_TIME
+#endif /* CUSTOM_TCPACK_DELAY_TIME */
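The two #ifndef guards above follow a default-with-override pattern: a platform port may define its own suppress ratio and delay time at build time, and the DEFAULT_* values apply only when nothing else is defined. A minimal sketch of such an override, with purely illustrative values (5 and 20 are not recommendations):

/* Hypothetical compile-time override, e.g. passed on the compiler command
 * line before dhd_ip.h is processed:
 *   -DCUSTOM_TCPACK_SUPP_RATIO=5 -DCUSTOM_TCPACK_DELAY_TIME=20
 * With these defined, the #ifndef guards above keep the custom values and
 * the DEFAULT_TCPACK_* fallbacks are never used.
 */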
extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux.c 505753 2014-10-01 01:40:15Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux.c 609723 2016-01-05 08:40:45Z $
*/
#include <typedefs.h>
#include <proto/ethernet.h>
#include <proto/bcmevent.h>
#include <proto/vlan.h>
-#ifdef DHD_L2_FILTER
-#include <proto/bcmicmp.h>
-#endif
#include <proto/802.3.h>
#include <dngl_stats.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
-#ifdef P2PONEINT
-#include <wl_cfgp2p.h>
-#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
-#ifdef WLBTAMP
-#include <proto/802.11_bta.h>
-#include <proto/bt_amp_hci.h>
-#include <dhd_bta.h>
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
#endif
#ifdef CONFIG_COMPAT
#include <dhd_wmf_linux.h>
#endif /* DHD_WMF */
-#ifdef AMPDU_VO_ENABLE
-#include <proto/802.1d.h>
-#endif /* AMPDU_VO_ENABLE */
+#ifdef DHD_L2_FILTER
+#include <proto/bcmicmp.h>
+#include <bcm_l2_filter.h>
+#include <dhd_l2_filter.h>
+#endif /* DHD_L2_FILTER */
+
+#ifdef DHD_PSTA
+#include <dhd_psta.h>
+#endif /* DHD_PSTA */
+
+
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
-#if defined(DHD_TCP_WINSIZE_ADJUST)
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#endif /* DHD_TCP_WINSIZE_ADJUST */
+#ifdef DHD_DEBUG_PAGEALLOC
+typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
+void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
+extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif
+
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif
+
+#define HIST_BIN_SIZE 8
+
+#if defined(DHD_LB_RXP)
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
+#endif /* DHD_LB_RXP */
+
+#endif /* DHD_LB */
#ifdef WLMEDIA_HTSF
#include <linux/time.h>
static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */
-#if defined(DHD_TCP_WINSIZE_ADJUST)
-#define MIN_TCP_WIN_SIZE 18000
-#define WIN_SIZE_SCALE_FACTOR 2
-#define MAX_TARGET_PORTS 5
+#ifdef STBLINUX
+#ifdef quote_str
+#undef quote_str
+#endif /* quote_str */
+#ifdef to_str
+#undef to_str
+#endif /* to_str */
+#define to_str(s) #s
+#define quote_str(s) to_str(s)
-static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
-static uint dhd_use_tcp_window_size_adjust = FALSE;
-static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
-#endif /* DHD_TCP_WINSIZE_ADJUST */
+static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
+#endif /* STBLINUX */
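The to_str/quote_str pair above is the standard two-level stringification idiom: the extra indirection forces BRCM_DRIVER_TARGET to be macro-expanded before it is stringized. A short illustration, using a hypothetical target value:

/* Assume the build defines (hypothetical value):
 *   #define BRCM_DRIVER_TARGET chip_xyz
 * Then:
 *   quote_str(BRCM_DRIVER_TARGET)  ->  to_str(chip_xyz)  ->  "chip_xyz"
 *   to_str(BRCM_DRIVER_TARGET)     ->  "BRCM_DRIVER_TARGET"  (no expansion)
 */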
#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif
+extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
-
+#ifdef FIX_CPU_MIN_CLOCK
+#include <linux/pm_qos.h>
+#endif /* FIX_CPU_MIN_CLOCK */
+#ifdef SET_RANDOM_MAC_SOFTAP
+#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
+#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
+#endif
+static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
+#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#include <wl_android.h>
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
-#include <sdaudio.h>
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
-
/* Maximum STA per radio */
#define DHD_MAX_STA 32
+
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static int dhd_inet6addr_notifier_call(struct notifier_block *this,
unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
* created in kernel notifier link list (with 'next' pointing to itself)
*/
static bool dhd_inet6addr_notifier_registered = FALSE;
-#endif
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
-#if defined(OOB_INTR_ONLY)
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL and additional rights");
#endif /* LinuxVer */
#include <dhd_bus.h>
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+extern uint sd_f2_blocksize;
+extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef CONFIG_PARTIALSUSPEND_SLP
+#include <linux/partialsuspend_slp.h>
+#define CONFIG_HAS_EARLYSUSPEND
+#define DHD_USE_EARLYSUSPEND
+#define register_early_suspend register_pre_suspend
+#define unregister_early_suspend unregister_pre_suspend
+#define early_suspend pre_suspend
+#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
+#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+#endif /* CONFIG_PARTIALSUSPEND_SLP */
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
#endif
-#if defined(SOFTAP_TPUT_ENHANCE)
-extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
-extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
-#endif /* SOFTAP_TPUT_ENHANCE */
-#ifdef SET_RPS_CPUS
-int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
-void custom_rps_map_clear(struct netdev_rx_queue *queue);
-#ifdef CONFIG_MACH_UNIVERSAL5433
-#define RPS_CPUS_MASK "10"
-#else
-#define RPS_CPUS_MASK "6"
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
-#endif /* SET_RPS_CPUS */
+
+#ifdef DHD_FW_COREDUMP
+static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+#endif /* DHD_FW_COREDUMP */
+#ifdef DHD_LOG_DUMP
+static void dhd_log_dump_init(dhd_pub_t *dhd);
+static void dhd_log_dump_deinit(dhd_pub_t *dhd);
+static void dhd_log_dump(void *handle, void *event_info, u8 event);
+void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+static int do_dhd_log_dump(dhd_pub_t *dhdp);
+#endif /* DHD_LOG_DUMP */
static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
- .notifier_call = dhd_reboot_callback,
- .priority = 1,
+ .notifier_call = dhd_reboot_callback,
+ .priority = 1,
};
+#ifdef BCMPCIE
+static int is_reboot = 0;
+#endif /* BCMPCIE */
typedef struct dhd_if_event {
struct list_head list;
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ+1]; /* linux interface name */
+ char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
struct net_device_stats stats;
#ifdef DHD_WMF
dhd_wmf_t wmf; /* per bsscfg wmf setting */
#endif /* ! BCM_GMAC3 */
#endif /* PCIE_FULL_DONGLE */
uint32 ap_isolate; /* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+ bool parp_enable;
+ bool parp_discard;
+ bool parp_allnode;
+ arp_table_t *phnd_arp_table;
+/* for Per BSS modification */
+ bool dhcp_unicast;
+ bool block_ping;
+ bool grat_arp;
+#endif /* DHD_L2_FILTER */
} dhd_if_t;
#ifdef WLMEDIA_HTSF
unsigned long event;
};
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
-#define MAX_WLANAUDIO_BLACKLIST 4
-
-struct wlanaudio_blacklist {
- bool is_blacklist;
- uint32 cnt;
- ulong txfail_jiffies;
- struct ether_addr blacklist_addr;
-};
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
+#ifdef DHD_DEBUG
+typedef struct dhd_dump {
+ uint8 *buf;
+ int bufsize;
+} dhd_dump_t;
+#endif /* DHD_DEBUG */
/* When Perimeter locks are deployed, any blocking calls must be preceded
* with a PERIM UNLOCK and followed by a PERIM LOCK.
char nv_path[PATH_MAX]; /* path to nvram vars file */
char conf_path[PATH_MAX]; /* path to config vars file */
+ /* serialize dhd iovars */
+ struct mutex dhd_iovar_mutex;
+
struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
spinlock_t wlfc_spinlock;
htsf_t htsf;
#endif
wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t d3ack_wait;
+ wait_queue_head_t dhd_bus_busy_state_wait;
uint32 default_wd_interval;
struct timer_list timer;
bool wd_timer_valid;
+#ifdef DHD_PCIE_RUNTIMEPM
+ struct timer_list rpm_timer;
+ bool rpm_timer_valid;
+ tsk_ctl_t thr_rpm_ctl;
+#endif /* DHD_PCIE_RUNTIMEPM */
struct tasklet_struct tasklet;
spinlock_t sdlock;
spinlock_t txqlock;
struct wake_lock wl_rxwake; /* Wifi rx wakelock */
struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+ struct wake_lock wl_evtwake; /* Wifi event wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
struct wake_lock wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ struct wake_lock wl_scanwake; /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
/* net_device interface lock, prevent race conditions among net_dev interface
* calls and wifi_on or wifi_off
*/
struct mutex dhd_suspend_mutex;
#endif
spinlock_t wakelock_spinlock;
+ spinlock_t wakelock_evt_spinlock;
+ uint32 wakelock_event_counter;
uint32 wakelock_counter;
int wakelock_wd_counter;
int wakelock_rx_timeout_enable;
void *rpc_osh;
struct timer_list rpcth_timer;
bool rpcth_timer_active;
- bool fdaggr;
+ uint8 fdaggr;
#endif
#ifdef DHDTCPACK_SUPPRESS
spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+ bool cpufreq_fix_status;
+ struct mutex cpufreq_fix;
+ struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+ struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
struct notifier_block freq_trans;
#endif
unsigned int unit;
struct notifier_block pm_notifier;
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
- struct wlanaudio_blacklist wlanaudio_blist[MAX_WLANAUDIO_BLACKLIST];
- bool is_wlanaudio_blist;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
+#ifdef DHD_PSTA
+ uint32 psta_mode; /* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_DEBUG
+ dhd_dump_t *dump;
+ struct timer_list join_timer;
+ u32 join_timeout_val;
+ bool join_timer_active;
+ uint scan_time_count;
+ struct timer_list scan_timer;
+ bool scan_timer_active;
+#endif
+#if defined(DHD_LB)
+ /* CPU Load Balance dynamic CPU selection */
+
+ /* Variable that tracks the CPUs currently available for candidacy */
+ cpumask_var_t cpumask_curr_avail;
+
+ /* Primary and secondary CPU mask */
+ cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+ cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+ struct notifier_block cpu_notifier;
+
+ /* Tasklet to handle Tx Completion packet freeing */
+ struct tasklet_struct tx_compl_tasklet;
+ atomic_t tx_compl_cpu;
+
+
+ /* Tasklet to handle RxBuf Post during Rx completion */
+ struct tasklet_struct rx_compl_tasklet;
+ atomic_t rx_compl_cpu;
+
+ /* Napi struct for handling rx packet sendup. Packets are removed from
+ * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+ * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+ * to run on rx_napi_cpu.
+ */
+ struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+ struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+ struct napi_struct rx_napi_struct ____cacheline_aligned;
+ atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+ struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+ struct work_struct rx_napi_dispatcher_work;
+ struct work_struct tx_compl_dispatcher_work;
+ struct work_struct rx_compl_dispatcher_work;
+ /* Number of times DPC Tasklet ran */
+ uint32 dhd_dpc_cnt;
+
+ /* Number of times NAPI processing got scheduled */
+ uint32 napi_sched_cnt;
+
+ /* Number of times NAPI processing ran on each available core */
+ uint32 napi_percpu_run_cnt[NR_CPUS];
+
+ /* Number of times RX Completions got scheduled */
+ uint32 rxc_sched_cnt;
+ /* Number of times RX Completion ran on each available core */
+ uint32 rxc_percpu_run_cnt[NR_CPUS];
+
+ /* Number of times TX Completions got scheduled */
+ uint32 txc_sched_cnt;
+ /* Number of times TX Completions ran on each available core */
+ uint32 txc_percpu_run_cnt[NR_CPUS];
+
+ /* CPU status */
+ /* Number of times each CPU came online */
+ uint32 cpu_online_cnt[NR_CPUS];
+
+ /* Number of times each CPU went offline */
+ uint32 cpu_offline_cnt[NR_CPUS];
+
+ /*
+ * Consumer Histogram - NAPI RX Packet processing
+ * -----------------------------------------------
+ * For each CPU, this structure captures how many packets were processed
+ * each time the NAPI RX packet processing callback was invoked.
+ * It is difficult to capture the "exact" number of packets processed,
+ * so, treating the packet counter as a 32-bit value, we keep a bucket
+ * with 8 bins (2^1, 2^2 ... 2^8). The number of packets processed is
+ * rounded up to the next power of 2 and the value in the corresponding
+ * "bin" is incremented.
+ * For example, assume that in CPU 1 if NAPI Rx runs 3 times
+ * and the packet count processed is as follows (assume the bin counters are 0)
+ * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+ * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+ * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+ */
+ uint32 napi_rx_hist[NR_CPUS][HIST_BIN_SIZE];
+ uint32 txc_hist[NR_CPUS][HIST_BIN_SIZE];
+ uint32 rxc_hist[NR_CPUS][HIST_BIN_SIZE];
+#endif /* DHD_LB */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+ struct kobject dhd_kobj;
+#ifdef SUPPORT_SENSORHUB
+ uint32 shub_enable;
+#endif /* SUPPORT_SENSORHUB */
+
+ struct delayed_work dhd_memdump_work;
} dhd_info_t;
#define DHDIF_FWDER(dhdif) FALSE
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
+/* Flag to indicate if driver is initialized */
+uint dhd_driver_init_done = FALSE;
+
/* Definitions to provide path to the firmware and nvram
* example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
*/
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
+
+#if defined(DHD_LB_RXP)
+static int dhd_napi_weight = 32;
+module_param(dhd_napi_weight, int, 0644);
+#endif /* DHD_LB_RXP */
+
extern int wl_control_wl_start(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
-#endif
-
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
+#ifdef ENABLE_ARP_SNOOP_MODE
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP;
+#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
+#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */
uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
+#ifdef DHD_PCIE_RUNTIMEPM
+uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
+#endif /* DHD_PCIE_RUNTIMEPMT */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+uint dhd_master_mode = FALSE;
+#else
uint dhd_master_mode = FALSE;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);
int dhd_watchdog_prio = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
-dhd_info_t *dhd_global = NULL;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
+/* Functions to manage sysfs interface for dhd */
+static int dhd_sysfs_init(dhd_info_t *dhd);
+static void dhd_sysfs_exit(dhd_info_t *dhd);
+#if defined(DHD_LB)
+static void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+ /* Default CPU allocation for the jobs */
+ atomic_set(&dhd->rx_napi_cpu, 1);
+ atomic_set(&dhd->rx_compl_cpu, 2);
+ atomic_set(&dhd->tx_compl_cpu, 2);
+}
-/* DHD Perimiter lock only used in router with bypass forwarding. */
-#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
-#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
-#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
-#define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
-#define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
-
-#ifdef PCIE_FULL_DONGLE
-#if defined(BCM_GMAC3)
-#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
-#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
-#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
-#else /* ! BCM_GMAC3 */
-#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
-#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
- spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
-#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
- spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
-#endif /* ! BCM_GMAC3 */
-#endif /* PCIE_FULL_DONGLE */
-
-/* Control fw roaming */
-#ifdef BCMCCX
-uint dhd_roam_disable = 0;
-#else
-uint dhd_roam_disable = 0;
-#endif /* BCMCCX */
+static void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+ free_cpumask_var(dhd->cpumask_curr_avail);
+ free_cpumask_var(dhd->cpumask_primary);
+ free_cpumask_var(dhd->cpumask_primary_new);
+ free_cpumask_var(dhd->cpumask_secondary);
+ free_cpumask_var(dhd->cpumask_secondary_new);
+}
-/* Control radio state */
-uint dhd_radio_up = 1;
+static int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+ int id;
+ uint32 cpus;
+ int ret = 0;
-/* Network inteface name */
-char iface_name[IFNAMSIZ] = {'\0'};
-module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+ if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+ DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
-/* The following are specific to the SDIO dongle */
+ cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+ cpumask_clear(dhd->cpumask_primary);
+ cpumask_clear(dhd->cpumask_secondary);
-/* IOCTL response timeout */
-int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+ cpus = DHD_LB_PRIMARY_CPUS;
+ for (id = 0; id < NR_CPUS; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_primary);
+ }
-/* Idle timeout for backplane clock */
-int dhd_idletime = DHD_IDLETIME_TICKS;
-module_param(dhd_idletime, int, 0);
+ cpus = DHD_LB_SECONDARY_CPUS;
+ for (id = 0; id < NR_CPUS; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_secondary);
+ }
-/* Use polling */
-uint dhd_poll = FALSE;
-module_param(dhd_poll, uint, 0);
+ return ret;
+fail:
+ dhd_cpumasks_deinit(dhd);
+ return ret;
+}
-/* Use interrupts */
-uint dhd_intr = TRUE;
-module_param(dhd_intr, uint, 0);
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of jobs that need to be assigned to
+ * the CPUs, from one of the above mentioned CPU groups. The jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, Rx) - compl_cpu
+ *
+ * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line, the CPU candidacy algorithm is triggered. The candidacy
+ * algo tries to pick the first available non-boot CPU (not CPU0) for napi_cpu.
+ * If there are more processors free, it assigns one to compl_cpu.
+ * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
+ * CPU, as much as possible.
+ *
+ * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
+ * would allow Tx completion skb's to be released into a local free pool from
+ * which the rx buffer posts could have been serviced. It is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+ uint32 primary_available_cpus; /* count of primary available cpus */
+ uint32 secondary_available_cpus; /* count of secondary available cpus */
+ uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+ uint32 compl_cpu = 0; /* cpu selected for completion jobs */
-/* SDIO Drive Strength (in milliamps) */
-uint dhd_sdiod_drive_strength = 6;
-module_param(dhd_sdiod_drive_strength, uint, 0);
+ cpumask_clear(dhd->cpumask_primary_new);
+ cpumask_clear(dhd->cpumask_secondary_new);
-#ifdef BCMSDIO
-/* Tx/Rx bounds */
-extern uint dhd_txbound;
-extern uint dhd_rxbound;
-module_param(dhd_txbound, uint, 0);
-module_param(dhd_rxbound, uint, 0);
+ /*
+ * Now select from the primary mask. Even if a job is
+ * already running on a CPU in the secondary group, we still move
+ * it to a primary CPU, so no conditional checks are needed.
+ */
+ cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+ dhd->cpumask_curr_avail);
-/* Deferred transmits */
-extern uint dhd_deferred_tx;
-module_param(dhd_deferred_tx, uint, 0);
+ cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+ dhd->cpumask_curr_avail);
-#ifdef BCMDBGFS
-extern void dhd_dbg_init(dhd_pub_t *dhdp);
-extern void dhd_dbg_remove(void);
-#endif /* BCMDBGFS */
+ primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
-#endif /* BCMSDIO */
+ if (primary_available_cpus > 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_primary_new);
+ /* If no further CPU is available,
+ * cpumask_next returns >= nr_cpu_ids
+ */
+ compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = 0;
+ }
-#ifdef SDTEST
-/* Echo packet generator (pkts/s) */
-uint dhd_pktgen = 0;
-module_param(dhd_pktgen, uint, 0);
+ DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu));
-/* Echo packet len (0 => sawtooth, max 2040) */
-uint dhd_pktgen_len = 0;
-module_param(dhd_pktgen_len, uint, 0);
-#endif /* SDTEST */
+ /* -- Now check for the CPUs from the secondary mask -- */
+ secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
-#if defined(BCMSUP_4WAY_HANDSHAKE)
-/* Use in dongle supplicant for 4-way handshake */
-uint dhd_use_idsup = 0;
-module_param(dhd_use_idsup, uint, 0);
-#endif /* BCMSUP_4WAY_HANDSHAKE */
+ DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+ __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
-extern char dhd_version[];
+ if (secondary_available_cpus > 0) {
+ /* At this point if napi_cpu is unassigned it means no CPU
+ * is online from Primary Group
+ */
+ if (napi_cpu == 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ compl_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+ } else if (compl_cpu == 0) {
+ compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ }
-int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
-static void dhd_net_if_lock_local(dhd_info_t *dhd);
-static void dhd_net_if_unlock_local(dhd_info_t *dhd);
-static void dhd_suspend_lock(dhd_pub_t *dhdp);
-static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+ /* If no CPU was available for completion, choose CPU 0 */
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = 0;
+ }
+ if ((primary_available_cpus == 0) &&
+ (secondary_available_cpus == 0)) {
+ /* No CPUs available from primary or secondary mask */
+ napi_cpu = 0;
+ compl_cpu = 0;
+ }
-#ifdef WLMEDIA_HTSF
-void htsf_update(dhd_info_t *dhd, void *data);
-tsf_t prev_tsf, cur_tsf;
+ DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu));
+ ASSERT(napi_cpu < nr_cpu_ids);
+ ASSERT(compl_cpu < nr_cpu_ids);
-uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
-static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
-static void dhd_dump_latency(void);
-static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
-static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
-static void dhd_dump_htsfhisto(histo_t *his, char *s);
-#endif /* WLMEDIA_HTSF */
+ atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+ atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+ atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+ return;
+}
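A worked example of the candidacy selection above, assuming the default masks defined earlier in this file (DHD_LB_PRIMARY_CPUS 0x0, DHD_LB_SECONDARY_CPUS 0xFE) and CPUs 0-3 online:

/* primary & online   = {}        -> napi_cpu stays 0 after the primary pass
 * secondary & online = {1, 2, 3} -> napi_cpu = 1 (first secondary CPU),
 *                                   compl_cpu = 2 (next secondary CPU)
 * If only CPU0 is online, both intersections are empty and
 * napi_cpu = compl_cpu = 0.
 */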
-/* Monitor interface */
-int dhd_monitor_init(void *dhd_pub);
-int dhd_monitor_uninit(void);
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the task it does is to trigger the CPU Candidacy algorithm
+ * One of the tasks it does is to trigger the CPU Candidacy algorithm
+ */
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+ dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
-#if defined(WL_WIRELESS_EXT)
-struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
-#endif /* defined(WL_WIRELESS_EXT) */
+ switch (action)
+ {
+ case CPU_ONLINE:
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
-static void dhd_dpc(ulong data);
-/* forward decl */
-extern int dhd_wait_pend8021x(struct net_device *dev);
-void dhd_os_wd_timer_extend(void *bus, bool extend);
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+ default:
+ break;
+ }
-#ifdef TOE
-#ifndef BDC
-#error TOE requires BDC
-#endif /* !BDC */
-static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
-static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
-#endif /* TOE */
+ return NOTIFY_OK;
+}
-static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
- wl_event_msg_t *event_ptr, void **data_ptr);
-#ifdef DHD_UNICAST_DHCP
-static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
- int *len_ptr, uint8 *prot_ptr);
-static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
- int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
-
-static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
-#endif /* DHD_UNICAST_DHCP */
-#ifdef DHD_L2_FILTER
-static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
-#endif
-#if defined(CONFIG_PM_SLEEP)
-static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
{
- int ret = NOTIFY_DONE;
- bool suspend = FALSE;
- dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+ dhd_info_t *dhd;
+ int i, j;
- BCM_REFERENCE(dhdinfo);
- switch (action) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- suspend = TRUE;
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- suspend = FALSE;
- break;
+ if (dhdp == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp is NULL \n",
+ __FUNCTION__));
+ return;
}
-#if defined(SUPPORT_P2P_GO_PS)
-#ifdef PROP_TXSTATUS
- if (suspend) {
- DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
- dhd_wlfc_suspend(&dhdinfo->pub);
- DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
- } else
- dhd_wlfc_resume(&dhdinfo->pub);
-#endif
-#endif /* defined(SUPPORT_P2P_GO_PS) */
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39))
+ DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+ DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
+ DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
+ DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
+
+ DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
+ }
+
+ for (i = 0; i < NR_CPUS; i++) {
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[i][j]);
+ DHD_LB_STATS_CLR(dhd->txc_hist[i][j]);
+ DHD_LB_STATS_CLR(dhd->rxc_hist[i][j]);
+ }
+ }
+
+ return;
+}
+
+static void dhd_lb_stats_dump_histo(
+ struct bcmstrbuf *strbuf, uint32 (*hist)[HIST_BIN_SIZE])
+{
+ int i, j;
+ uint32 per_cpu_total[NR_CPUS] = {0};
+ uint32 total = 0;
+
+ bcm_bprintf(strbuf, "CPU: \t\t");
+ for (i = 0; i < num_possible_cpus(); i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\nBin\n");
+
+ for (i = 0; i < HIST_BIN_SIZE; i++) {
+ bcm_bprintf(strbuf, "%d:\t\t", 1<<(i+1));
+ for (j = 0; j < num_possible_cpus(); j++) {
+ bcm_bprintf(strbuf, "%d\t", hist[j][i]);
+ }
+ bcm_bprintf(strbuf, "\n");
+ }
+ bcm_bprintf(strbuf, "Per CPU Total \t");
+ total = 0;
+ for (i = 0; i < num_possible_cpus(); i++) {
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ per_cpu_total[i] += (hist[i][j] * (1<<(j+1)));
+ }
+ bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+ total += per_cpu_total[i];
+ }
+ bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
+
+ return;
+}
+
+static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+ int i;
+
+ bcm_bprintf(strbuf, "CPU: \t");
+ for (i = 0; i < num_possible_cpus(); i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\n");
+
+ bcm_bprintf(strbuf, "Val: \t");
+ for (i = 0; i < num_possible_cpus(); i++)
+ bcm_bprintf(strbuf, "%u\t", *(p+i));
+ bcm_bprintf(strbuf, "\n");
+ return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ dhd_info_t *dhd;
+
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return;
+ }
+
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
+
+ bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
+
+ bcm_bprintf(strbuf, "cpu_offline_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
+
+ bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+ dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+ dhd->txc_sched_cnt);
+#ifdef DHD_LB_RXP
+ bcm_bprintf(strbuf, "napi_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
+#endif /* DHD_LB_RXP */
+
+#ifdef DHD_LB_RXC
+ bcm_bprintf(strbuf, "rxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
+#endif /* DHD_LB_RXC */
+
+
+#ifdef DHD_LB_TXC
+ bcm_bprintf(strbuf, "txc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
+#endif /* DHD_LB_TXC */
+}
+
+static void dhd_lb_stats_update_histo(uint32 *bin, uint32 count)
+{
+ uint32 bin_power;
+ uint32 *p = NULL;
+
+ bin_power = next_larger_power2(count);
+
+ switch (bin_power) {
+ case 0: break;
+ case 1: /* Fall through intentionally */
+ case 2: p = bin + 0; break;
+ case 4: p = bin + 1; break;
+ case 8: p = bin + 2; break;
+ case 16: p = bin + 3; break;
+ case 32: p = bin + 4; break;
+ case 64: p = bin + 5; break;
+ case 128: p = bin + 6; break;
+ default : p = bin + 7; break;
+ }
+ if (p)
+ *p = *p + 1;
+ return;
+}
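For example, assuming next_larger_power2() rounds its argument up to the nearest power of two (which is what the switch above expects):

/* count = 10  -> rounds up to 16  -> bin[3] is incremented (the 2^4 bucket)
 * count = 30  -> rounds up to 32  -> bin[4] is incremented (the 2^5 bucket)
 * count > 128 -> falls into the catch-all bin[7]
 */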
+
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(&dhd->napi_rx_hist[cpu][0], count);
+
+ return;
+}
+
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(&dhd->txc_hist[cpu][0], count);
+
+ return;
+}
+
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(&dhd->rxc_hist[cpu][0], count);
+
+ return;
+}
+
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
+
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+
+#endif /* DHD_LB_STATS */
+#endif /* DHD_LB */
+
+
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+int g_frameburst = 1;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
+
+static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
+
+/* DHD Perimeter lock only used in router with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
+
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+ spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+ spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
+ struct list_head *snapshot_list);
+static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
+
+#ifdef BCMDBGFS
+extern int dhd_dbg_init(dhd_pub_t *dhdp);
+extern void dhd_dbg_remove(void);
+#endif
+
+/* Control radio state */
+uint dhd_radio_up = 1;
+
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+
+/* The following are specific to the SDIO dongle */
+
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
+
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
+
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
+
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
+
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
+
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
+
+#endif /* BCMSDIO */
+
+
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
+
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
+
+
+
+/* Allow delayed firmware download for debug purposes */
+int allow_delay_fwdl = FALSE;
+module_param(allow_delay_fwdl, int, 0);
+
+extern char dhd_version[];
+extern char fw_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
+
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
+
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
+
+
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
+
+static void dhd_dpc(ulong data);
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
+
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+ bool suspend = FALSE;
+ dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+
+ BCM_REFERENCE(dhdinfo);
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ suspend = TRUE;
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ suspend = FALSE;
+ break;
+ }
+
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+ if (suspend) {
+ DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+ dhd_wlfc_suspend(&dhdinfo->pub);
+ DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+ } else
+ dhd_wlfc_resume(&dhdinfo->pub);
+#endif /* PROP_TXSTATUS */
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 39))
dhd_mmc_suspend = suspend;
smp_mb();
#endif
return ret;
}
-static struct notifier_block dhd_pm_notifier = {
- .notifier_call = dhd_pm_callback,
- .priority = 10
-};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
* created in kernel notifier link list (with 'next' pointing to itself)
*/
#ifdef DHDTCPACK_SUPPRESS
.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
- .up = FALSE, .busstate = DHD_BUS_DOWN
+ .up = FALSE,
+ .busstate = DHD_BUS_DOWN
}
};
#define DHD_INFO_NULL (&dhd_info_null)
/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
{
int prio;
- ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+ ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+ /*
+ * Flush and free all packets in all flowring's queues belonging to sta.
+ * Packets in flow ring will be flushed later.
+ */
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ uint16 flowid = sta->flowid[prio];
+
+ if (flowid != FLOWID_INVALID) {
+ unsigned long flags;
+ flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
+ flow_ring_node_t * flow_ring_node;
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when a new packet comes in from the network stack.
+ */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+
+ if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+ void * pkt;
+ while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ }
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ }
+
+ sta->flowid[prio] = FLOWID_INVALID;
+ }
- ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
id16_map_free(dhdp->staid_allocator, sta->idx);
- for (prio = 0; prio < (int)NUMPRIO; prio++)
- sta->flowid[prio] = FLOWID_INVALID;
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
sta->ifidx = DHD_BAD_IF;
bzero(sta->ea.octet, ETHER_ADDR_LEN);
ASSERT((sta->idx == ID16_INVALID) &&
(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+
sta->idx = idx; /* implying allocated */
return sta;
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
- int idx, sta_pool_memsz;
+ int idx, prio, sta_pool_memsz;
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void * staid_allocator;
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
dhd_sta_free(dhdp, sta);
}
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
- int idx, sta_pool_memsz;
+ int idx, prio, sta_pool_memsz;
dhd_sta_t * sta;
dhd_sta_pool_t * sta_pool;
void *staid_allocator;
/* Now place them into the pre-allocated free pool. */
for (idx = 1; idx <= max_sta; idx++) {
sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
+ }
dhd_sta_free(dhdp, sta);
}
}
}
DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
+ ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
return;
}
return sta;
}
+
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#if !defined(BCM_GMAC3)
+static struct list_head *
+dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
+{
+ unsigned long flags;
+ dhd_sta_t *sta, *snapshot;
+
+ INIT_LIST_HEAD(snapshot_list);
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ /* allocate one and add to snapshot */
+ snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
+ if (snapshot == NULL) {
+ DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
+ continue;
+ }
+
+ memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
+
+ INIT_LIST_HEAD(&snapshot->list);
+ list_add_tail(&snapshot->list, snapshot_list);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return snapshot_list;
+}
+
+static void
+dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
+{
+ dhd_sta_t *sta, *next;
+
+ list_for_each_entry_safe(sta, next, snapshot_list, list) {
+ list_del(&sta->list);
+ MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+ }
+}
+#endif /* !BCM_GMAC3 */
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+
#else
static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
#endif /* PCIE_FULL_DONGLE */
-/* Returns dhd iflist index correspondig the the bssidx provided by apps */
+#if defined(DHD_LB)
+
+#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC)
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+static INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+
+/**
+ * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
+ * @tasklet: tasklet to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * smp_call_function_single with no wait and the tasklet_schedule function
+ * will be invoked to schedule the specified tasklet on the requested CPU.
+ */
+static void
+dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
+{
+ const int wait = 0;
+ smp_call_function_single(on_cpu,
+ dhd_tasklet_schedule, (void *)tasklet, wait);
+}
+#endif /* DHD_LB_TXC || DHD_LB_RXC */
+
+
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
+ * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq
+ */
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+ /*
+ * If the destination CPU is NOT online or is the same as the current CPU,
+ * there is no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->tx_compl_dispatcher_work);
+ }
+}
+
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ else
+ dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+ put_online_cpus();
+}
+
+#endif /* DHD_LB_TXC */
+
+
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
+ *
+ * @dhdp: pointer to dhd_pub object
+ */
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
+ /*
+ * If the destination CPU is NOT online or is the same as the current CPU,
+ * there is no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->rx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->rx_compl_dispatcher_work);
+ }
+}
+
+static void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ else
+ dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+ put_online_cpus();
+}
+
+#endif /* DHD_LB_RXC */
+
+
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock, splice, unlock).
+ * Dequeue each packet from the head of rx_process_queue, fetch the ifid from
+ * the packet tag and send it up the stack.
+ */
+static int
+dhd_napi_poll(struct napi_struct *napi, int budget)
+{
+ int ifid;
+ const int pkt_count = 1;
+ const int chan = 0;
+ struct sk_buff * skb;
+ unsigned long flags;
+ struct dhd_info *dhd;
+ int processed = 0;
+ struct sk_buff_head rx_process_queue;
+
+ dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+ DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+ __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+
+ __skb_queue_head_init(&rx_process_queue);
+
+ /* extract the entire rx_napi_queue into local rx_process_queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+ OSL_PREFETCH(skb->data);
+
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+
+ DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ __FUNCTION__, skb, ifid));
+
+ dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+ processed++;
+ }
+
+ DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
+
+ DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+ napi_complete(napi);
+
+ return budget - 1;
+}
+
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
+ * dhd_napi_schedule - Place the napi struct into the current CPU's softnet napi
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)info;
+
+ DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
+
+ /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+ if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+ __napi_schedule(&dhd->rx_napi_struct);
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
+ }
+
+ /*
+ * If the rx_napi_struct was already running, then we let it complete
+ * processing all its packets. The rx_napi_struct may only run on one
+ * core at a time, to avoid out-of-order handling.
+ */
+}
+
+/**
+ * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
+ * action after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
+{
+ int wait = 0; /* asynchronous IPI */
+
+ DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+ __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
+
+ if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+ DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+ __FUNCTION__, on_cpu));
+ }
+
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
+
+ return 0;
+}
+
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
+ * Why should we do this?
+ * The candidacy algorithm is run from the call back function
+ * registered to CPU hotplug notifier. This call back happens from Worker
+ * context. The dhd_napi_schedule_on is also from worker context.
+ * Note that both of these can run on two different CPUs at the same time.
+ * So we can possibly have a window where a given CPUn is being brought
+ * down from CPUm while we try to run a function on CPUn.
+ * To prevent this, it is better to execute the whole SMP function call
+ * under get_online_cpus.
+ * This call ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running, no worries because the
+ * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
+ *
+ * The below mentioned code structure is proposed in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu is in progress
+ *
+ * According to the documentation calling get_online_cpus is NOT required, if
+ * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
+ * run from Work Queue context we have to call these functions
+ */
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->rx_napi_cpu);
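+	/* Fall back to scheduling NAPI on the local CPU if the chosen CPU has gone offline */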
+ if (!cpu_online(cpu))
+ dhd_napi_schedule(dhd);
+ else
+ dhd_napi_schedule_on(dhd, cpu);
+ put_online_cpus();
+}
+
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and send them up.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int on_cpu;
+
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+ /* append the producer's queue of packets to the napi's rx process queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ /*
+ * If the destination CPU is NOT online or is same as current CPU
+ * no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->rx_napi_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_napi_schedule(dhd);
+ } else {
+ schedule_work(&dhd->rx_napi_dispatcher_work);
+ }
+}
+
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
+ */
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
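+	/* Tag the packet with its interface index so dhd_napi_poll can send it up on the right interface */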
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+ __skb_queue_tail(&dhd->rx_pend_queue, pkt);
+}
+#endif /* DHD_LB_RXP */
+
+#endif /* DHD_LB */
+
+static void dhd_memdump_work_handler(struct work_struct * work)
+{
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, dhd_memdump_work.work);
+
+ BCM_REFERENCE(dhd);
+#ifdef BCMPCIE
+ dhd_prot_collect_memdump(&dhd->pub);
+#endif
+}
+
+
+/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
dhd_if_t *ifp;
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
-#ifndef CUSTOMER_HW10
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-#endif /* !CUSTOMER_HW10 */
if (prepost) { /* pre process */
dhd_read_macaddr(dhd);
}
#endif /* PKT_FILTER_SUPPORT && !GAN_LITE_NAT_KEEPALIVE_FILTER */
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#ifdef PKT_FILTER_SUPPORT
-void
-dhd_set_packet_filter_mode(struct net_device *dev, char *command)
-{
- dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
-
- dhdi->pub.pkt_filter_mode = bcm_strtoul(command, &command, 0);
-}
-
-int
-dhd_set_packet_filter_ports(struct net_device *dev, char *command)
-{
- int i = 0, error = BCME_OK, count = 0, get_count = 0, action = 0;
- uint16 portnum = 0, *ports = NULL, get_ports[WL_PKT_FILTER_PORTS_MAX];
- dhd_info_t *dhdi = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhdi->pub;
- char iovbuf[WLC_IOCTL_SMLEN];
-
- /* get action */
- action = bcm_strtoul(command, &command, 0);
- if (action > PKT_FILTER_PORTS_MAX)
- return BCME_BADARG;
-
- if (action == PKT_FILTER_PORTS_LOOPBACK) {
- /* echo the loopback value if port filter is supported else error */
- bcm_mkiovar("cap", NULL, 0, iovbuf, sizeof(iovbuf));
- error = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
- if (error < 0) {
- DHD_ERROR(("%s: Get Capability failed (error=%d)\n", __FUNCTION__, error));
- return error;
- }
-
- if (strstr(iovbuf, "pktfltr2"))
- return bcm_strtoul(command, &command, 0);
- else {
- DHD_ERROR(("%s: pktfltr2 is not supported\n", __FUNCTION__));
- return BCME_UNSUPPORTED;
- }
- }
-
- if (action == PKT_FILTER_PORTS_CLEAR) {
- /* action 0 is clear all ports */
- dhdp->pkt_filter_ports_count = 0;
- bzero(dhdp->pkt_filter_ports, sizeof(dhdp->pkt_filter_ports));
- }
- else {
- portnum = bcm_strtoul(command, &command, 0);
- if (portnum == 0) {
- /* no ports to add or remove */
- return BCME_BADARG;
- }
-
- /* get configured ports */
- count = dhdp->pkt_filter_ports_count;
- ports = dhdp->pkt_filter_ports;
-
- if (action == PKT_FILTER_PORTS_ADD) {
- /* action 1 is add ports */
-
- /* copy new ports */
- while ((portnum != 0) && (count < WL_PKT_FILTER_PORTS_MAX)) {
- for (i = 0; i < count; i++) {
- /* duplicate port */
- if (portnum == ports[i])
- break;
- }
- if (portnum != ports[i])
- ports[count++] = portnum;
- portnum = bcm_strtoul(command, &command, 0);
- }
- } else if ((action == PKT_FILTER_PORTS_DEL) && (count > 0)) {
- /* action 2 is remove ports */
- bcopy(dhdp->pkt_filter_ports, get_ports, count * sizeof(uint16));
- get_count = count;
-
- while (portnum != 0) {
- count = 0;
- for (i = 0; i < get_count; i++) {
- if (portnum != get_ports[i])
- ports[count++] = get_ports[i];
- }
- get_count = count;
- bcopy(ports, get_ports, count * sizeof(uint16));
- portnum = bcm_strtoul(command, &command, 0);
- }
- }
- dhdp->pkt_filter_ports_count = count;
- }
- return error;
-}
-
-static void
-dhd_enable_packet_filter_ports(dhd_pub_t *dhd, bool enable)
-{
- int error = 0;
- wl_pkt_filter_ports_t *portlist = NULL;
- const uint pkt_filter_ports_buf_len = sizeof("pkt_filter_ports")
- + WL_PKT_FILTER_PORTS_FIXED_LEN + (WL_PKT_FILTER_PORTS_MAX * sizeof(uint16));
- char pkt_filter_ports_buf[pkt_filter_ports_buf_len];
- char iovbuf[pkt_filter_ports_buf_len];
-
- DHD_TRACE(("%s: enable %d, in_suspend %d, mode %d, port count %d\n", __FUNCTION__,
- enable, dhd->in_suspend, dhd->pkt_filter_mode,
- dhd->pkt_filter_ports_count));
-
- bzero(pkt_filter_ports_buf, sizeof(pkt_filter_ports_buf));
- portlist = (wl_pkt_filter_ports_t*)pkt_filter_ports_buf;
- portlist->version = WL_PKT_FILTER_PORTS_VERSION;
- portlist->reserved = 0;
-
- if (enable) {
- if (!(dhd->pkt_filter_mode & PKT_FILTER_MODE_PORTS_ONLY))
- return;
-
- /* enable port filter */
- dhd_master_mode |= PKT_FILTER_MODE_PORTS_ONLY;
- if (dhd->pkt_filter_mode & PKT_FILTER_MODE_FORWARD_ON_MATCH)
- /* whitelist mode: FORWARD_ON_MATCH */
- dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
- else
- /* blacklist mode: DISCARD_ON_MATCH */
- dhd_master_mode &= ~PKT_FILTER_MODE_FORWARD_ON_MATCH;
-
- portlist->count = dhd->pkt_filter_ports_count;
- bcopy(dhd->pkt_filter_ports, portlist->ports,
- dhd->pkt_filter_ports_count * sizeof(uint16));
- } else {
- /* disable port filter */
- portlist->count = 0;
- dhd_master_mode &= ~PKT_FILTER_MODE_PORTS_ONLY;
- dhd_master_mode |= PKT_FILTER_MODE_FORWARD_ON_MATCH;
- }
-
- DHD_INFO(("%s: update: mode %d, port count %d\n", __FUNCTION__, dhd_master_mode,
- portlist->count));
-
- /* update ports */
- bcm_mkiovar("pkt_filter_ports",
- (char*)portlist,
- (WL_PKT_FILTER_PORTS_FIXED_LEN + (portlist->count * sizeof(uint16))),
- iovbuf, sizeof(iovbuf));
- error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- if (error < 0)
- DHD_ERROR(("%s: set pkt_filter_ports failed %d\n", __FUNCTION__, error));
-
- /* update mode */
- bcm_mkiovar("pkt_filter_mode", (char*)&dhd_master_mode,
- sizeof(dhd_master_mode), iovbuf, sizeof(iovbuf));
- error = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- if (error < 0)
- DHD_ERROR(("%s: set pkt_filter_mode failed %d\n", __FUNCTION__, error));
-
- return;
-}
-#endif /* PKT_FILTER_SUPPORT */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
void dhd_set_packet_filter(dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
#ifdef PKT_FILTER_SUPPORT
int i;
- DHD_TRACE(("%s: enter, value = %d\n", __FUNCTION__, value));
-
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
- dhd_enable_packet_filter_ports(dhd, value);
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+ DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
+ if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
+ DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
+ return;
+ }
/* 1 - Enable packet filter, only allow unicast packet to send up */
/* 0 - Disable packet filter */
if (dhd_pkt_filter_enable && (!value ||
#ifndef SUPPORT_PM2_ONLY
int power_mode = PM_MAX;
#endif /* SUPPORT_PM2_ONLY */
+#ifdef SUPPORT_SENSORHUB
+ uint32 shub_msreq;
+#endif /* SUPPORT_SENSORHUB */
/* wl_pkt_filter_enable_t enable_parm; */
char iovbuf[32];
int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ int bcn_timeout = 0;
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ int roam_time_thresh = 0; /* (ms) */
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
uint roamvar = dhd->conf->roam_off_suspend;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ int bcn_li_bcn;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
uint nd_ra_filter = 0;
int ret = 0;
+#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef PASS_ALL_MCAST_PKTS
+ struct dhd_info *dhdinfo;
+ uint32 allmulti;
+ uint i;
+#endif /* PASS_ALL_MCAST_PKTS */
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+ int intr_width = 0;
+#endif /* CUSTOM_INTR_WIDTH */
+#endif /* DYNAMIC_SWOOB_DURATION */
if (!dhd)
return -ENODEV;
+#ifdef PASS_ALL_MCAST_PKTS
+ dhdinfo = dhd->info;
+#endif /* PASS_ALL_MCAST_PKTS */
+
DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
__FUNCTION__, value, dhd->in_suspend));
/* Kernel suspended */
DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
+#ifdef SUPPORT_SENSORHUB
+ shub_msreq = 1;
+ if (dhd->info->shub_enable == 1) {
+ bcm_mkiovar("shub_msreq", (char *)&shub_msreq, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Sensor Hub move/stop start: failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* SUPPORT_SENSORHUB */
+
#ifndef SUPPORT_PM2_ONLY
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
- /* Enable packet filter, only allow unicast packet to send up */
+#ifdef PKT_FILTER_SUPPORT
+			/* Enable packet filter;
+			 * only allow unicast packets to be sent up
+			 */
dhd_enable_packet_filter(1, dhd);
+#endif /* PKT_FILTER_SUPPORT */
+
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 0;
+ bcm_mkiovar("allmulti", (char *)&allmulti, 4,
+ iovbuf, sizeof(iovbuf));
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, i);
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
/* If DTIM skip is set up as default, force it to wake
* each third DTIM for better power savings. Note that
* one side effect is a chance to miss BC/MC packet.
*/
+#ifdef WLTDLS
+			/* Do not set bcn_li_dtim in WFD mode */
+ if (dhd->tdls_mode) {
+ bcn_li_dtim = 0;
+ } else
+#endif /* WLTDLS */
bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
4, iovbuf, sizeof(iovbuf));
TRUE, 0) < 0)
DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
+ bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
/* Disable firmware roaming during suspend */
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ bcn_li_bcn = 0;
+ bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
if (FW_SUPPORTED(dhd, ndoe)) {
/* enable IPv6 RA filter in firmware during suspend */
nd_ra_filter = 1;
DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
ret));
}
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = CUSTOM_INTR_WIDTH;
+ bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
+#endif /* DHD_USE_EARLYSUSPEND */
} else {
#ifdef PKT_FILTER_SUPPORT
dhd->early_suspended = 0;
/* Kernel resumed */
DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__));
+#ifdef SUPPORT_SENSORHUB
+ shub_msreq = 0;
+ if (dhd->info->shub_enable == 1) {
+ bcm_mkiovar("shub_msreq", (char *)&shub_msreq,
+ 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+					DHD_ERROR(("%s Sensor Hub move/stop stop: "
+						"failed %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* SUPPORT_SENSORHUB */
+
+
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = 0;
+ bcm_mkiovar("bus:intr_width", (char *)&intr_width, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
power_mode = PM_FAST;
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
/* disable pkt filter */
dhd_enable_packet_filter(0, dhd);
#endif /* PKT_FILTER_SUPPORT */
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 1;
+ bcm_mkiovar("allmulti", (char *)&allmulti, 4,
+ iovbuf, sizeof(iovbuf));
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, i);
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
/* restore pre-suspend setting for dtim_skip */
bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = 2000;
+ bcm_mkiovar("roam_time_thresh", (char *)&roam_time_thresh,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
roamvar = dhd_roam_disable;
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ bcn_li_bcn = 1;
+ bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn,
+ 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
if (FW_SUPPORTED(dhd, ndoe)) {
/* disable IPv6 RA filter in firmware during suspend */
nd_ra_filter = 0;
DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
ret));
}
+#endif /* DHD_USE_EARLYSUSPEND */
}
}
dhd_suspend_unlock(dhd);
DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
return DHD_BAD_IF;
}
+
while (i < DHD_MAX_IFS) {
if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
return i;
return 0;
while (--i > 0)
- if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
break;
DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
return i; /* default - the primary interface */
}
-int
-dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
-{
- int i = DHD_MAX_IFS;
-
- ASSERT(dhd);
-
- while (--i > 0)
- if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
- break;
-
- DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
-
- return i; /* default - the primary interface */
-}
-
char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
uint buflen;
int ret;
- ASSERT(dhd && dhd->iflist[ifidx]);
+ if (!dhd->iflist[ifidx]) {
+ DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
+ return;
+ }
dev = dhd->iflist[ifidx]->net;
if (!dev)
return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif
+#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt = netdev_mc_count(dev);
#else
cnt = dev->mc_count;
-#endif /* LINUX_VERSION_CODE */
-
+#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif
+#endif /* LINUX >= 2.6.27 */
/* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
+#ifdef PASS_ALL_MCAST_PKTS
+#ifdef PKT_FILTER_SUPPORT
+ if (!dhd->pub.early_suspended)
+#endif /* PKT_FILTER_SUPPORT */
+ allmulti = TRUE;
+#endif /* PASS_ALL_MCAST_PKTS */
+
/* Send down the multicast list first. */
memcpy(bufp, &cnt, sizeof(cnt));
bufp += sizeof(cnt);
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_lock_bh(dev);
-#endif
+#endif /* LINUX >= 2.6.27 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt)
bufp += ETHER_ADDR_LEN;
cnt--;
}
-#else
+#else /* LINUX < 2.6.35 */
for (mclist = dev->mc_list; (mclist && (cnt > 0));
cnt--, mclist = mclist->next) {
memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
bufp += ETHER_ADDR_LEN;
}
-#endif /* LINUX_VERSION_CODE */
-
+#endif /* LINUX >= 2.6.35 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
netif_addr_unlock_bh(dev);
-#endif
+#endif /* LINUX >= 2.6.27 */
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
#endif
+#ifdef DHD_PSTA
+/* Get psta/psr configuration */
+int dhd_get_psta_mode(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return (int)dhd->psta_mode;
+}
+/* Set psta/psr configuration */
+int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd->psta_mode = val;
+ return 0;
+}
+#endif /* DHD_PSTA */
+
static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
struct net_device *ndev;
int ifidx, bssidx;
int ret;
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
struct wireless_dev *vwdev, *primary_wdev;
struct net_device *primary_ndev;
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
bssidx = if_event->event.bssidx;
DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+	/* This path is for the non-Android case. */
+	/* The interface name on the host and in the event message are the same. */
+	/* The name from the event message is used to create the dongle interface list on the host. */
ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
- if_event->mac, bssidx, TRUE);
+ if_event->mac, bssidx, TRUE, if_event->name);
if (!ndev) {
DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
goto done;
}
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
if (unlikely(!vwdev)) {
- WL_ERR(("Could not allocate wireless device\n"));
+ DHD_ERROR(("Could not allocate wireless device\n"));
goto done;
}
primary_ndev = dhd->pub.info->iflist[0]->net;
}
#ifdef PCIE_FULL_DONGLE
/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
- if (FW_SUPPORTED((&dhd->pub), ap) && !(DHD_IF_ROLE_STA(if_event->event.role))) {
+ if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
char iovbuf[WLC_IOCTL_SMLEN];
uint32 var_int = 1;
}
}
#endif /* PCIE_FULL_DONGLE */
+
done:
MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
ifidx = if_event->event.ifidx;
DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+ DHD_PERIM_UNLOCK(&dhd->pub);
dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ DHD_PERIM_LOCK(&dhd->pub);
MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
#if defined(DHD_TX_DUMP)
void
-dhd_tx_dump(osl_t *osh, void *pkt)
+dhd_tx_dump(struct net_device *ndev, osl_t *osh, void *pkt)
{
uint8 *dump_data;
uint16 protocol;
- struct ether_header *eh;
+ char *ifname;
dump_data = PKTDATA(osh, pkt);
- eh = (struct ether_header *) dump_data;
- protocol = ntoh16(eh->ether_type);
-
- DHD_ERROR(("TX DUMP - %s\n", _get_packet_type_str(protocol)));
+ protocol = (dump_data[12] << 8) | dump_data[13];
+ ifname = ndev ? ndev->name : "N/A";
+
+ DHD_ERROR(("TX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
if (protocol == ETHER_TYPE_802_1X) {
- DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
- dump_data[14], dump_data[15], dump_data[30]));
+ dhd_dump_eapol_4way_message(ifname, dump_data, TRUE);
}
#if defined(DHD_TX_FULL_DUMP)
datalen = PKTLEN(osh, pkt);
for (i = 0; i < datalen; i++) {
- DHD_ERROR(("%02X ", dump_data[i]));
+ printk("%02X ", dump_data[i]);
if ((i & 15) == 15)
printk("\n");
}
- DHD_ERROR(("\n"));
+ printk("\n");
}
#endif /* DHD_TX_FULL_DUMP */
}
#endif /* DHD_TX_DUMP */
+/* This routine does not support the packet chain feature. It is currently
+ * tested only for the proxy ARP feature.
+ */
+int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
+{
+ struct sk_buff *skb;
+ void *skbhead = NULL;
+ void *skbprev = NULL;
+ dhd_if_t *ifp;
+ ASSERT(!PKTISCHAINED(p));
+ skb = PKTTONATIVE(dhdp->osh, p);
+
+ ifp = dhdp->info->iflist[ifidx];
+ skb->dev = ifp->net;
+#if defined(BCM_GMAC3)
+ /* Forwarder capable interfaces use WOFA based forwarding */
+ if (ifp->fwdh) {
+ struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
+ uint16 * da = (uint16 *)(eh->ether_dhost);
+ wofa_t wofa;
+ ASSERT(ISALIGNED(da, 2));
+
+ wofa = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
+ if (wofa == FWDER_WOFA_INVALID) { /* Unknown MAC address */
+ if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
+ return BCME_OK;
+ }
+ }
+ PKTFRMNATIVE(dhdp->osh, p);
+ PKTFREE(dhdp->osh, p, FALSE);
+ return BCME_OK;
+ }
+#endif /* BCM_GMAC3 */
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx(skb);
+ } else {
+ if (dhdp->info->rxthread_enabled) {
+ if (!skbhead) {
+ skbhead = skb;
+ } else {
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
+ }
+ skbprev = skb;
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+ }
+ }
+
+ if (dhdp->info->rxthread_enabled && skbhead)
+ dhd_sched_rxf(dhdp, skbhead);
+
+ return BCME_OK;
+}
+
int BCMFASTPATH
-dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
int ret = BCME_OK;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh = NULL;
+#ifdef DHD_L2_FILTER
+ dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
+#endif
+#ifdef DHD_8021X_DUMP
+ struct net_device *ndev;
+#endif /* DHD_8021X_DUMP */
/* Reject if down */
if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
/* free the packet here since the caller won't */
- PKTFREE(dhdp->osh, pktbuf, TRUE);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
return -ENODEV;
}
}
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_UNICAST_DHCP
+#ifdef DHD_L2_FILTER
/* if dhcp_unicast is enabled, we need to convert the */
/* broadcast DHCP ACK/REPLY packets to Unicast. */
- if (dhdp->dhcp_unicast) {
- dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
+ if (ifp->dhcp_unicast) {
+ uint8* mac_addr;
+ uint8* ehptr = NULL;
+ int ret;
+ ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
+ if (ret == BCME_OK) {
+			/* If the given MAC address has a valid entry in the sta list,
+			 * copy it into the packet's destination address and return BCME_OK.
+			 */
+ if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
+ ehptr = PKTDATA(dhdp->osh, pktbuf);
+ bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ }
+ }
+ }
+
+ if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+ }
+
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
+
+		/* Drop the packet if the l2 filter has already processed it;
+		 * otherwise continue with the normal path.
+		 */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
}
-#endif /* DHD_UNICAST_DHCP */
+#endif /* DHD_L2_FILTER */
/* Update multicast statistic */
if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
dhdp->tx_multicast++;
if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
atomic_inc(&dhd->pend_8021x_cnt);
+#ifdef DHD_DHCP_DUMP
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ uint16 dump_hex;
+ uint16 source_port;
+ uint16 dest_port;
+ uint16 udp_port_pos;
+ uint8 *ptr8 = (uint8 *)&pktdata[ETHER_HDR_LEN];
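+		/* The low nibble of the first IP header byte is the IHL in 32-bit words; <<2 converts it to bytes */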
+ uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
+ struct net_device *net;
+ char *ifname;
+
+ net = dhd_idx2net(dhdp, ifidx);
+ ifname = net ? net->name : "N/A";
+ udp_port_pos = ETHER_HDR_LEN + ip_header_len;
+ source_port = (pktdata[udp_port_pos] << 8) | pktdata[udp_port_pos+1];
+ dest_port = (pktdata[udp_port_pos+2] << 8) | pktdata[udp_port_pos+3];
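+		/* UDP port 68 (0x44) is the DHCP client port, 67 (0x43) the server port */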
+ if (source_port == 0x0044 || dest_port == 0x0044) {
+ dump_hex = (pktdata[udp_port_pos+249] << 8) |
+ pktdata[udp_port_pos+250];
+ if (dump_hex == 0x0101) {
+ DHD_ERROR(("DHCP[%s] - DISCOVER [TX]", ifname));
+ } else if (dump_hex == 0x0102) {
+ DHD_ERROR(("DHCP[%s] - OFFER [TX]", ifname));
+ } else if (dump_hex == 0x0103) {
+ DHD_ERROR(("DHCP[%s] - REQUEST [TX]", ifname));
+ } else if (dump_hex == 0x0105) {
+ DHD_ERROR(("DHCP[%s] - ACK [TX]", ifname));
+ } else {
+ DHD_ERROR(("DHCP[%s] - 0x%X [TX]", ifname, dump_hex));
+ }
+#ifdef DHD_LOSSLESS_ROAMING
+ if (dhdp->dequeue_prec_map != (uint8)ALLPRIO) {
+ DHD_ERROR(("/%d", dhdp->dequeue_prec_map));
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+ DHD_ERROR(("\n"));
+ } else if (source_port == 0x0043 || dest_port == 0x0043) {
+			DHD_ERROR(("DHCP[%s] - BOOTP [TX]\n", ifname));
+ }
+ }
+#endif /* DHD_DHCP_DUMP */
} else {
- PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
return BCME_ERROR;
}
/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
if (PKTPRIO(pktbuf) == 0)
-#endif
+#endif /* !PKTPRIO_OVERRIDE */
+ {
+#ifdef QOS_MAP_SET
+ pktsetprio_qms(pktbuf, wl_get_up_table(), FALSE);
+#else
pktsetprio(pktbuf, FALSE);
+#endif /* QOS_MAP_SET */
+ }
-#if defined(PCIE_FULL_DONGLE) && !defined(PCIE_TX_DEFERRAL)
+#ifdef PCIE_FULL_DONGLE
/*
	 * Look up the per-interface hash table for a matching flowring. If one is not
* available, allocate a unique flowid and add a flowring entry.
return ret;
}
#endif
+
#if defined(DHD_TX_DUMP)
- dhd_tx_dump(dhdp->osh, pktbuf);
+ ndev = dhd_idx2net(dhdp, ifidx);
+ dhd_tx_dump(ndev, dhdp->osh, pktbuf);
#endif
+	/* terence 20150901: Micky added this to adjust the 802.1X priority */
+	/* Set 802.1X packets to the highest priority (7) */
+ if (dhdp->conf->pktprio8021x >= 0)
+ pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
#ifdef PROP_TXSTATUS
if (dhd_wlfc_is_supported(dhdp)) {
DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
} else
#endif /* PROP_TXSTATUS */
- /* If the protocol uses a data header, apply it */
- dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+ {
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+ }
/* Use bus module to send data frame */
#ifdef WLMEDIA_HTSF
dhd_htsf_addtxts(dhdp, pktbuf);
#endif
-
#ifdef PROP_TXSTATUS
{
if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
return ret;
}
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (dhdp->busstate == DHD_BUS_DOWN ||
+ dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhdp->busstate));
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
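+	/* Mark the bus busy with this send so contexts such as suspend can wait for it to finish */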
+ dhdp->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ ret = -EBUSY;
+ goto exit;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+exit:
+#endif
+ DHD_GENERAL_LOCK(dhdp, flags);
+ dhdp->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ return ret;
+}
+
int BCMFASTPATH
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_if_t *ifp = NULL;
int ifidx;
+ unsigned long flags;
#ifdef WLMEDIA_HTSF
uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
#else
uint8 htsfdlystat_sz = 0;
-#endif
+#endif
#ifdef DHD_WMF
struct ether_header *eh;
uint8 *iph;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef PCIE_FULL_DONGLE
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->pub.dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
+		/* In order to avoid packet loss, return NETDEV_TX_BUSY until runtime resume is done. */
+		/* Stop the network queue temporarily until resume is done. */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ if (!dhdpcie_is_resume_done(&dhd->pub)) {
+ dhd_bus_stop_queue(dhd->pub.bus);
+ }
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+#ifdef PCIE_FULL_DONGLE
+ if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
+#endif /* PCIE_FULL_DONGLE */
+
DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+ DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
/* Reject if down */
- if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
+ if (dhd->pub.hang_was_sent || dhd->pub.busstate == DHD_BUS_DOWN ||
+ dhd->pub.busstate == DHD_BUS_DOWN_IN_PROGRESS) {
DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
netif_stop_queue(net);
/* Send Event when bus down detected during data session */
- if (dhd->pub.up) {
+ if (dhd->pub.up && !dhd->pub.hang_was_sent) {
DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+ dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
net_os_send_hang_message(net);
}
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
ifp = DHD_DEV_IFP(net);
ifidx = DHD_DEV_IFIDX(net);
-
- ASSERT(ifidx == dhd_net2idx(dhd, net));
- ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
+ BUZZZ_LOG(START_XMIT_BGN, 2, (uint32)ifidx, (uintptr)skb);
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
netif_stop_queue(net);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
return NETDEV_TX_BUSY;
#endif
}
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+ ASSERT(ifidx == dhd_net2idx(dhd, net));
+ ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
+
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
/* re-align socket buffer if "skb->data" is odd address */
if (((unsigned long)(skb->data)) & 0x1) {
datalen = PKTLEN(dhd->pub.osh, skb);
/* Make sure there's enough room for any header */
-
if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
struct sk_buff *skb2;
dhd_ifname(&dhd->pub, ifidx)));
dhd->pub.tx_realloc++;
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
dev_kfree_skb(skb);
ret = -ENOMEM;
goto done;
}
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
}
/* Convert to packet */
if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
dhd_ifname(&dhd->pub, ifidx)));
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
dev_kfree_skb_any(skb);
ret = -ENOMEM;
goto done;
}
-#ifdef WLMEDIA_HTSF
+
+#if defined(WLMEDIA_HTSF)
if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
struct ether_header *eh = (struct ether_header *)pktdata;
eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
}
}
-#endif
+#endif
+
#ifdef DHD_WMF
eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
iph = (uint8 *)eh + ETHER_HDR_LEN;
#endif /* DHD_IGMP_UCQUERY */
if (ucast_convert) {
dhd_sta_t *sta;
+#ifdef PCIE_FULL_DONGLE
unsigned long flags;
+#endif
+ struct list_head snapshot_list;
+ struct list_head *wmf_ucforward_list;
+
+ ret = NETDEV_TX_OK;
- DHD_IF_STA_LIST_LOCK(ifp, flags);
+		/* For non-BCM_GMAC3 platforms we need a snapshot of sta_list to
+		 * resolve the double DHD_IF_STA_LIST_LOCK call deadlock issue.
+		 */
+ wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
/* Convert upnp/igmp query to unicast for each assoc STA */
- list_for_each_entry(sta, &ifp->sta_list, list) {
+ list_for_each_entry(sta, wmf_ucforward_list, list) {
if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return (WMF_NOP);
+ ret = WMF_NOP;
+ break;
}
dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
}
+ DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+#ifdef PCIE_FULL_DONGLE
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
- PKTFREE(dhd->pub.osh, pktbuf, TRUE);
- return NETDEV_TX_OK;
+ if (ret == NETDEV_TX_OK)
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+
+ return ret;
} else
#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
{
/* Either taken by WMF or we should drop it.
* Exiting send path
*/
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+#ifdef PCIE_FULL_DONGLE
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return NETDEV_TX_OK;
default:
}
}
#endif /* DHD_WMF */
+#ifdef DHD_PSTA
+	/* PSR-related packet protocol manipulation should be done in DHD,
+	 * since the dongle doesn't have the complete payload.
+	 */
+ if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
+ ifidx, &pktbuf, TRUE) < 0)) {
+ DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
+ dhd_ifname(&dhd->pub, ifidx)));
+ }
+#endif /* DHD_PSTA */
#ifdef DHDTCPACK_SUPPRESS
if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
/* If this packet has been hold or got freed, just return */
- if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx))
- return 0;
+ if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
+ ret = 0;
+ goto done;
+ }
} else {
/* If this packet has replaced another packet and got freed, just return */
- if (dhd_tcpack_suppress(&dhd->pub, pktbuf))
- return 0;
+ if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
+ ret = 0;
+ goto done;
+ }
}
#endif /* DHDTCPACK_SUPPRESS */
- ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+ /* no segmented SKB support (Kernel-3.18.y) */
+ if ((PKTLINK(skb) != NULL) && (PKTLINK(skb) == skb)) {
+ PKTSETLINK(skb, NULL);
+ }
+
+ ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
done:
if (ret) {
ifp->stats.tx_dropped++;
dhd->pub.tx_dropped++;
- }
- else {
+ } else {
#ifdef PROP_TXSTATUS
/* tx_packets counter can counted only when wlfc is disabled */
}
}
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+#ifdef PCIE_FULL_DONGLE
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->pub.dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX;
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#endif /* PCIE_FULL_DONGLE */
+
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ BUZZZ_LOG(START_XMIT_END, 0);
/* Return ok: we always eat the packet */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
ASSERT(dhd);
+#ifdef DHD_LOSSLESS_ROAMING
+ /* block flowcontrol during roaming */
+ if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
+ return;
+ }
+#endif
+
if (ifidx == ALL_INTERFACES) {
/* Flow control on all active interfaces */
dhdp->txoff = state;
netif_wake_queue(net);
}
}
- }
- else {
+ } else {
if (dhd->iflist[ifidx]) {
net = dhd->iflist[ifidx]->net;
if (state == ON)
}
#endif /* DHD_WMF */
+/** Called when a frame is received by the dongle on interface 'ifidx' */
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
int tout_ctrl = 0;
void *skbhead = NULL;
void *skbprev = NULL;
-#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
char *dump_data;
uint16 protocol;
-#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+ char *ifname;
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
struct ether_header *eh;
-#ifdef WLBTAMP
- struct dot11_llc_snap_header *lsh;
-#endif
pnext = PKTNEXT(dhdp->osh, pktbuf);
PKTSETNEXT(dhdp->osh, pktbuf, NULL);
continue;
}
-#ifdef WLBTAMP
- lsh = (struct dot11_llc_snap_header *)&eh[1];
-
- if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
- (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
- bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
- lsh->type == HTON16(BTA_PROT_L2CAP)) {
- amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
- ((uint8 *)eh + RFC1042_HDR_LEN);
- ACL_data = NULL;
- }
-#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
#endif
#ifdef DHD_L2_FILTER
/* If block_ping is enabled drop the ping packet */
- if (dhdp->block_ping) {
- if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
- PKTFREE(dhdp->osh, pktbuf, FALSE);
+ if (ifp->block_ping) {
+ if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
}
-#endif
+ if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
+
+			/* Drop the packet if the l2 filter has already processed it;
+			 * otherwise continue with the normal path.
+			 */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+ }
+#endif /* DHD_L2_FILTER */
#ifdef DHD_WMF
/* WMF processing for multicast packets */
if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
}
}
#endif /* DHD_WMF */
+
#ifdef DHDTCPACK_SUPPRESS
dhd_tcpdata_info_get(dhdp, pktbuf);
-#endif
- skb = PKTTONATIVE(dhdp->osh, pktbuf);
-
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL)
- ifp = dhd->iflist[0];
+#endif
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
ASSERT(ifp);
skb->dev = ifp->net;
+#ifdef DHD_PSTA
+ if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
+ DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
+ dhd_ifname(dhdp, ifidx)));
+ }
+#endif /* DHD_PSTA */
+
#ifdef PCIE_FULL_DONGLE
if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
(!ifp->ap_isolate)) {
}
} else {
void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
- dhd_sendpkt(dhdp, ifidx, npktbuf);
+ if (npktbuf)
+ dhd_sendpkt(dhdp, ifidx, npktbuf);
}
}
#endif /* PCIE_FULL_DONGLE */
eth = skb->data;
len = skb->len;
-#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP) || defined(DHD_DHCP_DUMP)
dump_data = skb->data;
protocol = (dump_data[12] << 8) | dump_data[13];
-
+ ifname = skb->dev ? skb->dev->name : "N/A";
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP || DHD_DHCP_DUMP */
+#ifdef DHD_8021X_DUMP
if (protocol == ETHER_TYPE_802_1X) {
- DHD_ERROR(("ETHER_TYPE_802_1X [RX]: "
- "ver %d, type %d, replay %d\n",
- dump_data[14], dump_data[15],
- dump_data[30]));
+ dhd_dump_eapol_4way_message(ifname, dump_data, FALSE);
+ }
+#endif /* DHD_8021X_DUMP */
+#ifdef DHD_DHCP_DUMP
+ if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
+ uint16 dump_hex;
+ uint16 source_port;
+ uint16 dest_port;
+ uint16 udp_port_pos;
+ uint8 *ptr8 = (uint8 *)&dump_data[ETHER_HDR_LEN];
+ uint8 ip_header_len = (*ptr8 & 0x0f)<<2;
+
+ udp_port_pos = ETHER_HDR_LEN + ip_header_len;
+ source_port = (dump_data[udp_port_pos] << 8) | dump_data[udp_port_pos+1];
+ dest_port = (dump_data[udp_port_pos+2] << 8) | dump_data[udp_port_pos+3];
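+			/* Offsets 249/250 from the UDP header hold the length and value of DHCP
+			 * option 53 (message type), assuming it is the first option after the magic cookie
+			 */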
+ if (source_port == 0x0044 || dest_port == 0x0044) {
+ dump_hex = (dump_data[udp_port_pos+249] << 8) |
+ dump_data[udp_port_pos+250];
+ if (dump_hex == 0x0101) {
+ DHD_ERROR(("DHCP[%s] - DISCOVER [RX]\n", ifname));
+ } else if (dump_hex == 0x0102) {
+ DHD_ERROR(("DHCP[%s] - OFFER [RX]\n", ifname));
+ } else if (dump_hex == 0x0103) {
+ DHD_ERROR(("DHCP[%s] - REQUEST [RX]\n", ifname));
+ } else if (dump_hex == 0x0105) {
+ DHD_ERROR(("DHCP[%s] - ACK [RX]\n", ifname));
+ } else {
+ DHD_ERROR(("DHCP[%s] - 0x%X [RX]\n", ifname, dump_hex));
+ }
+ } else if (source_port == 0x0043 || dest_port == 0x0043) {
+ DHD_ERROR(("DHCP[%s] - BOOTP [RX]\n", ifname));
+ }
}
-#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+#endif /* DHD_DHCP_DUMP */
#if defined(DHD_RX_DUMP)
- DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
+ DHD_ERROR(("RX DUMP[%s] - %s\n", ifname, _get_packet_type_str(protocol)));
if (protocol != ETHER_TYPE_BRCM) {
if (dump_data[0] == 0xFF) {
DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
{
int k;
for (k = 0; k < skb->len; k++) {
- DHD_ERROR(("%02X ", dump_data[k]));
+ printk("%02X ", dump_data[k]);
if ((k & 15) == 15)
- DHD_ERROR(("\n"));
+ printk("\n");
}
- DHD_ERROR(("\n"));
+ printk("\n");
}
#endif /* DHD_RX_FULL_DUMP */
}
wl_event_to_host_order(&event);
if (!tout_ctrl)
tout_ctrl = DHD_PACKET_TIMEOUT_MS;
-#ifdef WLBTAMP
- if (event.event_type == WLC_E_BTA_HCI_EVENT) {
- dhd_bta_doevt(dhdp, data, event.datalen);
- }
-#endif /* WLBTAMP */
#if defined(PNO_SUPPORT)
if (event.event_type == WLC_E_PFN_NET_FOUND) {
#endif /* PNO_SUPPORT */
#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
continue;
#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
} else {
ifp->stats.rx_bytes += skb->len;
ifp->stats.rx_packets++;
}
-#if defined(DHD_TCP_WINSIZE_ADJUST)
- if (dhd_use_tcp_window_size_adjust) {
- if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
- dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
- }
- }
-#endif /* DHD_TCP_WINSIZE_ADJUST */
if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+ netif_receive_skb(skb);
+#else
netif_rx(skb);
+#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
} else {
if (dhd->rxthread_enabled) {
if (!skbhead)
* by netif_rx_ni(), but in earlier kernels, we need
* to do it manually.
*/
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_receive_skb(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx_ni(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
#else
ulong flags;
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
netif_rx(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
local_irq_save(flags);
RAISE_RX_SOFTIRQ();
local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+#endif /* !defined(DHD_LB) && !defined(DHD_LB_RXP) */
}
}
}
DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
+ DHD_OS_WAKE_LOCK_TIMEOUT(dhdp);
}
void
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh;
uint16 type;
-#ifdef WLBTAMP
- uint len;
-#endif
dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
type = ntoh16(eh->ether_type);
- if (type == ETHER_TYPE_802_1X)
+ if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0))
atomic_dec(&dhd->pend_8021x_cnt);
-#ifdef WLBTAMP
- /* Crack open the packet and check to see if it is BT HCI ACL data packet.
- * If yes generate packet completion event.
- */
- len = PKTLEN(dhdp->osh, txp);
-
- /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */
- if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) {
- struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1];
-
- if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
- ntoh16(lsh->type) == BTA_PROT_L2CAP) {
-
- dhd_bta_tx_hcidata_complete(dhdp, txp, success);
- }
- }
-#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
uint datalen = PKTLEN(dhd->pub.osh, txp);
-
- if (success) {
- dhd->pub.tx_packets++;
- ifp->stats.tx_packets++;
- ifp->stats.tx_bytes += datalen;
- } else {
- ifp->stats.tx_dropped++;
+ if (ifp != NULL) {
+ if (success) {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ } else {
+ ifp->stats.tx_dropped++;
+ }
}
}
#endif
setScheduler(current, SCHED_FIFO, ¶m);
}
- while (1)
+ while (1) {
if (down_interruptible (&tsk->sema) == 0) {
unsigned long flags;
unsigned long jiffies_at_start = jiffies;
unsigned long time_lapse;
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
SMP_RD_BARRIER_DEPENDS();
if (tsk->terminated) {
break;
if (dhd->pub.dongle_reset == FALSE) {
DHD_TIMER(("%s:\n", __FUNCTION__));
-
- /* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
-
DHD_GENERAL_LOCK(&dhd->pub, flags);
/* Count the tick for reference */
dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
time_lapse = jiffies - jiffies_at_start;
/* Reschedule the watchdog */
- if (dhd->wd_timer_valid)
+ if (dhd->wd_timer_valid) {
mod_timer(&dhd->timer,
jiffies +
msecs_to_jiffies(dhd_watchdog_ms) -
min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
}
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
} else {
break;
+ }
}
complete_and_exit(&tsk->completed, 0);
return;
}
+ if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s wd while suspend in progress \n", __FUNCTION__));
+ return;
+ }
+
if (dhd->thr_wdt_ctl.thr_pid >= 0) {
up(&dhd->thr_wdt_ctl.sema);
return;
}
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
/* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
-
DHD_GENERAL_LOCK(&dhd->pub, flags);
/* Count the tick for reference */
dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
/* Reschedule the watchdog */
if (dhd->wd_timer_valid)
mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+
+ while (1) {
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+ unsigned long jiffies_at_start = jiffies;
+ unsigned long time_lapse;
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ if (dhd->pub.up) {
+ dhd_runtimepm_state(&dhd->pub);
+ }
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ time_lapse = jiffies - jiffies_at_start;
+
+				/* Reschedule the runtime PM timer */
+ if (dhd->rpm_timer_valid) {
+ mod_timer(&dhd->rpm_timer,
+ jiffies +
+ msecs_to_jiffies(dhd_runtimepm_ms) -
+ min(msecs_to_jiffies(dhd_runtimepm_ms),
+ time_lapse));
+ }
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+ } else {
+ break;
+ }
+ }
+
+ complete_and_exit(&tsk->completed, 0);
+}
+static void dhd_runtimepm(ulong data)
+{
+ dhd_info_t *dhd = (dhd_info_t *)data;
+
+ if (dhd->pub.dongle_reset) {
+ return;
+ }
+
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rpm_ctl.sema);
+ return;
+ }
+}
+
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
+{
+ dhd_os_runtimepm_timer(dhdp, 0);
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+ DHD_ERROR(("DHD Runtime PM Disabled \n"));
}
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
+{
+ dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
+ DHD_ERROR(("DHD Runtime PM Enabled \n"));
+}
+
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
#ifdef ENABLE_ADAPTIVE_SCHED
static void
dhd_sched_policy(int prio)
/* Call bus dpc unless it indicated down (then clean stop) */
if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
dhd_os_wd_timer_extend(&dhd->pub, TRUE);
while (dhd_bus_dpc(dhd->pub.bus)) {
/* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ resched_cnt++;
+ if (resched_cnt > MAX_RESCHED_CNT) {
+				DHD_INFO(("%s Calling msleep to "
+					"let other processes run.\n",
+ __FUNCTION__));
+ dhd->pub.dhd_bug_on = true;
+ resched_cnt = 0;
+ OSL_SLEEP(1);
+ }
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
}
dhd_os_wd_timer_extend(&dhd->pub, FALSE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
-
} else {
if (dhd->pub.up)
dhd_bus_stop(dhd->pub.bus, TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
}
- }
- else
+ } else {
break;
+ }
}
complete_and_exit(&tsk->completed, 0);
}
while (skb) {
void *skbnext = PKTNEXT(pub->osh, skb);
PKTSETNEXT(pub->osh, skb, NULL);
-
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
#else
#endif
DHD_OS_WAKE_UNLOCK(pub);
- }
- else
+ } else {
break;
+ }
}
complete_and_exit(&tsk->completed, 0);
}
#ifdef BCMPCIE
-void dhd_dpc_kill(dhd_pub_t *dhdp)
+void dhd_dpc_enable(dhd_pub_t *dhdp)
{
dhd_info_t *dhd;
- if (!dhdp)
+ if (!dhdp || !dhdp->info)
+ return;
+ dhd = dhdp->info;
+
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
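+	/* A tasklet's count is its disable depth; re-enable only when it was left disabled exactly once */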
+#ifdef DHD_LB_TXC
+ if (atomic_read(&dhd->tx_compl_tasklet.count) == 1)
+ tasklet_enable(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ if (atomic_read(&dhd->rx_compl_tasklet.count) == 1)
+ tasklet_enable(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
+ if (atomic_read(&dhd->tasklet.count) == 1)
+ tasklet_enable(&dhd->tasklet);
+}
+#endif /* BCMPCIE */
+
+
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ if (!dhdp) {
return;
+ }
dhd = dhdp->info;
- if (!dhd)
+ if (!dhd) {
return;
+ }
- tasklet_kill(&dhd->tasklet);
- DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ tasklet_disable(&dhd->tasklet);
+ tasklet_kill(&dhd->tasklet);
+ DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+ }
+#if defined(DHD_LB)
+#ifdef DHD_LB_RXP
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+ /* Kill the Load Balancing Tasklets */
+#if defined(DHD_LB_TXC)
+ tasklet_disable(&dhd->tx_compl_tasklet);
+ tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+ tasklet_disable(&dhd->rx_compl_tasklet);
+ tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
}
#endif /* BCMPCIE */
*/
/* Call bus dpc unless it indicated down (then clean stop) */
if (dhd->pub.busstate != DHD_BUS_DOWN) {
- if (dhd_bus_dpc(dhd->pub.bus))
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
tasklet_schedule(&dhd->tasklet);
- else
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
} else {
dhd_bus_stop(dhd->pub.bus, TRUE);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
}
}
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- DHD_OS_WAKE_LOCK(dhdp);
if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(dhdp);
/* If the semaphore does not get up,
* wake unlock should be done here
*/
- if (!binary_sema_up(&dhd->thr_dpc_ctl))
+ if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
DHD_OS_WAKE_UNLOCK(dhdp);
+ }
return;
} else {
tasklet_schedule(&dhd->tasklet);
while (skbp) {
void *skbnext = PKTNEXT(dhdp->osh, skbp);
PKTSETNEXT(dhdp->osh, skbp, NULL);
+		bcm_object_trace_opr(skbp, BCM_OBJDBG_REMOVE,
+			__FUNCTION__, __LINE__);
netif_rx_ni(skbp);
skbp = skbnext;
}
DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
- }
- else {
+ } else {
if (dhd->thr_rxf_ctl.thr_pid >= 0) {
up(&dhd->thr_rxf_ctl.sema);
}
#endif /* RXF_DEQUEUE_ON_BUSY */
}
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
#ifdef TOE
/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
static int
}
#endif /* TOE */
-#if defined(WL_CFG80211)
+#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
void dhd_set_scb_probe(dhd_pub_t *dhd)
{
-#define NUM_SCB_MAX_PROBE 3
int ret = 0;
wl_scb_probe_t scb_probe;
- char iovbuf[WL_EVENTING_MASK_LEN + 12];
+ char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
return;
+ }
bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+ }
memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
bcm_mkiovar("scb_probe", (char *)&scb_probe,
sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
-#undef NUM_SCB_MAX_PROBE
- return;
+ return;
+ }
}
-#endif /* WL_CFG80211 */
+#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
static void
}
#endif
-#ifdef CONFIG_MACH_UNIVERSAL5433
- /* old revision does not send hang message */
- if ((check_rev() && (error == -ETIMEDOUT)) || (error == -EREMOTEIO) ||
-#else
if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+#ifdef BCMPCIE
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
+ __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
+ dhdp->d3ackcnt_timeout, error, dhdp->busstate));
+#else
DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+#endif /* BCMPCIE */
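+		/* Record why the HANG is reported: a dongle trap has priority,
+		 * then a D3 ACK timeout (PCIe only), otherwise an IOCTL
+		 * response timeout.
+		 */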
+ if (dhdp->hang_reason == 0) {
+ if (dhdp->dongle_trap_occured) {
+ dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+#ifdef BCMPCIE
+ } else if (dhdp->d3ackcnt_timeout) {
+ dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
+#endif /* BCMPCIE */
+ } else {
+ dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
+ }
+ }
net_os_send_hang_message(net);
return TRUE;
}
}
/* send to dongle (must be up, and wl). */
- if (pub->busstate != DHD_BUS_DATA) {
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
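+	/* If firmware download was deferred (allow_delay_fwdl), bring the bus
+	 * up now instead of failing the ioctl with BCME_DONGLE_DOWN.
+	 */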
+ if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
+ if (allow_delay_fwdl) {
+ int ret = dhd_bus_start(pub);
+ if (ret != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ } else {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
}
if (!pub->iswl) {
#endif
goto done;
}
+
+#ifdef DHD_DEBUG
+ if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
+ if (ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) {
+ /* Print IOVAR Information */
+ DHD_IOV_INFO(("%s: IOVAR_INFO name = %s set = %d\n",
+ __FUNCTION__, (char *)data_buf, ioc->set));
+ if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
+ prhex(NULL, data_buf + strlen(data_buf) + 1,
+ buflen - strlen(data_buf) - 1);
+ }
+ } else {
+ /* Print IOCTL Information */
+ DHD_IOV_INFO(("%s: IOCTL_INFO cmd = %d set = %d\n",
+ __FUNCTION__, ioc->cmd, ioc->set));
+ if ((dhd_msg_level & DHD_IOV_INFO_VAL) && ioc->set && data_buf) {
+ prhex(NULL, data_buf, buflen);
+ }
+ }
+ }
+#endif /* DHD_DEBUG */
+
bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
done:
{
dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_ioctl_t ioc;
- int bcmerror = 0;
int ifidx;
int ret;
void *local_buf = NULL;
DHD_PERIM_LOCK(&dhd->pub);
/* Interface up check for built-in type */
- if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return BCME_NOTUP;
+ ret = BCME_NOTUP;
+ goto exit;
}
/* send to dongle only if we are not waiting for reload already */
if (dhd->pub.hang_was_sent) {
DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return OSL_ERROR(BCME_DONGLE_DOWN);
+ ret = BCME_DONGLE_DOWN;
+ goto exit;
}
ifidx = dhd_net2idx(dhd, net);
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return -1;
+ ret = -1;
+ goto exit;
}
#if defined(WL_WIRELESS_EXT)
if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
/* may recurse, do NOT lock */
ret = wl_iw_ioctl(net, ifr, cmd);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
+ goto exit;
}
#endif /* defined(WL_WIRELESS_EXT) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
if (cmd == SIOCETHTOOL) {
ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
+ goto exit;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
if (cmd == SIOCDEVPRIVATE+1) {
ret = wl_android_priv_cmd(net, ifr, cmd);
dhd_check_hang(net, &dhd->pub, ret);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
+ goto exit;
}
if (cmd != SIOCDEVPRIVATE) {
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto exit;
}
memset(&ioc, 0, sizeof(ioc));
if (is_compat_task()) {
compat_wl_ioctl_t compat_ioc;
if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
+ ret = BCME_BADADDR;
goto done;
}
ioc.cmd = compat_ioc.cmd;
/* To differentiate between wl and dhd read 4 more byes */
if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
+ ret = BCME_BADADDR;
goto done;
}
} else
{
/* Copy the ioc control structure part of ioctl request */
if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
+ ret = BCME_BADADDR;
goto done;
}
/* To differentiate between wl and dhd read 4 more byes */
if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
+ ret = BCME_BADADDR;
goto done;
}
}
if (!capable(CAP_NET_ADMIN)) {
- bcmerror = BCME_EPERM;
+ ret = BCME_EPERM;
goto done;
}
if (ioc.len > 0) {
buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
- bcmerror = BCME_NOMEM;
+ ret = BCME_NOMEM;
goto done;
}
DHD_PERIM_UNLOCK(&dhd->pub);
if (copy_from_user(local_buf, ioc.buf, buflen)) {
DHD_PERIM_LOCK(&dhd->pub);
- bcmerror = BCME_BADADDR;
+ ret = BCME_BADADDR;
goto done;
}
DHD_PERIM_LOCK(&dhd->pub);
*(char *)(local_buf + buflen) = '\0';
}
- bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+ ret = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
- if (!bcmerror && buflen && local_buf && ioc.buf) {
+ if (!ret && buflen && local_buf && ioc.buf) {
DHD_PERIM_UNLOCK(&dhd->pub);
if (copy_to_user(ioc.buf, local_buf, buflen))
- bcmerror = -EFAULT;
+ ret = -EFAULT;
DHD_PERIM_LOCK(&dhd->pub);
}
if (local_buf)
MFREE(dhd->pub.osh, local_buf, buflen+1);
+exit:
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return OSL_ERROR(bcmerror);
+ return OSL_ERROR(ret);
+}
+
+
+#ifdef FIX_CPU_MIN_CLOCK
+static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
+{
+ if (dhd) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_init(&dhd->cpufreq_fix);
+#endif
+ dhd->cpufreq_fix_status = FALSE;
+ }
+ return 0;
+}
+
+static void dhd_fix_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_lock(&dhd->cpufreq_fix);
+#endif
+ if (dhd && !dhd->cpufreq_fix_status) {
+ pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
+#endif /* FIX_BUS_MIN_CLOCK */
+ DHD_ERROR(("pm_qos_add_requests called\n"));
+
+ dhd->cpufreq_fix_status = TRUE;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+
+static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+ if (dhd && dhd->cpufreq_fix_status != TRUE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
+ return;
+ }
+
+ pm_qos_remove_request(&dhd->dhd_cpu_qos);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_remove_request(&dhd->dhd_bus_qos);
+#endif /* FIX_BUS_MIN_CLOCK */
+	DHD_ERROR(("pm_qos_remove_requests called\n"));
+
+ dhd->cpufreq_fix_status = FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
}
+#endif /* FIX_CPU_MIN_CLOCK */
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
int dhd_deepsleep(dhd_info_t *dhd, int flag)
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
printf("%s: Enter %p\n", __FUNCTION__, net);
+ dhd->pub.rxcnt_timeout = 0;
+ dhd->pub.txcnt_timeout = 0;
+
+#ifdef BCMPCIE
+ dhd->pub.d3ackcnt_timeout = 0;
+#endif /* BCMPCIE */
+
if (dhd->pub.up == 0) {
goto exit;
}
dhd_if_flush_sta(DHD_DEV_IFP(net));
+ /* Disable Runtime PM before interface down */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
+ dhd_rollback_cpu_freq(dhd);
+#endif /* FIX_CPU_MIN_CLOCK */
ifidx = dhd_net2idx(dhd, net);
BCM_REFERENCE(ifidx);
#ifdef WL_CFG80211
if (ifidx == 0) {
+ dhd_if_t *ifp;
wl_cfg80211_down(NULL);
+ ifp = dhd->iflist[0];
+ ASSERT(ifp && ifp->net);
/*
* For CFG80211: Clean up all the left over virtual interfaces
* when the primary Interface is brought down. [ifconfig wlan0 down]
(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
int i;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ wl_cfg80211_del_p2p_wdev();
+#endif /* WL_CFG80211_P2P_DEV_IF */
+
dhd_net_if_lock_local(dhd);
for (i = 1; i < DHD_MAX_IFS; i++)
dhd_remove_if(&dhd->pub, i, FALSE);
+
+ if (ifp && ifp->net) {
+ dhd_if_del_sta_list(ifp);
+ }
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
dhd_net_if_unlock_local(dhd);
}
+ cancel_work_sync(dhd->dhd_deferred_wq);
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB && DHD_LB_RXP */
}
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+ if (ifp->net == dhd->rx_napi_netdev) {
+ DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+ skb_queue_purge(&dhd->rx_napi_queue);
+ napi_disable(&dhd->rx_napi_struct);
+ netif_napi_del(&dhd->rx_napi_struct);
+ dhd->rx_napi_netdev = NULL;
+ }
+#endif /* DHD_LB && DHD_LB_RXP */
+
}
#endif /* WL_CFG80211 */
OLD_MOD_DEC_USE_COUNT;
exit:
if (ifidx == 0 && !dhd_download_fw_on_driverload)
- wl_android_wifi_off(net);
+ wl_android_wifi_off(net, TRUE);
else {
if (dhd->pub.conf->deepsleep)
dhd_deepsleep(dhd, 1);
}
- dhd->pub.rxcnt_timeout = 0;
- dhd->pub.txcnt_timeout = 0;
-
dhd->pub.hang_was_sent = 0;
/* Clear country spec for for built-in type driver */
dhd->pub.dhd_cspec.ccode[0] = 0x00;
}
- printf("%s: Exit\n", __FUNCTION__);
+#ifdef BCMDBGFS
+ dhd_dbg_remove();
+#endif
+
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ /* Destroy wakelock */
+ if (!dhd_download_fw_on_driverload &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ }
+ printf("%s: Exit\n", __FUNCTION__);
+
return 0;
}
int ret = BCME_OK;
bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (ret < 0) {
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
}
/* basic capabilities for HS20 REL2 */
uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
- iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set wnm returned (%d)\n", __FUNCTION__, ret));
}
}
#ifdef TOE
uint32 toe_ol;
#endif
+#ifdef BCM_FD_AGGR
+ char iovbuf[WLC_IOCTL_SMLEN];
+ dbus_config_t config;
+ uint32 agglimit = 0;
+ uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
+#endif /* BCM_FD_AGGR */
int ifidx;
int32 ret = 0;
+ if (!dhd_download_fw_on_driverload && !dhd_driver_init_done) {
+ DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
+ return -1;
+ }
+
printf("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
mutex_lock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */
+ /* Init wakelock */
+ if (!dhd_download_fw_on_driverload &&
+ !(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_OS_WAKE_LOCK_INIT(dhd);
+ dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+ }
+
+#ifdef PREVENT_REOPEN_DURING_HANG
+ /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
+ if (dhd->pub.hang_was_sent == 1) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ /* Force to bring down WLAN interface in case dhd_stop() is not called
+ * from the upper layer when HANG event is triggered.
+ */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
+ DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
+ dhd_stop(net);
+ } else {
+ return -1;
+ }
+ }
+#endif /* PREVENT_REOPEN_DURING_HANG */
+
DHD_OS_WAKE_LOCK(&dhd->pub);
DHD_PERIM_LOCK(&dhd->pub);
dhd->pub.dongle_trap_occured = 0;
dhd->pub.hang_was_sent = 0;
-
+ dhd->pub.hang_reason = 0;
+#ifdef DHD_LOSSLESS_ROAMING
+ dhd->pub.dequeue_prec_map = ALLPRIO;
+#endif
#if 0
/*
* Force start if ifconfig_up gets called before START command
goto exit;
}
}
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
+ dhd_init_cpufreq_fix(dhd);
+ dhd_fix_cpu_freq(dhd);
+ }
+#endif /* FIX_CPU_MIN_CLOCK */
if (dhd->pub.busstate != DHD_BUS_DATA) {
dhd_deepsleep(dhd, 0);
}
+#ifdef BCM_FD_AGGR
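+	/* Query the dongle RX aggregation limit and mirror it into the DBUS
+	 * aggregation configuration before enabling TX/RX aggregation.
+	 */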
+ config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
+
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
+ iovbuf, sizeof(iovbuf));
+
+ if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
+ agglimit = *(uint32 *)iovbuf;
+ config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
+ config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
+ DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
+ agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
+ if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
+			DHD_ERROR(("set tx/rx queue size and buffer size failed\n"));
+ }
+ } else {
+ DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
+ rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
+ }
+
+ /* Set aggregation for TX */
+ bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
+ rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
+
+ /* Set aggregation for RX */
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
+ if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
+ dhd->pub.info->fdaggr = 0;
+ if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
+ dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
+ if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
+ dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
+ } else {
+ DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* BCM_FD_AGGR */
+
/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
#ifdef TOE
/* Get current TOE mode from dongle */
- if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
- else
+ } else {
dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+ }
#endif /* TOE */
#if defined(WL_CFG80211)
ret = -1;
goto exit;
}
+ if (!dhd_download_fw_on_driverload) {
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#ifdef DHD_LB
+ DHD_LB_STATS_INIT(&dhd->pub);
+#ifdef DHD_LB_RXP
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#endif /* DHD_LB */
+ }
+
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+#if defined(SET_RPS_CPUS)
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#else
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
+#endif
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
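+	/* Attach and enable the NAPI instance used for load-balanced RX
+	 * processing if it is not already bound to a net device.
+	 */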
+ if (dhd->rx_napi_netdev == NULL) {
+ dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
+ memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
+ netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
+ dhd_napi_poll, dhd_napi_weight);
+ DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+ napi_enable(&dhd->rx_napi_struct);
+ DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
+ skb_queue_head_init(&dhd->rx_napi_queue);
+ }
+#endif /* DHD_LB && DHD_LB_RXP */
+#if defined(NUM_SCB_MAX_PROBE)
dhd_set_scb_probe(&dhd->pub);
+#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
}
netif_start_queue(net);
dhd->pub.up = 1;
+ OLD_MOD_INC_USE_COUNT;
+
#ifdef BCMDBGFS
dhd_dbg_init(&dhd->pub);
#endif
- OLD_MOD_INC_USE_COUNT;
exit:
- if (ret)
+ if (ret) {
dhd_stop(net);
+ }
DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
*/
if (ifevent->ifidx > 0) {
dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
memcpy(&if_event->event, ifevent, sizeof(if_event->event));
memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
{
dhd_if_event_t *if_event;
-#if defined(WL_CFG80211) && !defined(P2PONEINT)
+#ifdef WL_CFG80211
if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
return BCME_OK;
#endif /* WL_CFG80211 */
* anything else
*/
if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
memcpy(&if_event->event, ifevent, sizeof(if_event->event));
memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
strncpy(if_event->name, name, IFNAMSIZ);
*/
struct net_device*
dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
- uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock, char *dngl_name)
{
dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
dhd_if_t *ifp;
strncpy(ifp->net->name, name, IFNAMSIZ);
ifp->net->name[IFNAMSIZ - 1] = '\0';
}
+
#ifdef WL_CFG80211
if (ifidx == 0)
ifp->net->destructor = free_netdev;
ifp->name[IFNAMSIZ - 1] = '\0';
dhdinfo->iflist[ifidx] = ifp;
+	/* initialize the dongle-provided interface name */
+ if (dngl_name)
+ strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
+ else
+ strncpy(ifp->dngl_name, name, IFNAMSIZ);
+
#ifdef PCIE_FULL_DONGLE
/* Initialize STA info list */
INIT_LIST_HEAD(&ifp->sta_list);
DHD_IF_STA_LIST_LOCK_INIT(ifp);
#endif /* PCIE_FULL_DONGLE */
+#ifdef DHD_L2_FILTER
+ ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
+ ifp->parp_allnode = TRUE;
+#endif
return ifp->net;
fail:
+
if (ifp != NULL) {
if (ifp->net != NULL) {
dhd_dev_priv_clear(ifp->net);
MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
ifp = NULL;
}
+
dhdinfo->iflist[ifidx] = NULL;
return NULL;
}
dhd_if_t *ifp;
ifp = dhdinfo->iflist[ifidx];
+
if (ifp != NULL) {
if (ifp->net != NULL) {
DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
free_netdev(ifp->net);
} else {
- netif_stop_queue(ifp->net);
+ netif_tx_disable(ifp->net);
-#ifdef SET_RPS_CPUS
+#if defined(SET_RPS_CPUS)
custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
+#if defined(SET_RPS_CPUS)
+#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
+ dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
+#endif
if (need_rtnl_lock)
unregister_netdev(ifp->net);
else
unregister_netdevice(ifp->net);
}
ifp->net = NULL;
+ dhdinfo->iflist[ifidx] = NULL;
}
#ifdef DHD_WMF
dhd_wmf_cleanup(dhdpub, ifidx);
#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdpub->tickcnt);
+ deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
dhd_if_del_sta_list(ifp);
- dhdinfo->iflist[ifidx] = NULL;
MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
}
.ndo_set_multicast_list = dhd_set_multicast_list,
#endif
};
-
-#ifdef P2PONEINT
-extern int wl_cfgp2p_if_open(struct net_device *net);
-extern int wl_cfgp2p_if_stop(struct net_device *net);
-
-static struct net_device_ops dhd_cfgp2p_ops_virt = {
- .ndo_open = wl_cfgp2p_if_open,
- .ndo_stop = wl_cfgp2p_if_stop,
- .ndo_get_stats = dhd_get_stats,
- .ndo_do_ioctl = dhd_ioctl_entry,
- .ndo_start_xmit = dhd_start_xmit,
- .ndo_set_mac_address = dhd_set_mac_address,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
- .ndo_set_rx_mode = dhd_set_multicast_list,
-#else
- .ndo_set_multicast_list = dhd_set_multicast_list,
-#endif
-};
-#endif /* P2PONEINT */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
#ifdef DEBUGGER
#ifdef SHOW_LOGTRACE
static char *logstrs_path = "/root/logstrs.bin";
+static char *st_str_file_path = "/root/rtecdc.bin";
+static char *map_file_path = "/root/rtecdc.map";
+static char *rom_st_str_file_path = "/root/roml.bin";
+static char *rom_map_file_path = "/root/roml.map";
+
+#define BYTES_AHEAD_NUM		11	/* the address in the map file starts this many bytes before the symbol name */
+#define READ_NUM_BYTES		1000	/* number of bytes read from the map file per iteration */
+#define GO_BACK_FILE_POS_NUM_BYTES	100	/* rewind the file position by this many bytes between reads */
+static char *ramstart_str = "text_start"; /* map file symbol that carries the ramstart address */
+static char *rodata_start_str = "rodata_start"; /* map file symbol that carries the rodata start address */
+static char *rodata_end_str = "rodata_end"; /* map file symbol that carries the rodata end address */
+static char *ram_file_str = "rtecdc";
+static char *rom_file_str = "roml";
+#define RAMSTART_BIT 0x01
+#define RDSTART_BIT 0x02
+#define RDEND_BIT 0x04
+#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
+
module_param(logstrs_path, charp, S_IRUGO);
+module_param(st_str_file_path, charp, S_IRUGO);
+module_param(map_file_path, charp, S_IRUGO);
+module_param(rom_st_str_file_path, charp, S_IRUGO);
+module_param(rom_map_file_path, charp, S_IRUGO);
-int
+static void
dhd_init_logstrs_array(dhd_event_log_t *temp)
{
struct file *filep = NULL;
int num_fmts = 0;
uint32 i = 0;
int error = 0;
- set_fs(KERNEL_DS);
+
fs = get_fs();
+ set_fs(KERNEL_DS);
+
filep = filp_open(logstrs_path, O_RDONLY, 0);
+
if (IS_ERR(filep)) {
- DHD_ERROR(("Failed to open the file logstrs.bin in %s\n", __FUNCTION__));
+ DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
goto fail;
}
error = vfs_stat(logstrs_path, &stat);
if (error) {
- DHD_ERROR(("Failed in %s to find file stat\n", __FUNCTION__));
+ DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
goto fail;
}
logstrs_size = (int) stat.size;
raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
if (raw_fmts == NULL) {
- DHD_ERROR(("Failed to allocate raw_fmts memory\n"));
+ DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
goto fail;
}
if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
- DHD_ERROR(("Error: Log strings file read failed\n"));
+ DHD_ERROR(("%s: Failed to read file %s", __FUNCTION__, logstrs_path));
goto fail;
}
temp->num_fmts = num_fmts;
filp_close(filep, NULL);
set_fs(fs);
- return 0;
+ return;
fail:
if (raw_fmts) {
kfree(raw_fmts);
filp_close(filep, NULL);
set_fs(fs);
temp->fmts = NULL;
- return -1;
+ return;
+}
+
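+/* Scan the firmware map file for the text_start, rodata_start and rodata_end
+ * addresses that locate the static log strings within the image.
+ */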
+static int
+dhd_read_map(char *fname, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
+{
+ struct file *filep = NULL;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ uint32 read_size = READ_NUM_BYTES;
+ int error = 0;
+ char * cptr = NULL;
+ char c;
+ uint8 count = 0;
+
+ *ramstart = 0;
+ *rodata_start = 0;
+ *rodata_end = 0;
+
+ if (fname == NULL) {
+ DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ filep = filp_open(fname, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
+ goto fail;
+ }
+
+ /* Allocate 1 byte more than read_size to terminate it with NULL */
+ raw_fmts = kmalloc(read_size + 1, GFP_KERNEL);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* read ram start, rodata_start and rodata_end values from map file */
+
+ while (count != ALL_MAP_VAL)
+ {
+ error = vfs_read(filep, raw_fmts, read_size, (&filep->f_pos));
+ if (error < 0) {
+			DHD_ERROR(("%s: read failed %s err:%d \n", __FUNCTION__,
+				fname, error));
+ goto fail;
+ }
+
+ if (error < read_size) {
+			/*
+			 * Because the file position is rewound by
+			 * GO_BACK_FILE_POS_NUM_BYTES after each pass, a full read
+			 * never hits EOF; a short read therefore means EOF was
+			 * reached, so stop reading.
+			 */
+ break;
+ }
+ /* End raw_fmts with NULL as strstr expects NULL terminated strings */
+ raw_fmts[read_size] = '\0';
+
+ /* Get ramstart address */
+ if ((cptr = strstr(raw_fmts, ramstart_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c text_start", ramstart, &c);
+ count |= RAMSTART_BIT;
+ }
+
+ /* Get ram rodata start address */
+ if ((cptr = strstr(raw_fmts, rodata_start_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+ count |= RDSTART_BIT;
+ }
+
+ /* Get ram rodata end address */
+ if ((cptr = strstr(raw_fmts, rodata_end_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+ count |= RDEND_BIT;
+ }
+ memset(raw_fmts, 0, read_size);
+		/*
+		 * Rewind by a predefined number of bytes so that a symbol and its
+		 * address are not missed if they are split across two reads.
+		 */
+ filep->f_pos = filep->f_pos - GO_BACK_FILE_POS_NUM_BYTES;
+ }
+
+ DHD_ERROR(("---ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
+ *ramstart, *rodata_start, *rodata_end));
+
+ DHD_ERROR(("readmap over \n"));
+
+fail:
+ if (raw_fmts) {
+ kfree(raw_fmts);
+ raw_fmts = NULL;
+ }
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+ set_fs(fs);
+ if (count == ALL_MAP_VAL) {
+ return BCME_OK;
+ }
+	DHD_ERROR(("readmap error 0x%x \n", count));
+ return BCME_ERROR;
+}
+
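+/* Read the rodata section of the given RAM/ROM image, bounded by addresses
+ * taken from its map file, and keep it for event log string decoding.
+ */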
+static void
+dhd_init_static_strs_array(dhd_event_log_t *temp, char *str_file, char *map_file)
+{
+ struct file *filep = NULL;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ uint32 logstrs_size = 0;
+
+ int error = 0;
+ uint32 ramstart = 0;
+ uint32 rodata_start = 0;
+ uint32 rodata_end = 0;
+ uint32 logfilebase = 0;
+
+ error = dhd_read_map(map_file, &ramstart, &rodata_start, &rodata_end);
+ if (error == BCME_ERROR) {
+ DHD_ERROR(("readmap Error!! \n"));
+		/* skip event log parsing since the map file could not be read */
+ temp->raw_sstr = NULL;
+ return;
+ }
+ DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
+ ramstart, rodata_start, rodata_end));
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ filep = filp_open(str_file, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
+ goto fail;
+ }
+
+ /* Full file size is huge. Just read required part */
+ logstrs_size = rodata_end - rodata_start;
+
+ raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
+ }
+
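+	/* Offset of the rodata section within the image file */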
+ logfilebase = rodata_start - ramstart;
+
+ error = generic_file_llseek(filep, logfilebase, SEEK_SET);
+ if (error < 0) {
+ DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
+
+ error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
+ if (error != logstrs_size) {
+ DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
+
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = raw_fmts;
+ temp->ramstart = ramstart;
+ temp->rodata_start = rodata_start;
+ temp->rodata_end = rodata_end;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = raw_fmts;
+ temp->rom_ramstart = ramstart;
+ temp->rom_rodata_start = rodata_start;
+ temp->rom_rodata_end = rodata_end;
+ }
+
+ filp_close(filep, NULL);
+ set_fs(fs);
+
+ return;
+fail:
+ if (raw_fmts) {
+ kfree(raw_fmts);
+ raw_fmts = NULL;
+ }
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+ set_fs(fs);
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = NULL;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = NULL;
+ }
+ return;
}
+
#endif /* SHOW_LOGTRACE */
dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+#ifdef STBLINUX
+ DHD_ERROR(("%s\n", driver_target));
+#endif /* STBLINUX */
/* will implement get_ids for DBUS later */
#if defined(BCMSDIO)
dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
#ifdef GET_CUSTOM_MAC_ENABLE
wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef CUSTOM_COUNTRY_CODE
+ get_customized_country_code(dhd->adapter,
+ dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+ dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
strcat(if_name, "%d");
}
- net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
- if (net == NULL)
+
+	/* Pass NULL for dngl_name so the host if_name is also used as the dongle interface name */
+ net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
+ if (net == NULL) {
goto fail;
- dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+ }
+
+ dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+#ifdef DHD_L2_FILTER
+ /* initialize the l2_filter_cnt */
+ dhd->pub.l2_filter_cnt = 0;
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
net->open = NULL;
#else
net->netdev_ops = NULL;
#endif
+ mutex_init(&dhd->dhd_iovar_mutex);
sema_init(&dhd->proto_sem, 1);
#ifdef PROP_TXSTATUS
dhd->pub.skip_fc = dhd_wlfc_skip_fc;
dhd->pub.plat_init = dhd_wlfc_plat_init;
dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+
+#ifdef DHD_WLFC_THREAD
+ init_waitqueue_head(&dhd->pub.wlfc_wqhead);
+ dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
+ if (IS_ERR(dhd->pub.wlfc_thread)) {
+ DHD_ERROR(("create wlfc thread failed\n"));
+ goto fail;
+ } else {
+ wake_up_process(dhd->pub.wlfc_thread);
+ }
+#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */
/* Initialize other structure content */
init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->d3ack_wait);
init_waitqueue_head(&dhd->ctrl_wait);
+ init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
+ dhd->pub.dhd_bus_busy_state = 0;
/* Initialize the spinlocks */
spin_lock_init(&dhd->sdlock);
/* Initialize Wakelock stuff */
spin_lock_init(&dhd->wakelock_spinlock);
- dhd->wakelock_counter = 0;
+ spin_lock_init(&dhd->wakelock_evt_spinlock);
+ DHD_OS_WAKE_LOCK_INIT(dhd);
dhd->wakelock_wd_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
#ifdef CONFIG_HAS_WAKELOCK
- wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
- wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
- wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
-#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhd->dhd_net_if_mutex);
mutex_init(&dhd->dhd_suspend_mutex);
#endif
dhd_monitor_init(&dhd->pub);
dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_init(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
#if defined(WL_WIRELESS_EXT)
/* Attach and link in the iw */
if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
#ifdef SHOW_LOGTRACE
dhd_init_logstrs_array(&dhd->event_data);
+ dhd_init_static_strs_array(&dhd->event_data, st_str_file_path, map_file_path);
+ dhd_init_static_strs_array(&dhd->event_data, rom_st_str_file_path, rom_map_file_path);
#endif /* SHOW_LOGTRACE */
if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
}
+
/* Set up the watchdog timer */
init_timer(&dhd->timer);
dhd->timer.data = (ulong)dhd;
if (dhd_watchdog_prio >= 0) {
/* Initialize watchdog thread */
PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+ if (dhd->thr_wdt_ctl.thr_pid < 0) {
+ goto fail;
+ }
} else {
dhd->thr_wdt_ctl.thr_pid = -1;
}
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Setup up the runtime PM Idlecount timer */
+ init_timer(&dhd->rpm_timer);
+ dhd->rpm_timer.data = (ulong)dhd;
+ dhd->rpm_timer.function = dhd_runtimepm;
+ dhd->rpm_timer_valid = FALSE;
+
+ dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
+ if (dhd->thr_rpm_ctl.thr_pid < 0) {
+ goto fail;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
#ifdef DEBUGGER
debugger_init((void *) bus);
#endif
if (dhd_dpc_prio >= 0) {
/* Initialize DPC thread */
PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ goto fail;
+ }
} else {
/* use tasklet for dpc */
tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
/* Initialize RXF thread */
PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+ if (dhd->thr_rxf_ctl.thr_pid < 0) {
+ goto fail;
+ }
}
dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
#if defined(CONFIG_PM_SLEEP)
if (!dhd_pm_notifier_registered) {
dhd_pm_notifier_registered = TRUE;
- register_pm_notifier(&dhd_pm_notifier);
+ dhd->pm_notifier.notifier_call = dhd_pm_callback;
+ dhd->pm_notifier.priority = 10;
+ register_pm_notifier(&dhd->pm_notifier);
}
+
#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
register_inetaddr_notifier(&dhd_inetaddr_notifier);
}
#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef CONFIG_IPV6
+
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
if (!dhd_inet6addr_notifier_registered) {
dhd_inet6addr_notifier_registered = TRUE;
register_inet6addr_notifier(&dhd_inet6addr_notifier);
}
-#endif
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#ifdef DEBUG_CPU_FREQ
dhd->new_freq = alloc_percpu(int);
#endif /* BCMSDIO */
#endif /* DHDTCPACK_SUPPRESS */
- dhd_state |= DHD_ATTACH_STATE_DONE;
- dhd->dhd_state = dhd_state;
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+ dhd_state |= DHD_ATTACH_STATE_DONE;
+ dhd->dhd_state = dhd_state;
+
+ dhd_found++;
+#ifdef DHD_DEBUG_PAGEALLOC
+ register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
+#endif /* DHD_DEBUG_PAGEALLOC */
+
+#if defined(DHD_LB)
+ DHD_ERROR(("DHD LOAD BALANCING Enabled\n"));
+
+ dhd_lb_set_default_cpus(dhd);
+
+ /* Initialize the CPU Masks */
+ if (dhd_cpumasks_init(dhd) == 0) {
+
+ /* Now we have the current CPU maps, run through candidacy */
+ dhd_select_cpu_candidacy(dhd);
+
+ /*
+ * If we are able to initialize CPU masks, lets register to the
+ * CPU Hotplug framework to change the CPU for each job dynamically
+ * using candidacy algorithm.
+ */
+ dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+ register_cpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+ } else {
+ /*
+ * We are unable to initialize CPU masks, so candidacy algorithm
+ * won't run, but still Load Balancing will be honoured based
+ * on the CPUs allocated for a given job statically during init
+ */
+ dhd->cpu_notifier.notifier_call = NULL;
+		DHD_ERROR(("%s(): dhd_cpumasks_init failed, CPUs for each job will stay static\n",
+ __FUNCTION__));
+ }
+
+
+ DHD_LB_STATS_INIT(&dhd->pub);
+
+ /* Initialize the Load Balancing Tasklets and Napi object */
+#if defined(DHD_LB_TXC)
+ tasklet_init(&dhd->tx_compl_tasklet,
+ dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
+ INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
+ DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_TXC */
+
+#if defined(DHD_LB_RXC)
+ tasklet_init(&dhd->rx_compl_tasklet,
+ dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
+ INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
+ DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_RXP)
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+ skb_queue_head_init(&dhd->rx_napi_queue);
+
+ /* Initialize the work that dispatches NAPI job to a given core */
+ INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
+ DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
+#endif /* DHD_LB_RXP */
+
+#endif /* DHD_LB */
+
+ INIT_DELAYED_WORK(&dhd->dhd_memdump_work, dhd_memdump_work_handler);
+
+ (void)dhd_sysfs_init(dhd);
- dhd_found++;
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
- dhd_global = dhd;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
return &dhd->pub;
fail:
return NULL;
}
+#include <linux/delay.h>
+
+void dhd_memdump_work_schedule(dhd_pub_t *dhdp, unsigned long msecs)
+{
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+
+ schedule_delayed_work(&dhd->dhd_memdump_work, msecs_to_jiffies(msecs));
+}
+
int dhd_get_fw_mode(dhd_info_t *dhdinfo)
{
if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
}
extern int rkwifi_set_firmware(char *fw, char *nvram);
-
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
int fw_len;
#if 0
/* clear the path in module parameter */
- firmware_path[0] = '\0';
- nvram_path[0] = '\0';
- config_path[0] = '\0';
+ if (dhd_download_fw_on_driverload) {
+ firmware_path[0] = '\0';
+ nvram_path[0] = '\0';
+ config_path[0] = '\0';
+ }
#endif
#ifndef BCMEMBEDIMAGE
if (dhdinfo->conf_path[0] == '\0') {
dhd_conf_set_conf_path_by_nv_path(&dhdinfo->pub, dhdinfo->conf_path, dhdinfo->nv_path);
}
+#ifdef CONFIG_PATH_AUTO_SELECT
+ dhd_conf_set_conf_name_by_chip(&dhdinfo->pub, dhdinfo->conf_path);
+#endif
#endif /* BCMEMBEDIMAGE */
return TRUE;
}
+#ifdef CUSTOMER_HW4_DEBUG
+bool dhd_validate_chipid(dhd_pub_t *dhdp)
+{
+ uint chipid = dhd_bus_chip_id(dhdp);
+ uint config_chipid;
+
+#ifdef BCM4359_CHIP
+ config_chipid = BCM4359_CHIP_ID;
+#elif defined(BCM4358_CHIP)
+ config_chipid = BCM4358_CHIP_ID;
+#elif defined(BCM4354_CHIP)
+ config_chipid = BCM4354_CHIP_ID;
+#elif defined(BCM4356_CHIP)
+ config_chipid = BCM4356_CHIP_ID;
+#elif defined(BCM4339_CHIP)
+ config_chipid = BCM4339_CHIP_ID;
+#elif defined(BCM43349_CHIP)
+ config_chipid = BCM43349_CHIP_ID;
+#elif defined(BCM4335_CHIP)
+ config_chipid = BCM4335_CHIP_ID;
+#elif defined(BCM43241_CHIP)
+ config_chipid = BCM4324_CHIP_ID;
+#elif defined(BCM4330_CHIP)
+ config_chipid = BCM4330_CHIP_ID;
+#elif defined(BCM43430_CHIP)
+ config_chipid = BCM43430_CHIP_ID;
+#elif defined(BCM4334W_CHIP)
+ config_chipid = BCM43342_CHIP_ID;
+#elif defined(BCM43455_CHIP)
+ config_chipid = BCM4345_CHIP_ID;
+#else
+	DHD_ERROR(("%s: Unknown chip id; if you are using a new chipset,"
+		" please add the matching CONFIG_BCMXXXX option to the kernel and the"
+		" BCMXXXX_CHIP definition to the DHD driver\n",
+ __FUNCTION__));
+ config_chipid = 0;
+
+ return FALSE;
+#endif /* BCM4359_CHIP */
+
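+	/* A dongle reporting BCM4355_CHIP_ID is accepted when the driver is
+	 * built for BCM4359.
+	 */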
+#if defined(BCM4359_CHIP)
+ if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4359_CHIP */
+
+ return config_chipid == chipid;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
int
dhd_bus_start(dhd_pub_t *dhdp)
/* try to download image and nvram to the dongle */
if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+ /* Indicate FW Download has not yet done */
+ dhd->pub.is_fw_download_done = FALSE;
DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
__FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
DHD_PERIM_UNLOCK(dhdp);
return ret;
}
+ /* Indicate FW Download has succeeded */
+ dhd->pub.is_fw_download_done = TRUE;
}
if (dhd->pub.busstate != DHD_BUS_LOAD) {
DHD_PERIM_UNLOCK(dhdp);
/* Start the watchdog timer */
dhd->pub.tickcnt = 0;
dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+ DHD_ENABLE_RUNTIME_PM(&dhd->pub);
/* Bring up the bus */
if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
del_timer_sync(&dhd->timer);
dhd_os_sdunlock(dhdp);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
/* Enable oob at firmware */
dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#elif defined(FORCE_WOWLAN)
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif
#ifdef PCIE_FULL_DONGLE
{
- uint8 txpush = 0;
- uint32 num_flowrings; /* includes H2D common rings */
- num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
- DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
- num_flowrings));
- if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
+ /* max_h2d_rings includes H2D common rings */
+ uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
+
+ DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+ max_h2d_rings));
+ if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
dhd_os_sdunlock(dhdp);
DHD_PERIM_UNLOCK(dhdp);
return ret;
#endif /* PCIE_FULL_DONGLE */
/* Do protocol initialization necessary for IOCTL/IOVAR */
- dhd_prot_init(&dhd->pub);
+#ifdef PCIE_FULL_DONGLE
+ dhd_os_sdunlock(dhdp);
+#endif /* PCIE_FULL_DONGLE */
+ ret = dhd_prot_init(&dhd->pub);
+	if (unlikely(ret != BCME_OK)) {
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
+#ifdef PCIE_FULL_DONGLE
+ dhd_os_sdlock(dhdp);
+#endif /* PCIE_FULL_DONGLE */
/* If bus is not ready, can't come up */
if (dhd->pub.busstate != DHD_BUS_DATA) {
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
del_timer_sync(&dhd->timer);
DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
dhd_os_sdunlock(dhdp);
DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
/* Bus is ready, query any dongle information */
if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
DHD_PERIM_UNLOCK(dhdp);
return ret;
}
ret = BCME_ERROR;
return ret;
}
+int
+dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = 0;
+ bool auto_on = false;
+ uint32 mode = wfd_mode;
+
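+	/* Resolve TDLS auto mode first (kept only when WFD mode is not
+	 * requested and ENABLE_TDLS_AUTO_MODE is built in), then apply the
+	 * tdls_wfd_mode iovar with TDLS disabled and re-enable it afterwards.
+	 */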
+#ifdef ENABLE_TDLS_AUTO_MODE
+ if (wfd_mode) {
+ auto_on = false;
+ } else {
+ auto_on = true;
+ }
+#else
+ auto_on = false;
+#endif /* ENABLE_TDLS_AUTO_MODE */
+ ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+
+ bcm_mkiovar("tdls_wfd_mode", (char *)&mode, sizeof(mode),
+ iovbuf, sizeof(iovbuf));
+ if (((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) &&
+ (ret != BCME_UNSUPPORTED)) {
+		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ dhd->tdls_mode = mode;
+ return ret;
+}
#ifdef PCIE_FULL_DONGLE
void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
{
if (!FW_SUPPORTED(dhd, p2p)) {
DHD_TRACE(("Chip does not support p2p\n"));
return 0;
- }
- else {
+ } else {
/* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
memset(buf, 0, sizeof(buf));
bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
FALSE, 0)) < 0) {
DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
return 0;
- }
- else {
+ } else {
if (buf[0] == 1) {
/* By default, chip supports single chan concurrency,
* now lets check for mchan
ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
if (mchan_supported)
ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret |= DHD_FLAG_RSDB_MODE;
+ }
+ if (FW_SUPPORTED(dhd, mp2p)) {
+ ret |= DHD_FLAG_MP2P_MODE;
+ }
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- /* For customer_hw4, although ICS,
- * we still support concurrent mode
- */
return ret;
#else
return 0;
-#endif
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
}
}
}
return 0;
}
-#endif
+#endif
#ifdef SUPPORT_AP_POWERSAVE
#define RXCHAIN_PWRSAVE_PPS 10
#endif /* SUPPORT_AP_POWERSAVE */
-#if defined(READ_CONFIG_FROM_FILE)
-#include <linux/fs.h>
-#include <linux/ctype.h>
-
-#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
-bool PM_control = TRUE;
-
-static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
-{
- int var_int;
- wl_country_t cspec = {{0}, -1, {0}};
- char *revstr;
- char *endptr = NULL;
- int iolen;
- char smbuf[WLC_IOCTL_SMLEN*2];
-
- if (!strcmp(name, "country")) {
- revstr = strchr(value, '/');
- if (revstr) {
- cspec.rev = strtoul(revstr + 1, &endptr, 10);
- memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
- cspec.country_abbrev[2] = '\0';
- memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
- } else {
- cspec.rev = -1;
- memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
- memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
- get_customized_country_code(dhd->info->adapter,
- (char *)&cspec.country_abbrev, &cspec);
- }
- memset(smbuf, 0, sizeof(smbuf));
- DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
- cspec.country_abbrev, cspec.rev));
- iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
- smbuf, sizeof(smbuf));
- return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
- smbuf, iolen, TRUE, 0);
- } else if (!strcmp(name, "roam_scan_period")) {
- var_int = (int)simple_strtol(value, NULL, 0);
- return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
- &var_int, sizeof(var_int), TRUE, 0);
- } else if (!strcmp(name, "roam_delta")) {
- struct {
- int val;
- int band;
- } x;
- x.val = (int)simple_strtol(value, NULL, 0);
- /* x.band = WLC_BAND_AUTO; */
- x.band = WLC_BAND_ALL;
- return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
- } else if (!strcmp(name, "roam_trigger")) {
- int ret = 0;
-
- roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
- roam_trigger[1] = WLC_BAND_ALL;
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
- sizeof(roam_trigger), TRUE, 0);
-
- return ret;
- } else if (!strcmp(name, "PM")) {
- int ret = 0;
- var_int = (int)simple_strtol(value, NULL, 0);
-
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
- &var_int, sizeof(var_int), TRUE, 0);
-
-#if defined(CONFIG_PM_LOCK)
- if (var_int == 0) {
- g_pm_control = TRUE;
- printk("%s var_int=%d don't control PM\n", __func__, var_int);
- } else {
- g_pm_control = FALSE;
- printk("%s var_int=%d do control PM\n", __func__, var_int);
- }
-#endif
-
- return ret;
- }
-#ifdef WLBTAMP
- else if (!strcmp(name, "btamp_chan")) {
- int btamp_chan;
- int iov_len = 0;
- char iovbuf[128];
- int ret;
-
- btamp_chan = (int)simple_strtol(value, NULL, 0);
- iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
- DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
- __FUNCTION__, btamp_chan, ret));
- else
- DHD_ERROR(("%s btamp_chan %d set success\n",
- __FUNCTION__, btamp_chan));
- }
-#endif /* WLBTAMP */
- else if (!strcmp(name, "band")) {
- int ret;
- if (!strcmp(value, "auto"))
- var_int = WLC_BAND_AUTO;
- else if (!strcmp(value, "a"))
- var_int = WLC_BAND_5G;
- else if (!strcmp(value, "b"))
- var_int = WLC_BAND_2G;
- else if (!strcmp(value, "all"))
- var_int = WLC_BAND_ALL;
- else {
- printk(" set band value should be one of the a or b or all\n");
- var_int = WLC_BAND_AUTO;
- }
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
- sizeof(var_int), TRUE, 0)) < 0)
- printk(" set band err=%d\n", ret);
- return ret;
- } else if (!strcmp(name, "cur_etheraddr")) {
- struct ether_addr ea;
- char buf[32];
- uint iovlen;
- int ret;
-
- bcm_ether_atoe(value, &ea);
-
- ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
- if (ret == 0) {
- DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
- return 0;
- }
-
- DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
- ea.octet[0], ea.octet[1], ea.octet[2],
- ea.octet[3], ea.octet[4], ea.octet[5]));
-
- iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
-
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
- if (ret < 0) {
- DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
- return ret;
- }
- else {
- memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
- return ret;
- }
- } else if (!strcmp(name, "lpc")) {
- int ret = 0;
- char buf[32];
- uint iovlen;
- var_int = (int)simple_strtol(value, NULL, 0);
- if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
- DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
- }
- iovlen = bcm_mkiovar("lpc", (char *)&var_int, 4, buf, sizeof(buf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
- }
- if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
- DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
- }
- return ret;
- } else if (!strcmp(name, "vht_features")) {
- int ret = 0;
- char buf[32];
- uint iovlen;
- var_int = (int)simple_strtol(value, NULL, 0);
-
- if (dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
- DHD_ERROR(("%s: wl down failed\n", __FUNCTION__));
- }
- iovlen = bcm_mkiovar("vht_features", (char *)&var_int, 4, buf, sizeof(buf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set vht_features failed %d\n", __FUNCTION__, ret));
- }
- if (dhd_wl_ioctl_cmd(dhd, WLC_UP, NULL, 0, TRUE, 0) < 0) {
- DHD_ERROR(("%s: wl up failed\n", __FUNCTION__));
- }
- return ret;
- } else {
- uint iovlen;
- char iovbuf[WLC_IOCTL_SMLEN];
-
- /* wlu_iovar_setint */
- var_int = (int)simple_strtol(value, NULL, 0);
-
- /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
- if (!strcmp(name, "roam_off")) {
- /* Setup timeout if Beacons are lost to report link down */
- if (var_int) {
- uint bcn_timeout = 2;
- bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
- iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- }
- }
- /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
-
- DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
-
- iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
- iovbuf, sizeof(iovbuf));
- return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
- iovbuf, iovlen, TRUE, 0);
- }
-
- return 0;
-}
-
-static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
-{
- mm_segment_t old_fs;
- struct kstat stat;
- struct file *fp = NULL;
- unsigned int len;
- char *buf = NULL, *p, *name, *value;
- int ret = 0;
- char *config_path;
-
- config_path = CONFIG_BCMDHD_CONFIG_PATH;
-
- if (!config_path)
- {
- printk(KERN_ERR "config_path can't read. \n");
- return 0;
- }
-
- old_fs = get_fs();
- set_fs(get_ds());
- if ((ret = vfs_stat(config_path, &stat))) {
- set_fs(old_fs);
- printk(KERN_ERR "%s: Failed to get information (%d)\n",
- config_path, ret);
- return ret;
- }
- set_fs(old_fs);
-
- if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
- printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
- return -ENOMEM;
- }
-
- printk("dhd_preinit_config : config path : %s \n", config_path);
-
- if (!(fp = dhd_os_open_image(config_path)) ||
- (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
- goto err;
-
- buf[stat.size] = '\0';
- for (p = buf; *p; p++) {
- if (isspace(*p))
- continue;
- for (name = p++; *p && !isspace(*p); p++) {
- if (*p == '=') {
- *p = '\0';
- p++;
- for (value = p; *p && !isspace(*p); p++);
- *p = '\0';
- if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
- printk(KERN_ERR "%s: %s=%s\n",
- bcmerrorstr(ret), name, value);
- }
- break;
- }
- }
- }
- ret = 0;
-
-out:
- if (fp)
- dhd_os_close_image(fp);
- if (buf)
- MFREE(dhd->osh, buf, stat.size+1);
- return ret;
-
-err:
- ret = -1;
- goto out;
-}
-#endif /* READ_CONFIG_FROM_FILE */
-
int
dhd_preinit_ioctls(dhd_pub_t *dhd)
{
eventmsgs_ext_t *eventmask_msg = NULL;
char* iov_buf = NULL;
int ret2 = 0;
-#ifdef WLAIBSS
- aibss_bcn_force_config_t bcn_config;
- uint32 aibss;
-#ifdef WLAIBSS_PS
- uint32 aibss_ps;
-#endif /* WLAIBSS_PS */
-#endif /* WLAIBSS */
-#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- uint32 sup_wpa = 0;
-#endif
-#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
- defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
uint32 ampdu_ba_wsize = 0;
-#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+#endif
#if defined(CUSTOM_AMPDU_MPDU)
int32 ampdu_mpdu = 0;
#endif
#if defined(CUSTOM_AMSDU_AGGSF)
int32 amsdu_aggsf = 0;
#endif
-
+#ifdef SUPPORT_SENSORHUB
+ int32 shub_enable = 0;
+#endif /* SUPPORT_SENSORHUB */
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
int wlfc_enable = TRUE;
uint32 wl_ap_isolate;
#endif /* PCIE_FULL_DONGLE */
+#if defined(BCMSDIO)
+ /* by default frame burst is enabled for PCIe and disabled for SDIO dongles */
+ uint32 frameburst = 0;
+#else
+ uint32 frameburst = 1;
+#endif /* BCMSDIO */
+
#ifdef DHD_ENABLE_LPC
uint32 lpc = 1;
#endif /* DHD_ENABLE_LPC */
uint power_mode = PM_FAST;
- uint32 dongle_align = DHD_SDALIGN;
#if defined(BCMSDIO)
+ uint32 dongle_align = DHD_SDALIGN;
uint32 glom = CUSTOM_GLOM_SETTING;
#endif /* defined(BCMSDIO) */
#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
uint32 credall = 1;
#endif
uint bcn_timeout = dhd->conf->bcn_timeout;
- uint retry_max = 3;
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ uint32 bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
#if defined(ARP_OFFLOAD_SUPPORT)
int arpoe = 1;
#endif
uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
struct ether_addr p2p_ea;
#endif
-#ifdef BCMCCX
- uint32 ccx = 1;
-#endif
#ifdef SOFTAP_UAPSD_OFF
uint32 wme_apsd = 0;
#endif /* SOFTAP_UAPSD_OFF */
uint32 nmode = 0;
#endif /* DISABLE_11N */
-#if defined(DISABLE_11AC)
- uint32 vhtmode = 0;
-#endif /* DISABLE_11AC */
#ifdef USE_WL_TXBF
uint32 txbf = 1;
#endif /* USE_WL_TXBF */
-#ifdef AMPDU_VO_ENABLE
- struct ampdu_tid_control tid;
-#endif
-#ifdef USE_WL_FRAMEBURST
- uint32 frameburst = 1;
-#endif /* USE_WL_FRAMEBURST */
-#ifdef DHD_SET_FW_HIGHSPEED
- uint32 ack_ratio = 250;
- uint32 ack_ratio_depth = 64;
-#endif /* DHD_SET_FW_HIGHSPEED */
-#ifdef SUPPORT_2G_VHT
- uint32 vht_features = 0x3; /* 2G enable | rates all */
-#endif /* SUPPORT_2G_VHT */
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+ uint32 proptx = 0;
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
#ifdef CUSTOM_PSPRETEND_THR
uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
#endif
+ uint32 rsdb_mode = 0;
+#ifdef ENABLE_TEMP_THROTTLING
+ wl_temp_control_t temp_control;
+#endif /* ENABLE_TEMP_THROTTLING */
+#ifdef DISABLE_PRUNED_SCAN
+ uint32 scan_features = 0;
+#endif /* DISABLE_PRUNED_SCAN */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef PKT_FILTER_SUPPORT
dhd_pkt_filter_enable = TRUE;
#endif /* PKT_FILTER_SUPPORT */
#ifdef WLTDLS
dhd->tdls_enable = FALSE;
+ dhd_tdls_set_mode(dhd, false);
#endif /* WLTDLS */
dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
DHD_TRACE(("Enter %s\n", __FUNCTION__));
- dhd_conf_set_band(dhd);
+ dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_BAND", WLC_SET_BAND, dhd->conf->band, 0, FALSE);
+#ifdef DHDTCPACK_SUPPRESS
printf("%s: Set tcpack_sup_mode %d\n", __FUNCTION__, dhd->conf->tcpack_sup_mode);
dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
+#endif
dhd->op_mode = 0;
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!dhd_validate_chipid(dhd)) {
+ DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
+ __FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ ret = BCME_BADARG;
+ goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
(op_mode == DHD_FLAG_MFG_MODE)) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in mfg mode */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+ DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIMEPM */
/* Check and adjust IOCTL response timeout for Manufactring firmware */
dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
__FUNCTION__));
- }
- else {
+ } else {
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
}
#endif /* GET_CUSTOM_MAC_ENABLE */
/* get a capabilities from firmware */
- memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
- bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
- sizeof(dhd->fw_capabilities), FALSE, 0)) < 0) {
- DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
- __FUNCTION__, ret));
- goto done;
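+	/*
+	 * Note: the "cap" string read back below is shifted right by one byte
+	 * and wrapped in leading/trailing spaces, so whole-word lookups of the
+	 * form " <capability> " (e.g. via FW_SUPPORTED()) cannot false-match on
+	 * a prefix of another capability name.
+	 */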
+ {
+ uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+ memset(dhd->fw_capabilities, 0, cap_buf_size);
+ bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, cap_buf_size - 1);
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, dhd->fw_capabilities,
+ (cap_buf_size - 1), FALSE, 0)) < 0)
+ {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return 0;
+ }
+
+ memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+ dhd->fw_capabilities[0] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 1] = '\0';
}
+
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
(op_mode == DHD_FLAG_HOSTAP_MODE)) {
#ifdef SET_RANDOM_MAC_SOFTAP
uint rand_mac;
-#endif
+#endif /* SET_RANDOM_MAC_SOFTAP */
dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
#if defined(ARP_OFFLOAD_SUPPORT)
arpoe = 0;
#ifdef SET_RANDOM_MAC_SOFTAP
SRANDOM32((uint)jiffies);
rand_mac = RANDOM32();
- iovbuf[0] = 0x02; /* locally administered bit */
- iovbuf[1] = 0x1A;
- iovbuf[2] = 0x11;
+ iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
+ iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+ iovbuf[2] = (unsigned char)vendor_oui;
iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
iovbuf[4] = (unsigned char)(rand_mac >> 8);
iovbuf[5] = (unsigned char)(rand_mac >> 16);
DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
}
#endif
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
#ifdef SUPPORT_AP_POWERSAVE
dhd_set_ap_powersave(dhd, 0, TRUE);
-#endif
+#endif /* SUPPORT_AP_POWERSAVE */
#ifdef SOFTAP_UAPSD_OFF
bcm_mkiovar("wme_apsd", (char *)&wme_apsd, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
- DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n", __FUNCTION__, ret));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
+ __FUNCTION__, ret));
+ }
#endif /* SOFTAP_UAPSD_OFF */
} else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
(op_mode == DHD_FLAG_MFG_MODE)) {
dhd_pkt_filter_enable = FALSE;
#endif /* PKT_FILTER_SUPPORT */
dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ rsdb_mode = 0;
+ bcm_mkiovar("rsdb_mode", (char *)&rsdb_mode, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
+ __FUNCTION__, ret));
+ }
+ }
} else {
uint32 concurrent_mode = 0;
if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
#endif
}
+#ifdef RSDB_MODE_FROM_FILE
+ (void)dhd_rsdb_mode_from_file(dhd);
+#endif /* RSDB_MODE_FROM_FILE */
+
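+	/*
+	 * On RSDB-capable firmware, scan_features is handled as a
+	 * read-modify-write: fetch the current value, clear
+	 * RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM, and write it back so
+	 * downgraded-channel pruning is not applied to roam scans.
+	 */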
+#ifdef DISABLE_PRUNED_SCAN
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("scan_features", (char *)&scan_features,
+ 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR,
+ iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s get scan_features is failed ret=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ memcpy(&scan_features, iovbuf, 4);
+ scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("scan_features", (char *)&scan_features,
+ 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s set scan_features is failed ret=%d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ }
+#endif /* DISABLE_PRUNED_SCAN */
+
DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+ #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+ dhd->info->rxthread_enabled = FALSE;
+ else
+ dhd->info->rxthread_enabled = TRUE;
+ #endif
/* Set Country code */
if (dhd->dhd_cspec.ccode[0] != 0) {
printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
}
dhd_conf_get_country(dhd, &dhd->dhd_cspec);
-#if defined(DISABLE_11AC)
- bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
- DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
-#endif /* DISABLE_11AC */
/* Set Listen Interval */
bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+ DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
+ }
+#endif /* USE_WFA_CERT_CONF */
/* Disable built-in roaming to allowed ext supplicant to take care of roaming */
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* ROAM_ENABLE */
dhd_conf_set_roam(dhd);
-#ifdef BCMCCX
- bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
-#endif /* BCMCCX */
+#ifdef CUSTOM_EVENT_PM_WAKE
+ bcm_mkiovar("const_awake_thresh", (char *)&pm_awake_thresh, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef WLTDLS
+#ifdef ENABLE_TDLS_AUTO_MODE
+ /* by default TDLS on and auto mode on */
+ _dhd_tdls_enable(dhd, true, true, NULL);
+#else
/* by default TDLS on and auto mode off */
_dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* ENABLE_TDLS_AUTO_MODE */
#endif /* WLTDLS */
#ifdef DHD_ENABLE_LPC
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
sizeof(iovbuf), TRUE, 0)) < 0) {
DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
+
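+		/* Some firmware revisions reject "lpc" while the interface is up
+		 * (BCME_NOTDOWN); in that case force WLC_DOWN and retry the set once.
+		 */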
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+ (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+
+ bcm_mkiovar("lpc", (char *)&lpc, 4, iovbuf, sizeof(iovbuf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+ }
}
#endif /* DHD_ENABLE_LPC */
- dhd_conf_set_lpc(dhd);
+ dhd_conf_set_fw_string_cmd(dhd, "lpc", dhd->conf->lpc, 0, FALSE);
/* Set PowerSave mode */
if (dhd->conf->pm >= 0)
power_mode = dhd->conf->pm;
dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
+#if defined(BCMSDIO)
/* Match Host and Dongle rx alignment */
bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif
-#if defined(BCMSDIO)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
+ DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
+ }
+#endif /* USE_WFA_CERT_CONF */
if (glom != DEFAULT_GLOM_VALUE) {
DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
}
#endif /* defined(BCMSDIO) */
- dhd_conf_set_bus_txglom(dhd);
/* Setup timeout if Beacons are lost and roam is off to report link down */
bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* defined(AP) && !defined(WLP2P) */
- dhd_conf_set_mimo_bw_cap(dhd);
- dhd_conf_force_wme(dhd);
- dhd_conf_set_stbc(dhd);
- dhd_conf_set_srl(dhd);
- dhd_conf_set_lrl(dhd);
- dhd_conf_set_spect(dhd);
+ /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */
+ dhd_conf_set_fw_string_cmd(dhd, "mimo_bw_cap", dhd->conf->mimo_bw_cap, 1, TRUE);
+ dhd_conf_set_fw_string_cmd(dhd, "force_wme_ac", dhd->conf->force_wme_ac, 1, FALSE);
+ dhd_conf_set_fw_string_cmd(dhd, "stbc_tx", dhd->conf->stbc, 0, FALSE);
+ dhd_conf_set_fw_string_cmd(dhd, "stbc_rx", dhd->conf->stbc, 0, FALSE);
+ dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SRL", WLC_SET_SRL, dhd->conf->srl, 0, TRUE);
+ dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_LRL", WLC_SET_LRL, dhd->conf->lrl, 0, FALSE);
+ dhd_conf_set_fw_int_cmd(dhd, "WLC_SET_SPECT_MANAGMENT", WLC_SET_SPECT_MANAGMENT, dhd->conf->spect, 0, FALSE);
+ dhd_conf_set_fw_string_cmd(dhd, "rsdb_mode", dhd->conf->rsdb_mode, -1, TRUE);
+
+#ifdef MIMO_ANT_SETTING
+ dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
#if defined(SOFTAP)
if (ap_fw_loaded == TRUE) {
bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
+ DHD_ERROR(("%s Set txbf returned (%d)\n", __FUNCTION__, ret));
}
#endif /* USE_WL_TXBF */
- dhd_conf_set_txbf(dhd);
+ dhd_conf_set_fw_string_cmd(dhd, "txbf", dhd->conf->txbf, 0, FALSE);
+
+#ifdef USE_WFA_CERT_CONF
#ifdef USE_WL_FRAMEBURST
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+ DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+ }
+#endif /* USE_WL_FRAMEBURST */
+#ifdef DISABLE_FRAMEBURST_VSDB
+ g_frameburst = frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#endif /* USE_WFA_CERT_CONF */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+	/* Disable Framebursting for SoftAP */
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ frameburst = 0;
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
/* Set frameburst to value */
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
sizeof(frameburst), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set frameburst failed %d\n", __FUNCTION__, ret));
+ DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
}
-#endif /* USE_WL_FRAMEBURST */
- dhd_conf_set_frameburst(dhd);
-#ifdef DHD_SET_FW_HIGHSPEED
- /* Set ack_ratio */
- bcm_mkiovar("ack_ratio", (char *)&ack_ratio, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
- }
-
- /* Set ack_ratio_depth */
- bcm_mkiovar("ack_ratio_depth", (char *)&ack_ratio_depth, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
- }
-#endif /* DHD_SET_FW_HIGHSPEED */
-#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
- defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+ dhd_conf_set_fw_string_cmd(dhd, "frameburst", dhd->conf->frameburst, 0, FALSE);
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
/* Set ampdu ba wsize to 64 or 16 */
#ifdef CUSTOM_AMPDU_BA_WSIZE
ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
#endif
-#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
- if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
- ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
-#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
if (ampdu_ba_wsize != 0) {
		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
__FUNCTION__, ampdu_ba_wsize, ret));
}
}
-#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
- dhd_conf_set_ampdu_ba_wsize(dhd);
+#endif
+ dhd_conf_set_fw_string_cmd(dhd, "ampdu_ba_wsize", dhd->conf->ampdu_ba_wsize, 1, FALSE);
iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
if (iov_buf == NULL) {
ret = BCME_NOMEM;
goto done;
}
-#ifdef WLAIBSS
- /* Configure custom IBSS beacon transmission */
- if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
- {
- aibss = 1;
- bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set aibss to %d failed %d\n",
- __FUNCTION__, aibss, ret));
- }
-#ifdef WLAIBSS_PS
- aibss_ps = 1;
- bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
- __FUNCTION__, aibss, ret));
+#ifdef ENABLE_TEMP_THROTTLING
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ memset(&temp_control, 0, sizeof(temp_control));
+ temp_control.enable = 1;
+ temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
+ bcm_mkiovar("temp_throttle_control", (char *)&temp_control,
+ sizeof(wl_temp_control_t), iov_buf, WLC_IOCTL_SMLEN);
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf, WLC_IOCTL_SMLEN, TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
+ __FUNCTION__, ret));
}
-#endif /* WLAIBSS_PS */
}
- memset(&bcn_config, 0, sizeof(bcn_config));
- bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
- bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
- bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
- bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
- bcn_config.len = sizeof(bcn_config);
-
- bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
- sizeof(aibss_bcn_force_config_t), iov_buf, WLC_IOCTL_SMLEN);
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
- WLC_IOCTL_SMLEN, TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
- __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
- AIBSS_BCN_FLOOD_DUR, ret));
- }
-#endif /* WLAIBSS */
-
+#endif /* ENABLE_TEMP_THROTTLING */
#if defined(CUSTOM_AMPDU_MPDU)
ampdu_mpdu = CUSTOM_AMPDU_MPDU;
if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
if (amsdu_aggsf != 0) {
bcm_mkiovar("amsdu_aggsf", (char *)&amsdu_aggsf, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
__FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
}
}
#endif /* CUSTOM_AMSDU_AGGSF */
-#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- /* Read 4-way handshake requirements */
- if (dhd_use_idsup == 1) {
- bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
- /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
- * in-dongle supplicant.
- */
- if (ret >= 0 || ret == BCME_NOTREADY)
- dhd->fw_4way_handshake = TRUE;
- DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
- }
-#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
-#ifdef SUPPORT_2G_VHT
- bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
- }
-#endif /* SUPPORT_2G_VHT */
#ifdef CUSTOM_PSPRETEND_THR
/* Turn off MPC in AP mode */
bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
setbit(eventmask, WLC_E_ASSOC);
setbit(eventmask, WLC_E_REASSOC);
setbit(eventmask, WLC_E_REASSOC_IND);
- setbit(eventmask, WLC_E_DEAUTH);
+ if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
+ setbit(eventmask, WLC_E_DEAUTH);
setbit(eventmask, WLC_E_DEAUTH_IND);
setbit(eventmask, WLC_E_DISASSOC_IND);
setbit(eventmask, WLC_E_DISASSOC);
setbit(eventmask, WLC_E_ASSOC_IND);
setbit(eventmask, WLC_E_PSK_SUP);
setbit(eventmask, WLC_E_LINK);
- setbit(eventmask, WLC_E_NDIS_LINK);
setbit(eventmask, WLC_E_MIC_ERROR);
setbit(eventmask, WLC_E_ASSOC_REQ_IE);
setbit(eventmask, WLC_E_ASSOC_RESP_IE);
#endif
setbit(eventmask, WLC_E_JOIN_START);
// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
+#ifdef DHD_DEBUG
+ setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
+#endif
#ifdef WLMEDIA_HTSF
setbit(eventmask, WLC_E_HTSFSYNC);
#endif /* WLMEDIA_HTSF */
/* enable dongle roaming event */
setbit(eventmask, WLC_E_ROAM);
setbit(eventmask, WLC_E_BSSID);
-#ifdef BCMCCX
- setbit(eventmask, WLC_E_ADDTS_IND);
- setbit(eventmask, WLC_E_DELTS_IND);
-#endif /* BCMCCX */
#ifdef WLTDLS
setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
#endif /* WLTDLS */
#ifdef WL_CFG80211
setbit(eventmask, WLC_E_ESCAN_RESULT);
+ setbit(eventmask, WLC_E_AP_STARTED);
if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
setbit(eventmask, WLC_E_ACTION_FRAME_RX);
setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
}
#endif /* WL_CFG80211 */
-#ifdef WLAIBSS
- setbit(eventmask, WLC_E_AIBSS_TXFAIL);
-#endif /* WLAIBSS */
-#ifdef CUSTOMER_HW10
- clrbit(eventmask, WLC_E_TRACE);
-#else
+
+#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
+ if (dhd_logtrace_from_file(dhd)) {
+ setbit(eventmask, WLC_E_TRACE);
+ } else {
+ clrbit(eventmask, WLC_E_TRACE);
+ }
+#elif defined(SHOW_LOGTRACE)
setbit(eventmask, WLC_E_TRACE);
+#else
+ clrbit(eventmask, WLC_E_TRACE);
+#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
+
+ setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
+#ifdef DHD_LOSSLESS_ROAMING
+ setbit(eventmask, WLC_E_ROAM_PREP);
#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+ setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+
/* Write updated Event mask */
bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
/* Read event_msgs_ext mask */
bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, WLC_IOCTL_SMLEN);
ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, WLC_IOCTL_SMLEN, FALSE, 0);
- if (ret2 != BCME_UNSUPPORTED)
- ret = ret2;
if (ret2 == 0) { /* event_msgs_ext must be supported */
bcopy(iov_buf, eventmask_msg, msglen);
-
+#ifdef GSCAN_SUPPORT
+ setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
+ setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
+ setbit(eventmask_msg->mask, WLC_E_PFN_SWC);
+#endif /* GSCAN_SUPPORT */
#ifdef BT_WIFI_HANDOVER
setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
#endif /* BT_WIFI_HANDOVER */
DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
goto done;
}
- } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
+ } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
+ /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
+ DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
+ __FUNCTION__, ret2));
+ } else {
DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+ ret = ret2;
goto done;
- } /* unsupported is ok */
+ }
dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
sizeof(scan_assoc_time), TRUE, 0);
#ifdef PKT_FILTER_SUPPORT
/* Setup default defintions for pktfilter , enable in suspend */
- dhd->pktfilter_count = 6;
- /* Setup filter to allow only unicast */
if (dhd_master_mode) {
- dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+ dhd->pktfilter_count = 6;
dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
- /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
- dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
/* apply APP pktfilter */
dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
+
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
+
+		/* mDNS filter is left unset here; it is installed on demand via
+		 * DHD_MDNS_FILTER_NUM in net_os_rxfilter_add_remove() */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd->pktfilter_count = 4;
+ /* Setup filter to block broadcast and NAT Keepalive packets */
+ /* discard all broadcast packets */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
} else
dhd_conf_discard_pkt_filter(dhd);
dhd_conf_add_pkt_filter(dhd);
DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
#endif /* DISABLE_11N */
-#ifdef AMPDU_VO_ENABLE
- tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
- tid.enable = TRUE;
- bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
-
- tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
- tid.enable = TRUE;
- bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ bcm_mkiovar("bcn_li_bcn", (char *)&bcn_li_bcn, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
-#endif
-#if defined(SOFTAP_TPUT_ENHANCE)
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- dhd_bus_setidletime(dhd, (int)100);
-#ifdef DHDTCPACK_SUPPRESS
- dhd->tcpack_sup_enabled = FALSE;
-#endif
-#if defined(DHD_TCP_WINSIZE_ADJUST)
- dhd_use_tcp_window_size_adjust = TRUE;
-#endif
-
- memset(buf, 0, sizeof(buf));
- bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
- glom = 0;
- bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- }
- else {
- if (buf[0] == 0) {
- glom = 1;
- bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
- sizeof(iovbuf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- }
- }
- }
-#endif /* SOFTAP_TPUT_ENHANCE */
-
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
ptr = buf;
bcmstrtok(&ptr, "\n", 0);
/* Print fw version info */
DHD_ERROR(("Firmware version = %s\n", buf));
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
dhd_set_version_info(dhd, buf);
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path);
+#endif /* WRITE_WLANINFO */
}
#if defined(BCMSDIO)
dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
+	// terence 20151210: set bus:txglom after dhd_txglom_enable since it may be changed in dhd_conf_set_txglom_params
+ dhd_conf_set_fw_string_cmd(dhd, "bus:txglom", dhd->conf->bus_txglom, 1, FALSE);
#endif /* defined(BCMSDIO) */
dhd_conf_set_disable_proptx(dhd);
wlfc_enable = FALSE;
}
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
+ DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
+ wlfc_enable = proptx;
+ }
+#endif /* USE_WFA_CERT_CONF */
+
#ifndef DISABLE_11N
ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
if (ret2 != BCME_UNSUPPORTED)
ret = ret2;
+
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+ sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+ __FUNCTION__, ret2, hostreorder));
+
+ bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4,
+ iovbuf, sizeof(iovbuf));
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
+ }
if (ret2 != BCME_OK)
hostreorder = 0;
}
#endif /* DISABLE_11N */
-#ifdef READ_CONFIG_FROM_FILE
- dhd_preinit_config(dhd, 0);
-#endif /* READ_CONFIG_FROM_FILE */
if (wlfc_enable)
dhd_wlfc_init(dhd);
dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0);
#endif
+#ifdef SUPPORT_SENSORHUB
+ bcm_mkiovar("shub", (char *)&shub_enable, 4, iovbuf, sizeof(iovbuf));
+ if ((dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s failed to get shub hub enable information %d\n",
+ __FUNCTION__, ret));
+ dhd->info->shub_enable = 0;
+ } else {
+ memcpy(&shub_enable, iovbuf, sizeof(uint32));
+ dhd->info->shub_enable = shub_enable;
+ DHD_ERROR(("%s: checking sensorhub enable %d\n",
+ __FUNCTION__, dhd->info->shub_enable));
+ }
+#endif /* SUPPORT_SENSORHUB */
done:
if (eventmask_msg)
if (dhd_pub->arp_version == 1) {
idx = 0;
- }
- else {
+ } else {
for (idx = 0; idx < DHD_MAX_IFS; idx++) {
if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
break;
}
- if (idx < DHD_MAX_IFS)
+ if (idx < DHD_MAX_IFS) {
DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
- else {
+ } else {
DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
idx = 0;
}
}
#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
/* Neighbor Discovery Offload: defered handler */
static void
dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
switch (ndo_work->event) {
case NETDEV_UP:
- DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n", __FUNCTION__));
+ DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
ret = dhd_ndo_enable(pub, TRUE);
if (ret < 0) {
DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
return NOTIFY_DONE;
dhd_pub = &dhd->pub;
+
if (!FW_SUPPORTED(dhd_pub, ndoe))
return NOTIFY_DONE;
dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
return NOTIFY_DONE;
}
-#endif /* #ifdef CONFIG_IPV6 */
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
net = ifp->net;
ASSERT(net && (ifp->idx == ifidx));
-#ifndef P2PONEINT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
ASSERT(!net->open);
net->get_stats = dhd_get_stats;
ASSERT(!net->netdev_ops);
net->netdev_ops = &dhd_ops_virt;
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
-#else
- net->netdev_ops = &dhd_cfgp2p_ops_virt;
-#endif /* P2PONEINT */
/* Ok, link into the network layer... */
if (ifidx == 0) {
goto fail;
}
-#ifdef SET_RPS_CPUS
- err = custom_rps_map_set(net->_rx, RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
- if (err < 0)
- DHD_ERROR(("%s : custom_rps_map_set done. error : %d\n", __FUNCTION__, err));
-#endif /* SET_RPS_CPUS */
-
printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
+#if defined(CUSTOMER_HW4_DEBUG)
+ MAC2STRDBG(dhd->pub.mac.octet));
+#else
MAC2STRDBG(net->dev_addr));
+#endif /* CUSTOMER_HW4_DEBUG */
#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif
-#if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
+#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
KERNEL_VERSION(2, 6, 27))))
if (ifidx == 0) {
#ifdef BCMLXSDMMC
up(&dhd_registration_sem);
-#endif
+#endif /* BCMLXSDMMC */
if (!dhd_download_fw_on_driverload) {
+#ifdef WL_CFG80211
+ wl_terminate_event_handler();
+#endif /* WL_CFG80211 */
+#if defined(DHD_LB) && defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB && DHD_LB_RXP */
+#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
dhd_net_bus_suspend(net);
dhd_info_t *dhd;
unsigned long flags;
int timer_valid = FALSE;
+ struct net_device *dev;
if (!dhdp)
return;
if (!dhd)
return;
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
- dhd_global = NULL;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
+ dev = dhd->iflist[0]->net;
+
+ if (dev) {
+ rtnl_lock();
+ if (dev->flags & IFF_UP) {
+ /* If IFF_UP is still up, it indicates that
+ * "ifconfig wlan0 down" hasn't been called.
+ * So invoke dev_close explicitly here to
+ * bring down the interface.
+ */
+ DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
+ dev_close(dev);
+ }
+ rtnl_unlock();
+ }
DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
OSL_SLEEP(100);
}
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef PROP_TXSTATUS
+#ifdef DHD_WLFC_THREAD
+ if (dhd->pub.wlfc_thread) {
+ kthread_stop(dhd->pub.wlfc_thread);
+ dhdp->wlfc_thread_go = TRUE;
+ wake_up_interruptible(&dhdp->wlfc_wqhead);
+ }
+ dhd->pub.wlfc_thread = NULL;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
+
if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
- dhd_bus_detach(dhdp);
-#ifdef PCIE_FULL_DONGLE
- dhd_flow_rings_deinit(dhdp);
-#endif
+ dhd_bus_detach(dhdp);
+#ifdef BCMPCIE
+ if (is_reboot == SYS_RESTART) {
+ extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
+ if (dhd_wifi_platdata && !dhdp->dongle_reset) {
+ dhdpcie_bus_clock_stop(dhdp->bus);
+ wifi_platform_set_power(dhd_wifi_platdata->adapters,
+ FALSE, WIFI_TURNOFF_DELAY);
+ }
+ }
+#endif /* BCMPCIE */
+#ifndef PCIE_FULL_DONGLE
if (dhdp->prot)
dhd_prot_detach(dhdp);
+#endif
}
#ifdef ARP_OFFLOAD_SUPPORT
unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
}
#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
if (dhd_inet6addr_notifier_registered) {
dhd_inet6addr_notifier_registered = FALSE;
unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
}
-#endif
-
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
if (dhd->early_suspend.suspend)
/* in unregister_netdev case, the interface gets freed by net->destructor
* (which is set to free_netdev)
*/
- if (ifp->net->reg_state == NETREG_UNINITIALIZED)
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
free_netdev(ifp->net);
- else {
+ } else {
#ifdef SET_RPS_CPUS
custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
+ netif_tx_disable(ifp->net);
unregister_netdev(ifp->net);
}
ifp->net = NULL;
#ifdef DHD_WMF
dhd_wmf_cleanup(dhdp, 0);
#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdp->tickcnt);
+ deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
dhd_if_del_sta_list(ifp);
DHD_GENERAL_UNLOCK(&dhd->pub, flags);
if (timer_valid)
del_timer_sync(&dhd->timer);
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_rpm_ctl);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
if (dhd->thr_wdt_ctl.thr_pid >= 0) {
PROC_STOP(&dhd->thr_wdt_ctl);
}
if (dhd->thr_dpc_ctl.thr_pid >= 0) {
PROC_STOP(&dhd->thr_dpc_ctl);
- } else
+ } else {
tasklet_kill(&dhd->tasklet);
- }
+#ifdef DHD_LB_RXP
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+ }
+ }
+
+#if defined(DHD_LB)
+ /* Kill the Load Balancing Tasklets */
+#if defined(DHD_LB_TXC)
+ tasklet_disable(&dhd->tx_compl_tasklet);
+ tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+ tasklet_disable(&dhd->rx_compl_tasklet);
+ tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+ if (dhd->cpu_notifier.notifier_call != NULL)
+ unregister_cpu_notifier(&dhd->cpu_notifier);
+ dhd_cpumasks_deinit(dhd);
+#endif /* DHD_LB */
+
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_deinit(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
#ifdef WL_CFG80211
if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
wl_cfg80211_detach(NULL);
kfree(dhd->event_data.fmts);
if (dhd->event_data.raw_fmts)
kfree(dhd->event_data.raw_fmts);
+ if (dhd->event_data.raw_sstr)
+ kfree(dhd->event_data.raw_sstr);
#endif /* SHOW_LOGTRACE */
#ifdef PNO_SUPPORT
#endif
#if defined(CONFIG_PM_SLEEP)
if (dhd_pm_notifier_registered) {
- unregister_pm_notifier(&dhd_pm_notifier);
+ unregister_pm_notifier(&dhd->pm_notifier);
dhd_pm_notifier_registered = FALSE;
}
#endif /* CONFIG_PM_SLEEP */
+
#ifdef DEBUG_CPU_FREQ
if (dhd->new_freq)
free_percpu(dhd->new_freq);
if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
- dhd->wakelock_counter = 0;
dhd->wakelock_wd_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- wake_lock_destroy(&dhd->wl_wifi);
- wake_lock_destroy(&dhd->wl_rxwake);
- wake_lock_destroy(&dhd->wl_ctrlwake);
wake_lock_destroy(&dhd->wl_wdwake);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_destroy(&dhd->wl_intrwake);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* CONFIG_HAS_WAKELOCK */
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
}
-
#ifdef DHDTCPACK_SUPPRESS
/* This will free all MEM allocated for TCPACK SUPPRESS */
dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_deinit(dhdp);
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif
+
+
+ dhd_sysfs_exit(dhd);
+ dhd->pub.is_fw_download_done = FALSE;
dhd_conf_detach(dhdp);
}
dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
dhd = (dhd_info_t *)dhdp->info;
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
+#ifdef CACHE_FW_IMAGES
+ if (dhdp->cached_fw) {
+ MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
+ dhdp->cached_fw = NULL;
+ }
+
+ if (dhdp->cached_nvram) {
+ MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
+ dhdp->cached_nvram = NULL;
+ }
+#endif
/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
if (dhd &&
dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
if (dhdp) {
int i;
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean up timer/data structure for any remaining/pending packet or timer. */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
if (dhdp->reorder_bufs[i]) {
reorder_info_t *ptr;
}
dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
}
}
printf("%s: Exit\n", __FUNCTION__);
}
-static void
+static void __exit
dhd_module_exit(void)
{
+ dhd_buzzz_detach();
dhd_module_cleanup();
unregister_reboot_notifier(&dhd_reboot_notifier);
}
-static int
+static int __init
dhd_module_init(void)
{
int err;
printf("%s: in\n", __FUNCTION__);
+ dhd_buzzz_attach();
+
DHD_PERIM_RADIO_INIT();
+
if (firmware_path[0] != '\0') {
strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
}
} while (retry--);
- if (err)
+ if (err) {
DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
+ } else {
+ if (!dhd_download_fw_on_driverload) {
+ dhd_driver_init_done = TRUE;
+ }
+ }
printf("%s: Exit err=%d\n", __FUNCTION__, err);
return err;
{
DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
if (code == SYS_RESTART) {
+#ifdef BCMPCIE
+ is_reboot = code;
+#endif /* BCMPCIE */
}
-
return NOTIFY_DONE;
}
-#include <linux/rfkill-wlan.h>
-extern int get_wifi_chip_type(void);
-extern char WIFI_MODULE_NAME[];
-extern char RKWIFI_DRV_VERSION[];
-
-#ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
-static int wifi_init_thread(void *data)
-{
- dhd_module_init();
- return 0;
-}
-#endif
-
-int rockchip_wifi_init_module_rkwifi(void)
-{
-#ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
- int type = get_wifi_chip_type();
- if (type > WIFI_AP6XXX_SERIES) return 0;
-#endif
- printf("=======================================================\n");
- printf("==== Launching Wi-Fi driver! (Powered by Rockchip) ====\n");
- printf("=======================================================\n");
- printf("%s WiFi driver (Powered by Rockchip,Ver %s) init.\n", WIFI_MODULE_NAME, RKWIFI_DRV_VERSION);
-#ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
-{
- struct task_struct *kthread = kthread_run(wifi_init_thread, NULL, "wifi_init_thread");
- if (kthread->pid < 0)
- printf("create wifi_init_thread failed.\n");
- return 0;
-}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
+ defined(CONFIG_ARCH_MSM8996)
+deferred_module_init_sync(dhd_module_init);
#else
- return dhd_module_init();
-#endif
-}
-
-void rockchip_wifi_exit_module_rkwifi(void)
-{
-#ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
- int type = get_wifi_chip_type();
- if (type > WIFI_AP6XXX_SERIES) return;
-#endif
- printf("=======================================================\n");
- printf("== Dis-launching Wi-Fi driver! (Powered by Rockchip) ==\n");
- printf("=======================================================\n");
- dhd_module_exit();
-}
-
-#ifdef CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP
-late_initcall(rockchip_wifi_init_module_rkwifi);
-module_exit(rockchip_wifi_exit_module_rkwifi);
+deferred_module_init(dhd_module_init);
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
+ * CONFIG_ARCH_MSM8996
+ */
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
#else
-EXPORT_SYMBOL(rockchip_wifi_init_module_rkwifi);
-EXPORT_SYMBOL(rockchip_wifi_exit_module_rkwifi);
-#endif
-//#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-//#if defined(CONFIG_DEFERRED_INITCALLS)
-//deferred_module_init(dhd_module_init);
-//#elif defined(USE_LATE_INITCALL_SYNC)
-//late_initcall_sync(dhd_module_init);
-//#else
-//late_initcall(dhd_module_init);
-//#endif /* USE_LATE_INITCALL_SYNC */
-//#else
-//module_init(dhd_module_init);
-//#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
-//
-//module_exit(dhd_module_exit);
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_exit);
/*
* OS specific functions required to implement DHD driver in OS independent way
return 0;
}
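+/*
+ * Lock/unlock helpers around dhd_iovar_mutex; callers use these to serialize
+ * driver ("dhd") iovar handling on a given dhd_info_t instance.
+ */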
+void
+dhd_os_dhdiovar_lock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_lock(&dhd->dhd_iovar_mutex);
+ }
+}
+
+void
+dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_iovar_mutex);
+ }
+}
+
unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
}
int
-dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
dhd_info_t * dhd = (dhd_info_t *)(pub->info);
int timeout;
return 0;
}
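+/*
+ * D3 ACK handshake helpers: dhd_os_d3ack_wait() sleeps (with the PERIM lock
+ * dropped) until *condition becomes true or the dhd_ioctl_timeout_msec budget
+ * expires; dhd_os_d3ack_wake() releases the waiter, typically once the dongle
+ * acknowledges a D3 (PCIe suspend) request.
+ */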
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+ timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+ DHD_PERIM_UNLOCK(pub);
+
+ timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+
+ DHD_PERIM_LOCK(pub);
+
+ return timeout;
+}
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up(&dhd->d3ack_wait);
+ return 0;
+}
+
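+/*
+ * Bus-busy tracking: dhd_os_busbusy_wait_negation() blocks until the busy-state
+ * word pointed to by 'condition' drops to zero or DHD_BUS_BUSY_TIMEOUT expires;
+ * dhd_os_busbusy_wake() issues a write barrier and then wakes all waiters so
+ * they re-check the updated state.
+ */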
+int
+dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
+{
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+	/* Wait for bus usage contexts to exit gracefully within some timeout value.
+	 * Set the timeout a little higher than dhd_ioctl_timeout_msec so that the
+	 * IOCTL timeout is not affected.
+	 */
+	/* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+#else
+ timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
+#endif
+
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+
+ return timeout;
+}
+
+int INLINE
+dhd_os_busbusy_wake(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ /* Call wmb() to make sure before waking up the other event value gets updated */
+ OSL_SMP_WMB();
+ wake_up(&dhd->dhd_bus_busy_state_wait);
+ return 0;
+}
+
void
dhd_os_wd_timer_extend(void *bus, bool extend)
{
return;
}
+ DHD_OS_WD_WAKE_LOCK(pub);
DHD_GENERAL_LOCK(pub, flags);
/* don't start the wd until fw is loaded */
return;
}
- /* Totally stop the timer */
- if (!wdtick && dhd->wd_timer_valid == TRUE) {
- dhd->wd_timer_valid = FALSE;
+ /* Totally stop the timer */
+ if (!wdtick && dhd->wd_timer_valid == TRUE) {
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_OS_WD_WAKE_UNLOCK(pub);
+ return;
+ }
+
+ if (wdtick) {
+ DHD_OS_WD_WAKE_LOCK(pub);
+ dhd_watchdog_ms = (uint)wdtick;
+ /* Re arm the timer, at last watchdog period */
+ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+ dhd->wd_timer_valid = TRUE;
+ }
+ DHD_GENERAL_UNLOCK(pub, flags);
+ DHD_OS_WD_WAKE_UNLOCK(pub);
+}
+
+#ifdef DHD_PCIE_RUNTIMEPM
+void
+dhd_os_runtimepm_timer(void *bus, uint tick)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(pub, flags);
+
+ /* don't start the RPM until fw is loaded */
+ if (pub->busstate == DHD_BUS_DOWN ||
+ pub->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
DHD_GENERAL_UNLOCK(pub, flags);
- del_timer_sync(&dhd->timer);
- DHD_OS_WD_WAKE_UNLOCK(pub);
return;
}
- if (wdtick) {
- DHD_OS_WD_WAKE_LOCK(pub);
- dhd_watchdog_ms = (uint)wdtick;
- /* Re arm the timer, at last watchdog period */
- mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
- dhd->wd_timer_valid = TRUE;
+ /* If tick is non-zero, the request is to start the timer */
+ if (tick) {
+ /* Start the timer only if its not already running */
+ if (dhd->rpm_timer_valid == FALSE) {
+ mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
+ dhd->rpm_timer_valid = TRUE;
+ }
+ } else {
+ /* tick is zero, we have to stop the timer */
+ /* Stop the timer only if its running, otherwise we don't have to do anything */
+ if (dhd->rpm_timer_valid == TRUE) {
+ dhd->rpm_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->rpm_timer);
+ /* we have already released the lock, so just go to exit */
+ goto exit;
+ }
}
+
DHD_GENERAL_UNLOCK(pub, flags);
+exit:
+ return;
+
}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
void *
dhd_os_open_image(char *filename)
{
struct file *fp;
+ int size;
fp = filp_open(filename, O_RDONLY, 0);
/*
* fp = open_namei(AT_FDCWD, filename, O_RD, 0);
* ???
*/
- if (IS_ERR(fp))
+ if (IS_ERR(fp)) {
+ fp = NULL;
+ goto err;
+ }
+
+ if (!S_ISREG(file_inode(fp)->i_mode)) {
+ DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
+ fp = NULL;
+ goto err;
+ }
+
+ size = i_size_read(file_inode(fp));
+ if (size <= 0) {
+ DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
fp = NULL;
+ goto err;
+ }
+
+ DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+err:
return fp;
}
{
struct file *fp = (struct file *)image;
int rdlen;
+ int size;
if (!image)
return 0;
- rdlen = kernel_read(fp, fp->f_pos, buf, len);
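+	/* Clamp the read to the on-disk file size; if the caller requested at
+	 * least the whole file but fewer than 'size' bytes came back, report
+	 * -EIO instead of silently returning a truncated image.
+	 */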
+ size = i_size_read(file_inode(fp));
+ rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
+
+ if (len >= size && size != rdlen) {
+ return -EIO;
+ }
+
if (rdlen > 0)
fp->f_pos += rdlen;
}
#ifdef DHDTCPACK_SUPPRESS
-void
+unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
dhd_info_t *dhd;
+ unsigned long flags = 0;
dhd = (dhd_info_t *)(pub->info);
- spin_lock_bh(&dhd->tcpack_lock);
+ if (dhd) {
+#ifdef BCMSDIO
+ spin_lock_bh(&dhd->tcpack_lock);
+#else
+ spin_lock_irqsave(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+ }
+
+ return flags;
}
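+/*
+ * Matching unlock: on SDIO builds the tcpack lock is a BH spinlock and the
+ * flags argument is ignored; otherwise it is an irqsave lock, so the flags
+ * value returned by dhd_os_tcpacklock() must be passed back in here.
+ */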
void
-dhd_os_tcpackunlock(dhd_pub_t *pub)
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
dhd_info_t *dhd;
+#ifdef BCMSDIO
+ BCM_REFERENCE(flags);
+#endif /* BCMSDIO */
+
dhd = (dhd_info_t *)(pub->info);
- spin_unlock_bh(&dhd->tcpack_lock);
+
+ if (dhd) {
+#ifdef BCMSDIO
+		spin_unlock_bh(&dhd->tcpack_lock);
+#else
+ spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+ }
}
#endif /* DHDTCPACK_SUPPRESS */
gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
- if (buf == NULL) {
- DHD_ERROR(("%s: failed to alloc memory, section: %d,"
- " size: %dbytes\n", __FUNCTION__, section, size));
- if (kmalloc_if_fail)
- buf = kmalloc(size, flags);
- }
+ if (buf == NULL && kmalloc_if_fail)
+ buf = kmalloc(size, flags);
return buf;
}
}
#endif /* defined(WL_WIRELESS_EXT) */
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
-static int
-dhd_wlanaudio_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
- wl_event_msg_t *event, void **data)
-{
- int cnt;
- char eabuf[ETHER_ADDR_STR_LEN];
- struct ether_addr *addr = &event->addr;
- uint32 type = ntoh32_ua((void *)&event->event_type);
-
- switch (type) {
- case WLC_E_TXFAIL:
- if (addr != NULL)
- bcm_ether_ntoa(addr, eabuf);
- else
- return (BCME_ERROR);
-
- for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
- if (dhd->wlanaudio_blist[cnt].is_blacklist)
- break;
-
- if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
- addr, ETHER_ADDR_LEN)) {
- /* Mac address is Same */
- dhd->wlanaudio_blist[cnt].cnt++;
-
- if (dhd->wlanaudio_blist[cnt].cnt < 15) {
- /* black list is false */
- if ((dhd->wlanaudio_blist[cnt].cnt > 10) &&
- (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
- < 100)) {
- dhd->wlanaudio_blist[cnt].is_blacklist = true;
- dhd->is_wlanaudio_blist = true;
- }
- } else {
- if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
- (jiffies - dhd->wlanaudio_blist[cnt].txfail_jiffies
- > 100)) {
-
- bzero(&dhd->wlanaudio_blist[cnt],
- sizeof(struct wlanaudio_blacklist));
- }
- }
- break;
- } else if ((!dhd->wlanaudio_blist[cnt].is_blacklist) &&
- (!dhd->wlanaudio_blist[cnt].cnt)) {
- bcopy(addr,
- (char*)&dhd->wlanaudio_blist[cnt].blacklist_addr,
- ETHER_ADDR_LEN);
- dhd->wlanaudio_blist[cnt].cnt++;
- dhd->wlanaudio_blist[cnt].txfail_jiffies = jiffies;
-
- bcm_ether_ntoa(&dhd->wlanaudio_blist[cnt].blacklist_addr, eabuf);
- break;
- }
- }
- break;
- case WLC_E_AUTH :
- case WLC_E_AUTH_IND :
- case WLC_E_DEAUTH :
- case WLC_E_DEAUTH_IND :
- case WLC_E_ASSOC:
- case WLC_E_ASSOC_IND:
- case WLC_E_REASSOC:
- case WLC_E_REASSOC_IND:
- case WLC_E_DISASSOC:
- case WLC_E_DISASSOC_IND:
- {
- int bl_cnt = 0;
-
- if (addr != NULL)
- bcm_ether_ntoa(addr, eabuf);
- else
- return (BCME_ERROR);
-
- for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
- if (!bcmp(&dhd->wlanaudio_blist[cnt].blacklist_addr,
- addr, ETHER_ADDR_LEN)) {
- /* Mac address is Same */
- if (dhd->wlanaudio_blist[cnt].is_blacklist) {
- /* black list is true */
- bzero(&dhd->wlanaudio_blist[cnt],
- sizeof(struct wlanaudio_blacklist));
- }
- }
- }
-
- for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
- if (dhd->wlanaudio_blist[cnt].is_blacklist)
- bl_cnt++;
- }
-
- if (!bl_cnt)
- {
- dhd->is_wlanaudio_blist = false;
- }
-
- break;
- }
- }
- return BCME_OK;
-}
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event, void **data)
{
int bcmerror = 0;
-
ASSERT(dhd != NULL);
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
- bcmerror = dhd_wlanaudio_event(dhd, ifidx, pktdata, event, data);
-
- if (bcmerror != BCME_OK)
- return (bcmerror);
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
-
#ifdef SHOW_LOGTRACE
- bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
#else
- bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
#endif /* SHOW_LOGTRACE */
if (bcmerror != BCME_OK)
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
switch (ntoh32(event->event_type)) {
-#ifdef WLBTAMP
- /* Send up locally generated AMP HCI Events */
- case WLC_E_BTA_HCI_EVENT: {
- struct sk_buff *p, *skb;
- bcm_event_t *msg;
- wl_event_msg_t *p_bcm_event;
- char *ptr;
- uint32 len;
- uint32 pktlen;
- dhd_if_t *ifp;
- dhd_info_t *dhd;
- uchar *eth;
- int ifidx;
-
- len = ntoh32(event->datalen);
- pktlen = sizeof(bcm_event_t) + len + 2;
- dhd = dhdp->info;
- ifidx = dhd_ifname2idx(dhd, event->ifname);
-
- if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
- ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
-
- msg = (bcm_event_t *) PKTDATA(dhdp->osh, p);
-
- bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN);
- bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN);
- ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost);
-
- msg->eth.ether_type = hton16(ETHER_TYPE_BRCM);
-
- /* BCM Vendor specific header... */
- msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG);
- msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION;
- bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN);
-
- /* vendor spec header length + pvt data length (private indication
- * hdr + actual message itself)
- */
- msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH +
- BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len);
- msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT);
-
- PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
-
- /* copy wl_event_msg_t into sk_buf */
-
- /* pointer to wl_event_msg_t in sk_buf */
- p_bcm_event = &msg->event;
- bcopy(event, p_bcm_event, sizeof(wl_event_msg_t));
-
- /* copy hci event into sk_buf */
- bcopy(data, (p_bcm_event + 1), len);
-
- msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) +
- ntoh16(msg->bcm_hdr.length));
- PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2));
-
- ptr = (char *)(msg + 1);
- /* Last 2 bytes of the message are 0x00 0x00 to signal that there
- * are no ethertypes which are following this
- */
- ptr[len+0] = 0x00;
- ptr[len+1] = 0x00;
-
- skb = PKTTONATIVE(dhdp->osh, p);
- eth = skb->data;
- len = skb->len;
-
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL)
- ifp = dhd->iflist[0];
-
- ASSERT(ifp);
- skb->dev = ifp->net;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- skb->data = eth;
- skb->len = len;
-
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
-
- /* Send the packet */
- if (in_interrupt()) {
- netif_rx(skb);
- } else {
- netif_rx_ni(skb);
- }
- }
- else {
- /* Could not allocate a sk_buf */
- DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
- }
- break;
- } /* case WLC_E_BTA_HCI_EVENT */
-#endif /* WLBTAMP */
default:
break;
/* Strip header, count, deliver upward */
skb_pull(skb, ETH_HLEN);
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
/* Send the packet */
if (in_interrupt()) {
netif_rx(skb);
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
- int ret = 0;
+ int ret;
+
dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (flag == TRUE) {
dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd) {
+#ifdef CONFIG_MACH_UNIVERSAL7420
+#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
ret = dhd_set_suspend(val, &dhd->pub);
#else
#ifdef PKT_FILTER_SUPPORT
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+ return 0;
+#else
dhd_info_t *dhd = DHD_DEV_INFO(dev);
char *filterp = NULL;
int filter_id = 0;
if (!dhd_master_mode)
add_remove = !add_remove;
-
- if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
- (num == DHD_MDNS_FILTER_NUM))
+ DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
return ret;
if (num >= dhd->pub.pktfilter_count)
return -EINVAL;
filterp = "103 0 0 0 0xFFFF 0x3333";
filter_id = 103;
break;
+ case DHD_MDNS_FILTER_NUM:
+ filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+ filter_id = 104;
+ break;
default:
return -EINVAL;
}
}
}
return ret;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
}
int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
return dhd_os_enable_packet_filter(&dhd->pub, val);
}
#endif /* PKT_FILTER_SUPPORT */
return ret;
}
+int
+dhd_dev_get_feature_set(struct net_device *dev)
+{
+ dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhd = (&ptr->pub);
+ int feature_set = 0;
+
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+ int intr_width = 0;
+#endif /* CUSTOM_INTR_WIDTH */
+#endif /* DYNAMIC_SWOOB_DURATION */
+ if (!dhd)
+ return feature_set;
+
+ if (FW_SUPPORTED(dhd, sta))
+ feature_set |= WIFI_FEATURE_INFRA;
+ if (FW_SUPPORTED(dhd, dualband))
+ feature_set |= WIFI_FEATURE_INFRA_5G;
+ if (FW_SUPPORTED(dhd, p2p))
+ feature_set |= WIFI_FEATURE_P2P;
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ feature_set |= WIFI_FEATURE_SOFT_AP;
+ if (FW_SUPPORTED(dhd, tdls))
+ feature_set |= WIFI_FEATURE_TDLS;
+ if (FW_SUPPORTED(dhd, vsdb))
+ feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+ if (FW_SUPPORTED(dhd, nan)) {
+ feature_set |= WIFI_FEATURE_NAN;
+		/* NAN is essential for d2d rtt */
+ if (FW_SUPPORTED(dhd, rttd2d))
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ }
+#ifdef RTT_SUPPORT
+ feature_set |= WIFI_FEATURE_D2AP_RTT;
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+ feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
+ /* Supports STA + STA always */
+ feature_set |= WIFI_FEATURE_ADDITIONAL_STA;
+#ifdef PNO_SUPPORT
+ if (dhd_is_pno_supported(dhd)) {
+ feature_set |= WIFI_FEATURE_PNO;
+ feature_set |= WIFI_FEATURE_BATCH_SCAN;
+#ifdef GSCAN_SUPPORT
+ feature_set |= WIFI_FEATURE_GSCAN;
+#endif /* GSCAN_SUPPORT */
+ }
+#endif /* PNO_SUPPORT */
+#ifdef WL11U
+ feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+ return feature_set;
+}
+
+
+int *dhd_dev_get_feature_set_matrix(struct net_device *dev, int *num)
+{
+ int feature_set_full, mem_needed;
+ int *ret;
+
+ *num = 0;
+ mem_needed = sizeof(int) * MAX_FEATURE_SET_CONCURRRENT_GROUPS;
+ ret = (int *) kmalloc(mem_needed, GFP_KERNEL);
+ if (!ret) {
+ DHD_ERROR(("%s: failed to allocate %d bytes\n", __FUNCTION__,
+ mem_needed));
+ return ret;
+ }
+
+ feature_set_full = dhd_dev_get_feature_set(dev);
+
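+	/*
+	 * Each entry below is a subset of feature_set_full forming one
+	 * concurrent-feature group: ret[0] keeps the STA, scan-offload
+	 * (PNO/GSCAN), NAN and hotspot features; ret[1] keeps P2P and drops
+	 * NAN/TDLS/scan offload; ret[2] keeps TDLS (plus NAN) and drops
+	 * P2P/scan offload. See the masks that follow.
+	 */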
+ ret[0] = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ (feature_set_full & WIFI_FEATURE_NAN) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_PNO) |
+ (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+ (feature_set_full & WIFI_FEATURE_GSCAN) |
+ (feature_set_full & WIFI_FEATURE_HOTSPOT) |
+ (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA) |
+ (feature_set_full & WIFI_FEATURE_EPR);
+
+ ret[1] = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ /* Not yet verified NAN with P2P */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ (feature_set_full & WIFI_FEATURE_P2P) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_EPR);
+
+ ret[2] = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ (feature_set_full & WIFI_FEATURE_NAN) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL) |
+ (feature_set_full & WIFI_FEATURE_EPR);
+ *num = MAX_FEATURE_SET_CONCURRRENT_GROUPS;
+
+ return ret;
+}
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+int
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (nodfs)
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ else
+ dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+ return 0;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
}
/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
-dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
}
-/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+/* Linux wrapper to call common dhd_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_stop_for_batch(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
+/* Linux wrapper to call common dhd_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+/* Linux wrapper to call common dhd_pno_set_mac_oui */
+int
+dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_mac_oui(&dhd->pub, oui));
+}
+#endif /* PNO_SUPPORT */
+
+#if defined(PNO_SUPPORT)
+#ifdef GSCAN_SUPPORT
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, uint8 flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
+}
+
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+void
+dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_wait_batch_results_complete(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+void
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_unlock_batch_results(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int
+dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
+}
+
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int
+dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+}
+
+/* Linux wrapper to call common dhd_handle_swc_evt */
+void *
+dhd_dev_swc_scan_event(struct net_device *dev, const void *data, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_handle_swc_evt(&dhd->pub, data, send_evt_bytes));
+}
+
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void *
+dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
+}
+
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void *
+dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void *data, int *send_evt_bytes)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_process_full_gscan_result(&dhd->pub, data, send_evt_bytes));
+}
+
+void
+dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+
+ return;
+}
+
+int
+dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int
+dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_retreive_batch_scan_results(&dhd->pub));
+}
+#endif /* GSCAN_SUPPORT */
+#endif
+#ifdef RTT_SUPPORT
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_set_cfg(&dhd->pub, buf));
+}
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
+}
int
-dhd_dev_pno_stop_for_batch(struct net_device *dev)
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_stop_for_batch(&dhd->pub));
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
}
-/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
int
-dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}
-/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+
int
-dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_rtt_capability(&dhd->pub, capa));
}
-#endif /* PNO_SUPPORT */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
+#endif /* RTT_SUPPORT */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
dhd_info_t *dhd;
dev = dhd->iflist[0]->net;
if (dev) {
- rtnl_lock();
- dev_close(dev);
- rtnl_unlock();
#if defined(WL_WIRELESS_EXT)
wl_iw_send_priv_event(dev, "HANG");
#endif
}
}
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
+{
+ DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+ link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_bus_set_linkdown(link_recovery, TRUE);
+ dhd_os_send_hang_message(link_recovery);
+}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
dhdp->hang_was_sent = 1;
dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
+		DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
}
}
return ret;
}
return ret;
}
+
+int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_info_t *dhd = NULL;
+ dhd_pub_t *dhdp = NULL;
+ int reason;
+
+ dhd = DHD_DEV_INFO(dev);
+ if (dhd) {
+ dhdp = &dhd->pub;
+ }
+
+ if (!dhd || !dhdp) {
+ return 0;
+ }
+
+ reason = bcm_strtoul(string_num, NULL, 0);
+ DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+
+ if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ reason = 0;
+ }
+
+ dhdp->hang_reason = reason;
+
+ return net_os_send_hang_message(dev);
+}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
return wifi_platform_set_power(dhd->adapter, on, delay_msec);
}
+bool dhd_force_country_change(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (dhd && dhd->pub.up)
+ return dhd->pub.force_country_change;
+ return FALSE;
+}
void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
wl_country_t *cspec)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef CUSTOM_COUNTRY_CODE
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+ dhd->pub.dhd_cflags);
+#else
get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
}
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (dhd)
mutex_lock(&dhd->dhd_net_if_mutex);
#endif
static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
if (dhd)
mutex_unlock(&dhd->dhd_net_if_mutex);
#endif
static void dhd_suspend_lock(dhd_pub_t *pub)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_lock(&dhd->dhd_suspend_mutex);
static void dhd_suspend_unlock(dhd_pub_t *pub)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_unlock(&dhd->dhd_suspend_mutex);
void
dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
{
- MFREE(osh, lock, sizeof(spinlock_t) + 4);
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
}
unsigned long
dhd_os_spin_lock(void *lock)
}
#ifdef DHD_DEBUG
+static void
+dhd_convert_memdump_type_to_str(uint32 type, char *buf)
+{
+ char *type_str = NULL;
+
+ switch (type) {
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+ type_str = "resumed_on_timeout";
+ break;
+ case DUMP_TYPE_D3_ACK_TIMEOUT:
+ type_str = "D3_ACK_timeout";
+ break;
+ case DUMP_TYPE_DONGLE_TRAP:
+ type_str = "Dongle_Trap";
+ break;
+ case DUMP_TYPE_MEMORY_CORRUPTION:
+ type_str = "Memory_Corruption";
+ break;
+ case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+ type_str = "PKTID_AUDIT_Fail";
+ break;
+ case DUMP_TYPE_SCAN_TIMEOUT:
+ type_str = "SCAN_timeout";
+ break;
+ case DUMP_TYPE_SCAN_BUSY:
+ type_str = "SCAN_Busy";
+ break;
+ case DUMP_TYPE_BY_SYSDUMP:
+ type_str = "BY_SYSDUMP";
+ break;
+ case DUMP_TYPE_BY_LIVELOCK:
+ type_str = "BY_LIVELOCK";
+ break;
+ case DUMP_TYPE_AP_LINKUP_FAILURE:
+ type_str = "BY_AP_LINK_FAILURE";
+ break;
+ default:
+ type_str = "Unknown_type";
+ break;
+ }
+
+ strncpy(buf, type_str, strlen(type_str));
+ buf[strlen(type_str)] = 0;
+}
+
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
int ret = 0;
- struct file *fp;
+ struct file *fp = NULL;
mm_segment_t old_fs;
loff_t pos = 0;
+ char memdump_path[128];
+ char memdump_type[32];
+ struct timeval curtime;
+ uint32 file_mode;
/* change to KERNEL_DS address limit */
old_fs = get_fs();
set_fs(KERNEL_DS);
+ /* Init file name */
+ memset(memdump_path, 0, sizeof(memdump_path));
+ memset(memdump_type, 0, sizeof(memdump_type));
+ do_gettimeofday(&curtime);
+ dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
+#ifdef CUSTOMER_HW4_DEBUG
+ snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
+ DHD_COMMON_DUMP_PATH "mem_dump", memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif defined(CUSTOMER_HW2)
+ snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
+ "/data/misc/wifi/mem_dump", memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#else
+ snprintf(memdump_path, sizeof(memdump_path), "%s_%s_%ld.%ld",
+ "/installmedia/mem_dump", memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
+ * calling BUG_ON immediately after collecting the socram dump.
+	 * So the file write operation should write the contents straight into the
+	 * file instead of caching it. The O_TRUNC flag ensures that the file is
+	 * re-written instead of appended to.
+ */
+ file_mode = O_CREAT | O_WRONLY | O_DIRECT | O_SYNC | O_TRUNC;
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ /* print SOCRAM dump file path */
+ DHD_ERROR(("%s: memdump_path = %s\n", __FUNCTION__, memdump_path));
+
/* open file to write */
- fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
- if (!fp) {
- printf("%s: open file error\n", __FUNCTION__);
- ret = -1;
+ fp = filp_open(memdump_path, file_mode, 0644);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ printf("%s: open file error, err = %d\n", __FUNCTION__, ret);
goto exit;
}
fp->f_op->write(fp, buf, size, &pos);
exit:
- /* free buf before return */
- MFREE(dhd->osh, buf, size);
/* close file before return */
- if (fp)
+ if (!ret)
filp_close(fp, current->files);
+
/* restore previous address limit */
set_fs(old_fs);
+ /* free buf before return */
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhd, buf, size);
+#else
+ MFREE(dhd->osh, buf, size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+
return ret;
}
#endif /* DHD_DEBUG */
return ret;
}
+
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+int trace_wklock_onoff = 1;
+
+typedef enum dhd_wklock_type {
+ DHD_WAKE_LOCK,
+ DHD_WAKE_UNLOCK,
+ DHD_WAIVE_LOCK,
+ DHD_RESTORE_LOCK
+} dhd_wklock_t;
+
+struct wk_trace_record {
+ unsigned long addr; /* Address of the instruction */
+ dhd_wklock_t lock_type; /* lock_type */
+ unsigned long long counter; /* counter information */
+ struct hlist_node wklock_node; /* hash node */
+};
+
+
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+ struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+ struct hlist_node *entry;
+ int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ if (wklock_info->addr == addr) {
+ return wklock_info;
+ }
+ }
+ return NULL;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ hash_add(hashtable, node, key); \
+ } while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+ hlist_add_head(node, &hashtable[index]); \
+ } while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+#define STORE_WKLOCK_RECORD(wklock_type) \
+ do { \
+ struct wk_trace_record *wklock_info = NULL; \
+ unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+ wklock_info = find_wklock_entry(func_addr); \
+ if (wklock_info) { \
+ if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ } else { \
+ wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+ if (!wklock_info) {\
+ printk("Can't allocate wk_trace_record \n"); \
+ } else { \
+ wklock_info->addr = func_addr; \
+ wklock_info->lock_type = wklock_type; \
+ if (wklock_type == DHD_WAIVE_LOCK || \
+ wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+ } \
+ } \
+ } while (0);
+
+static inline void dhd_wk_lock_rec_dump(void)
+{
+ int bkt;
+ struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ switch (wklock_info->lock_type) {
+ case DHD_WAKE_LOCK:
+ DHD_ERROR(("wakelock lock : %pS lock_counter : %llu\n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_WAKE_UNLOCK:
+ DHD_ERROR(("wakelock unlock : %pS, unlock_counter : %llu\n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_WAIVE_LOCK:
+ DHD_ERROR(("wakelock waive : %pS before_waive : %llu\n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ case DHD_RESTORE_LOCK:
+ DHD_ERROR(("wakelock restore : %pS, after_waive : %llu\n",
+ (void *)wklock_info->addr, wklock_info->counter));
+ break;
+ }
+ }
+}
+
+static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
+{
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ int i;
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_init(wklock_history);
+#else
+ for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
+ INIT_HLIST_HEAD(&wklock_history[i]);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+}
+
+static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
+{
+ int bkt;
+ struct wk_trace_record *wklock_info;
+ struct hlist_node *tmp;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
+#else
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry_safe(wklock_info, entry, tmp,
+ &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_del(&wklock_info->wklock_node);
+#else
+ hlist_del_init(&wklock_info->wklock_node);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
+ kfree(wklock_info);
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+}
+
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ unsigned long flags;
+
+ DHD_ERROR((KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n"));
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd_wk_lock_rec_dump();
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ DHD_ERROR((KERN_ERR"Event wakelock counter %u\n", dhd->wakelock_event_counter));
+}
+#else
+#define STORE_WKLOCK_RECORD(wklock_type)
+#endif /* DHD_TRACE_WAKE_LOCK */
+
int dhd_os_wake_lock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-
if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
wake_lock(&dhd->wl_wifi);
dhd_bus_dev_pm_stay_awake(pub);
#endif
}
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
dhd->wakelock_counter++;
ret = dhd->wakelock_counter;
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
+
+ return ret;
+}
+
+int dhd_event_wake_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
+ if (dhd->wakelock_event_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif
+ }
+ dhd->wakelock_event_counter++;
+ ret = dhd->wakelock_event_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
+ }
+
return ret;
}
dhd_os_wake_lock_timeout(pub);
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
if (dhd->wakelock_counter > 0) {
dhd->wakelock_counter--;
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
wake_unlock(&dhd->wl_wifi);
return ret;
}
+int dhd_event_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_evt_spinlock, flags);
+
+ if (dhd->wakelock_event_counter > 0) {
+ dhd->wakelock_event_counter--;
+ if (dhd->wakelock_event_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(pub);
+#endif
+ }
+ ret = dhd->wakelock_event_counter;
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_evt_spinlock, flags);
+ }
+ return ret;
+}
+
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
return 0;
}
-int dhd_os_check_wakelock_all(dhd_pub_t *pub)
+int
+dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
+#ifdef CONFIG_HAS_WAKELOCK
+ int l1, l2, l3, l4, l7;
+ int l5 = 0, l6 = 0;
+ int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
KERNEL_VERSION(2, 6, 36)))
dhd_info_t *dhd;
- if (!pub)
+ if (!pub) {
return 0;
+ }
dhd = (dhd_info_t *)(pub->info);
+ if (!dhd) {
+ return 0;
+ }
#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
#ifdef CONFIG_HAS_WAKELOCK
- /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
- if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
- wake_lock_active(&dhd->wl_wdwake) ||
- wake_lock_active(&dhd->wl_rxwake) ||
- wake_lock_active(&dhd->wl_ctrlwake))) {
+ c = dhd->wakelock_counter;
+ l1 = wake_lock_active(&dhd->wl_wifi);
+ l2 = wake_lock_active(&dhd->wl_wdwake);
+ l3 = wake_lock_active(&dhd->wl_rxwake);
+ l4 = wake_lock_active(&dhd->wl_ctrlwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ l5 = wake_lock_active(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ l6 = wake_lock_active(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ l7 = wake_lock_active(&dhd->wl_evtwake);
+ lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7);
+
+ /* Indicate to the Host to avoid going to suspend if internal locks are up */
+ if (dhd && lock_active) {
+ DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
+ "ctl-%d intr-%d scan-%d evt-%d\n",
+ __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7));
return 1;
}
#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
return 1;
-#endif
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
return 0;
}
}
#ifdef BCMPCIE_OOB_HOST_WAKE
-int dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
+void
+dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- int ret = 0;
if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
-#endif
}
- return ret;
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+void
+dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
+#ifdef CONFIG_HAS_WAKELOCK
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- int ret = 0;
if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
/* if wl_intrwake is active, unlock it */
if (wake_lock_active(&dhd->wl_intrwake)) {
wake_unlock(&dhd->wl_intrwake);
}
-#endif
}
- return ret;
+#endif /* CONFIG_HAS_WAKELOCK */
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+void
+dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+
+void
+dhd_os_scan_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+ /* if wl_scanwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_scanwake)) {
+ wake_unlock(&dhd->wl_scanwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
+#endif /* DHD_USE_SCAN_WAKELOCK */
+
/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
* by a paired function call to dhd_wakelock_restore. returns current wakelock counter
*/
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
if (dhd->waive_wakelock == FALSE) {
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
/* record current lock status */
dhd->wakelock_before_waive = dhd->wakelock_counter;
dhd->waive_wakelock = TRUE;
return 0;
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
/* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
if (!dhd->waive_wakelock)
goto exit;
* we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
* the lock in between, do the same by calling wake_unlock or pm_relax
*/
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+
if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#ifdef CONFIG_HAS_WAKELOCK
wake_lock(&dhd->wl_wifi);
#endif
} else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
- dhd_bus_dev_pm_relax(&dhd->pub);
-#endif
- }
- dhd->wakelock_before_waive = 0;
-exit:
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- return ret;
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+ }
+ dhd->wakelock_before_waive = 0;
+exit:
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ return ret;
+}
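/*
 * Editor's illustrative sketch (not part of this patch): the waive/restore
 * pair above must bracket suspend-time IOVAR traffic, as the comments note.
 * The entry-point names dhd_os_wake_lock_waive()/dhd_os_wake_lock_restore()
 * are assumed here from the dhd_os_wake_lock*() naming used elsewhere in
 * this file; bcm_mkiovar(), dhd_wl_ioctl_cmd() and WLC_SET_VAR appear in the
 * surrounding code. The helper itself is hypothetical.
 */
static int example_suspend_iovar(dhd_pub_t *pub)
{
	char iovbuf[32];
	int bcn_li_dtim = 3;
	int ret;

	/* build the "bcn_li_dtim" iovar payload */
	bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, 4, iovbuf, sizeof(iovbuf));

	/* waive wake locks while issuing the suspend-time IOVAR ... */
	dhd_os_wake_lock_waive(pub);
	ret = dhd_wl_ioctl_cmd(pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	/* ... and restore them right away; the calls must always be paired */
	dhd_os_wake_lock_restore(pub);

	return ret;
}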
+
+void dhd_os_wake_lock_init(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
+ dhd->wakelock_event_counter = 0;
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+ wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+ wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_init(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+}
+
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_event_counter = 0;
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ wake_lock_destroy(&dhd->wl_wifi);
+ wake_lock_destroy(&dhd->wl_rxwake);
+ wake_lock_destroy(&dhd->wl_ctrlwake);
+ wake_lock_destroy(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
}
bool dhd_os_check_if_up(dhd_pub_t *pub)
void dhd_wlfc_plat_init(void *dhd)
{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
return;
}
void dhd_wlfc_plat_deinit(void *dhd)
{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
return;
}
bool dhd_wlfc_skip_fc(void)
{
+#ifdef SKIP_WLFC_ON_CONCURRENT
+#ifdef WL_CFG80211
+
+ /* enable flow control in vsdb mode */
+ return !(wl_cfg80211_is_concurrent_mode());
+#else
+ return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
+
+#else
return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
}
#endif /* PROP_TXSTATUS */
#ifdef BCMDBGFS
-
#include <linux/debugfs.h>
-extern uint32 dhd_readregl(void *bp, uint32 addr);
-extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
-
typedef struct dhd_dbgfs {
struct dentry *debugfs_dir;
struct dentry *debugfs_mem;
- dhd_pub_t *dhdp;
- uint32 size;
+ dhd_pub_t *dhdp;
+ uint32 size;
} dhd_dbgfs_t;
dhd_dbgfs_t g_dbgfs;
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
void dhd_dbg_init(dhd_pub_t *dhdp)
{
- int err;
-
g_dbgfs.dhdp = dhdp;
g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
if (IS_ERR(g_dbgfs.debugfs_dir)) {
- err = PTR_ERR(g_dbgfs.debugfs_dir);
g_dbgfs.debugfs_dir = NULL;
return;
}
debugfs_remove(g_dbgfs.debugfs_dir);
bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
-
}
-#endif /* ifdef BCMDBGFS */
+#endif /* BCMDBGFS */
#ifdef WLMEDIA_HTSF
if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) {
memcpy(&old_magic, p1+78, 2);
htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
- }
- else
+ } else {
return;
-
+ }
if (htsf_ts->magic == HTSFMAGIC) {
htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
htsf_ts->cE0 = get_cycles();
t = get_cycles();
cur_cycle = t;
- if (cur_cycle > dhd->htsf.last_cycle)
+ if (cur_cycle > dhd->htsf.last_cycle) {
delta = cur_cycle - dhd->htsf.last_cycle;
- else {
+ } else {
delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
}
baseval2 = (delta*10)/(factor+1);
delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
- }
- else {
+ } else {
DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
}
if (cur_tsf.high > prev_tsf.high) {
tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
			DHD_INFO((" ---- Wrap around tsf counter adjusted TSF=%08X\n", tsf_delta));
- }
- else
+ } else {
return; /* do not update */
+ }
}
if (tsf_delta) {
if (dec1 == 9) {
dec1 = 0;
hfactor++;
- }
- else {
+ } else {
dec1++;
}
- }
- else
+ } else {
dec2++;
+ }
}
}
dhd->htsf.last_tsf = cur_tsf.low;
dhd->htsf.coefdec1 = dec1;
dhd->htsf.coefdec2 = dec2;
- }
- else {
+ } else {
htsf = prev_tsf.low;
}
}
return;
}
#endif /* CUSTOM_SET_CPUCORE */
-#if defined(DHD_TCP_WINSIZE_ADJUST)
-static int dhd_port_list_match(int port)
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
- int i;
- for (i = 0; i < MAX_TARGET_PORTS; i++) {
- if (target_ports[i] == port)
- return 1;
- }
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ifp->ap_isolate = val;
+
return 0;
}
-static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
+
+#ifdef DHD_FW_COREDUMP
+
+
+#ifdef CUSTOMER_HW4_DEBUG
+#ifdef PLATFORM_SLP
+#define MEMDUMPINFO "/opt/etc/.memdump.info"
+#else
+#define MEMDUMPINFO "/data/.memdump.info"
+#endif /* PLATFORM_SLP */
+#elif defined(CUSTOMER_HW2)
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#else
+#define MEMDUMPINFO "/installmedia/.memdump.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+ struct file *fp = NULL;
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+ int ret = 0;
+ char *filepath = MEMDUMPINFO;
+
+ /* Read memdump info from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto done;
+ } else {
+ ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ goto done;
+ }
+
+ mem_val = bcm_atoi((char *)&mem_val);
+
+ DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, mem_val));
+ filp_close(fp, NULL);
+ }
+
+done:
+#ifdef CUSTOMER_HW4_DEBUG
+ dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
+#else
+ dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE_BUGON;
+#endif /* CUSTOMER_HW4_DEBUG */
+}
+
+
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+{
+ dhd_dump_t *dump = NULL;
+ dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+ if (dump == NULL) {
+ DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+ return;
+ }
+ dump->buf = buf;
+ dump->bufsize = size;
+
+#if defined(CONFIG_ARM64)
+ DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
+ (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+#elif defined(__ARM_ARCH_7A__)
+ DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
+ (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+#endif /* __ARM_ARCH_7A__ */
+ if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+ BUG_ON(1);
+ }
+
+#ifdef DHD_LOG_DUMP
+ if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+ dhd_schedule_log_dump(dhdp);
+ }
+#endif /* DHD_LOG_DUMP */
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
+ DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WORK_PRIORITY_HIGH);
+}
+static void
+dhd_mem_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_dump_t *dump = event_info;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!dump) {
+ DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (write_to_file(&dhd->pub, dump->buf, dump->bufsize)) {
+ DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+ }
+
+ if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
+#ifdef DHD_LOG_DUMP
+ dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
+#endif
+ TRUE) {
+ BUG_ON(1);
+ }
+ MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
+}
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_LOG_DUMP
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
{
- struct iphdr *ipheader;
- struct tcphdr *tcpheader;
- uint16 win_size;
- int32 incremental_checksum;
+ dhd_info_t *dhd = handle;
- if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
return;
- if (skb == NULL || skb->data == NULL)
+ }
+
+ if (do_dhd_log_dump(&dhd->pub)) {
+ DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
return;
+ }
+}
+
+void dhd_schedule_log_dump(dhd_pub_t *dhdp)
+{
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
+ dhd_log_dump, DHD_WORK_PRIORITY_HIGH);
+}
+
+static int
+do_dhd_log_dump(dhd_pub_t *dhdp)
+{
+ int ret = 0;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+ char dump_path[128];
+ char common_info[1024];
+ struct timeval curtime;
+ uint32 file_mode;
+ unsigned long flags = 0;
+
+ if (!dhdp) {
+ return -1;
+ }
+
+ /* Building the additional information like DHD, F/W version */
+ memset(common_info, 0, sizeof(common_info));
+ snprintf(common_info, sizeof(common_info),
+ "---------- Common information ----------\n"
+ "DHD version: %s\n"
+ "F/W version: %s\n"
+ "----------------------------------------\n",
+ dhd_version, fw_version);
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* Init file name */
+ memset(dump_path, 0, sizeof(dump_path));
+ do_gettimeofday(&curtime);
+ snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
+ DHD_COMMON_DUMP_PATH "debug_dump",
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
+ fp = filp_open(dump_path, file_mode, 0644);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ DHD_ERROR(("open file error, err = %d\n", ret));
+ ret = -1;
+ goto exit;
+ }
+
+ fp->f_op->write(fp, common_info, strlen(common_info), &pos);
+ if (dhdp->dld_buf.wraparound) {
+ fp->f_op->write(fp, dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE, &pos);
+ } else {
+ fp->f_op->write(fp, dhdp->dld_buf.buffer,
+ (int)(dhdp->dld_buf.present - dhdp->dld_buf.front), &pos);
+ }
+
+ /* re-init dhd_log_dump_buf structure */
+ spin_lock_irqsave(&dhdp->dld_buf.lock, flags);
+ dhdp->dld_buf.wraparound = 0;
+ dhdp->dld_buf.present = dhdp->dld_buf.front;
+ dhdp->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
+ bzero(dhdp->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
+ spin_unlock_irqrestore(&dhdp->dld_buf.lock, flags);
+exit:
+ if (!ret) {
+ filp_close(fp, NULL);
+ }
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* DHD_LOG_DUMP */
+
+#ifdef BCMASSERT_LOG
+#ifdef CUSTOMER_HW4_DEBUG
+#ifdef PLATFORM_SLP
+#define ASSERTINFO "/opt/etc/.assert.info"
+#else
+#define ASSERTINFO "/data/.assert.info"
+#endif /* PLATFORM_SLP */
+#elif defined(CUSTOMER_HW2)
+#define ASSERTINFO "/data/misc/wifi/.assert.info"
+#else
+#define ASSERTINFO "/installmedia/.assert.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+void dhd_get_assert_info(dhd_pub_t *dhd)
+{
+ struct file *fp = NULL;
+ char *filepath = ASSERTINFO;
+
+ /*
+ * Read assert info from the file
+ * 0: Trigger Kernel crash by panic()
+ * 1: Print out the logs and don't trigger Kernel panic. (default)
+ * 2: Trigger Kernel crash by BUG()
+ * File doesn't exist: Keep default value (1).
+ */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ } else {
+ int mem_val = 0;
+ int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ } else {
+ mem_val = bcm_atoi((char *)&mem_val);
+ DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ g_assert_type = mem_val;
+ }
+ filp_close(fp, NULL);
+ }
+}
+#endif /* BCMASSERT_LOG */
+
+
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+ return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+
+#if defined(DHD_L2_FILTER)
+bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{
+ return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
+}
+#endif
+
+#ifdef DHD_L2_FILTER
+arp_table_t*
+dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(bssidx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[bssidx];
+ return ifp->phnd_arp_table;
+}
+
+int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (ifp)
+ return ifp->parp_enable;
+ else
+ return FALSE;
+}
+
+/* Set interface specific proxy arp configuration */
+int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ if (!ifp)
+ return BCME_ERROR;
+
+	/* At present all three flags (parp_enable, parp_discard and
+	 * parp_allnode) are set together
+	 */
+ ifp->parp_enable = val;
+ ifp->parp_discard = val;
+ ifp->parp_allnode = !val;
+
+ /* Flush ARP entries when disabled */
+ if (val == FALSE) {
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
+ FALSE, dhdp->tickcnt);
+ }
+ return BCME_OK;
+}
+
+bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
- ipheader = (struct iphdr*)(skb->data);
-
- if (ipheader->protocol == IPPROTO_TCP) {
- tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
- if (tcpheader) {
- win_size = ntoh16(tcpheader->window);
- if (win_size < MIN_TCP_WIN_SIZE &&
- dhd_port_list_match(ntoh16(tcpheader->dest))) {
- incremental_checksum = ntoh16(tcpheader->check);
- incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
- if (incremental_checksum < 0)
- --incremental_checksum;
- tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
- tcpheader->check = hton16((unsigned short)incremental_checksum);
- }
- }
- skb_push(skb, (ipheader->ihl)<<2);
- }
+ ASSERT(ifp);
+ return ifp->parp_discard;
}
-#endif /* DHD_TCP_WINSIZE_ADJUST */
-/* Get interface specific ap_isolate configuration */
-int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+bool
+dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
dhd_info_t *dhd = dhdp->info;
dhd_if_t *ifp;
ifp = dhd->iflist[idx];
- return ifp->ap_isolate;
+ ASSERT(ifp);
+
+ return ifp->parp_allnode;
}
-/* Set interface specific ap_isolate configuration */
-int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
{
dhd_info_t *dhd = dhdp->info;
dhd_if_t *ifp;
ifp = dhd->iflist[idx];
- ifp->ap_isolate = val;
+ ASSERT(ifp);
- return 0;
+ return ifp->dhcp_unicast;
}
-#ifdef DHD_WMF
-/* Returns interface specific WMF configuration */
-dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
dhd_info_t *dhd = dhdp->info;
dhd_if_t *ifp;
-
ASSERT(idx < DHD_MAX_IFS);
-
ifp = dhd->iflist[idx];
- return &ifp->wmf;
+
+ ASSERT(ifp);
+
+ ifp->dhcp_unicast = val;
+ return BCME_OK;
}
-#endif /* DHD_WMF */
+int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
-#ifdef DHD_UNICAST_DHCP
-static int
-dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
- uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
-{
- uint8 *frame = PKTDATA(pub->osh, pktbuf);
- int length = PKTLEN(pub->osh, pktbuf);
- uint8 *pt; /* Pointer to type field */
- uint16 ethertype;
- bool snap = FALSE;
- /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
- if (length < ETHER_HDR_LEN) {
- DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
- __FUNCTION__, length));
- return BCME_ERROR;
- } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
- /* Frame is Ethernet II */
- pt = frame + ETHER_TYPE_OFFSET;
- } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
- !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
- pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
- snap = TRUE;
- } else {
- DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
+ ASSERT(idx < DHD_MAX_IFS);
- ethertype = ntoh16_ua(pt);
+ ifp = dhd->iflist[idx];
- /* Skip VLAN tag, if any */
- if (ethertype == ETHER_TYPE_8021Q) {
- pt += VLAN_TAG_LEN;
+ ASSERT(ifp);
- if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
- DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
- __FUNCTION__, length));
- return BCME_ERROR;
- }
+ return ifp->block_ping;
+}
- ethertype = ntoh16_ua(pt);
- }
+int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->block_ping = val;
- *data_ptr = pt + ETHER_TYPE_LEN;
- *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
- *et_ptr = ethertype;
- *snap_ptr = snap;
return BCME_OK;
}
-static int
-dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
- uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
-{
- struct ipv4_hdr *iph; /* IP frame pointer */
- int iplen; /* IP frame length */
- uint16 ethertype, iphdrlen, ippktlen;
- uint16 iph_frag;
- uint8 prot;
- bool snap;
-
- if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
- &iplen, ðertype, &snap) != 0)
- return BCME_ERROR;
-
- if (ethertype != ETHER_TYPE_IP) {
- return BCME_ERROR;
- }
+int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- /* We support IPv4 only */
- if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
- return BCME_ERROR;
- }
+ ASSERT(idx < DHD_MAX_IFS);
- /* Header length sanity */
- iphdrlen = IPV4_HLEN(iph);
+ ifp = dhd->iflist[idx];
- /*
- * Packet length sanity; sometimes we receive eth-frame size bigger
- * than the IP content, which results in a bad tcp chksum
- */
- ippktlen = ntoh16(iph->tot_len);
- if (ippktlen < iplen) {
-
- DHD_INFO(("%s: extra frame length ignored\n",
- __FUNCTION__));
- iplen = ippktlen;
- } else if (ippktlen > iplen) {
- DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
- __FUNCTION__, ippktlen - iplen));
- return BCME_ERROR;
- }
+ ASSERT(ifp);
- if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
- DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
- __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
- return BCME_ERROR;
- }
+ return ifp->grat_arp;
+}
- /*
- * We don't handle fragmented IP packets. A first frag is indicated by the MF
- * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
- */
- iph_frag = ntoh16(iph->frag);
+int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
- if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
- DHD_INFO(("DHD:%s: IP fragment not handled\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
+ ASSERT(ifp);
- prot = IPV4_PROT(iph);
+ ifp->grat_arp = val;
- *data_ptr = (((uint8 *)iph) + iphdrlen);
- *len_ptr = iplen - iphdrlen;
- *prot_ptr = prot;
return BCME_OK;
}
+#endif /* DHD_L2_FILTER */
-/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
-static
-int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
-{
- dhd_sta_t* stainfo;
- uint8 *eh = PKTDATA(pub->osh, pktbuf);
- uint8 *udph;
- uint8 *dhcp;
- uint8 *chaddr;
- int udpl;
- int dhcpl;
- uint16 port;
- uint8 prot;
-
- if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
- return BCME_ERROR;
- if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
- return BCME_ERROR;
- if (prot != IP_PROT_UDP)
- return BCME_ERROR;
- /* check frame length, at least UDP_HDR_LEN */
- if (udpl < UDP_HDR_LEN) {
- DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
- port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
- /* only process DHCP packets from server to client */
- if (port != DHCP_PORT_CLIENT)
- return BCME_ERROR;
- dhcp = udph + UDP_HDR_LEN;
- dhcpl = udpl - UDP_HDR_LEN;
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+ int ifidx;
+ char * RPS_CPU_SETBUF;
- if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
- DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
- __FUNCTION__));
- return BCME_ERROR;
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- /* only process DHCP reply(offer/ack) packets */
- if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
- return BCME_ERROR;
- chaddr = dhcp + DHCP_CHADDR_OFFSET;
- stainfo = dhd_find_sta(pub, ifidx, chaddr);
- if (stainfo) {
- bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
- return BCME_OK;
+
+ if (ifidx == PRIMARY_INF) {
+ if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
+ DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
+ } else {
+ DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK;
+ }
+ } else if (ifidx == VIRTUAL_INF) {
+ DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
+ } else {
+ DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
+ return -EINVAL;
}
- return BCME_ERROR;
-}
-#endif /* DHD_UNICAST_DHD */
-#ifdef DHD_L2_FILTER
-/* Check if packet type is ICMP ECHO */
-static
-int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
-{
- struct bcmicmp_hdr *icmph;
- int udpl;
- uint8 prot;
- if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
- return BCME_ERROR;
- if (prot == IP_PROT_ICMP) {
- if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
- return BCME_OK;
+ ifp = dhd->iflist[ifidx];
+ if (ifp) {
+ if (enable) {
+ DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
+ custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
+ } else {
+ custom_rps_map_clear(ifp->net->_rx);
+ }
+ } else {
+ DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
+ return -ENODEV;
}
- return BCME_ERROR;
+ return BCME_OK;
}
-#endif /* DHD_L2_FILTER */
-#ifdef SET_RPS_CPUS
int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
struct rps_map *old_map, *map;
}
i = 0;
- for_each_cpu(cpu, mask)
+ for_each_cpu(cpu, mask) {
map->cpus[i++] = cpu;
+ }
- if (i)
+ if (i) {
map->len = i;
- else {
+ } else {
kfree(map);
- DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
map = NULL;
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
+ return -1;
}
spin_lock(&rps_map_lock);
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
- if (map)
+ if (map) {
static_key_slow_inc(&rps_needed);
+ }
if (old_map) {
kfree_rcu(old_map, rcu);
static_key_slow_dec(&rps_needed);
DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
}
}
-#endif /* SET_RPS_CPUS */
+#endif
+
+
+
+#ifdef DHD_DEBUG_PAGEALLOC
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
void
-SDA_setSharedMemory4Send(unsigned int buffer_id,
- unsigned char *buffer, unsigned int buffer_size,
- unsigned int packet_size, unsigned int headroom_size)
+dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
- dhd_info_t *dhd = dhd_global;
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- sda_packet_length = packet_size;
+ DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
+ __FUNCTION__, addr_corrupt, (uint32)len));
- ASSERT(dhd);
- if (dhd == NULL)
- return;
+ DHD_OS_WAKE_LOCK(dhdp);
+ prhex("Page Corruption:", addr_corrupt, len);
+ dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ /* Load the dongle side dump to host memory and then BUG_ON() */
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
+ dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
}
+EXPORT_SYMBOL(dhd_page_corrupt_cb);
+#endif /* DHD_DEBUG_PAGEALLOC */
+#ifdef DHD_PKTID_AUDIT_ENABLED
void
-SDA_registerCallback4SendDone(SDA_SendDoneCallBack packet_cb)
+dhd_pktid_audit_fail_cb(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhd_global;
-
- ASSERT(dhd);
- if (dhd == NULL)
- return;
+ DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(dhdp);
+ dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ /* Load the dongle side dump to host memory and then BUG_ON() */
+ dhdp->memdump_enabled = DUMP_MEMFILE_BUGON;
+ dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is the sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why a sysfs interface?
+ * This is the standard Linux way of changing/configuring run-time parameters
+ * for a driver. We can use this interface to control Linux-specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
-unsigned long long
-SDA_getTsf(unsigned char vif_id)
-{
- dhd_info_t *dhd = dhd_global;
- uint64 tsf_val;
- char buf[WLC_IOCTL_SMLEN];
- int ifidx = 0;
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
- struct tsf {
- uint32 low;
- uint32 high;
- } tsf_buf;
+#if defined(DHD_TRACE_WAKE_LOCK)
- memset(buf, 0, sizeof(buf));
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
- if (vif_id == 0) /* wlan0 tsf */
- ifidx = dhd_ifname2idx(dhd, "wlan0");
- else if (vif_id == 1) /* p2p0 tsf */
- ifidx = dhd_ifname2idx(dhd, "p2p0");
+ buf[ret] = '\n';
+ buf[ret+1] = 0;
- bcm_mkiovar("tsf_bss", 0, 0, buf, sizeof(buf));
+ dhd_wk_lock_stats_dump(&dhd->pub);
+ return ret+1;
+}
- if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifidx) < 0) {
- DHD_ERROR(("%s wl ioctl error\n", __FUNCTION__));
- return 0;
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
+{
+ unsigned long onoff;
+ unsigned long flags;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
+
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
}
- memcpy(&tsf_buf, buf, sizeof(tsf_buf));
- tsf_val = (uint64)tsf_buf.high;
- DHD_TRACE(("%s tsf high 0x%08x, low 0x%08x\n",
- __FUNCTION__, tsf_buf.high, tsf_buf.low));
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ trace_wklock_onoff = onoff;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (trace_wklock_onoff) {
+ printk("ENABLE WAKLOCK TRACE\n");
+ } else {
+ printk("DISABLE WAKELOCK TRACE\n");
+ }
- return ((tsf_val << 32) | tsf_buf.low);
+ return (ssize_t)(onoff+1);
}
-EXPORT_SYMBOL(SDA_getTsf);
+#endif /* DHD_TRACE_WAKE_LOCK */
-unsigned int
-SDA_syncTsf(void)
+/*
+ * Generic Attribute Structure for DHD.
+ * To add a new sysfs entry under /sys/bcm-dhd/, instantiate an object of
+ * type dhd_attr, populate it with the required show/store functions
+ * (ex:- dhd_attr_cpumask_primary) and add the object to the default_attrs[]
+ * array, which gets registered with the kobject of dhd (named bcm-dhd).
+ * A hypothetical example follows the structure definition below.
+ */
+
+struct dhd_attr {
+ struct attribute attr;
+ ssize_t(*show)(struct dhd_info *, char *);
+ ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
+
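+/*
+ * Hypothetical example (not part of this patch): a read-only node named
+ * "drv_info" under /sys/bcm-dhd/, following the pattern described above.
+ * The handler and node names are illustrative only.
+ */
+static ssize_t
+show_drv_info(struct dhd_info *dev, char *buf)
+{
+	/* report a fixed string; returns the number of bytes placed in buf */
+	return snprintf(buf, PAGE_SIZE, "bcmdhd example node\n");
+}
+
+static struct dhd_attr dhd_attr_drv_info =
+	__ATTR(drv_info, 0444, show_drv_info, NULL);
+/* ...and &dhd_attr_drv_info.attr would then be appended to default_attrs[]. */
+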
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+ __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
+
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+ &dhd_attr_wklock.attr,
+#endif
+ NULL
+};
+
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function: the "attr" argument specifies the node
+ * under "bcm-dhd" for which the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
- dhd_info_t *dhd = dhd_global;
- int tsf_sync = 1;
- char iovbuf[WLC_IOCTL_SMLEN];
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+ int ret;
- bcm_mkiovar("wa_tsf_sync", (char *)&tsf_sync, 4, iovbuf, sizeof(iovbuf));
- dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
- DHD_TRACE(("%s\n", __FUNCTION__));
- return 0;
+ return ret;
}
-extern struct net_device *wl0dot1_dev;
-void
-BCMFASTPATH SDA_function4Send(uint buffer_id, void *packet, uint packet_size)
+/*
+ * bcm-dhd kobject store function: the "attr" argument specifies the node
+ * under "bcm-dhd" for which the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
- struct sk_buff *skb;
- sda_packet_t *shm_packet = packet;
- dhd_info_t *dhd = dhd_global;
- int cnt;
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+ int ret;
- static unsigned int cnt_t = 1;
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
- ASSERT(dhd);
- if (dhd == NULL)
- return;
+ return ret;
- if (dhd->is_wlanaudio_blist) {
- for (cnt = 0; cnt < MAX_WLANAUDIO_BLACKLIST; cnt++) {
- if (dhd->wlanaudio_blist[cnt].is_blacklist == true) {
- if (!bcmp(dhd->wlanaudio_blist[cnt].blacklist_addr.octet,
- shm_packet->headroom.ether_dhost, ETHER_ADDR_LEN))
- return;
- }
- }
- }
+}
+
+static struct sysfs_ops dhd_sysfs_ops = {
+ .show = dhd_show,
+ .store = dhd_store,
+};
- if ((cnt_t % 10000) == 0)
- cnt_t = 0;
+static struct kobj_type dhd_ktype = {
+ .sysfs_ops = &dhd_sysfs_ops,
+ .default_attrs = default_attrs,
+};
- cnt_t++;
+/* Create a kobject and attach to sysfs interface */
+static int dhd_sysfs_init(dhd_info_t *dhd)
+{
+ int ret = -1;
- /* packet_size may be smaller than SDA_SHM_PKT_SIZE, remaining will be garbage */
-#define TXOFF 26
- skb = __dev_alloc_skb(TXOFF + sda_packet_length - SDA_PKT_HEADER_SIZE, GFP_ATOMIC);
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return ret;
+ }
- skb_reserve(skb, TXOFF - SDA_HEADROOM_SIZE);
- skb_put(skb, sda_packet_length - SDA_PKT_HEADER_SIZE + SDA_HEADROOM_SIZE);
- skb->priority = PRIO_8021D_VO; /* PRIO_8021D_VO or PRIO_8021D_VI */
+ /* Initialize the kobject */
+ ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+ if (ret) {
+ kobject_put(&dhd->dhd_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
- /* p2p_net */
- skb->dev = wl0dot1_dev;
- shm_packet->txTsf = 0x0;
- shm_packet->rxTsf = 0x0;
- memcpy(skb->data, &shm_packet->headroom,
- sda_packet_length - OFFSETOF(sda_packet_t, headroom));
- shm_packet->desc.ready_to_copy = 0;
+ /*
+ * We are always responsible for sending the uevent that the kobject
+ * was added to the system.
+ */
+ kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
- dhd_start_xmit(skb, skb->dev);
+ return ret;
}
-void
-SDA_registerCallback4Recv(unsigned char *pBufferTotal,
- unsigned int BufferTotalSize)
+/* Done with the kobject and detach the sysfs interface */
+static void dhd_sysfs_exit(dhd_info_t *dhd)
{
- dhd_info_t *dhd = dhd_global;
-
- ASSERT(dhd);
- if (dhd == NULL)
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
return;
-}
+ }
+ /* Release the kobject */
+ kobject_put(&dhd->dhd_kobj);
+}
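+
+/*
+ * Usage note (illustrative): with the kobject registered by dhd_sysfs_init()
+ * (NULL parent, name "bcm-dhd"), the wakelock trace node is expected to show
+ * up as /sys/bcm-dhd/wklock_trace when DHD_TRACE_WAKE_LOCK is enabled.
+ * Writing "1"/"0" toggles tracing via wklock_trace_onoff(), and reading the
+ * node dumps the wakelock statistics to the kernel log via
+ * show_wklock_trace().
+ */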
+#ifdef DHD_LOG_DUMP
void
-SDA_setSharedMemory4Recv(unsigned char *pBufferTotal,
- unsigned int BufferTotalSize,
- unsigned int BufferUnitSize,
- unsigned int Headroomsize)
+dhd_log_dump_init(dhd_pub_t *dhd)
{
- dhd_info_t *dhd = dhd_global;
+ spin_lock_init(&dhd->dld_buf.lock);
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ dhd->dld_buf.buffer = DHD_OS_PREALLOC(dhd,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF, DHD_LOG_DUMP_BUFFER_SIZE);
+#else
+ dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- ASSERT(dhd);
- if (dhd == NULL)
- return;
+ if (!dhd->dld_buf.buffer) {
+ dhd->dld_buf.buffer = kmalloc(DHD_LOG_DUMP_BUFFER_SIZE, GFP_KERNEL);
+ DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
+
+ if (!dhd->dld_buf.buffer) {
+ DHD_ERROR(("Failed to allocate memory for dld_buf.\n"));
+ return;
+ }
+ }
+
+ dhd->dld_buf.wraparound = 0;
+ dhd->dld_buf.max = (unsigned long)dhd->dld_buf.buffer + DHD_LOG_DUMP_BUFFER_SIZE;
+ dhd->dld_buf.present = dhd->dld_buf.buffer;
+ dhd->dld_buf.front = dhd->dld_buf.buffer;
+ dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
+ dhd->dld_enable = 1;
}
+void
+dhd_log_dump_deinit(dhd_pub_t *dhd)
+{
+ dhd->dld_enable = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhd,
+ dhd->dld_buf.buffer, DHD_LOG_DUMP_BUFFER_SIZE);
+#else
+ kfree(dhd->dld_buf.buffer);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+}
void
-SDA_function4RecvDone(unsigned char * pBuffer, unsigned int BufferSize)
+dhd_log_dump_print(const char *fmt, ...)
{
- dhd_info_t *dhd = dhd_global;
+ int len = 0;
+ char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+ va_list args;
+ dhd_pub_t *dhd = NULL;
+ unsigned long flags = 0;
- ASSERT(dhd);
- if (dhd == NULL)
+ if (wl_get_bcm_cfg80211_ptr()) {
+ dhd = (dhd_pub_t*)(wl_get_bcm_cfg80211_ptr()->pub);
+ }
+
+ if (!dhd || dhd->dld_enable != 1) {
+ return;
+ }
+
+ va_start(args, fmt);
+
+ len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+ if (len < 0) {
return;
+ }
+
+ /* make a critical section to eliminate race conditions */
+ spin_lock_irqsave(&dhd->dld_buf.lock, flags);
+ if (dhd->dld_buf.remain < len) {
+ dhd->dld_buf.wraparound = 1;
+ dhd->dld_buf.present = dhd->dld_buf.front;
+ dhd->dld_buf.remain = DHD_LOG_DUMP_BUFFER_SIZE;
+ }
+
+ strncpy(dhd->dld_buf.present, tmp_buf, len);
+ dhd->dld_buf.remain -= len;
+ dhd->dld_buf.present += len;
+ spin_unlock_irqrestore(&dhd->dld_buf.lock, flags);
+
+ /* double check invalid memory operation */
+ ASSERT((unsigned long)dhd->dld_buf.present <= dhd->dld_buf.max);
+ va_end(args);
+}
+
+char*
+dhd_log_dump_get_timestamp(void)
+{
+ static char buf[16];
+ u64 ts_nsec;
+ unsigned long rem_nsec;
+
+ ts_nsec = local_clock();
+ rem_nsec = do_div(ts_nsec, 1000000000);
+ snprintf(buf, sizeof(buf), "%5lu.%06lu",
+ (unsigned long)ts_nsec, rem_nsec / 1000);
+
+ return buf;
}
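+
+/*
+ * Minimal usage sketch (illustrative, not part of this patch): callers are
+ * expected to prepend the timestamp themselves, e.g.
+ *
+ *	dhd_log_dump_print("[%s] %s: tx stalled\n",
+ *		dhd_log_dump_get_timestamp(), __FUNCTION__);
+ *
+ * The formatted line is copied into dld_buf and wraps back to the front of
+ * the buffer once fewer than "len" bytes remain.
+ */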
-EXPORT_SYMBOL(SDA_setSharedMemory4Send);
-EXPORT_SYMBOL(SDA_registerCallback4SendDone);
-EXPORT_SYMBOL(SDA_syncTsf);
-EXPORT_SYMBOL(SDA_function4Send);
-EXPORT_SYMBOL(SDA_registerCallback4Recv);
-EXPORT_SYMBOL(SDA_setSharedMemory4Recv);
-EXPORT_SYMBOL(SDA_function4RecvDone);
+#endif /* DHD_LOG_DUMP */
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
+/* ---------------------------- End of sysfs implementation ------------------------------------- */
void *dhd_get_pub(struct net_device *dev)
{
else
return NULL;
}
+
+bool dhd_os_wd_timer_enabled(void *bus)
+{
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+ return FALSE;
+ }
+ return dhd->wd_timer_valid;
+}
/*
* DHD Linux header file (dhd_linux exports for cfg80211 and other components)
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux.h 591285 2015-10-07 11:56:29Z $
*/
/* wifi platform functions for power, interrupt and pre-alloc, either
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+#if defined(CUSTOMER_HW)
+#define WLAN_PLAT_NODFS_FLAG 0x01
+#define WLAN_PLAT_AP_FLAG 0x02
+struct wifi_platform_data {
+ int (*set_power)(bool val);
+ int (*set_carddetect)(bool val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+#if defined(CUSTOM_COUNTRY_CODE)
+ void *(*get_country_code)(char *ccode, u32 flags);
+#else /* defined (CUSTOM_COUNTRY_CODE) */
+ void *(*get_country_code)(char *ccode);
+#endif
+};
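+
+/*
+ * Board-side sketch (illustrative only; the example_* callbacks are
+ * hypothetical and not part of this patch): a platform/board file typically
+ * fills this structure and hands it to the WLAN platform device as
+ * platform_data, so the driver can switch power, force card detect and fetch
+ * a provisioned MAC address, e.g.
+ *
+ *	static struct wifi_platform_data example_wifi_control = {
+ *		.set_power      = example_wifi_set_power,
+ *		.set_carddetect = example_wifi_set_carddetect,
+ *		.mem_prealloc   = example_wifi_mem_prealloc,
+ *		.get_mac_addr   = example_wifi_get_mac_addr,
+ *	};
+ */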
+#endif
#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finish dhd registration */
typedef struct wifi_adapter_info {
/** Per STA params. A list of dhd_sta objects are managed in dhd_if */
typedef struct dhd_sta {
+ cumm_ctr_t cumm_ctr; /* cumulative queue length of child flowrings */
uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
void * ifp; /* associated dhd_if */
struct ether_addr ea; /* stations ethernet mac address */
int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+#ifdef CUSTOM_COUNTRY_CODE
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
+ u32 flags);
+#else
void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
/*
* Linux platform device for DHD WLAN adapter
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_platdev.c 591285 2015-10-07 11:56:29Z $
*/
#include <typedefs.h>
#include <linux/kernel.h>
#include <linux/of_gpio.h>
#endif /* CONFIG_DTS */
-#ifdef CUSTOMER_HW
+#if defined(CUSTOMER_HW)
#if defined(CUSTOMER_OOB)
extern uint bcm_wlan_get_oob_irq(void);
extern uint bcm_wlan_get_oob_irq_flags(void);
return -EOPNOTSUPP;
}
-void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+void *
+#ifdef CUSTOM_COUNTRY_CODE
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode, u32 flags)
+#else
+wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+#endif /* CUSTOM_COUNTRY_CODE */
{
/* get_country_code was added after 2.6.39 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
DHD_TRACE(("%s\n", __FUNCTION__));
if (plat_data->get_country_code) {
+#ifdef CUSTOM_COUNTRY_CODE
+ return plat_data->get_country_code(ccode, flags);
+#else
return plat_data->get_country_code(ccode);
+#endif /* CUSTOM_COUNTRY_CODE */
 }
 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
return NULL;
}
* DHD (either SDIO, USB or PCIe)
*/
adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+ if (adapter == NULL) {
+ DHD_ERROR(("%s:adapter alloc failed", __FUNCTION__));
+ return ENOMEM;
+ }
adapter->name = "DHD generic adapter";
adapter->bus_type = -1;
adapter->bus_num = -1;
/*
* Expose some of the kernel scheduler routines
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux_sched.c 457570 2014-02-23 13:54:46Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $
*/
#include <linux/kernel.h>
#include <linux/module.h>
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $
*/
#include <linux/init.h>
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_linux_wq.h 449578 2014-01-17 13:53:20Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_linux_wq.h 597512 2015-11-05 11:37:36Z $
*/
#ifndef _dhd_linux_wq_h_
#define _dhd_linux_wq_h_
DHD_WQ_WORK_SET_MCAST_LIST,
DHD_WQ_WORK_IPV6_NDO,
DHD_WQ_WORK_HANG_MSG,
+ DHD_WQ_WORK_SOC_RAM_DUMP,
+ DHD_WQ_WORK_DHD_LOG_DUMP,
DHD_MAX_WQ_EVENTS
};
-/*
- * Header file describing the internal (inter-module) DHD interfaces.
- *
+/**
+ * @file definition of host message ring functionality
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_msgbuf.c 504484 2014-09-24 10:11:20Z $
+ * $Id: dhd_msgbuf.c 605475 2015-12-10 12:49:49Z $
*/
+
+
#include <typedefs.h>
#include <osl.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
+
#include <dhd_bus.h>
-#include <dhd_dbg.h>
+#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_flowring.h>
-#ifdef PROP_TXSTATUS
-#include <wlfc_proto.h>
-#include <dhd_wlfc.h>
-#endif
-
#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
-#include <dhd_ip.h>
-/*
- * PCIE D2H DMA Complete Sync Modes
- *
- * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into
- * Host system memory. A WAR using one of 3 approaches is needed:
- * 1. Dongle places ia modulo-253 seqnum in last word of each D2H message
- * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
- * writes in the last word of each work item. Each work item has a seqnum
- * number = sequence num % 253.
- * 3. Read Barrier: Dongle does a host memory read access prior to posting an
- * interrupt.
- * Host does not participate with option #3, other than reserving a host system
- * memory location for the dongle to read.
+#if defined(DHD_LB)
+#include <linux/cpu.h>
+#include <bcm_ring.h>
+#define DHD_LB_WORKQ_SZ (8192)
+#define DHD_LB_WORKQ_SYNC (16)
+#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
+#endif /* DHD_LB */
+
+
+/**
+ * Host configures a soft doorbell for d2h rings by specifying a 32-bit host
+ * address where a value must be written. Host may also request interrupt
+ * coalescing on this soft doorbell.
+ * Use case: Hosts with network processors may register with the dongle the
+ * network processor's thread wakeup register and a value corresponding to the
+ * core/thread context. The dongle will issue a write transaction <address,value>
+ * to the PCIE RC, which the host needs to route to the mapped register space.
*/
-#define PCIE_D2H_SYNC
-#define PCIE_D2H_SYNC_WAIT_TRIES 1024
-#define PCIE_D2H_SYNC_BZERO /* bzero a message before updating the RD offset */
+/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
+
+/* Dependency Check */
+#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
+#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
-#define IOCTL_HDR_LEN 12
#define DEFAULT_RX_BUFFERS_TO_POST 256
#define RXBUFPOST_THRESHOLD 32
-#define RX_BUF_BURST 16
+#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
-#define MODX(x, n) ((x) & ((n) -1))
-#define align(x, n) (MODX(x, n) ? ((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n)))
-#define RX_DMA_OFFSET 8
+#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+#define FLOWRING_SIZE (H2DRING_TXPOST_MAX_ITEM * H2DRING_TXPOST_ITEMSIZE)
+
+/* flags for ioctl pending status */
+#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
+#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
-#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_ALIGN_LEN 4
+
+#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
-#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
-#define DHD_PROT_FUNCS 22
-
-typedef struct dhd_mem_map {
- void *va;
- dmaaddr_t pa;
- void *dmah;
-} dhd_mem_map_t;
+#define DHD_PROT_FUNCS 37
-typedef struct dhd_dmaxfer {
- dhd_mem_map_t srcmem;
- dhd_mem_map_t destmem;
- uint32 len;
- uint32 srcdelay;
- uint32 destdelay;
-} dhd_dmaxfer_t;
+/* Length of buffer in host for bus throughput measurement */
+#define DHD_BUS_TPUT_BUF_LEN 2048
#define TXP_FLUSH_NITEMS
+
+/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
+#define RING_NAME_MAX_LENGTH 24
+
+
+struct msgbuf_ring; /* ring context for common and flow rings */
+
+/**
+ * PCIE D2H DMA Complete Sync Modes
+ *
+ * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
+ * host system memory. A WAR using one of 4 approaches is needed:
+ * 1. Dongle places a modulo-253 seqnum in last word of each D2H message
+ * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
+ * writes in the last word of each work item. Each work item has a seqnum
+ * number = sequence num % 253.
+ *
+ * 3. Read Barrier: Dongle does a host memory read access prior to posting an
+ * interrupt, ensuring that D2H data transfer indeed completed.
+ * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
+ * ring contents before the indices.
+ *
+ * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
+ * callback (see dhd_prot_d2h_sync_none) may be bound.
+ *
+ * The dongle advertises its host-side sync mechanism requirements.
+ */
+#define PCIE_D2H_SYNC
+
+#if defined(PCIE_D2H_SYNC)
+#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (3UL)
+#define PCIE_D2H_SYNC_DELAY (50UL) /* in terms of usecs */
+
+/**
+ * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
+ *
+ * On success: return cmn_msg_hdr_t::msg_type
+ * On failure: return 0 (invalid msg_type)
+ */
+typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+#endif /* PCIE_D2H_SYNC */
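+
+/*
+ * Orientation note: the handlers bound through this callback are
+ * dhd_prot_d2h_sync_seqnum() for SEQNUM mode, dhd_prot_d2h_sync_xorcsum()
+ * for XORCSUM mode, and dhd_prot_d2h_sync_none() when the dongle requires no
+ * host-side sync; dhd_prot_d2h_sync_init() selects among them based on the
+ * mode the dongle advertises.
+ */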
+
+
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
+ * flowids do not.
+ *
+ * The dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
+ * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
+ *
+ * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
+ * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
+ *
+ * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
+ * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
+ *
+ * D2H Control Complete RingId = 2
+ * D2H Transmit Complete RingId = 3
+ * D2H Receive Complete RingId = 4
+ *
+ * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
+ * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
+ * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
+ *
+ * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
+ * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
+ *
+ * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_COMMON_MSGRINGS = 5, and the
+ * FlowId values would be in the range [2..133] and the corresponding
+ * RingId values would be in the range [5..136].
+ *
+ * The flowId allocator may choose to allocate FlowIds:
+ * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
+ * X# of uc flowids in consecutive ranges (per station Id), where X is the
+ * number of access categories (e.g. 4 uc flowids per station).
+ *
+ * CAUTION:
+ * When the DMA indices array feature is used, RingId=5, corresponding to the 0th
+ * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices,
+ * since the FlowId truly represents the index in the H2D DMA indices array.
+ *
+ * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
+ * will represent the index in the D2H DMA indices array.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* First TxPost Flowring Id */
+#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
+
+/* Determine whether a ringid belongs to a TxPost flowring */
+#define DHD_IS_FLOWRING(ringid) \
+ ((ringid) >= BCMPCIE_COMMON_MSGRINGS)
+
+/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
+#define DHD_FLOWID_TO_RINGID(flowid) \
+ (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
+
+/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
+#define DHD_RINGID_TO_FLOWID(ringid) \
+ (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
+
+/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
+ * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
+ * any array of H2D rings.
+ */
+#define DHD_H2D_RING_OFFSET(ringid) \
+ ((DHD_IS_FLOWRING(ringid)) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
+
+/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
+ * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
+ * any array of D2H rings.
+ */
+#define DHD_D2H_RING_OFFSET(ringid) \
+ ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Convert a D2H DMA Indices Offset to a RingId */
+#define DHD_D2H_RINGID(offset) \
+ ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
+
+
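+/*
+ * Worked example (illustrative): with the sample Rev5 mapping above,
+ * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the first
+ * TxPost flowring has FlowId 2 and RingId 5:
+ *
+ *	DHD_FLOWID_TO_RINGID(2) = 5 + (2 - 2) = 5
+ *	DHD_RINGID_TO_FLOWID(5) = 2 + (5 - 5) = 2
+ *	DHD_H2D_RING_OFFSET(5)  = 2   (flowring: FlowId indexes the H2D array)
+ *	DHD_D2H_RING_OFFSET(3)  = 1   (D2H Tx Complete ring)
+ */
+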
+#define DHD_DMAH_NULL ((void*)NULL)
+
+/*
+ * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
+ * buffer does not occupy the entire cacheline, and another object is placed
+ * following the DMA-able buffer, data corruption may occur if the DMA-able
+ * buffer is used for DMAing into (e.g. D2H direction), when HW cache coherency
+ * is not available.
+ */
+#if defined(L1_CACHE_BYTES)
+#define DHD_DMA_PAD (L1_CACHE_BYTES)
+#else
+#define DHD_DMA_PAD (128)
+#endif
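+
+/*
+ * Illustrative arithmetic (assumption: the allocator adds DHD_DMA_PAD to the
+ * requested length): with 64-byte cachelines, a 2052-byte D2H DMA-able buffer
+ * would be allocated as 2052 + 64 bytes, so an object placed right after it
+ * cannot share the last cacheline the dongle DMAs into.
+ */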
+
+/* Used in loopback tests */
+typedef struct dhd_dmaxfer {
+ dhd_dma_buf_t srcmem;
+ dhd_dma_buf_t dstmem;
+ uint32 srcdelay;
+ uint32 destdelay;
+ uint32 len;
+ bool in_progress;
+} dhd_dmaxfer_t;
+
+/**
+ * msgbuf_ring : This object manages the host side ring that includes a DMA-able
+ * buffer, the WR and RD indices, ring parameters such as the max number of items
+ * and the length of each item, and other miscellaneous runtime state.
+ * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
+ * H2D TxPost ring as specified in the PCIE FullDongle Spec.
+ * Ring parameters are conveyed to the dongle, which maintains its own peer end
+ * ring state. Depending on whether the DMA Indices feature is supported, the
+ * host will update the WR/RD index in the DMA indices array in host memory or
+ * directly in dongle memory.
+ */
typedef struct msgbuf_ring {
- bool inited;
- uint16 idx;
- uchar name[24];
- dhd_mem_map_t ring_base;
+ bool inited;
+ uint16 idx; /* ring id */
+ uint16 rd; /* read index */
+ uint16 curr_rd; /* read index for debug */
+ uint16 wr; /* write index */
+ uint16 max_items; /* maximum number of items in ring */
+ uint16 item_len; /* length of each item in the ring */
+ sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
+ dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
+ uint32 seqnum; /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
- void* start_addr;
- uint16 pend_items_count;
+ void *start_addr;
+ /* # of messages on ring not yet announced to dongle */
+ uint16 pend_items_count;
#endif /* TXP_FLUSH_NITEMS */
- ring_mem_t *ringmem;
- ring_state_t *ringstate;
-#if defined(PCIE_D2H_SYNC)
- uint32 seqnum;
-#endif /* PCIE_D2H_SYNC */
- void *secdma;
+ uchar name[RING_NAME_MAX_LENGTH];
} msgbuf_ring_t;
-#if defined(PCIE_D2H_SYNC)
-/* Custom callback attached based upon D2H DMA Sync mode used in dongle. */
-typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg, int msglen);
-#endif /* PCIE_D2H_SYNC */
+#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
+#define DHD_RING_END_VA(ring) \
+ ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
+ (((ring)->max_items - 1) * (ring)->item_len))
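+/*
+ * Note (illustrative): DHD_RING_END_VA points at the start of the last item,
+ * e.g. for a ring with max_items = 256 and item_len = 16 it evaluates to
+ * dma_buf.va + 255 * 16 bytes.
+ */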
+
+
+/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
- uint32 reqid;
- uint32 lastcmd;
- uint32 pending;
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
uint16 max_ioctlrespbufpost;
uint16 cur_event_bufs_posted;
uint16 cur_ioctlresp_bufs_posted;
- uint16 active_tx_count;
+
+ /* Flow control mechanism based on active transmits pending */
+ uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
uint16 max_tx_count;
- uint16 txp_threshold;
- /* Ring info */
- msgbuf_ring_t *h2dring_txp_subn;
- msgbuf_ring_t *h2dring_rxp_subn;
- msgbuf_ring_t *h2dring_ctrl_subn; /* Cbuf handle for H2D ctrl ring */
- msgbuf_ring_t *d2hring_tx_cpln;
- msgbuf_ring_t *d2hring_rx_cpln;
- msgbuf_ring_t *d2hring_ctrl_cpln; /* Cbuf handle for D2H ctrl ring */
+ uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
+
+ /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
+ msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
+ msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
+ msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
+ msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
+ msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
+
+ msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
+ dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
+ uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
+
uint32 rx_dataoffset;
- dhd_mem_map_t retbuf;
- dhd_mem_map_t ioctbuf; /* For holding ioct request buf */
- dhd_mb_ring_t mb_ring_fn;
- uint32 d2h_dma_scratch_buf_len; /* For holding ioct request buf */
- dhd_mem_map_t d2h_dma_scratch_buf; /* For holding ioct request buf */
+ dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
- uint32 h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */
- dhd_mem_map_t h2d_dma_writeindx_buf; /* For holding dma ringupd buf - submission write */
+ /* ioctl related resources */
+ uint8 ioctl_state;
+ int16 ioctl_status; /* status returned from dongle */
+ uint16 ioctl_resplen;
+ dhd_ioctl_recieved_status_t ioctl_received;
+ uint curr_ioctl_cmd;
+ dhd_dma_buf_t retbuf; /* For holding ioctl response */
+ dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
- uint32 h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */
- dhd_mem_map_t h2d_dma_readindx_buf; /* For holding dma ringupd buf - submission read */
+ dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
- uint32 d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */
- dhd_mem_map_t d2h_dma_writeindx_buf; /* For holding dma ringupd buf - completion write */
+ /* DMA-able arrays for holding WR and RD indices */
+ uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
+ dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
+ dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
+ dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
+ dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
- uint32 d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */
- dhd_mem_map_t d2h_dma_readindx_buf; /* For holding dma ringupd buf - completion read */
+ dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
+
+ dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
+ uint32 flowring_num;
#if defined(PCIE_D2H_SYNC)
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
#endif /* PCIE_D2H_SYNC */
- dhd_dmaxfer_t dmaxfer;
- bool dmaxfer_in_progress;
+
+ dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
uint16 ioctl_seq_no;
uint16 data_seq_no;
uint16 ioctl_trans_id;
- void *pktid_map_handle;
+ void *pktid_map_handle; /* a pktid maps to a packet and its metadata */
+ bool metadata_dbg;
+ void *pktid_map_handle_ioctl;
+
+ /* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
uint16 tx_metadata_offset;
- uint16 rx_cpln_early_upd_idx;
+
+
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+ /* Host's soft doorbell configuration */
+ bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+#if defined(DHD_LB)
+ /* Work Queues to be used by the producer and the consumer, and threshold
+ * when the WRITE index must be synced to consumer's workq
+ */
+#if defined(DHD_LB_TXC)
+ uint32 tx_compl_prod_sync ____cacheline_aligned;
+ bcm_workq_t tx_compl_prod, tx_compl_cons;
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+ uint32 rx_compl_prod_sync ____cacheline_aligned;
+ bcm_workq_t rx_compl_prod, rx_compl_cons;
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
} dhd_prot_t;
-static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+/* Convert a dmaaddr_t to a base_addr with htol operations */
+static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
+
+/* APIs for managing a DMA-able buffer */
+static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
+static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+
+/* msgbuf ring management */
+static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
+static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+
+/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
+static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
+static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
+
+/* Fetch and Release a flowring msgbuf_ring from flowring pool */
+static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
+ uint16 flowid);
+/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
+
+/* Producer: Allocate space in a msgbuf ring */
+static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint16 nitems, uint16 *alloced, bool exactly_nitems);
+static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
+ uint16 *alloced, bool exactly_nitems);
+
+/* Consumer: Determine the location where the next message may be consumed */
+static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint32 *available_len);
+
+/* Producer (WR index update) or Consumer (RD index update) indication */
+static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ void *p, uint16 len);
+static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+
+/* Allocate DMA-able memory for saving H2D/D2H WR/RD indices */
+static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+ dhd_dma_buf_t *dma_buf, uint32 bufsz);
+
+/* Set/Get a RD or WR index in the array of indices */
+/* See also: dhd_prot_dma_indx_init() */
+static void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
+ uint16 ringid);
+static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+
+/* Locate a packet given a pktid */
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
+ bool free_pktid);
+/* Locate a packet given a PktId and free it. */
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
+
+static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
void *buf, uint len, uint8 action);
-static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf);
-
-static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd);
-static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count);
-static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt);
-static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen);
-static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len);
-static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
-static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
-
-static void dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
-static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- uint16 msglen, uint16 *alloced);
-static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf,
- int ifidx);
-static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid);
-static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid);
+static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
+static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
+ void *buf, int ifidx);
+
+/* Post buffers for Rx, control ioctl response and events */
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
+static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
+static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
+
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
+
+/* D2H Message handling */
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
+
+/* D2H Message handlers */
+static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
+
+/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
uint destdelay, dhd_dmaxfer_t *dma);
-static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen);
-static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
-static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
-static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
+/* Flowring management communication with dongle */
+static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
+static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
+/* Configure a soft doorbell per D2H ring */
+static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
+static void dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg);
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
-#ifdef DHD_RX_CHAINING
-#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
- (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
- !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
- !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
- !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
- ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
- ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
- (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+/** callback functions for messages generated by the dongle */
+#define MSG_TYPE_INVALID 0
-static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
-static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
-static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
-
-#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
-#endif /* DHD_RX_CHAINING */
-
-static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
-static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
-static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
-
-static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring);
-static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
-static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item,
- uint16 len_item, uint16 ringid);
-static void* prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced);
-static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index);
-static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid);
-static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len);
-static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
-static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len);
-static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
-static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
-
-typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen);
static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
- dhd_prot_noop, /* 0 is invalid message type */
+ dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
NULL,
- dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+ dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
NULL,
- dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+ dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
NULL,
- dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+ dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
NULL,
dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
NULL,
NULL,
dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
NULL,
- dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
- NULL,
+ dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+ NULL, /* MSG_TYPE_FLOW_RING_RESUME */
+ NULL, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
+ NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
+ NULL, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
+ NULL, /* MSG_TYPE_INFO_BUF_POST */
+ NULL, /* MSG_TYPE_INFO_BUF_CMPLT */
+ NULL, /* MSG_TYPE_H2D_RING_CREATE */
+ NULL, /* MSG_TYPE_D2H_RING_CREATE */
+ NULL, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
+ NULL, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
+ NULL, /* MSG_TYPE_H2D_RING_CONFIG */
+ NULL, /* MSG_TYPE_D2H_RING_CONFIG */
+ NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
+ dhd_prot_d2h_ring_config_cmplt_process, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
+ NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
+ NULL, /* MSG_TYPE_D2H_MAILBOX_DATA */
};
-#if defined(PCIE_D2H_SYNC)
+#ifdef DHD_RX_CHAINING
-/*
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+ (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+ !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+ ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+ ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+ (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))) && \
+ dhd_l2_filter_chainable((dhd), (evh), (ifidx)))
+
+static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
+
+#endif /* DHD_RX_CHAINING */
+
+static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
+
+#if defined(PCIE_D2H_SYNC) /* avoids problems related to host CPU cache */
+
+/**
* D2H DMA to completion callback handlers. Based on the mode advertised by the
* dongle through the PCIE shared region, the appropriate callback will be
 * registered in the proto layer to be invoked prior to processing any message
* from a D2H DMA ring. If the dongle uses a read barrier or another mode that
* does not require host participation, then a noop callback handler will be
- * bound that simply returns the msgtype.
+ * bound that simply returns the msg_type.
*/
-static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum,
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring,
uint32 tries, uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
-static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot);
+static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
+
+void dhd_prot_collect_memdump(dhd_pub_t *dhd)
+{
+ DHD_ERROR(("%s(): Collecting mem dump now \r\n", __FUNCTION__));
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+}
-/* Debug print a livelock avert by dropping a D2H message */
+/**
+ * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
+ * not completed, a livelock condition occurs. Host will avert this livelock by
+ * dropping this message and moving to the next. This dropped message can lead
+ * to a packet leak, or even something disastrous in the case the dropped
+ * message happens to be a control response.
+ * Here we will log this condition. One may choose to reboot the dongle.
+ *
+ */
static void
-dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, uint32 tries,
+dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 tries,
uchar *msg, int msglen)
{
- DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>\n",
- dhd, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
- dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot));
+ uint32 seqnum = ring->seqnum;
+
+ DHD_ERROR(("LIVELOCK DHD<%p> name<%s> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>"
+ "dma_buf va<%p> msg<%p> curr_rd<%d>\n",
+ dhd, ring->name, seqnum, seqnum% D2H_EPOCH_MODULO, tries,
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
+ ring->dma_buf.va, msg, ring->curr_rd));
prhex("D2H MsgBuf Failure", (uchar *)msg, msglen);
+ dhd_dump_to_kernelog(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
-/* Sync on a D2H DMA to complete using SEQNUM mode */
+/**
+ * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
+ * mode. Sequence number is always in the last word of a message.
+ */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */
dhd_prot_t *prot = dhd->prot;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+
+ ASSERT(msglen == ring->item_len);
+
+ BCM_REFERENCE(delay);
+ /*
+ * Retries use a stepper algorithm. Every time the dongle comes out of
+ * the D3 cold state, the first D2H mem2mem DMA takes longer to
+ * complete, leading to livelock issues.
+ *
+ * Case 1 - Apart from the host CPU, some other bus master is
+ * accessing the DDR port, probably a page close to the ring,
+ * so PCIE does not get a chance to update the memory.
+ * Solution - Increase the number of tries.
+ *
+ * Case 2 - The 50usec breathing time given by the host CPU is not
+ * sufficient for the PCIe RC to start its work.
+ * Solution - Increase the delay in a stepper fashion, so that no
+ * unwanted extra delay is introduced in normal conditions.
+ */
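+	/*
+	 * Illustrative worst case with the defaults above: 3 steps x 512 tries
+	 * = 1536 polls before the livelock handler runs; on builds that add
+	 * OSL_DELAY(50 * step), this contributes roughly
+	 * 512 * (50 + 100 + 150) usec, i.e. about 154 ms of extra delay.
+	 */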
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ uint32 msg_seqnum = *marker;
+ if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
+ ring->seqnum++; /* next expected sequence number */
+ goto dma_completed;
+ }
- ASSERT(msglen == RING_LEN_ITEMS(ring));
-
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- uint32 msg_seqnum = *marker;
- if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
- ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
- }
-
- if (tries > prot->d2h_sync_wait_max)
- prot->d2h_sync_wait_max = tries;
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
+ /* For ARM there is no pause in cpu_relax, so add extra delay */
+ OSL_DELAY(delay * step);
+#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for number of steps */
- dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen);
+ dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
- return 0; /* invalid msgtype 0 -> noop callback */
+ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
- prot->d2h_sync_wait_tot += tries;
+ prot->d2h_sync_wait_tot += total_tries;
return msg->msg_type;
}
-/* Sync on a D2H DMA to complete using XORCSUM mode */
+/**
+ * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
+ * mode. The xorcsum is placed in the last word of a message. Dongle will also
+ * place a seqnum in the epoch field of the cmn_msg_hdr.
+ */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
int num_words = msglen / sizeof(uint32); /* num of 32bit words */
uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
dhd_prot_t *prot = dhd->prot;
-
- ASSERT(msglen == RING_LEN_ITEMS(ring));
-
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
- if (prot_checksum == 0U) { /* checksum is OK */
- if (msg->epoch == ring_seqnum) {
- ring->seqnum++; /* next expected sequence number */
- goto dma_completed;
+ uint32 step = 0;
+ uint32 delay = PCIE_D2H_SYNC_DELAY;
+ uint32 total_tries = 0;
+
+ ASSERT(msglen == ring->item_len);
+
+ BCM_REFERENCE(delay);
+
+	/*
+	 * For retries we use a stepper algorithm.
+	 * We see that every time the Dongle comes out of the D3
+	 * Cold state, the first D2H mem2mem DMA takes more time to
+	 * complete, leading to livelock issues.
+	 *
+	 * Case 1 - Apart from the Host CPU some other bus master is
+	 * accessing the DDR port, probably a page close to the ring,
+	 * so PCIE does not get a chance to update the memory.
+	 * Solution - Increase the number of tries.
+	 *
+	 * Case 2 - The 50usec breathing time given by the Host CPU is not
+	 * sufficient for the PCIe RC to start its work.
+	 * Solution - Increase the delay in a stepper fashion.
+	 * This is done to ensure that no unwanted extra delay is
+	 * introduced in normal conditions.
+	 */
+ for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
+ for (tries = 1; tries <= PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
+ if (prot_checksum == 0U) { /* checksum is OK */
+ if (msg->epoch == ring_seqnum) {
+ ring->seqnum++; /* next expected sequence number */
+ goto dma_completed;
+ }
}
- }
- if (tries > prot->d2h_sync_wait_max)
- prot->d2h_sync_wait_max = tries;
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
+
+ if (total_tries > prot->d2h_sync_wait_max)
+ prot->d2h_sync_wait_max = total_tries;
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
+ OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890)
+ /* For ARM there is no pause in cpu_relax, so add extra delay */
+ OSL_DELAY(delay * step);
+#endif /* defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890) */
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for PCIE_D2H_SYNC_WAIT_TRIES */
+ } /* for number of steps */
- dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen);
+ dhd_prot_d2h_sync_livelock(dhd, ring, total_tries, (uchar *)msg, msglen);
ring->seqnum++; /* skip this message ... leak of a pktid */
- return 0; /* invalid msgtype 0 -> noop callback */
+ return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
dma_completed:
- prot->d2h_sync_wait_tot += tries;
+ prot->d2h_sync_wait_tot += total_tries;
return msg->msg_type;
}
-/* Do not sync on a D2H DMA */
+/**
+ * dhd_prot_d2h_sync_none - The dongle ensures that the DMA will complete, so the
+ * host need not try to sync. This noop sync handler will be bound when the dongle
+ * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
+ */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
return msg->msg_type;
}
-/* Initialize the D2H DMA Sync mode, per D2H ring seqnum and dhd stats */
+/**
+ * dhd_prot_d2h_sync_init - Set up the host side DMA sync mode based on what the
+ * dongle advertises.
+ */
static void
-dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot)
+dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
+ dhd_prot_t *prot = dhd->prot;
prot->d2h_sync_wait_max = 0UL;
prot->d2h_sync_wait_tot = 0UL;
- prot->d2hring_tx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_rx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_ctrl_cpln->seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
+ prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
- if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
+ if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
- else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
+ } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
- else
+ } else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
+ }
}
#endif /* PCIE_D2H_SYNC */
+int INLINE
+dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
+{
+ /* To synchronize with the previous memory operations call wmb() */
+ OSL_SMP_WMB();
+ dhd->prot->ioctl_received = reason;
+	/* Call another wmb() to make sure ioctl_received is updated before the waiter is woken up */
+ OSL_SMP_WMB();
+ dhd_os_ioctl_resp_wake(dhd);
+ return 0;
+}
+
+/**
+ * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
+ */
+static void
+dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
+ prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
+}
+
+/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
+
+
/*
* +---------------------------------------------------------------------------+
- * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
- * The packet id map, also includes storage for some packet parameters that
- * may be saved. A native packet pointer along with the parameters may be saved
- * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
- * and the metadata may be retrieved using the previously allocated packet id.
+ * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
+ * virtual and physical addresses, the buffer length and the DMA handler.
+ * A secdma handler is also included in the dhd_dma_buf object.
* +---------------------------------------------------------------------------+
*/
-#define MAX_PKTID_ITEMS (8192) /* Maximum number of pktids supported */
-
-typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
-/* Construct a packet id mapping table, returing an opaque map handle */
-static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items);
+static INLINE void
+dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
+{
+ base_addr->low_addr = htol32(PHYSADDRLO(pa));
+ base_addr->high_addr = htol32(PHYSADDRHI(pa));
+}
-/* Destroy a packet id mapping table, freeing all packets active in the table */
-static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map);
-/* Determine number of pktids that are available */
-static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map);
+/**
+ * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
+ */
+static int
+dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ uint32 base, end; /* dongle uses 32bit ptr arithmetic */
-/* Allocate a unique pktid against which a pkt and some metadata is saved */
-static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle,
- void *pkt);
-static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt,
- uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma);
-static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt,
- dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma);
+ ASSERT(dma_buf);
+ base = PHYSADDRLO(dma_buf->pa);
+ ASSERT(base);
+ ASSERT(ISALIGNED(base, DMA_ALIGN_LEN));
+ ASSERT(dma_buf->len != 0);
-/* Return an allocated pktid, retrieving previously saved pkt and metadata */
-static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id,
- dmaaddr_t *physaddr, uint32 *len, void **secdma);
+ /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
+ end = (base + dma_buf->len); /* end address */
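+	/* e.g. base 0xFFFFF000 with len 0x2000 wraps to end 0x00001000 in 32bit
+	 * arithmetic, which is below base, so such a buffer is rejected below.
+	 */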
-/* Packet metadata saved in packet id mapper */
-typedef struct dhd_pktid_item {
- bool inuse; /* tag an item to be in use */
- uint8 dma; /* map direction: flush or invalidate */
- uint16 len; /* length of mapped packet's buffer */
- void *pkt; /* opaque native pointer to a packet */
- dmaaddr_t physaddr; /* physical address of mapped packet's buffer */
- void *secdma;
-} dhd_pktid_item_t;
+ if ((end & 0xFFFFFFFF) < (base & 0xFFFFFFFF)) { /* exclude carryover */
+ DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
+ __FUNCTION__, base, dma_buf->len));
+ return BCME_ERROR;
+ }
-typedef struct dhd_pktid_map {
- void *osh;
- int items; /* total items in map */
- int avail; /* total available items */
- int failures; /* lockers unavailable count */
- uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
- dhd_pktid_item_t lockers[0]; /* metadata storage */
-} dhd_pktid_map_t;
+ return BCME_OK;
+}
-/*
- * PktId (Locker) #0 is never allocated and is considered invalid.
- *
- * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
- * depleted pktid pool and must not be used by the caller.
- *
- * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+/**
+ * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
*/
-#define DHD_PKTID_INVALID (0U)
+static int
+dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
+{
+ uint32 dma_pad = 0;
+ osl_t *osh = dhd->osh;
-#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
-#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
- (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+ ASSERT(dma_buf != NULL);
+ ASSERT(dma_buf->va == NULL);
+ ASSERT(dma_buf->len == 0);
-#define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items))
-#define NATIVE_TO_PKTID_FINI(map) dhd_pktid_map_fini(map)
-#define NATIVE_TO_PKTID_CLEAR(map) dhd_pktid_map_clear(map)
+ /* Pad the buffer length by one extra cacheline size.
+ * Required for D2H direction.
+ */
+ dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+ dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
+ DMA_ALIGN_LEN, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
-#define NATIVE_TO_PKTID_RSV(map, pkt) dhd_pktid_map_reserve((map), (pkt))
-#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma, secdma) \
- dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma, \
- (void *)(secdma))
-#define NATIVE_TO_PKTID(map, pkt, pa, len, dma, secdma) \
- dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma, (void *)(secdma))
+ if (dma_buf->va == NULL) {
+ DHD_ERROR(("%s: buf_len %d, no memory available\n",
+ __FUNCTION__, buf_len));
+ return BCME_NOMEM;
+ }
-#define PKTID_TO_NATIVE(map, pktid, pa, len, secdma) \
- dhd_pktid_map_free((map), (uint32)(pktid), \
- (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **) &secdma)
+ dma_buf->len = buf_len; /* not including padded len */
-#define PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
+ if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
+ dhd_dma_buf_free(dhd, dma_buf);
+ return BCME_ERROR;
+ }
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
-#define FLOWRING_NAME "h2dflr"
-#define RING_IS_FLOWRING(ring) \
- ((strncmp(ring->name, FLOWRING_NAME, sizeof(FLOWRING_NAME))) == (0))
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
+ dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
-/*
- * +---------------------------------------------------------------------------+
- * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
- *
- * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
- *
- * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
- * packet id is returned. This unique packet id may be used to retrieve the
- * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
- * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
- * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
- *
- * Implementation Note:
- * Convert this into a <key,locker> abstraction and place into bcmutils !
- * Locker abstraction should treat contents as opaque storage, and a
- * callback should be registered to handle inuse lockers on destructor.
- *
- * +---------------------------------------------------------------------------+
- */
+ return BCME_OK;
+}
+
+/**
+ * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
+ */
+static void
+dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ if ((dma_buf == NULL) || (dma_buf->va == NULL)) {
+ return;
+ }
+
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+ /* Zero out the entire buffer and cache flush */
+ memset((void*)dma_buf->va, 0, dma_buf->len);
+ OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
+}
+
+/**
+ * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
+ * dhd_dma_buf_alloc().
+ */
+static void
+dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
+{
+ osl_t *osh = dhd->osh;
+
+ ASSERT(dma_buf);
+
+ if (dma_buf->va == NULL) {
+ return; /* Allow for free invocation, when alloc failed */
+ }
+
+ /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+
+ /* dma buffer may have been padded at allocation */
+ DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
+ dma_buf->pa, dma_buf->dmah);
+
+ memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
+}
+
+/**
+ * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
+ * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object; use memset 0.
+ */
+void
+dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
+ void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
+{
+ dhd_dma_buf_t *dma_buf;
+ ASSERT(dhd_dma_buf);
+ dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
+ dma_buf->va = va;
+ dma_buf->len = len;
+ dma_buf->pa = pa;
+ dma_buf->dmah = dmah;
+ dma_buf->secdma = secdma;
+
+ /* Audit user defined configuration */
+ (void)dhd_dma_buf_audit(dhd, dma_buf);
+}
+
+/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
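+
+/*
+ * Minimal usage sketch of the DMA buf ADT above (illustrative only, not invoked
+ * by the driver; 'example_len' is a made-up length):
+ *
+ *	dhd_dma_buf_t buf;
+ *	memset(&buf, 0, sizeof(buf));
+ *	if (dhd_dma_buf_alloc(dhd, &buf, example_len) != BCME_OK)
+ *		return BCME_NOMEM;
+ *	// program buf.pa into the dongle, use buf.va on the host
+ *	dhd_dma_buf_reset(dhd, &buf);	// zero out and cache flush before reuse
+ *	dhd_dma_buf_free(dhd, &buf);	// also clears the dhd_dma_buf_t fields
+ */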
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Its main purpose is to save memory on the dongle; it has other purposes as well.
+ * The packet id map also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define DHD_PCIE_PKTID
+#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
+
+/* On Router, the pktptr serves as a pktid. */
+
+
+#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
+#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
+#endif
+
+/* Enum for marking the buffer color based on usage */
+typedef enum dhd_pkttype {
+ PKTTYPE_DATA_TX = 0,
+ PKTTYPE_DATA_RX,
+ PKTTYPE_IOCTL_RX,
+ PKTTYPE_EVENT_RX,
+ /* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
+ PKTTYPE_NO_CHECK
+} dhd_pkttype_t;
+
+#define DHD_PKTID_INVALID (0U)
+#define DHD_IOCTL_REQ_PKTID (0xFFFE)
+#define DHD_FAKE_PKTID (0xFACE)
+
+#define DHD_PKTID_FREE_LOCKER (FALSE)
+#define DHD_PKTID_RSV_LOCKER (TRUE)
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returning an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
+
+#define PKTID_MAP_HANDLE (0)
+#define PKTID_MAP_HANDLE_IOCTL (1)
+
+#define DHD_NATIVE_TO_PKTID_INIT(dhd, items, index) dhd_pktid_map_init((dhd), (items), (index))
+#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
+
+#if defined(DHD_PCIE_PKTID)
+
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt);
+static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
+ void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
+ void *dmah, void *secdma, dhd_pkttype_t pkttype);
+static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+ void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
+ void *dmah, void *secdma, dhd_pkttype_t pkttype);
+
+/* Return an allocated pktid, retrieving previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
+ uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
+ void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
+
+/*
+ * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
+ *
+ * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
+ * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
+ *
+ * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
+ * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
+ */
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+#define USE_DHD_PKTID_AUDIT_LOCK 1
+/* Audit the pktidmap allocator */
+/* #define DHD_PKTID_AUDIT_MAP */
+
+/* Audit the pktid during production/consumption of workitems */
+#define DHD_PKTID_AUDIT_RING
+
+#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
+#error "Only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be enabled at a time."
+#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
+
+#define DHD_DUPLICATE_ALLOC 1
+#define DHD_DUPLICATE_FREE 2
+#define DHD_TEST_IS_ALLOC 3
+#define DHD_TEST_IS_FREE 4
+
+#ifdef USE_DHD_PKTID_AUDIT_LOCK
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
+#define DHD_PKTID_AUDIT_LOCK(lock) 0
+#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
+#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/* #define USE_DHD_PKTID_LOCK 1 */
+
+#ifdef USE_DHD_PKTID_LOCK
+#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
+#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
+#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
+#else
+#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
+#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
+ do { \
+ BCM_REFERENCE(osh); \
+ BCM_REFERENCE(lock); \
+ } while (0)
+#define DHD_PKTID_LOCK(lock) 0
+#define DHD_PKTID_UNLOCK(lock, flags) \
+ do { \
+ BCM_REFERENCE(lock); \
+ BCM_REFERENCE(flags); \
+ } while (0)
+#endif /* !USE_DHD_PKTID_LOCK */
+
+/* Packet metadata saved in packet id mapper */
+
+/* The Locker can be 3 states
+ * LOCKER_IS_FREE - Locker is free and can be allocated
+ * LOCKER_IS_BUSY - Locker is assigned and is being used, values in the
+ * locker (buffer address, len, phy addr etc) are populated
+ * with valid values
+ * LOCKER_IS_RSVD - The locker is reserved for future use, but the values
+ * in the locker are not valid. Especially pkt should be
+ * NULL in this state. When the user wants to re-use the
+ * locker dhd_pktid_map_free can be called with a flag
+ * to reserve the pktid for future use, which will clear
+ * the contents of the locker. When the user calls
+ * dhd_pktid_map_save the locker would move to LOCKER_IS_BUSY
+ */
+typedef enum dhd_locker_state {
+ LOCKER_IS_FREE,
+ LOCKER_IS_BUSY,
+ LOCKER_IS_RSVD
+} dhd_locker_state_t;
+
+typedef struct dhd_pktid_item {
+ dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
+ uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
+ dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
+ uint16 len; /* length of mapped packet's buffer */
+ void *pkt; /* opaque native pointer to a packet */
+ dmaaddr_t pa; /* physical address of mapped packet's buffer */
+ void *dmah; /* handle to OS specific DMA map */
+ void *secdma;
+} dhd_pktid_item_t;
+
+typedef struct dhd_pktid_map {
+ uint32 items; /* total items in map */
+ uint32 avail; /* total available items */
+ int failures; /* lockers unavailable count */
+ /* Spinlock to protect dhd_pktid_map in process/tasklet context */
+ void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ void *pktid_audit_lock;
+ struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
+ dhd_pktid_item_t lockers[0]; /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
+
+#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
+#define DHD_PKIDMAP_ITEMS(items) (items)
+#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
+ (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+
+#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, map) dhd_pktid_map_fini_ioctl((dhd), (map))
+
+/* Convert a packet to a pktid, and save pkt pointer in busy locker */
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) dhd_pktid_map_reserve((dhd), (map), (pkt))
+
+/* Reuse a previously reserved locker to save packet params */
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
+ dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
+
+/* Convert a packet to a pktid, and save packet params in locker */
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
+ dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
+ (uint8)(dir), (void *)(dmah), (void *)(secdma), \
+ (dhd_pkttype_t)(pkttype))
+
+/* Convert pktid to a packet, and free the locker */
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
+
+/* Convert the pktid to a packet, empty locker, but keep it reserved */
+#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **) &secdma, (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
+
+#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+
+static int dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+ const int test_for, const char *errmsg);
+
+/**
+* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
+*/
+static int
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+ const int test_for, const char *errmsg)
+{
+#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
+
+ const uint32 max_pktid_items = (MAX_PKTID_ITEMS);
+ struct bcm_mwbmap *handle;
+ uint32 flags;
+ bool ignore_audit;
+
+ if (pktid_map == (dhd_pktid_map_t *)NULL) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
+ return BCME_OK;
+ }
+
+ flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
+
+ handle = pktid_map->pktid_audit;
+ if (handle == (struct bcm_mwbmap *)NULL) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
+ }
+
+ /* Exclude special pktids from audit */
+ ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
+ if (ignore_audit) {
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
+ }
+
+ if ((pktid == DHD_PKTID_INVALID) || (pktid > max_pktid_items)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
+ /* lock is released in "error" */
+ goto error;
+ }
+
+ /* Perform audit */
+ switch (test_for) {
+ case DHD_DUPLICATE_ALLOC:
+ if (!bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
+ errmsg, pktid));
+ goto error;
+ }
+ bcm_mwbmap_force(handle, pktid);
+ break;
+
+ case DHD_DUPLICATE_FREE:
+ if (bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
+ errmsg, pktid));
+ goto error;
+ }
+ bcm_mwbmap_free(handle, pktid);
+ break;
+
+ case DHD_TEST_IS_ALLOC:
+ if (bcm_mwbmap_isfree(handle, pktid)) {
+ DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
+ errmsg, pktid));
+ goto error;
+ }
+ break;
+
+ case DHD_TEST_IS_FREE:
+ if (!bcm_mwbmap_isfree(handle, pktid)) {
+				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
+ errmsg, pktid));
+ goto error;
+ }
+ break;
+
+ default:
+ goto error;
+ }
+
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
+
+error:
+
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ /* May insert any trap mechanism here ! */
+ dhd_pktid_audit_fail_cb(dhd);
+
+ return BCME_ERROR;
+}
+
+#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
+ dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+/* +------------------ End of PCIE DHD PKTID AUDIT ------------------------+ */
+
+
+/**
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place into bcmutils !
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle busy lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
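+
+/*
+ * A minimal usage sketch of the mapper (illustrative only; 'pkt', 'pa', 'len',
+ * 'dmah' and 'secdma' stand for values the caller already owns, and DMA_TX is
+ * the OSL map direction used for a transmit buffer):
+ *
+ *	dhd_pktid_map_handle_t *map;
+ *	uint32 pktid;
+ *
+ *	map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
+ *	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX,
+ *	                            dmah, secdma, PKTTYPE_DATA_TX);
+ *	if (pktid == DHD_PKTID_INVALID)
+ *		return BCME_NORESOURCE;	// pool depleted: do not post the workitem
+ *	// later, when the dongle completes the workitem carrying 'pktid'
+ *	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma,
+ *	                          PKTTYPE_DATA_TX);
+ *	DHD_NATIVE_TO_PKTID_FINI(dhd, map);
+ */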
+
+/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
-/* Allocate and initialize a mapper of num_items <numbered_key, locker> */
static dhd_pktid_map_handle_t *
-dhd_pktid_map_init(void *osh, uint32 num_items)
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
{
+ void *osh;
uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
+ uint32 map_items;
+#ifdef DHD_USE_STATIC_PKTIDMAP
+ uint32 section;
+#endif /* DHD_USE_STATIC_PKTIDMAP */
+ osh = dhd->osh;
- ASSERT((num_items >= 1) && num_items <= MAX_PKTID_ITEMS);
+ ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
- if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) {
+#ifdef DHD_USE_STATIC_PKTIDMAP
+ if (index == PKTID_MAP_HANDLE) {
+ section = DHD_PREALLOC_PKTID_MAP;
+ } else {
+ section = DHD_PREALLOC_PKTID_MAP_IOCTL;
+ }
+
+ map = (dhd_pktid_map_t *)DHD_OS_PREALLOC(dhd, section, dhd_pktid_map_sz);
+#else
+ map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz);
+#endif /* DHD_USE_STATIC_PKTIDMAP */
+
+ if (map == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
- __FUNCTION__, __LINE__, dhd_pktid_map_sz));
- return NULL;
+ __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+ goto error;
}
+
bzero(map, dhd_pktid_map_sz);
- map->osh = osh;
+ /* Initialize the lock that protects this structure */
+ map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
+ if (map->pktid_lock == NULL) {
+ DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
+ goto error;
+ }
+
map->items = num_items;
map->avail = num_items;
- map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
+ map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
+ if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
+ DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
+ goto error;
+ } else {
+ DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
+ __FUNCTION__, __LINE__, map_items + 1));
+ }
+
+ map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
+
+#endif /* DHD_PKTID_AUDIT_ENABLED */
- for (nkey = 1; nkey <= num_items; nkey++) { /* locker #0 is reserved */
+ for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
map->keys[nkey] = nkey; /* populate with unique keys */
- map->lockers[nkey].inuse = FALSE;
+ map->lockers[nkey].state = LOCKER_IS_FREE;
+ map->lockers[nkey].pkt = NULL; /* bzero: redundant */
+ map->lockers[nkey].len = 0;
}
+ /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be busy */
+ map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY;
+ map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
+ map->lockers[DHD_PKTID_INVALID].len = 0;
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
+ bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+
+error:
+
+ if (map) {
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock)
+ DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ if (map->pktid_lock)
+ DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+
+ MFREE(osh, map, dhd_pktid_map_sz);
+ }
+
+ return (dhd_pktid_map_handle_t *)NULL;
}
-/*
+/**
* Retrieve all allocated keys and free all <numbered_key, locker>.
* Freeing implies: unmapping the buffers and freeing the native packet
* This could have been a callback registered with the pktid mapper.
*/
+
static void
-dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle)
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
void *osh;
- int nkey;
+ uint32 nkey;
dhd_pktid_map_t *map;
uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
+ uint32 map_items;
+ uint32 flags;
- if (handle == NULL)
+ if (handle == NULL) {
return;
+ }
map = (dhd_pktid_map_t *)handle;
- osh = map->osh;
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+ osh = dhd->osh;
+
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
nkey = 1; /* skip reserved KEY #0, and start from 1 */
locker = &map->lockers[nkey];
- for (; nkey <= map->items; nkey++, locker++) {
- if (locker->inuse == TRUE) { /* numbered key still in use */
- locker->inuse = FALSE; /* force open the locker */
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+ for (; nkey <= map_items; nkey++, locker++) {
+
+ if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
+
+ locker->state = LOCKER_IS_FREE; /* force open the locker */
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
{ /* This could be a callback registered with dhd_pktid_map */
- DMA_UNMAP(osh, locker->physaddr, locker->len,
- locker->dma, 0, 0);
- PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+ DMA_UNMAP(osh, locker->pa, locker->len,
+ locker->dir, 0, DHD_DMAH_NULL);
+ dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
+ locker->pkttype, TRUE);
}
}
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ else {
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ locker->pkt = NULL; /* clear saved pkt */
+ locker->len = 0;
+ }
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock) {
+ DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+ }
}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+#ifdef DHD_USE_STATIC_PKTIDMAP
+ DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
+#else
MFREE(osh, handle, dhd_pktid_map_sz);
+#endif /* DHD_USE_STATIC_PKTIDMAP */
}
+#ifdef IOCTLRESP_USE_CONSTMEM
+/** Called in detach scenario. Releasing IOCTL buffers. */
static void
-dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle)
+dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
- void *osh;
- int nkey;
+ uint32 nkey;
dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
dhd_pktid_item_t *locker;
+ uint32 map_items;
+ uint32 flags;
+ osl_t *osh = dhd->osh;
- DHD_TRACE(("%s\n", __FUNCTION__));
-
- if (handle == NULL)
+ if (handle == NULL) {
return;
+ }
map = (dhd_pktid_map_t *)handle;
- osh = map->osh;
- map->failures = 0;
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
nkey = 1; /* skip reserved KEY #0, and start from 1 */
locker = &map->lockers[nkey];
- for (; nkey <= map->items; nkey++, locker++) {
- map->keys[nkey] = nkey; /* populate with unique keys */
- if (locker->inuse == TRUE) { /* numbered key still in use */
- locker->inuse = FALSE; /* force open the locker */
- DHD_TRACE(("%s free id%d\n", __FUNCTION__, nkey));
- DMA_UNMAP(osh, (uint32)locker->physaddr, locker->len,
- locker->dma, 0, 0);
- PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+ map_items = DHD_PKIDMAP_ITEMS(map->items);
+
+ for (; nkey <= map_items; nkey++, locker++) {
+
+ if (locker->state == LOCKER_IS_BUSY) { /* numbered key still in use */
+
+ locker->state = LOCKER_IS_FREE; /* force open the locker */
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ {
+ dhd_dma_buf_t retbuf;
+ retbuf.va = locker->pkt;
+ retbuf.len = locker->len;
+ retbuf.pa = locker->pa;
+ retbuf.dmah = locker->dmah;
+ retbuf.secdma = locker->secdma;
+
+ /* This could be a callback registered with dhd_pktid_map */
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ free_ioctl_return_buffer(dhd, &retbuf);
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+ }
+ }
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ else {
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
}
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ locker->pkt = NULL; /* clear saved pkt */
+ locker->len = 0;
}
- map->avail = map->items;
+
+#if defined(DHD_PKTID_AUDIT_ENABLED)
+ if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
+ bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
+ map->pktid_audit = (struct bcm_mwbmap *)NULL;
+ if (map->pktid_audit_lock) {
+ DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_ENABLED */
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
+
+#ifdef DHD_USE_STATIC_PKTIDMAP
+ DHD_OS_PREFREE(dhd, handle, dhd_pktid_map_sz);
+#else
+ MFREE(osh, handle, dhd_pktid_map_sz);
+#endif /* DHD_USE_STATIC_PKTIDMAP */
}
+#endif /* IOCTLRESP_USE_CONSTMEM */
-/* Get the pktid free count */
+/** Get the pktid free count */
static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
dhd_pktid_map_t *map;
+ uint32 flags;
+ uint32 avail;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- return map->avail;
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+ avail = map->avail;
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ return avail;
}
-/*
+/**
* Allocate locker, save pkt contents, and return the locker's numbered key.
* dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility.
* Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
* implying a depleted pool of pktids.
*/
+
static INLINE uint32
-dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt)
+__dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
{
uint32 nkey;
dhd_pktid_map_t *map;
DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
return DHD_PKTID_INVALID; /* failed alloc request */
}
- ASSERT(map->avail <= map->items);
+ ASSERT(map->avail <= map->items);
nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+ locker = &map->lockers[nkey]; /* save packet metadata in locker */
map->avail--;
+ locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
+ locker->len = 0;
+ locker->state = LOCKER_IS_BUSY; /* reserve this locker */
- locker = &map->lockers[nkey]; /* save packet metadata in locker */
- locker->inuse = TRUE; /* reserve this locker */
- locker->pkt = pkt;
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_ALLOC); /* Audit duplicate alloc */
+#endif /* DHD_PKTID_AUDIT_MAP */
ASSERT(nkey != DHD_PKTID_INVALID);
return nkey; /* return locker's numbered key */
}
+
+/**
+ * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
+ * yet populated. Invoke the pktid save api to populate the packet parameters
+ * into the locker.
+ * Wrapper that takes the required lock when called directly.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt)
+{
+ dhd_pktid_map_t *map;
+ uint32 flags;
+ uint32 ret;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+ ret = __dhd_pktid_map_reserve(dhd, handle, pkt);
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ return ret;
+}
+
static INLINE void
-dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
- dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma)
+__dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+ uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+ ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
locker = &map->lockers[nkey];
- ASSERT(locker->pkt == pkt);
- locker->dma = dma; /* store contents in locker */
- locker->physaddr = physaddr;
+ ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
+ ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+ /* store contents in locker */
+ locker->dir = dir;
+ locker->pa = pa;
locker->len = (uint16)len; /* 16bit len */
+	locker->dmah = dmah; /* handle to OS specific DMA map */
locker->secdma = secdma;
+ locker->pkttype = pkttype;
+ locker->pkt = pkt;
+ locker->state = LOCKER_IS_BUSY; /* make this locker busy */
}
+/**
+ * dhd_pktid_map_save - Save a packet's parameters into a locker corresponding
+ * to a previously reserved unique numbered key.
+ * Wrapper that takes the required lock when called directly.
+ */
+static INLINE void
+dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+ uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
+{
+ dhd_pktid_map_t *map;
+ uint32 flags;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+ __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, len,
+ dir, dmah, secdma, pkttype);
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+}
+
+/**
+ * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
+ * contents into the corresponding locker. Return the numbered key.
+ */
static uint32 BCMFASTPATH
-dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
- dmaaddr_t physaddr, uint32 len, uint8 dma, void *secdma)
+dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
+ dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
{
- uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
+ uint32 nkey;
+ uint32 flags;
+ dhd_pktid_map_t *map;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+ nkey = __dhd_pktid_map_reserve(dhd, handle, pkt);
if (nkey != DHD_PKTID_INVALID) {
- dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma, secdma);
+ __dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
+ len, dir, dmah, secdma, pkttype);
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_ALLOC); /* apriori, reservation */
+#endif /* DHD_PKTID_AUDIT_MAP */
}
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
return nkey;
}
-/*
- * Given a numbered key, return the locker contents.
+/**
+ * dhd_pktid_map_free - Given a numbered key, return the locker contents.
* dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
* Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
* value. Only a previously allocated pktid may be freed.
*/
static void * BCMFASTPATH
-dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
- dmaaddr_t *physaddr, uint32 *len, void **secdma)
+dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
+ dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma,
+ dhd_pkttype_t pkttype, bool rsv_locker)
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
+ void * pkt;
+ uint32 flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
- locker = &map->lockers[nkey];
+ flags = DHD_PKTID_LOCK(map->pktid_lock);
+
+ ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= DHD_PKIDMAP_ITEMS(map->items)));
+
+ locker = &map->lockers[nkey];
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+ if (locker->state == LOCKER_IS_FREE) { /* Debug check for cloned numbered key */
+ DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
+ __FUNCTION__, __LINE__, nkey));
+ ASSERT(locker->state != LOCKER_IS_FREE);
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return NULL;
+ }
+
+	/* Check the colour of the buffer, i.e. a buffer posted for TX
+	 * must be freed by a TX completion. Similarly, a buffer posted for
+	 * an IOCTL must be freed by an IOCTL completion, etc.
+	 */
+ if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+
+ DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
+ __FUNCTION__, __LINE__, nkey));
+ ASSERT(locker->pkttype == pkttype);
+
+ return NULL;
+ }
+
+ if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
+ map->avail++;
+ map->keys[map->avail] = nkey; /* make this numbered key available */
+ locker->state = LOCKER_IS_FREE; /* open and free Locker */
+ } else {
+ /* pktid will be reused, but the locker does not have a valid pkt */
+ locker->state = LOCKER_IS_RSVD;
+ }
+
+#if defined(DHD_PKTID_AUDIT_MAP)
+ DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
+#endif /* DHD_PKTID_AUDIT_MAP */
+
+ *pa = locker->pa; /* return contents of locker */
+ *len = (uint32)locker->len;
+ *dmah = locker->dmah;
+ *secdma = locker->secdma;
+
+ pkt = locker->pkt;
+ locker->pkt = NULL; /* Clear pkt */
+ locker->len = 0;
+
+ DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ return pkt;
+}
+
+#else /* ! DHD_PCIE_PKTID */
+
+
+typedef struct pktlist {
+ PKT_LIST *tx_pkt_list; /* list for tx packets */
+ PKT_LIST *rx_pkt_list; /* list for rx packets */
+ PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
+} pktlists_t;
+
+/*
+ * Given that each workitem only uses a 32bit pktid, only 32bit hosts may use
+ * a one to one mapping between a 32bit pktptr and a 32bit pktid.
+ *
+ * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
+ * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
+ * a lock.
+ * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
+ */
+#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
+#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
+
+
+static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+ dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype);
+static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+ dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+ dhd_pkttype_t pkttype);
+
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items, uint32 index)
+{
+ osl_t *osh = dhd->osh;
+ pktlists_t *handle = NULL;
+
+ if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(pktlists_t)));
+ goto error_done;
+ }
+
+ if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
+ __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
+ goto error;
+ }
+
+ PKTLIST_INIT(handle->tx_pkt_list);
+ PKTLIST_INIT(handle->rx_pkt_list);
+ PKTLIST_INIT(handle->ctrl_pkt_list);
+
+ return (dhd_pktid_map_handle_t *) handle;
+
+error:
+ if (handle->ctrl_pkt_list) {
+ MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->rx_pkt_list) {
+ MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->tx_pkt_list) {
+ MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle) {
+ MFREE(osh, handle, sizeof(pktlists_t));
+ }
+
+error_done:
+ return (dhd_pktid_map_handle_t *)NULL;
+}
+
+static void
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
+{
+ osl_t *osh = dhd->osh;
+ pktlists_t *handle = (pktlists_t *) map;
+
+ ASSERT(handle != NULL);
+ if (handle == (pktlists_t *)NULL) {
+ return;
+ }
+
+ if (handle->ctrl_pkt_list) {
+ PKTLIST_FINI(handle->ctrl_pkt_list);
+ MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->rx_pkt_list) {
+ PKTLIST_FINI(handle->rx_pkt_list);
+ MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle->tx_pkt_list) {
+ PKTLIST_FINI(handle->tx_pkt_list);
+ MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
+ }
+
+ if (handle) {
+ MFREE(osh, handle, sizeof(pktlists_t));
+ }
+}
+
+/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
+static INLINE uint32
+dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
+ dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
+ dhd_pkttype_t pkttype)
+{
+ pktlists_t *handle = (pktlists_t *) map;
+ ASSERT(pktptr32 != NULL);
+ DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
+ DHD_PKT_SET_DMAH(pktptr32, dmah);
+ DHD_PKT_SET_PA(pktptr32, pa);
+ DHD_PKT_SET_SECDMA(pktptr32, secdma);
+
+ if (pkttype == PKTTYPE_DATA_TX) {
+ PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
+ } else if (pkttype == PKTTYPE_DATA_RX) {
+ PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
+ } else {
+ PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
+ }
+
+ return DHD_PKTID32(pktptr32);
+}
+
+/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
+static INLINE void *
+dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
+ dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
+ dhd_pkttype_t pkttype)
+{
+ pktlists_t *handle = (pktlists_t *) map;
+ void *pktptr32;
+
+ ASSERT(pktid32 != 0U);
+ pktptr32 = DHD_PKTPTR32(pktid32);
+ *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
+ *dmah = DHD_PKT_GET_DMAH(pktptr32);
+ *pa = DHD_PKT_GET_PA(pktptr32);
+ *secdma = DHD_PKT_GET_SECDMA(pktptr32);
+
+ if (pkttype == PKTTYPE_DATA_TX) {
+ PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
+ } else if (pkttype == PKTTYPE_DATA_RX) {
+ PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
+ } else {
+ PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
+ }
+
+ return pktptr32;
+}
+
+#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt) DHD_PKTID32(pkt)
+
+#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
+ dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+ (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+ })
+
+#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
+ dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
+ (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
+ })
+
+#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
+ ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
+ dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
+ (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
+ })
- if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */
- DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
- __FUNCTION__, __LINE__, nkey));
- ASSERT(locker->inuse != FALSE);
- return NULL;
- }
+#define DHD_PKTID_AVAIL(map) (~0)
- map->avail++;
- map->keys[map->avail] = nkey; /* make this numbered key available */
- locker->inuse = FALSE; /* open and free Locker */
+#endif /* ! DHD_PCIE_PKTID */
- *physaddr = locker->physaddr; /* return contents of locker */
- *len = (uint32)locker->len;
- *secdma = locker->secdma;
+/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
- return locker->pkt;
-}
-/* Linkage, sets prot link and updates hdrlen in pub */
-int dhd_prot_attach(dhd_pub_t *dhd)
+/**
+ * The PCIE FD protocol layer is constructed in two phases:
+ * Phase 1. dhd_prot_attach()
+ * Phase 2. dhd_prot_init()
+ *
+ * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
+ * All common rings are also attached (msgbuf_ring_t objects are allocated
+ * with DMA-able buffers).
+ * All dhd_dma_buf_t objects are also allocated here.
+ *
+ * As dhd_prot_attach is invoked before the pcie_shared object is read, any
+ * initialization of objects that requires information advertised by the dongle
+ * may not be performed here.
+ * E.g. the number of TxPost flowrings is not known at this point, nor do
+ * we know which form of D2H DMA sync mechanism is advertised by the dongle, or
+ * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
+ * rings (common + flow).
+ *
+ * dhd_prot_init() is invoked after the bus layer has fetched the information
+ * advertised by the dongle in the pcie_shared_t.
+ */
+int
+dhd_prot_attach(dhd_pub_t *dhd)
{
- uint alloced = 0;
-
+ osl_t *osh = dhd->osh;
dhd_prot_t *prot;
/* Allocate prot structure */
}
memset(prot, 0, sizeof(*prot));
- prot->osh = dhd->osh;
+ prot->osh = osh;
dhd->prot = prot;
/* DMAing ring completes supported? FALSE by default */
dhd->dma_d2h_ring_upd_support = FALSE;
dhd->dma_h2d_ring_upd_support = FALSE;
- /* Ring Allocations */
- /* 1.0 H2D TXPOST ring */
- if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp",
- H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
- BCMPCIE_H2D_TXFLOWRINGID))) {
- DHD_ERROR(("%s: kmalloc for H2D TXPOST ring failed\n", __FUNCTION__));
+ /* Common Ring Allocations */
+
+ /* Ring 0: H2D Control Submission */
+ if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
+ H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
+ __FUNCTION__));
goto fail;
}
- /* 2.0 H2D RXPOST ring */
- if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp",
- H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
- BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) {
- DHD_ERROR(("%s: kmalloc for H2D RXPOST ring failed\n", __FUNCTION__));
+ /* Ring 1: H2D Receive Buffer Post */
+ if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
+ H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
+ __FUNCTION__));
goto fail;
+ }
+ /* Ring 2: D2H Control Completion */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
+ D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
+ __FUNCTION__));
+ goto fail;
}
- /* 3.0 H2D CTRL_SUBMISSION ring */
- if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl",
- H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
- BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) {
- DHD_ERROR(("%s: kmalloc for H2D CTRL_SUBMISSION ring failed\n",
+ /* Ring 3: D2H Transmit Complete */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
+ D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
__FUNCTION__));
goto fail;
}
- /* 4.0 D2H TX_COMPLETION ring */
- if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl",
- D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
- BCMPCIE_D2H_MSGRING_TX_COMPLETE))) {
- DHD_ERROR(("%s: kmalloc for D2H TX_COMPLETION ring failed\n",
+ /* Ring 4: D2H Receive Complete */
+ if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
+ D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
__FUNCTION__));
goto fail;
}
- /* 5.0 D2H RX_COMPLETION ring */
- if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl",
- D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
- BCMPCIE_D2H_MSGRING_RX_COMPLETE))) {
- DHD_ERROR(("%s: kmalloc for D2H RX_COMPLETION ring failed\n",
- __FUNCTION__));
+ /*
+	 * The max number of flowrings is not yet known. msgbuf_ring_t objects with
+	 * DMA-able buffers for the flowrings will be instantiated in dhd_prot_init().
+	 * See dhd_prot_flowrings_pool_attach().
+ */
+ /* ioctl response buffer */
+ if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
+ goto fail;
+ }
+
+ /* IOCTL request buffer */
+ if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
goto fail;
+ }
+ /* Scratch buffer for dma rx offset */
+ if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
+ goto fail;
}
- /* 6.0 D2H CTRL_COMPLETION ring */
- if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl",
- D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
- BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) {
- DHD_ERROR(("%s: kmalloc for D2H CTRL_COMPLETION ring failed\n",
- __FUNCTION__));
+	/* Scratch buffer for bus throughput measurement */
+ if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
goto fail;
}
- /* Return buffer for ioctl */
- prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
- &alloced, &prot->retbuf.pa, &prot->retbuf.dmah);
- if (prot->retbuf.va == NULL) {
- ASSERT(0);
- return BCME_NOMEM;
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+#if defined(DHD_LB)
+
+ /* Initialize the work queues to be used by the Load Balancing logic */
+#if defined(DHD_LB_TXC)
+ {
+ void *buffer;
+ buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
+ bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
+ buffer, DHD_LB_WORKQ_SZ);
+ prot->tx_compl_prod_sync = 0;
+ DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
+ __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
}
+#endif /* DHD_LB_TXC */
- ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0);
- bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
- OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+#if defined(DHD_LB_RXC)
+ {
+ void *buffer;
+ buffer = MALLOC(dhd->osh, sizeof(uint32) * DHD_LB_WORKQ_SZ);
+ bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
+ buffer, DHD_LB_WORKQ_SZ);
+ prot->rx_compl_prod_sync = 0;
+ DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
+ __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
+ }
+#endif /* DHD_LB_RXC */
- /* IOCTL request buffer */
- prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
- &alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah);
+#endif /* DHD_LB */
- if (prot->ioctbuf.va == NULL) {
- ASSERT(0);
- return BCME_NOMEM;
+ return BCME_OK;
+
+fail:
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ if (prot != NULL) {
+ dhd_prot_detach(dhd);
}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ return BCME_NOMEM;
+} /* dhd_prot_attach */
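To make the attach/init split described above concrete, here is a minimal, self-contained sketch (plain C, hypothetical names, not part of the driver) of the same two-phase pattern: attach() allocates the resources whose sizes are known up front, while init() consumes a parameter that only becomes available later, standing in for the values read from pcie_shared.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdio.h>
#include <stdlib.h>

struct proto {
	unsigned int max_rxbufpost;   /* known only after the "dongle" reports it */
	unsigned int *rx_ring;        /* fixed-size resource, allocated at attach */
	unsigned int rx_ring_items;
};

/* Phase 1: allocate resources whose sizes are compile-time constants. */
static struct proto *proto_attach(unsigned int ring_items)
{
	struct proto *p = calloc(1, sizeof(*p));
	if (p == NULL)
		return NULL;
	p->rx_ring = calloc(ring_items, sizeof(*p->rx_ring));
	if (p->rx_ring == NULL) {
		free(p);
		return NULL;
	}
	p->rx_ring_items = ring_items;
	return p;
}

/* Phase 2: apply parameters that are only advertized at runtime. */
static int proto_init(struct proto *p, unsigned int advertized_rxbufpost)
{
	p->max_rxbufpost = advertized_rxbufpost ? advertized_rxbufpost : 256;
	return 0;
}

int main(void)
{
	struct proto *p = proto_attach(1024);  /* phase 1 */
	if (p == NULL)
		return 1;
	proto_init(p, 512);                    /* phase 2, after shared info is known */
	printf("ring items %u, max rxbufpost %u\n", p->rx_ring_items, p->max_rxbufpost);
	free(p->rx_ring);
	free(p);
	return 0;
}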
- ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0);
- bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
- OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
- /* Scratch buffer for dma rx offset */
- prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN;
- prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN,
- DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa,
- &prot->d2h_dma_scratch_buf.dmah);
+/**
+ * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
+ * completed its initialization of the pcie_shared structure, we may now fetch
+ * the dongle advertized features and adjust the protocol layer accordingly.
+ *
+ * dhd_prot_init() may be invoked again after a dhd_prot_reset().
+ */
+int
+dhd_prot_init(dhd_pub_t *dhd)
+{
+ sh_addr_t base_addr;
+ dhd_prot_t *prot = dhd->prot;
- if (prot->d2h_dma_scratch_buf.va == NULL) {
+ /* PKTID handle INIT */
+ if (prot->pktid_map_handle != NULL) {
+ DHD_ERROR(("%s: pktid_map_handle already set!\n", __FUNCTION__));
ASSERT(0);
- return BCME_NOMEM;
+ return BCME_ERROR;
}
- ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0);
- bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (prot->pktid_map_handle_ioctl != NULL) {
+ DHD_ERROR(("%s: pktid_map_handle_ioctl already set!\n", __FUNCTION__));
+ ASSERT(0);
+ return BCME_ERROR;
+ }
+#endif /* IOCTLRESP_USE_CONSTMEM */
- /* PKTID handle INIT */
- prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS);
+ prot->pktid_map_handle = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_PKTID_ITEMS, PKTID_MAP_HANDLE);
if (prot->pktid_map_handle == NULL) {
+ DHD_ERROR(("%s: Unable to map packet id's\n", __FUNCTION__));
ASSERT(0);
return BCME_NOMEM;
}
-#if defined(PCIE_D2H_SYNC)
- dhd_prot_d2h_sync_init(dhd, prot);
-#endif /* PCIE_D2H_SYNC */
+#ifdef IOCTLRESP_USE_CONSTMEM
+ prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
+ DHD_FLOWRING_MAX_IOCTLRESPBUF_POST, PKTID_MAP_HANDLE_IOCTL);
+ if (prot->pktid_map_handle_ioctl == NULL) {
+ DHD_ERROR(("%s: Unable to map ioctl response buffers\n", __FUNCTION__));
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+ /* Max pkts in ring */
+ prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
+
+ DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+
+ /* Read max rx packets supported by dongle */
+ dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+ if (prot->max_rxbufpost == 0) {
+ /* This would happen if the dongle firmware is not */
+ /* using the latest shared structure template */
+ prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+ }
+ DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+	/* Initialize fields individually: a bzero() would blow away the dma pointers. */
+ prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
+ prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+
+ prot->cur_ioctlresp_bufs_posted = 0;
+ prot->active_tx_count = 0;
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->rxbufpost = 0;
+ prot->cur_event_bufs_posted = 0;
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->ioctl_received = IOCTL_WAIT;
prot->dmaxfer.srcmem.va = NULL;
- prot->dmaxfer.destmem.va = NULL;
- prot->dmaxfer_in_progress = FALSE;
+ prot->dmaxfer.dstmem.va = NULL;
+ prot->dmaxfer.in_progress = FALSE;
+ prot->metadata_dbg = FALSE;
prot->rx_metadata_offset = 0;
prot->tx_metadata_offset = 0;
+ prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
-#ifdef DHD_RX_CHAINING
- dhd_rxchain_reset(&prot->rxchain);
-#endif
+ prot->ioctl_trans_id = 0;
- return 0;
+	/* Register the mailbox interrupt (doorbell) function upfront, so that
+	 * corerev checks can be removed from the data path
+	 */
+ prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
-fail:
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- if (prot != NULL)
- dhd_prot_detach(dhd);
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- return BCME_NOMEM;
-}
+ /* Initialize Common MsgBuf Rings */
-/* Init memory block on host DMA'ing indices */
-int
-dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length)
-{
- uint alloced = 0;
+ dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
+ dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
- dhd_prot_t *prot = dhd->prot;
- uint32 dma_block_size = 4 * length;
+#if defined(PCIE_D2H_SYNC)
+ dhd_prot_d2h_sync_init(dhd);
+#endif /* PCIE_D2H_SYNC */
- if (prot == NULL) {
- DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
+ dhd_prot_h2d_sync_init(dhd);
+
+ /* init the scratch buffer */
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_SCRATCH_BUF, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
+ sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
+
+ /* If supported by the host, indicate the memory block
+ * for completion writes / submission reads to shared space
+ */
+ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_INDX_WR_BUF, 0);
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_DMA_INDX_RD_BUF, 0);
+ }
+
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ H2D_DMA_INDX_WR_BUF, 0);
+ dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
+ dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
+ D2H_DMA_INDX_RD_BUF, 0);
+ }
+
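dhd_prot_init hands the addresses of the host's DMA-able buffers to the dongle through shared memory (dhd_base_addr_htolpa plus dhd_bus_cmn_writeshared above). Below is a minimal sketch of how a 64-bit bus address is split into the low/high 32-bit pair such a shared structure carries; the struct and function names are hypothetical, and the little-endian conversion the real code performs is only noted in a comment.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdint.h>
#include <stdio.h>

/* Shared-memory layout the peer reads: low/high halves of a 64-bit bus
 * address. The real driver additionally converts each half to little-endian
 * (htol32) before writing it to shared memory.
 */
struct addr64 {
	uint32_t low_addr;
	uint32_t high_addr;
};

static void pack_base_addr(struct addr64 *dst, uint64_t pa)
{
	dst->low_addr  = (uint32_t)(pa & 0xffffffffu);
	dst->high_addr = (uint32_t)(pa >> 32);
}

int main(void)
{
	struct addr64 a;
	pack_base_addr(&a, 0x0000000123456000ULL);
	printf("high 0x%08x low 0x%08x\n", a.high_addr, a.low_addr);
	return 0;
}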
+ /*
+	 * If the DMA-able buffers for the flowrings need to come from a specific
+ * contiguous memory region, then setup prot->flowrings_dma_buf here.
+ * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
+ * this contiguous memory region, for each of the flowrings.
+ */
+
+ /* Pre-allocate pool of msgbuf_ring for flowrings */
+ if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
return BCME_ERROR;
}
- switch (type) {
- case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
- /* ring update dma buffer for submission write */
- prot->h2d_dma_writeindx_buf_len = dma_block_size;
- prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
- dma_block_size, DMA_ALIGN_LEN, &alloced,
- &prot->h2d_dma_writeindx_buf.pa,
- &prot->h2d_dma_writeindx_buf.dmah);
-
- if (prot->h2d_dma_writeindx_buf.va == NULL) {
- return BCME_NOMEM;
- }
+	/* If needed, the host should configure its soft doorbells here */
- ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
- bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
- OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
- DHD_ERROR(("%s: H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
- "inited for dma'ing h2d-w indices\n", __FUNCTION__,
- prot->h2d_dma_writeindx_buf_len));
- break;
+	/* Post the host-configured soft doorbells to the dongle */
+ dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
- case HOST_TO_DNGL_DMA_READINDX_BUFFER:
- /* ring update dma buffer for submission read */
- prot->h2d_dma_readindx_buf_len = dma_block_size;
- prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
- dma_block_size, DMA_ALIGN_LEN, &alloced,
- &prot->h2d_dma_readindx_buf.pa,
- &prot->h2d_dma_readindx_buf.dmah);
- if (prot->h2d_dma_readindx_buf.va == NULL) {
- return BCME_NOMEM;
- }
+ /* Post buffers for packet reception and ioctl/event responses */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ dhd_msgbuf_rxbuf_post_event_bufs(dhd);
- ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
- bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
- OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
- DHD_ERROR(("%s: H2D_READINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing h2d-r indices\n", __FUNCTION__,
- prot->h2d_dma_readindx_buf_len));
- break;
+ return BCME_OK;
+} /* dhd_prot_init */
- case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
- /* ring update dma buffer for completion write */
- prot->d2h_dma_writeindx_buf_len = dma_block_size;
- prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
- dma_block_size, DMA_ALIGN_LEN, &alloced,
- &prot->d2h_dma_writeindx_buf.pa,
- &prot->d2h_dma_writeindx_buf.dmah);
- if (prot->d2h_dma_writeindx_buf.va == NULL) {
- return BCME_NOMEM;
- }
+/**
+ * dhd_prot_detach - PCIE FD protocol layer destructor.
+ * Unlinks and frees all allocated protocol memory (including dhd_prot).
+ */
+void
+dhd_prot_detach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
- ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
- bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
- DHD_ERROR(("%s: D2H_WRITEINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing d2h-w indices\n", __FUNCTION__,
- prot->d2h_dma_writeindx_buf_len));
- break;
+ /* Stop the protocol module */
+ if (prot) {
- case DNGL_TO_HOST_DMA_READINDX_BUFFER:
- /* ring update dma buffer for completion read */
- prot->d2h_dma_readindx_buf_len = dma_block_size;
- prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
- dma_block_size, DMA_ALIGN_LEN, &alloced,
- &prot->d2h_dma_readindx_buf.pa,
- &prot->d2h_dma_readindx_buf.dmah);
+ /* free up all DMA-able buffers allocated during prot attach/init */
- if (prot->d2h_dma_readindx_buf.va == NULL) {
- return BCME_NOMEM;
- }
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
+ dhd_dma_buf_free(dhd, &prot->retbuf); /* ioctl return buffer */
+ dhd_dma_buf_free(dhd, &prot->ioctbuf);
+ dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
- ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
- bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
- DHD_ERROR(("%s: D2H_READINDX_ARRAY_HOST %d-bytes "
- "inited for dma'ing d2h-r indices\n", __FUNCTION__,
- prot->d2h_dma_readindx_buf_len));
- break;
+ /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
+ dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
- default:
- DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
- return BCME_BADOPTION;
- }
+ /* Common MsgBuf Rings */
+ dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
- return BCME_OK;
+ /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
+ dhd_prot_flowrings_pool_detach(dhd);
-}
+ DHD_NATIVE_TO_PKTID_FINI(dhd, dhd->prot->pktid_map_handle);
-/* Unlink, frees allocated protocol memory (including dhd_prot) */
-void dhd_prot_detach(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- /* Stop the protocol module */
- if (dhd->prot) {
-
- /* free up scratch buffer */
- if (prot->d2h_dma_scratch_buf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va,
- DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa,
- prot->d2h_dma_scratch_buf.dmah);
- prot->d2h_dma_scratch_buf.va = NULL;
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+#if defined(DHD_LB)
+#if defined(DHD_LB_TXC)
+ if (prot->tx_compl_prod.buffer) {
+ MFREE(dhd->osh, prot->tx_compl_prod.buffer,
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
}
- /* free up ring upd buffer for submission writes */
- if (prot->h2d_dma_writeindx_buf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va,
- prot->h2d_dma_writeindx_buf_len, prot->h2d_dma_writeindx_buf.pa,
- prot->h2d_dma_writeindx_buf.dmah);
- prot->h2d_dma_writeindx_buf.va = NULL;
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+ if (prot->rx_compl_prod.buffer) {
+ MFREE(dhd->osh, prot->rx_compl_prod.buffer,
+ sizeof(void*) * DHD_LB_WORKQ_SZ);
}
+#endif /* DHD_LB_RXC */
+#endif /* DHD_LB */
- /* free up ring upd buffer for submission reads */
- if (prot->h2d_dma_readindx_buf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va,
- prot->h2d_dma_readindx_buf_len, prot->h2d_dma_readindx_buf.pa,
- prot->h2d_dma_readindx_buf.dmah);
- prot->h2d_dma_readindx_buf.va = NULL;
- }
+ dhd->prot = NULL;
+ }
+} /* dhd_prot_detach */
- /* free up ring upd buffer for completion writes */
- if (prot->d2h_dma_writeindx_buf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_writeindx_buf.va,
- prot->d2h_dma_writeindx_buf_len, prot->d2h_dma_writeindx_buf.pa,
- prot->d2h_dma_writeindx_buf.dmah);
- prot->d2h_dma_writeindx_buf.va = NULL;
- }
- /* free up ring upd buffer for completion writes */
- if (prot->d2h_dma_readindx_buf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va,
- prot->d2h_dma_readindx_buf_len, prot->d2h_dma_readindx_buf.pa,
- prot->d2h_dma_readindx_buf.dmah);
- prot->d2h_dma_readindx_buf.va = NULL;
- }
+/**
+ * dhd_prot_reset - Reset the protocol layer without freeing any objects. This
+ * may be invoked to soft reboot the dongle, without having to detach and attach
+ * the entire protocol layer.
+ *
+ * After dhd_prot_reset(), dhd_prot_init() may be invoked without going through
+ * a dhd_prot_attach() phase.
+ */
+void
+dhd_prot_reset(dhd_pub_t *dhd)
+{
+ struct dhd_prot *prot = dhd->prot;
- /* ioctl return buffer */
- if (prot->retbuf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va,
- IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa, dhd->prot->retbuf.dmah);
- dhd->prot->retbuf.va = NULL;
- }
+ DHD_TRACE(("%s\n", __FUNCTION__));
- /* ioctl request buffer */
- if (prot->ioctbuf.va) {
- DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va,
- IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa, dhd->prot->ioctbuf.dmah);
+ if (prot == NULL) {
+ return;
+ }
- dhd->prot->ioctbuf.va = NULL;
- }
+ dhd_prot_flowrings_pool_reset(dhd);
+ dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
+ dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
+ dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
- /* 1.0 H2D TXPOST ring */
- dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn);
- /* 2.0 H2D RXPOST ring */
- dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn);
- /* 3.0 H2D CTRL_SUBMISSION ring */
- dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn);
- /* 4.0 D2H TX_COMPLETION ring */
- dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln);
- /* 5.0 D2H RX_COMPLETION ring */
- dhd_prot_ring_detach(dhd, prot->d2hring_rx_cpln);
- /* 6.0 D2H CTRL_COMPLETION ring */
- dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln);
+ dhd_dma_buf_reset(dhd, &prot->retbuf);
+ dhd_dma_buf_reset(dhd, &prot->ioctbuf);
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
+ dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
+ dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
+ dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
- NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle);
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ prot->rx_metadata_offset = 0;
+ prot->tx_metadata_offset = 0;
- dhd->prot = NULL;
+ prot->rxbufpost = 0;
+ prot->cur_event_bufs_posted = 0;
+ prot->cur_ioctlresp_bufs_posted = 0;
+
+ prot->active_tx_count = 0;
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->ioctl_received = IOCTL_WAIT;
+ prot->ioctl_trans_id = 0;
+
+	/* dhd_flow_rings_init() is invoked from dhd_bus_start(),
+	 * so the flowrings must be deleted when the bus is stopped
+ */
+ if (dhd->flow_rings_inited) {
+ dhd_flow_rings_deinit(dhd);
}
-}
+
+ if (prot->pktid_map_handle) {
+ DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_map_handle);
+ prot->pktid_map_handle = NULL;
+ }
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (prot->pktid_map_handle_ioctl) {
+ DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
+ prot->pktid_map_handle_ioctl = NULL;
+ }
+#endif /* IOCTLRESP_USE_CONSTMEM */
+} /* dhd_prot_reset */
+
void
dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
prot->rx_dataoffset = rx_offset;
}
-
-/* Initialize protocol: sync w/dongle state.
+/**
+ * Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
*/
-int dhd_sync_with_dongle(dhd_pub_t *dhd)
+int
+dhd_sync_with_dongle(dhd_pub_t *dhd)
{
int ret = 0;
wlc_rev_info_t revinfo;
+
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* Post event buffer after shim layer is attached */
- ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
- if (ret <= 0) {
- DHD_ERROR(("%s : Post event buffer fail. ret = %d\n", __FUNCTION__, ret));
- return ret;
- }
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+
+#ifdef DHD_FW_COREDUMP
+ /* Check the memdump capability */
+ dhd_get_memdump_info(dhd);
+#endif /* DHD_FW_COREDUMP */
+#ifdef BCMASSERT_LOG
+ dhd_get_assert_info(dhd);
+#endif /* BCMASSERT_LOG */
+
/* Get the device rev info */
memset(&revinfo, 0, sizeof(revinfo));
ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
- if (ret < 0)
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
goto done;
+ }
+ DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
dhd_process_cid_mac(dhd, TRUE);
ret = dhd_preinit_ioctls(dhd);
- if (!ret)
+ if (!ret) {
dhd_process_cid_mac(dhd, FALSE);
+ }
/* Always assumes wl for now */
dhd->iswl = TRUE;
done:
return ret;
+} /* dhd_sync_with_dongle */
+
+#if defined(DHD_LB)
+
+/* DHD load balancing: deferral of work to another online CPU */
+
+/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
+extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
+extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
+
+extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
+
+/**
+ * dhd_lb_dispatch - load balance by dispatching work to other CPU cores
+ * Note: rx_compl_tasklet is dispatched explicitly.
+ */
+static INLINE void
+dhd_lb_dispatch(dhd_pub_t *dhdp, uint16 ring_idx)
+{
+ switch (ring_idx) {
+
+#if defined(DHD_LB_TXC)
+ case BCMPCIE_D2H_MSGRING_TX_COMPLETE:
+ bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
+ dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
+ break;
+#endif /* DHD_LB_TXC */
+
+ case BCMPCIE_D2H_MSGRING_RX_COMPLETE:
+ {
+#if defined(DHD_LB_RXC)
+ dhd_prot_t *prot = dhdp->prot;
+			/* Schedule the tasklet only if we have to */
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+ /* flush WR index */
+ bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
+ dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
+ }
+#endif /* DHD_LB_RXC */
+#if defined(DHD_LB_RXP)
+ dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
+#endif /* DHD_LB_RXP */
+ break;
+ }
+ default:
+ break;
+ }
}
-/* This function does all necessary initialization needed
-* for IOCTL/IOVAR path
-*/
-int dhd_prot_init(dhd_pub_t *dhd)
+
+#if defined(DHD_LB_TXC)
+/**
+ * DHD load-balanced tx completion tasklet handler, which frees packets on
+ * the selected CPU. Packet pointers are delivered to this tasklet via the
+ * tx complete workq.
+ */
+void
+dhd_lb_tx_compl_handler(unsigned long data)
{
- int ret = 0;
+ int elem_ix;
+ void *pkt, **elem;
+ dmaaddr_t pa;
+ uint32 pa_len;
+ dhd_pub_t *dhd = (dhd_pub_t *)data;
dhd_prot_t *prot = dhd->prot;
+ bcm_workq_t *workq = &prot->tx_compl_cons;
+ uint32 count = 0;
- /* Max pkts in ring */
- prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
-
- DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+ DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
- /* Read max rx packets supported by dongle */
- dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
- if (prot->max_rxbufpost == 0) {
- /* This would happen if the dongle firmware is not */
- /* using the latest shared structure template */
- prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
- }
- DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+ while (1) {
+ elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
- prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
- prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+ if (elem_ix == BCM_RING_EMPTY) {
+ break;
+ }
- prot->active_tx_count = 0;
- prot->data_seq_no = 0;
- prot->ioctl_seq_no = 0;
- prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+ elem = WORKQ_ELEMENT(void *, workq, elem_ix);
+ pkt = *elem;
- prot->ioctl_trans_id = 1;
+ DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
- /* Register the interrupt function upfront */
- /* remove corerev checks in data path */
- prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+ OSL_PREFETCH(PKTTAG(pkt));
+ OSL_PREFETCH(pkt);
- /* Initialise rings */
- /* 1.0 H2D TXPOST ring */
- if (dhd_bus_is_txmode_push(dhd->bus)) {
- dhd_ring_init(dhd, prot->h2dring_txp_subn);
- }
+ pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
+ pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
- /* 2.0 H2D RXPOST ring */
- dhd_ring_init(dhd, prot->h2dring_rxp_subn);
- /* 3.0 H2D CTRL_SUBMISSION ring */
- dhd_ring_init(dhd, prot->h2dring_ctrl_subn);
- /* 4.0 D2H TX_COMPLETION ring */
- dhd_ring_init(dhd, prot->d2hring_tx_cpln);
- /* 5.0 D2H RX_COMPLETION ring */
- dhd_ring_init(dhd, prot->d2hring_rx_cpln);
- /* 6.0 D2H CTRL_COMPLETION ring */
- dhd_ring_init(dhd, prot->d2hring_ctrl_cpln);
+ DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
- /* init the scratch buffer */
- dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa,
- sizeof(prot->d2h_dma_scratch_buf.pa), DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0);
- dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len,
- sizeof(prot->d2h_dma_scratch_buf_len), DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0);
+#if defined(BCMPCIE)
+ dhd_txcomplete(dhd, pkt, true);
+#endif
- /* If supported by the host, indicate the memory block
- * for comletion writes / submission reads to shared space
- */
- if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
- dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa,
- sizeof(prot->d2h_dma_writeindx_buf.pa),
- DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0);
- dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa,
- sizeof(prot->h2d_dma_readindx_buf.pa),
- HOST_TO_DNGL_DMA_READINDX_BUFFER, 0);
+ PKTFREE(dhd->osh, pkt, TRUE);
+ count++;
}
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
- dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_writeindx_buf.pa,
- sizeof(prot->h2d_dma_writeindx_buf.pa),
- HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0);
- dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa,
- sizeof(prot->d2h_dma_readindx_buf.pa),
- DNGL_TO_HOST_DMA_READINDX_BUFFER, 0);
+ /* smp_wmb(); */
+ bcm_workq_cons_sync(workq);
+ DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
+}
+#endif /* DHD_LB_TXC */
- }
+#if defined(DHD_LB_RXC)
+void
+dhd_lb_rx_compl_handler(unsigned long data)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)data;
+ bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
- ret = dhd_msgbuf_rxbuf_post(dhd);
- ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
- return ret;
+ dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
+ bcm_workq_cons_sync(workq);
}
+#endif /* DHD_LB_RXC */
+
+#endif /* DHD_LB */
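The DHD_LB paths above hand completion work to another CPU through small fixed-size work queues (bcm_workq_init / bcm_ring_cons with explicit producer/consumer sync). Below is a minimal single-producer, single-consumer ring sketch of that idea, assuming one producer, one consumer and a power-of-two queue size; all names are hypothetical and the memory barriers the real code needs are only marked with comments.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdio.h>

#define WORKQ_SZ 8          /* power of two, like DHD_LB_WORKQ_SZ */

struct workq {
	void *slot[WORKQ_SZ];
	unsigned int prod;       /* written by the producer (e.g. DPC context) */
	unsigned int cons;       /* written by the consumer (e.g. tasklet) */
};

/* Returns 0 on success, -1 if the queue is full. */
static int workq_produce(struct workq *q, void *item)
{
	if (q->prod - q->cons == WORKQ_SZ)
		return -1;                          /* full */
	q->slot[q->prod & (WORKQ_SZ - 1)] = item;
	q->prod++;                                  /* real code needs a memory barrier here */
	return 0;
}

/* Returns NULL when the queue is empty. */
static void *workq_consume(struct workq *q)
{
	void *item;
	if (q->cons == q->prod)
		return NULL;                        /* empty */
	item = q->slot[q->cons & (WORKQ_SZ - 1)];
	q->cons++;                                  /* real code needs a memory barrier here */
	return item;
}

int main(void)
{
	struct workq q = { {0}, 0, 0 };
	int pkt = 42;
	workq_produce(&q, &pkt);
	printf("consumed %d\n", *(int *)workq_consume(&q));
	return 0;
}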
#define DHD_DBG_SHOW_METADATA 0
+
#if DHD_DBG_SHOW_METADATA
static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
break;
switch (tlv_t) {
- case WLFC_CTL_TYPE_TXSTATUS:
- bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l);
- break;
+ case WLFC_CTL_TYPE_TXSTATUS: {
+ uint32 txs;
+ memcpy(&txs, tlv_v, sizeof(uint32));
+ if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
+ printf("METADATA TX_STATUS: %08x\n", txs);
+ } else {
+ wl_txstatus_additional_info_t tx_add_info;
+ memcpy(&tx_add_info, tlv_v + sizeof(uint32),
+ sizeof(wl_txstatus_additional_info_t));
+ printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
+ " rate = %08x tries = %d - %d\n", txs,
+ tx_add_info.seq, tx_add_info.entry_ts,
+ tx_add_info.enq_ts, tx_add_info.last_ts,
+ tx_add_info.rspec, tx_add_info.rts_cnt,
+ tx_add_info.tx_cnt);
+ }
+ } break;
- case WLFC_CTL_TYPE_RSSI:
- bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l);
- break;
+ case WLFC_CTL_TYPE_RSSI: {
+ if (tlv_l == 1)
+ printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
+ else
+ printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
+ (*(tlv_v + 3) << 8) | *(tlv_v + 2),
+ (int8)(*tlv_v), *(tlv_v + 1));
+ } break;
case WLFC_CTL_TYPE_FIFO_CREDITBACK:
bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
break;
- case WLFC_CTL_TYPE_RX_STAMP:
- bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l);
- break;
+ case WLFC_CTL_TYPE_RX_STAMP: {
+ struct {
+ uint32 rspec;
+ uint32 bus_time;
+ uint32 wlan_time;
+ } rx_tmstamp;
+ memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
+			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
+ rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
+ } break;
case WLFC_CTL_TYPE_TRANS_ID:
bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
#endif /* DHD_DBG_SHOW_METADATA */
static INLINE void BCMFASTPATH
-dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid)
+dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
{
- void *PKTBUF;
- dmaaddr_t pa;
- uint32 pa_len;
- void *secdma;
- PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
-
- if (PKTBUF) {
- {
- if (SECURE_DMA_ENAB(dhd->osh)) {
- SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0, secdma, 0);
- } else
- DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+ if (pkt) {
+ if (pkttype == PKTTYPE_IOCTL_RX ||
+ pkttype == PKTTYPE_EVENT_RX) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, pkt, send);
+#else
+ PKTFREE(dhd->osh, pkt, send);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ } else {
+ PKTFREE(dhd->osh, pkt, send);
}
- PKTFREE(dhd->osh, PKTBUF, FALSE);
}
- return;
}
static INLINE void * BCMFASTPATH
-dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid)
+dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
{
void *PKTBUF;
dmaaddr_t pa;
- uint32 pa_len;
+ uint32 len;
+ void *dmah;
void *secdma;
- PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
+
+#ifdef DHD_PCIE_PKTID
+ if (free_pktid) {
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
+ pktid, pa, len, dmah, secdma, pkttype);
+ } else {
+ PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_map_handle,
+ pktid, pa, len, dmah, secdma, pkttype);
+ }
+#else
+ PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid, pa,
+ len, dmah, secdma, pkttype);
+#endif /* DHD_PCIE_PKTID */
+
if (PKTBUF) {
- if (SECURE_DMA_ENAB(dhd->osh))
- SECURE_DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0, secdma, 0);
- else
- DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
+ {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
+ secdma, 0);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+ }
+ }
}
return PKTBUF;
}
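The request_id fields exchanged with the dongle are 32-bit packet ids; the pktid map (DHD_NATIVE_TO_PKTID / DHD_PKTID_TO_NATIVE used above) translates them to and from native packet pointers together with the DMA-mapping info needed to unmap the buffer later. A minimal single-threaded sketch of such a locker map is given below; the names, the size, and the linear free-slot search are hypothetical simplifications.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PKTIDS    4
#define PKTID_INVALID 0u        /* id 0 reserved as "invalid", as in the driver */

struct locker {
	void     *pkt;           /* native packet pointer */
	uint64_t  pa;            /* bus address the buffer was mapped to */
	uint32_t  len;
	int       inuse;
};

static struct locker lockers[MAX_PKTIDS + 1];   /* slot 0 unused */

/* Allocate an id for (pkt, pa, len); returns PKTID_INVALID when depleted. */
static uint32_t native_to_pktid(void *pkt, uint64_t pa, uint32_t len)
{
	for (uint32_t id = 1; id <= MAX_PKTIDS; id++) {
		if (!lockers[id].inuse) {
			lockers[id] = (struct locker){ pkt, pa, len, 1 };
			return id;
		}
	}
	return PKTID_INVALID;
}

/* Look up and release an id, returning the native packet and its DMA info. */
static void *pktid_to_native(uint32_t id, uint64_t *pa, uint32_t *len)
{
	if (id == PKTID_INVALID || id > MAX_PKTIDS || !lockers[id].inuse)
		return NULL;
	*pa = lockers[id].pa;
	*len = lockers[id].len;
	lockers[id].inuse = 0;
	return lockers[id].pkt;
}

int main(void)
{
	int pkt = 7;
	uint64_t pa; uint32_t len;
	uint32_t id = native_to_pktid(&pkt, 0x1000, 1536);
	printf("id %u -> pkt %d\n", id, *(int *)pktid_to_native(id, &pa, &len));
	return 0;
}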
-static int BCMFASTPATH
-dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd)
+#ifdef IOCTLRESP_USE_CONSTMEM
+static INLINE void BCMFASTPATH
+dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
+{
+ memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+ retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
+ retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
+
+ return;
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
+static void BCMFASTPATH
+dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
{
dhd_prot_t *prot = dhd->prot;
int16 fillbufs;
- uint16 cnt = 64;
+ uint16 cnt = 256;
int retcount = 0;
fillbufs = prot->max_rxbufpost - prot->rxbufpost;
- while (fillbufs > 0) {
+ while (fillbufs >= RX_BUF_BURST) {
cnt--;
if (cnt == 0) {
/* find a better way to reschedule rx buf post if space not available */
- DHD_ERROR(("%s: h2d rx post ring not available to post host buffers\n", __FUNCTION__));
- DHD_ERROR(("%s: Current posted host buf count %d \n", __FUNCTION__, prot->rxbufpost));
+ DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
+ DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
break;
}
- /* Post in a burst of 8 buffers ata time */
+ /* Post in a burst of 32 buffers at a time */
fillbufs = MIN(fillbufs, RX_BUF_BURST);
/* Post buffers */
- retcount = dhd_prot_rxbufpost(dhd, fillbufs);
+ retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
- if (retcount > 0) {
+ if (retcount >= 0) {
prot->rxbufpost += (uint16)retcount;
-
+#ifdef DHD_LB_RXC
+ /* dhd_prot_rxbuf_post returns the number of buffers posted */
+ DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
+#endif /* DHD_LB_RXC */
/* how many more to post */
fillbufs = prot->max_rxbufpost - prot->rxbufpost;
} else {
fillbufs = 0;
}
}
-
- return 0;
}
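dhd_msgbuf_rxbuf_post above keeps topping the RxPost ring up toward max_rxbufpost, posting in bursts of RX_BUF_BURST and bounding the number of attempts when ring space is unavailable. The sketch below isolates that replenish arithmetic; the posting helper and its ring-space limit are invented stand-ins.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdio.h>

#define RX_BUF_BURST 32

/* Stand-in for dhd_prot_rxbuf_post(): pretend ring space limits us to 20 per call. */
static int post_rx_buffers(int want)
{
	return want < 20 ? want : 20;
}

/* Top up the outstanding rx buffer count toward max_rxbufpost. */
static void rxbuf_replenish(int *rxbufpost, int max_rxbufpost)
{
	int attempts = 256;                           /* bounded retry, like cnt above */
	int fill = max_rxbufpost - *rxbufpost;

	while (fill >= RX_BUF_BURST && attempts-- > 0) {
		int burst = fill < RX_BUF_BURST ? fill : RX_BUF_BURST;
		int posted = post_rx_buffers(burst);
		if (posted < 0)
			break;                        /* no ring space; try again later */
		*rxbufpost += posted;
		fill = max_rxbufpost - *rxbufpost;
	}
}

int main(void)
{
	int posted = 0;
	rxbuf_replenish(&posted, 256);
	printf("outstanding rx buffers: %d\n", posted);
	return 0;
}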
-/* Post count no of rx buffers down to dongle */
+/** Post 'count' no of rx buffers to dongle */
static int BCMFASTPATH
-dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count)
+dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
void *p;
uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
uint8 *rxbuf_post_tmp;
host_rxbuf_post_t *rxbuf_post;
- void* msg_start;
- dmaaddr_t physaddr;
+ void *msg_start;
+ dmaaddr_t pa;
uint32 pktlen;
- dhd_prot_t *prot = dhd->prot;
- msgbuf_ring_t * ring = prot->h2dring_rxp_subn;
uint8 i = 0;
uint16 alloced = 0;
unsigned long flags;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
DHD_GENERAL_LOCK(dhd, flags);
- /* Claim space for 'count' no of messages */
- msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced);
+
+ /* Claim space for exactly 'count' no of messages, for mitigation purpose */
+ msg_start = (void *)
+ dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
+
DHD_GENERAL_UNLOCK(dhd, flags);
if (msg_start == NULL) {
rxbuf_post_tmp = (uint8*)msg_start;
- /* loop through each message */
+ /* loop through each allocated message in the rxbuf post msgbuf_ring */
for (i = 0; i < alloced; i++) {
rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
/* Create a rx buffer */
if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+ dhd->rx_pktgetfail++;
break;
}
pktlen = PKTLEN(dhd->osh, p);
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0,
- ring->secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else
- physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
-
- if (PHYSADDRISZERO(physaddr)) {
- if (SECURE_DMA_ENAB(dhd->osh)) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
- ring->secdma, 0);
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+ DMA_RX, p, 0, ring->dma_buf.secdma, 0);
DHD_GENERAL_UNLOCK(dhd, flags);
- } else
- DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ } else {
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+ }
+
+ if (PHYSADDRISZERO(pa)) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+ ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
PKTFREE(dhd->osh, p, FALSE);
- DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__));
+ DHD_ERROR(("Invalid phyaddr 0\n"));
ASSERT(0);
break;
}
PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
pktlen = PKTLEN(dhd->osh, p);
- /* CMN msg header */
+ /* Common msg header */
rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
rxbuf_post->cmn_hdr.if_id = 0;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
+#if defined(DHD_LB_RXC)
+ if (use_rsv_pktid == TRUE) {
+ bcm_workq_t *workq = &prot->rx_compl_cons;
+ int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+ if (elem_ix == BCM_RING_EMPTY) {
+ DHD_ERROR(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
+ pktid = DHD_PKTID_INVALID;
+ goto alloc_pkt_id;
+ } else {
+ uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
+ pktid = *elem;
+ }
- /* get the lock before calling NATIVE_TO_PKTID */
+ /* Now populate the previous locker with valid information */
+ if (pktid != DHD_PKTID_INVALID) {
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, p, pktid,
+ pa, pktlen, DMA_RX, NULL, ring->dma_buf.secdma,
+ PKTTYPE_DATA_RX);
+ }
+ } else
+#endif /* DHD_LB_RXC */
+ {
+#if defined(DHD_LB_RXC)
+alloc_pkt_id:
+#endif
+#if defined(DHD_PCIE_PKTID)
+ /* get the lock before calling DHD_NATIVE_TO_PKTID */
DHD_GENERAL_LOCK(dhd, flags);
+#endif
+ pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_map_handle, p, pa,
+ pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
- rxbuf_post->cmn_hdr.request_id =
- htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
- pktlen, DMA_RX, ring->secdma));
-
+#if defined(DHD_PCIE_PKTID)
/* free lock */
DHD_GENERAL_UNLOCK(dhd, flags);
- if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+ if (pktid == DHD_PKTID_INVALID) {
+
if (SECURE_DMA_ENAB(dhd->osh)) {
DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
- ring->secdma, 0);
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+ ring->dma_buf.secdma, 0);
DHD_GENERAL_UNLOCK(dhd, flags);
- } else
- DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
PKTFREE(dhd->osh, p, FALSE);
- DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ DHD_ERROR(("Pktid pool depleted.\n"));
break;
}
+#endif /* DHD_PCIE_PKTID */
+ }
rxbuf_post->data_buf_len = htol16((uint16)pktlen);
- rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+ rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
rxbuf_post->data_buf_addr.low_addr =
- htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset);
+ htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
if (prot->rx_metadata_offset) {
rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
- rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
- rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+ rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
} else {
rxbuf_post->metadata_buf_len = 0;
rxbuf_post->metadata_buf_addr.high_addr = 0;
rxbuf_post->metadata_buf_addr.low_addr = 0;
}
+#if defined(DHD_PKTID_AUDIT_RING)
+ DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+
/* Move rxbuf_post_tmp to next item */
- rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
+ rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
}
if (i < alloced) {
- if (RING_WRITE_PTR(ring) < (alloced - i))
- RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - (alloced - i);
- else
- RING_WRITE_PTR(ring) -= (alloced - i);
+ if (ring->wr < (alloced - i)) {
+ ring->wr = ring->max_items - (alloced - i);
+ } else {
+ ring->wr -= (alloced - i);
+ }
alloced = i;
}
- /* Update the write pointer in TCM & ring bell */
- if (alloced > 0)
- prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced);
+ /* Update ring's WR index and ring doorbell to dongle */
+ if (alloced > 0) {
+ dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ }
return alloced;
+} /* dhd_prot_rxbuf_post */
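When fewer work items are filled than ring slots were claimed (the i < alloced case above), the ring's write index has to be moved back by the unused count, wrapping at the ring boundary. The sketch below shows a general modular rollback of a write index; it illustrates the idea rather than reproducing the exact branch above, which appears to rely on a single claim never spanning the wrap point and can therefore be simpler.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdio.h>

/* Roll the ring write index back by 'unused' slots, handling wrap-around. */
static unsigned int ring_wr_rollback(unsigned int wr, unsigned int max_items,
                                     unsigned int unused)
{
	if (wr < unused)
		return max_items - (unused - wr);
	return wr - unused;
}

int main(void)
{
	/* wr=2 on a 256-item ring, 5 claimed slots were never filled */
	printf("new wr = %u\n", ring_wr_rollback(2, 256, 5));   /* prints 253 */
	return 0;
}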
+
+#ifdef IOCTLRESP_USE_CONSTMEM
+static int
+alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+ int err;
+ memset(retbuf, 0, sizeof(dhd_dma_buf_t));
+
+ if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
+ DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+
+ return BCME_OK;
}
+static void
+free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
+{
+ /* retbuf (declared on stack) not fully populated ... */
+ if (retbuf->va) {
+ uint32 dma_pad;
+ dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
+ retbuf->len = IOCT_RETBUF_SIZE;
+ retbuf->_alloced = retbuf->len + dma_pad;
+ /* JIRA:SWWLAN-70021 The pa value would be overwritten by the dongle.
+ * Need to reassign before free to pass the check in dhd_dma_buf_audit().
+ */
+ retbuf->pa = DMA_MAP(dhd->osh, retbuf->va, retbuf->len, DMA_RX, NULL, NULL);
+ }
+
+ dhd_dma_buf_free(dhd, retbuf);
+ return;
+}
+#endif /* IOCTLRESP_USE_CONSTMEM */
+
static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
{
void *p;
uint16 pktsz;
ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
- dmaaddr_t physaddr;
+ dmaaddr_t pa;
uint32 pktlen;
dhd_prot_t *prot = dhd->prot;
uint16 alloced = 0;
unsigned long flags;
+ dhd_dma_buf_t retbuf;
+ void *dmah = NULL;
+ uint32 pktid;
+ void *map_handle;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
return -1;
}
+ memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
+
if (event_buf) {
/* Allocate packet for event buffer post */
pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
}
- if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
- DHD_ERROR(("%s:%d: PKTGET for %s rxbuf failed\n",
- __FUNCTION__, __LINE__, event_buf ?
- "event" : "ioctl"));
- return -1;
- }
-
- pktlen = PKTLEN(dhd->osh, p);
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
- DMA_RX, p, 0, prot->h2dring_ctrl_subn->secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!event_buf) {
+ if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
+ DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
+ return -1;
+ }
+ ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
+ p = retbuf.va;
+ pktlen = retbuf.len;
+ pa = retbuf.pa;
+ dmah = retbuf.dmah;
} else
- physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
+#else
+ p = PKTGET(dhd->osh, pktsz, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ if (p == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
+ __FUNCTION__, __LINE__, event_buf ?
+ "EVENT" : "IOCTL RESP"));
+ dhd->rx_pktgetfail++;
+ return -1;
+ }
- if (PHYSADDRISZERO(physaddr)) {
+ pktlen = PKTLEN(dhd->osh, p);
- DHD_ERROR(("%s: Invalid phyaddr 0\n", __FUNCTION__));
- ASSERT(0);
- goto free_pkt_return;
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
+ DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+ }
+
+ if (PHYSADDRISZERO(pa)) {
+ DHD_ERROR(("Invalid physaddr 0\n"));
+ ASSERT(0);
+ goto free_pkt_return;
+ }
}
DHD_GENERAL_LOCK(dhd, flags);
- rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
+
if (rxbuf_post == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer"
- " for %s\n", __FUNCTION__, __LINE__, event_buf ? "event" :
- "ioctl"));
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
- prot->h2dring_ctrl_subn->secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else
- DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+ __FUNCTION__, __LINE__));
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (event_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+ ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
+ }
goto free_pkt_return;
}
/* CMN msg header */
- if (event_buf)
+ if (event_buf) {
rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
- else
+ } else {
rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
- rxbuf_post->cmn_hdr.if_id = 0;
+ }
- rxbuf_post->cmn_hdr.request_id =
- htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX,
- prot->h2dring_ctrl_subn->secdma));
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!event_buf) {
+ map_handle = dhd->prot->pktid_map_handle_ioctl;
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen,
+ DMA_RX, dmah, ring->dma_buf.secdma, PKTTYPE_IOCTL_RX);
+ } else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ map_handle = dhd->prot->pktid_map_handle;
+ pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
+ p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
+ event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX);
+ }
- if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
- if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
- RING_WRITE_PTR(prot->h2dring_ctrl_subn) =
- RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1;
- else
- RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
+ if (pktid == DHD_PKTID_INVALID) {
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ ring->wr--;
+ }
DHD_GENERAL_UNLOCK(dhd, flags);
- if (SECURE_DMA_ENAB(dhd->osh)) {
- DHD_GENERAL_LOCK(dhd, flags);
- SECURE_DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0,
- prot->h2dring_ctrl_subn->secdma, 0);
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else
- DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ goto free_pkt_return;
+ }
+
+#if defined(DHD_PKTID_AUDIT_RING)
+ DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ rxbuf_post->cmn_hdr.request_id = htol32(pktid);
+ rxbuf_post->cmn_hdr.if_id = 0;
+ rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+#if defined(DHD_PCIE_PKTID)
+ if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+ if (ring->wr == 0) {
+ ring->wr = ring->max_items - 1;
+ } else {
+ ring->wr--;
+ }
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (event_buf)
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
+ ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ } else {
+ DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
+ }
+ }
goto free_pkt_return;
}
+#endif /* DHD_PCIE_PKTID */
rxbuf_post->cmn_hdr.flags = 0;
+#ifndef IOCTLRESP_USE_CONSTMEM
rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
- rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
- rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+#else
+ rxbuf_post->host_buf_len = htol16((uint16)pktlen);
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
- /* Update the write pointer in TCM & ring bell */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return 1;
free_pkt_return:
- PKTFREE(dhd->osh, p, FALSE);
+#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!event_buf) {
+ free_ioctl_return_buffer(dhd, &retbuf);
+ } else
+#endif /* IOCTLRESP_USE_CONSTMEM */
+ {
+ dhd_prot_packet_free(dhd, p,
+ event_buf ? PKTTYPE_EVENT_RX : PKTTYPE_IOCTL_RX,
+ FALSE);
+ }
return -1;
-}
+} /* dhd_prot_rxbufpost_ctrl */
static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
uint32 i = 0;
int32 ret_val;
- DHD_INFO(("%s: max to post %d, event %d\n", __FUNCTION__, max_to_post, event_buf));
+ DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
while (i < max_to_post) {
ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
- if (ret_val < 0)
+ if (ret_val < 0) {
break;
+ }
i++;
}
- DHD_INFO(("%s: posted %d buffers to event_pool/ioctl_resp_pool %d\n", __FUNCTION__, i, event_buf));
+ DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
return (uint16)i;
}
-static int
+static void
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
- uint16 retcnt = 0;
+ int max_to_post;
- DHD_INFO(("%s: ioctl resp buf post\n", __FUNCTION__));
-
- if (dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
- return 0;
+ DHD_INFO(("ioctl resp buf post\n"));
+ max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
+ __FUNCTION__));
+ return;
}
-
- retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
- prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
- prot->cur_ioctlresp_bufs_posted += retcnt;
- return retcnt;
+ prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ FALSE, max_to_post);
}
-static int
+static void
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
- uint16 retcnt = 0;
+ int max_to_post;
- if (dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
- return 0;
+ max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_INFO(("%s: Cannot post more than max event buffers\n",
+ __FUNCTION__));
+ return;
}
-
- retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
- prot->max_eventbufpost - prot->cur_event_bufs_posted);
-
- prot->cur_event_bufs_posted += retcnt;
- return retcnt;
+ prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ TRUE, max_to_post);
}
+/** called when DHD needs to check for 'receive complete' messages from the dongle */
bool BCMFASTPATH
dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
- dhd_prot_t *prot = dhd->prot;
bool more = TRUE;
uint n = 0;
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_rx_cpln;
/* Process all the messages - DTOH direction */
- while (TRUE) {
- uint8 *src_addr;
- uint16 src_len;
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
- /* Store current read pointer */
- /* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */
- prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln);
+ if (dhd->hang_was_sent) {
+ more = FALSE;
+ break;
+ }
- /* Get the message from ring */
- src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
- if (src_addr == NULL) {
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
more = FALSE;
break;
}
/* Prefetch data to populate the cache */
- OSL_PREFETCH(src_addr);
+ OSL_PREFETCH(msg_addr);
- if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr,
- src_len) != BCME_OK) {
- prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
- DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
- __FUNCTION__, src_len));
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ __FUNCTION__, ring->name, msg_addr, msg_len));
}
+ /* Update read pointer */
+ dhd_prot_upd_read_idx(dhd, ring);
+
/* After batch processing, check RX bound */
- n += src_len/RING_LEN_ITEMS(prot->d2hring_rx_cpln);
+ n += msg_len / ring->item_len;
if (n >= bound) {
break;
}
return more;
}
+/**
+ * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
+ */
void
-dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info)
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
{
- uint16 r_index = 0;
- msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info;
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
/* Update read pointer */
if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
- r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
- ring->ringstate->r_offset = r_index;
+ ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
}
- DHD_TRACE(("%s: flow %d, write %d read %d \n\n", __FUNCTION__, flow_id, RING_WRITE_PTR(ring),
- RING_READ_PTR(ring)));
+ DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
+ ring->idx, flowid, ring->wr, ring->rd));
/* Need more logic here, but for now use it directly */
- dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE);
+ dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
}
-
+/** called when DHD needs to check for 'transmit complete' messages from the dongle */
bool BCMFASTPATH
dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
- dhd_prot_t *prot = dhd->prot;
bool more = TRUE;
uint n = 0;
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
/* Process all the messages - DTOH direction */
- while (TRUE) {
- uint8 *src_addr;
- uint16 src_len;
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
+
+ if (dhd->hang_was_sent) {
+ more = FALSE;
+ break;
+ }
- src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
- if (src_addr == NULL) {
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
more = FALSE;
break;
}
/* Prefetch data to populate the cache */
- OSL_PREFETCH(src_addr);
+ OSL_PREFETCH(msg_addr);
- if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr,
- src_len) != BCME_OK) {
- DHD_ERROR(("%s: Error at process txcmpl msgbuf of len %d\n",
- __FUNCTION__, src_len));
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ __FUNCTION__, ring->name, msg_addr, msg_len));
}
/* Write to dngl rd ptr */
- prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);
+ dhd_prot_upd_read_idx(dhd, ring);
/* After batch processing, check bound */
- n += src_len/RING_LEN_ITEMS(prot->d2hring_tx_cpln);
+ n += msg_len / ring->item_len;
if (n >= bound) {
break;
}
return more;
}
+/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
int BCMFASTPATH
-dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)
+dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
/* Process all the messages - DTOH direction */
- while (TRUE) {
- uint8 *src_addr;
- uint16 src_len;
- src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len);
+ while (!dhd_is_device_removed(dhd)) {
+ uint8 *msg_addr;
+ uint32 msg_len;
- if (src_addr == NULL) {
+ if (dhd->hang_was_sent) {
+ break;
+ }
+
+ /* Get the address of the next message to be read from ring */
+ msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
+ if (msg_addr == NULL) {
break;
}
/* Prefetch data to populate the cache */
- OSL_PREFETCH(src_addr);
- if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr,
- src_len) != BCME_OK) {
- DHD_ERROR(("%s: Error at process ctrlmsgbuf of len %d\n",
- __FUNCTION__, src_len));
+ OSL_PREFETCH(msg_addr);
+
+ if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
+ DHD_ERROR(("%s: process %s msg addr %p len %d\n",
+ __FUNCTION__, ring->name, msg_addr, msg_len));
}
/* Write to dngl rd ptr */
- prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln);
+ dhd_prot_upd_read_idx(dhd, ring);
}
return 0;
}
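Each D2H work item starts with a cmn_msg_hdr_t whose msg_type selects a handler out of table_lookup[] in the function that follows. A minimal sketch of that table-driven dispatch is shown below; the message layout, enum values, and handler names are made up for illustration.

/* Illustrative sketch only -- hypothetical names, not driver code. */
#include <stdint.h>
#include <stdio.h>

enum { MSG_TX_COMPLETE = 0, MSG_RX_COMPLETE = 1, MSG_MAX = 2 };

struct cmn_hdr {
	uint8_t  msg_type;
	uint32_t request_id;
};

typedef void (*msg_handler_t)(const struct cmn_hdr *msg);

static void handle_tx_complete(const struct cmn_hdr *msg)
{
	printf("tx complete, request_id %u\n", msg->request_id);
}

static void handle_rx_complete(const struct cmn_hdr *msg)
{
	printf("rx complete, request_id %u\n", msg->request_id);
}

/* Per-type handler table, indexed by msg_type (NULL entries are skipped). */
static const msg_handler_t handlers[MSG_MAX] = {
	[MSG_TX_COMPLETE] = handle_tx_complete,
	[MSG_RX_COMPLETE] = handle_rx_complete,
};

static void dispatch(const struct cmn_hdr *msg)
{
	if (msg->msg_type < MSG_MAX && handlers[msg->msg_type])
		handlers[msg->msg_type](msg);
}

int main(void)
{
	struct cmn_hdr m = { MSG_RX_COMPLETE, 7 };
	dispatch(&m);
	return 0;
}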
+/**
+ * Consume messages out of the D2H ring. Ensure that the message's DMA to host
+ * memory has completed, before invoking the message handler via a table lookup
+ * of the cmn_msg_hdr::msg_type.
+ */
static int BCMFASTPATH
-dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
-{
- dhd_prot_t *prot = dhd->prot;
- uint32 cur_dma_len = 0;
- int ret = BCME_OK;
-
- DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len));
-
- while (len > 0) {
- ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset));
- if (prot->rx_dataoffset) {
- cur_dma_len = *(uint32 *) buf;
- ASSERT(cur_dma_len <= len);
- buf += prot->rx_dataoffset;
- len -= (uint16)prot->rx_dataoffset;
- }
- else {
- cur_dma_len = len;
- }
- if (dhd_process_msgtype(dhd, ring, buf, (uint16)cur_dma_len) != BCME_OK) {
- DHD_ERROR(("%s: Error at process msg of dmalen %d\n",
- __FUNCTION__, cur_dma_len));
- ret = BCME_ERROR;
- }
-
- len -= (uint16)cur_dma_len;
- buf += cur_dma_len;
- }
- return ret;
-}
-
-static int BCMFASTPATH
-dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
{
- uint16 pktlen = len;
- uint16 msglen;
- uint8 msgtype;
+ int buf_len = len;
+ uint16 item_len;
+ uint8 msg_type;
cmn_msg_hdr_t *msg = NULL;
int ret = BCME_OK;
-#if defined(PCIE_D2H_SYNC_BZERO)
- uint8 *buf_head = buf;
-#endif /* PCIE_D2H_SYNC_BZERO */
-
- ASSERT(ring && ring->ringmem);
- msglen = RING_LEN_ITEMS(ring);
- if (msglen == 0) {
- DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n",
- __FUNCTION__, ring->idx, msglen, pktlen));
+ ASSERT(ring);
+ item_len = ring->item_len;
+ if (item_len == 0) {
+ DHD_ERROR(("%s: ringidx %d item_len %d buf_len %d\n",
+ __FUNCTION__, ring->idx, item_len, buf_len));
return BCME_ERROR;
}
- while (pktlen > 0) {
+ while (buf_len > 0) {
+ if (dhd->hang_was_sent) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+
msg = (cmn_msg_hdr_t *)buf;
+ /*
+ * Update curr_rd to the ring index the work item is being fetched from.
+ * If that work item later fails the LIVELOCK check, we can then report
+ * the exact read index that held the corrupted work item.
+ */
+ if ((ring->curr_rd + 1) >= ring->max_items) {
+ ring->curr_rd = 0;
+ } else {
+ ring->curr_rd += 1;
+ }
+
#if defined(PCIE_D2H_SYNC)
- /* Wait until DMA completes, then fetch msgtype */
- msgtype = dhd->prot->d2h_sync_cb(dhd, ring, msg, msglen);
+ /* Wait until DMA completes, then fetch msg_type */
+ msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
#else
- msgtype = msg->msg_type;
+ msg_type = msg->msg_type;
#endif /* !PCIE_D2H_SYNC */
- DHD_INFO(("%s: msgtype %d, msglen is %d, pktlen is %d\n", __FUNCTION__,
- msgtype, msglen, pktlen));
- if (msgtype == MSG_TYPE_LOOPBACK) {
- bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
- DHD_ERROR(("%s: MSG_TYPE_LOOPBACK, len %d\n", __FUNCTION__, msglen));
- }
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(buf + item_len);
+
+ DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
+ msg_type, item_len, buf_len));
+ if (msg_type == MSG_TYPE_LOOPBACK) {
+ bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
+ DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
+ }
- if (msgtype >= DHD_PROT_FUNCS) {
- DHD_ERROR(("%s: msgtype %d, msglen is %d, pktlen is %d \n",
- __FUNCTION__, msgtype, msglen, pktlen));
+ ASSERT(msg_type < DHD_PROT_FUNCS);
+ if (msg_type >= DHD_PROT_FUNCS) {
+ DHD_ERROR(("%s: msg_type %d item_len %d buf_len %d\n",
+ __FUNCTION__, msg_type, item_len, buf_len));
ret = BCME_ERROR;
goto done;
}
- if (table_lookup[msgtype]) {
- table_lookup[msgtype](dhd, buf, msglen);
+ if (table_lookup[msg_type]) {
+ table_lookup[msg_type](dhd, buf);
}
- if (pktlen < msglen) {
+ if (buf_len < item_len) {
ret = BCME_ERROR;
goto done;
}
- pktlen = pktlen - msglen;
- buf = buf + msglen;
-
- if (ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
- prot_early_upd_rxcpln_read_idx(dhd, ring);
+ buf_len = buf_len - item_len;
+ buf = buf + item_len;
}
-done:
-#if defined(PCIE_D2H_SYNC_BZERO)
- OSL_CACHE_FLUSH(buf_head, len - pktlen); /* Flush the bzeroed msg */
-#endif /* PCIE_D2H_SYNC_BZERO */
+done:
#ifdef DHD_RX_CHAINING
dhd_rxchain_commit(dhd);
#endif
-
+#if defined(DHD_LB)
+ dhd_lb_dispatch(dhd, ring->idx);
+#endif
return ret;
-}
+} /* dhd_prot_process_msgtype */
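/*
 * Illustrative sketch (not part of the patch) of the table_lookup[] dispatch
 * used by dhd_prot_process_msgtype() above: the msg_type field of every work
 * item indexes an array of handler functions, with out-of-range types rejected.
 * The type values and handler names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MSG_NOOP   0
#define DEMO_MSG_TXCPL  1
#define DEMO_MSG_MAX    2

typedef void (*demo_msg_handler_t)(void *ctx, void *msg);

static void demo_noop(void *ctx, void *msg) { (void)ctx; (void)msg; }
static void demo_txcpl(void *ctx, void *msg) { (void)ctx; (void)msg; puts("tx complete"); }

static const demo_msg_handler_t demo_table[DEMO_MSG_MAX] = {
	[DEMO_MSG_NOOP]  = demo_noop,
	[DEMO_MSG_TXCPL] = demo_txcpl,
};

/* dispatch one work item; out-of-range or unregistered types are rejected */
static int demo_dispatch(void *ctx, uint8_t msg_type, void *msg)
{
	if (msg_type >= DEMO_MSG_MAX || demo_table[msg_type] == NULL)
		return -1;
	demo_table[msg_type](ctx, msg);
	return 0;
}

int main(void)
{
	return demo_dispatch(NULL, DEMO_MSG_TXCPL, NULL);
}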
static void
-dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_noop(dhd_pub_t *dhd, void *msg)
{
return;
}
+/** called on MSG_TYPE_RING_STATUS message received from dongle */
static void
-dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
{
- pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
- DHD_ERROR(("%s: ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
- __FUNCTION__,
+ pcie_ring_status_t *ring_status = (pcie_ring_status_t *)msg;
+ DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
/* How do we track this to pair it with ??? */
return;
}
+/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
static void
-dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
{
- pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
- DHD_ERROR(("%s: gen status: request_id %d, status 0x%04x, flow ring %d \n",
- __FUNCTION__,
+ pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
+ DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
gen_status->compl_hdr.flow_ring_id));
return;
}
+/**
+ * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
+ * dongle received the ioctl message in dongle memory.
+ */
static void
-dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
{
- ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;
+ uint32 pktid;
+ ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
+ unsigned long flags;
+
+ pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
+
+#if defined(DHD_PKTID_AUDIT_RING)
+ /* Skip DHD_IOCTL_REQ_PKTID = 0xFFFE */
+ if (pktid != DHD_IOCTL_REQ_PKTID) {
+ if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
+ DHD_TEST_IS_ALLOC) == BCME_ERROR) {
+ prhex("dhd_prot_ioctack_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_RING */
- DHD_CTL(("%s: ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
- __FUNCTION__,
+ DHD_GENERAL_LOCK(dhd, flags);
+ if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
+ (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+ dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
+ } else {
+ DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
+ __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+ prhex("dhd_prot_ioctack_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ }
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
ioct_ack->compl_hdr.flow_ring_id));
if (ioct_ack->compl_hdr.status != 0) {
- DHD_ERROR(("%s: got an error status for the ioctl request...need to handle that\n",
- __FUNCTION__));
+ DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
}
-
-#if defined(PCIE_D2H_SYNC_BZERO)
- memset(buf, 0, msglen);
-#endif /* PCIE_D2H_SYNC_BZERO */
}
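/*
 * Illustrative sketch (not part of the patch) of the two-flag handshake that
 * dhd_prot_ioctack_process() / dhd_prot_ioctcmplt_process() above implement:
 * submitting an ioctl sets both ACK_PENDING and RESP_PENDING, the dongle's ACK
 * clears ACK_PENDING, and the completion is only accepted while RESP_PENDING
 * is still set. Flag names and the demo state variable are illustrative.
 */
#include <stdio.h>

#define DEMO_IOCTL_ACK_PENDING  0x1
#define DEMO_IOCTL_RESP_PENDING 0x2

static unsigned demo_state;

static void demo_submit(void)
{
	demo_state = DEMO_IOCTL_ACK_PENDING | DEMO_IOCTL_RESP_PENDING;
}

static int demo_on_ack(void)
{
	if ((demo_state & DEMO_IOCTL_ACK_PENDING) && (demo_state & DEMO_IOCTL_RESP_PENDING)) {
		demo_state &= ~DEMO_IOCTL_ACK_PENDING;	/* ack consumed, response still awaited */
		return 0;
	}
	return -1;					/* unexpected ack: log and drop */
}

static int demo_on_completion(void)
{
	if ((demo_state & DEMO_IOCTL_ACK_PENDING) || !(demo_state & DEMO_IOCTL_RESP_PENDING))
		return -1;				/* out-of-order or duplicate completion */
	demo_state = 0;					/* transaction done, wake the waiter */
	return 0;
}

int main(void)
{
	demo_submit();
	printf("ack=%d cmplt=%d\n", demo_on_ack(), demo_on_completion());
	return 0;
}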
+/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
static void
-dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
{
- uint16 status;
- uint32 resp_len = 0;
+ dhd_prot_t *prot = dhd->prot;
uint32 pkt_id, xt_id;
- ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf;
+ ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
+ void *pkt;
+ unsigned long flags;
+ dhd_dma_buf_t retbuf;
+
+ memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
- resp_len = ltoh16(ioct_resp->resp_len);
- xt_id = ltoh16(ioct_resp->trans_id);
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
- status = ioct_resp->compl_hdr.status;
-#if defined(PCIE_D2H_SYNC_BZERO)
- memset(buf, 0, msglen);
-#endif /* PCIE_D2H_SYNC_BZERO */
+#if defined(DHD_PKTID_AUDIT_RING)
+ {
+ int ret;
+#ifndef IOCTLRESP_USE_CONSTMEM
+ ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pkt_id,
+ DHD_DUPLICATE_FREE);
+#else
+ ret = DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle_ioctl, pkt_id,
+ DHD_DUPLICATE_FREE);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ if (ret == BCME_ERROR) {
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ }
+ }
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
+ !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
+ DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
+ __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
+ prhex("dhd_prot_ioctcmplt_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return;
+ }
+#ifndef IOCTLRESP_USE_CONSTMEM
+ pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
+#else
+ dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
+ pkt = retbuf.va;
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ if (!pkt) {
+ prot->ioctl_state = 0;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
+ return;
+ }
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
+ prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
+ xt_id = ltoh16(ioct_resp->trans_id);
+ if (xt_id != prot->ioctl_trans_id) {
+ ASSERT(0);
+ goto exit;
+ }
- DHD_CTL(("%s: IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n", __FUNCTION__,
- pkt_id, xt_id, status, resp_len));
+ DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
+ pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
- dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len);
- dhd_os_ioctl_resp_wake(dhd);
+ if (prot->ioctl_resplen > 0) {
+#ifndef IOCTLRESP_USE_CONSTMEM
+ bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
+#else
+ bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
+ }
+
+ /* wake up any dhd_os_ioctl_resp_wait() */
+ dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
+
+exit:
+#ifndef IOCTLRESP_USE_CONSTMEM
+ dhd_prot_packet_free(dhd, pkt,
+ PKTTYPE_IOCTL_RX, FALSE);
+#else
+ free_ioctl_return_buffer(dhd, &retbuf);
+#endif /* !IOCTLRESP_USE_CONSTMEM */
}
+/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH
-dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
{
dhd_prot_t *prot = dhd->prot;
host_txbuf_cmpl_t * txstatus;
unsigned long flags;
uint32 pktid;
- void *pkt;
- ulong pa;
- uint32 pa_len;
+ void *pkt = NULL;
+ dmaaddr_t pa;
+ uint32 len;
+ void *dmah;
void *secdma;
+
/* locks required to protect circular buffer accesses */
DHD_GENERAL_LOCK(dhd, flags);
- txstatus = (host_txbuf_cmpl_t *)buf;
+ txstatus = (host_txbuf_cmpl_t *)msg;
pktid = ltoh32(txstatus->cmn_hdr.request_id);
- DHD_INFO(("%s: txstatus for pktid 0x%04x\n", __FUNCTION__, pktid));
- if (prot->active_tx_count)
+#if defined(DHD_PKTID_AUDIT_RING)
+ if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
+ DHD_DUPLICATE_FREE) == BCME_ERROR) {
+ prhex("dhd_prot_txstatus_process:",
+ (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
+ }
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+ if (prot->active_tx_count) {
prot->active_tx_count--;
- else
- DHD_ERROR(("%s: Extra packets are freed\n", __FUNCTION__));
+
+ /* Release the Lock when no more tx packets are pending */
+ if (prot->active_tx_count == 0)
+ DHD_OS_WAKE_UNLOCK(dhd);
+
+ } else {
+ DHD_ERROR(("Extra packets are freed\n"));
+ }
ASSERT(pktid != 0);
- pkt = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len, secdma);
+
+#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
+ {
+ int elem_ix;
+ void **elem;
+ bcm_workq_t *workq;
+
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
+ pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+
+ workq = &prot->tx_compl_prod;
+ /*
+ * Produce the packet into the tx_compl workq for the tx compl tasklet
+ * to consume.
+ */
+ OSL_PREFETCH(PKTTAG(pkt));
+
+ /* fetch next available slot in workq */
+ elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+
+ DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
+ DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
+
+ if (elem_ix == BCM_RING_FULL) {
+ DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
+ goto workq_ring_full;
+ }
+
+ elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
+ *elem = pkt;
+
+ smp_wmb();
+
+ /* Sync WR index to consumer if the SYNC threshold has been reached */
+ if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
+ bcm_workq_prod_sync(workq);
+ prot->tx_compl_prod_sync = 0;
+ }
+
+ DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
+ __FUNCTION__, pkt, prot->tx_compl_prod_sync));
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return;
+ }
+
+workq_ring_full:
+
+#endif /* !DHD_LB_TXC */
+
+	/*
+	 * We can get here either when DHD_LB_TXC is not enabled, or when it is
+	 * enabled but the tx_compl workq is full.
+	 */
+ if (pkt == NULL) {
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle,
+ pktid, pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+ }
+
if (pkt) {
if (SECURE_DMA_ENAB(dhd->osh)) {
int offset = 0;
if (dhd->prot->tx_metadata_offset)
offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
- SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
- (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, 0,
+ SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
+ (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
secdma, offset);
- } else
- DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, dmah);
-
+ } else {
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+ }
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
#endif
#if DHD_DBG_SHOW_METADATA
- if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+ if (dhd->prot->metadata_dbg &&
+ dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
uchar *ptr;
/* The Ethernet header of TX frame was copied and removed.
* Here, move the data pointer forward by Ethernet header size.
}
#endif /* DHD_DBG_SHOW_METADATA */
PKTFREE(dhd->osh, pkt, TRUE);
+ DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
+ txstatus->tx_status);
}
-#if defined(PCIE_D2H_SYNC_BZERO)
- memset(buf, 0, msglen);
-#endif /* PCIE_D2H_SYNC_BZERO */
-
DHD_GENERAL_UNLOCK(dhd, flags);
return;
-}
+} /* dhd_prot_txstatus_process */
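/*
 * Illustrative sketch (not part of the patch) of the tx-completion workq
 * production under DHD_LB_TXC above: the producer claims a slot, stores the
 * work item, and only publishes its write index to the consumer after a batch
 * of SYNC items, amortizing the cross-CPU synchronization. This is a
 * simplified single-producer/single-consumer ring, not the driver's bcm_workq.
 */
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_WORKQ_SZ   64u	/* power of two */
#define DEMO_WORKQ_SYNC 8u	/* publish every 8 items */

struct demo_workq {
	void *elem[DEMO_WORKQ_SZ];
	unsigned local_wr;		/* producer-private write index */
	unsigned pending;		/* items produced since last publish */
	_Atomic unsigned wr;		/* write index visible to the consumer */
	_Atomic unsigned rd;		/* consumer's read index */
};

/* returns 0 on success, -1 when the ring is full */
static int demo_workq_prod(struct demo_workq *q, void *item)
{
	unsigned rd = atomic_load_explicit(&q->rd, memory_order_acquire);

	if (q->local_wr - rd == DEMO_WORKQ_SZ)
		return -1;			/* full: fall back to inline processing */

	q->elem[q->local_wr % DEMO_WORKQ_SZ] = item;
	q->local_wr++;

	if (++q->pending >= DEMO_WORKQ_SYNC) {	/* batch reached: make items visible */
		atomic_store_explicit(&q->wr, q->local_wr, memory_order_release);
		q->pending = 0;
	}
	return 0;
}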
+/** called on MSG_TYPE_WL_EVENT message received from dongle */
static void
-dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len)
+dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
{
wlevent_req_msg_t *evnt;
uint32 bufid;
void* pkt;
unsigned long flags;
dhd_prot_t *prot = dhd->prot;
- int post_cnt = 0;
- bool zero_posted = FALSE;
/* Event complete header */
- evnt = (wlevent_req_msg_t *)buf;
+ evnt = (wlevent_req_msg_t *)msg;
bufid = ltoh32(evnt->cmn_hdr.request_id);
+
+#if defined(DHD_PKTID_AUDIT_RING)
+ if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, bufid,
+ DHD_DUPLICATE_FREE) == BCME_ERROR) {
+ prhex("dhd_prot_event_process:",
+ (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
+ }
+#endif /* DHD_PKTID_AUDIT_RING */
+
buflen = ltoh16(evnt->event_data_len);
ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
/* Post another rxbuf to the device */
- if (prot->cur_event_bufs_posted)
+ if (prot->cur_event_bufs_posted) {
prot->cur_event_bufs_posted--;
- else
- zero_posted = TRUE;
-
-
- post_cnt = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
- if (zero_posted && (post_cnt <= 0)) {
- return;
}
-
-#if defined(PCIE_D2H_SYNC_BZERO)
- memset(buf, 0, len);
-#endif /* PCIE_D2H_SYNC_BZERO */
+ dhd_msgbuf_rxbuf_post_event_bufs(dhd);
/* locks required to protect pktid_map */
DHD_GENERAL_LOCK(dhd, flags);
- pkt = dhd_prot_packet_get(dhd, ltoh32(bufid));
+ pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
DHD_GENERAL_UNLOCK(dhd, flags);
- if (!pkt)
+ if (!pkt) {
return;
+ }
/* DMA RX offset updated through shared area */
- if (dhd->prot->rx_dataoffset)
+ if (dhd->prot->rx_dataoffset) {
PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+ }
PKTSETLEN(dhd->osh, pkt, buflen);
dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}
+/** called on MSG_TYPE_RX_CMPLT message received from dongle */
static void BCMFASTPATH
-dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen)
+dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void *msg)
{
host_rxbuf_cmpl_t *rxcmplt_h;
uint16 data_offset; /* offset at which data starts */
- void * pkt;
+ void *pkt;
unsigned long flags;
- static uint8 current_phase = 0;
uint ifidx;
+ uint32 pktid;
+#if defined(DHD_LB_RXC)
+ const bool free_pktid = FALSE;
+#else
+ const bool free_pktid = TRUE;
+#endif /* DHD_LB_RXC */
/* RXCMPLT HDR */
- rxcmplt_h = (host_rxbuf_cmpl_t *)buf;
-
- /* Post another set of rxbufs to the device */
- dhd_prot_return_rxbuf(dhd, 1);
+ rxcmplt_h = (host_rxbuf_cmpl_t *)msg;
/* offset from which data starts is populated in rxstatus0 */
data_offset = ltoh16(rxcmplt_h->data_offset);
+ pktid = ltoh32(rxcmplt_h->cmn_hdr.request_id);
+
+#if defined(DHD_PKTID_AUDIT_RING)
+ if (DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_map_handle, pktid,
+ DHD_DUPLICATE_FREE) == BCME_ERROR) {
+ prhex("dhd_prot_rxcmplt_process:",
+ (uchar *)msg, D2HRING_RXCMPLT_ITEMSIZE);
+ }
+#endif /* DHD_PKTID_AUDIT_RING */
+
DHD_GENERAL_LOCK(dhd, flags);
- pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id));
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_DATA_RX, free_pktid);
DHD_GENERAL_UNLOCK(dhd, flags);
if (!pkt) {
return;
}
- DHD_INFO(("%s: id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
- __FUNCTION__,
+ /* Post another set of rxbufs to the device */
+ dhd_prot_return_rxbuf(dhd, pktid, 1);
+
+ DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
ltoh16(rxcmplt_h->metadata_len)));
-
#if DHD_DBG_SHOW_METADATA
- if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
+ if (dhd->prot->metadata_dbg &&
+ dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
uchar *ptr;
ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
/* header followed by data */
}
#endif /* DHD_DBG_SHOW_METADATA */
- if (current_phase != rxcmplt_h->cmn_hdr.flags) {
- current_phase = rxcmplt_h->cmn_hdr.flags;
+ if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
+ DHD_INFO(("D11 frame rxed \n"));
}
- if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)
- DHD_INFO(("%s: D11 frame rxed\n", __FUNCTION__));
+
/* data_offset from buf start */
if (data_offset) {
/* data offset given from dongle after split rx */
PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
} else {
/* DMA RX offset updated through shared area */
- if (dhd->prot->rx_dataoffset)
+ if (dhd->prot->rx_dataoffset) {
PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+ }
}
/* Actual length of the packet */
PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
ifidx = rxcmplt_h->cmn_hdr.if_id;
-#if defined(PCIE_D2H_SYNC_BZERO)
- memset(buf, 0, msglen);
-#endif /* PCIE_D2H_SYNC_BZERO */
-
+#if defined(DHD_LB_RXP)
+ dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
+#else /* ! DHD_LB_RXP */
#ifdef DHD_RX_CHAINING
/* Chain the packets */
dhd_rxchain_frame(dhd, pkt, ifidx);
/* offset from which data starts is populated in rxstatus0 */
dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
#endif /* ! DHD_RX_CHAINING */
+#endif /* ! DHD_LB_RXP */
+} /* dhd_prot_rxcmplt_process */
-}
-
-/* Stop protocol: sync w/dongle state. */
+/** Stop protocol: sync w/dongle state. */
void dhd_prot_stop(dhd_pub_t *dhd)
{
- /* nothing to do for pcie */
+ ASSERT(dhd);
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
}
/* Add any protocol-specific data header.
#define PKTBUF pktbuf
+/**
+ * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
+ * the corresponding flow ring.
+ */
int BCMFASTPATH
dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
{
unsigned long flags;
dhd_prot_t *prot = dhd->prot;
host_txbuf_post_t *txdesc = NULL;
- dmaaddr_t physaddr, meta_physaddr;
+ dmaaddr_t pa, meta_pa;
uint8 *pktdata;
uint32 pktlen;
uint32 pktid;
uint16 flowid = 0;
uint16 alloced = 0;
uint16 headroom;
+ msgbuf_ring_t *ring;
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
- msgbuf_ring_t *msg_ring;
- uint8 dhcp_pkt;
-
- if (!dhd->flow_ring_table)
+ if (dhd->flow_ring_table == NULL) {
return BCME_NORESOURCE;
+ }
- if (!dhd_bus_is_txmode_push(dhd->bus)) {
- flow_ring_table_t *flow_ring_table;
- flow_ring_node_t *flow_ring_node;
-
- flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF));
-
- flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
- flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ flowid = DHD_PKT_GET_FLOWID(PKTBUF);
- msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
- } else {
- msg_ring = prot->h2dring_txp_subn;
- }
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
DHD_GENERAL_LOCK(dhd, flags);
/* Create a unique 32-bit packet id */
- pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF);
+ pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_map_handle, PKTBUF);
+#if defined(DHD_PCIE_PKTID)
if (pktid == DHD_PKTID_INVALID) {
- DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ DHD_ERROR(("Pktid pool depleted.\n"));
/*
* If we return error here, the caller would queue the packet
* again. So we'll just free the skb allocated in DMA Zone.
*/
goto err_no_res_pktfree;
}
+#endif /* DHD_PCIE_PKTID */
/* Reserve space in the circular buffer */
- txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd,
- msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ txdesc = (host_txbuf_post_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (txdesc == NULL) {
+#if defined(DHD_PCIE_PKTID)
+ void *dmah;
void *secdma;
+ /* Free up the PKTID. physaddr and pktlen will be garbage. */
+ DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle, pktid,
+ pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
+#endif /* DHD_PCIE_PKTID */
DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
__FUNCTION__, __LINE__, prot->active_tx_count));
- /* Free up the PKTID */
- PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr,
- pktlen, secdma);
goto err_no_res_pktfree;
}
- /* test if dhcp pkt */
- dhcp_pkt = pkt_is_dhcp(dhd->osh, PKTBUF);
- txdesc->flag2 = (txdesc->flag2 & ~(BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK <<
- BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT)) | ((dhcp_pkt &
- BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK) << BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT);
-
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
pktlen = PKTLEN(dhd->osh, PKTBUF);
/* Map the data pointer to a DMA-able address */
if (SECURE_DMA_ENAB(dhd->osh)) {
-
int offset = 0;
BCM_REFERENCE(offset);
- if (prot->tx_metadata_offset)
+ if (prot->tx_metadata_offset) {
offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
+ }
- physaddr = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
- DMA_TX, PKTBUF, 0, msg_ring->secdma, offset);
- } else
- physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+ pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
+ DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
+ } else {
+ pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+ }
- if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) {
- DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__));
+ if ((PHYSADDRHI(pa) == 0) && (PHYSADDRLO(pa) == 0)) {
+ DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
ASSERT(0);
}
/* No need to lock. Save the rest of the packet's metadata */
- NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid,
- physaddr, pktlen, DMA_TX, msg_ring->secdma);
+ DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_map_handle, PKTBUF, pktid,
+ pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
#ifdef TXP_FLUSH_NITEMS
- if (msg_ring->pend_items_count == 0)
- msg_ring->start_addr = (void *)txdesc;
- msg_ring->pend_items_count++;
+ if (ring->pend_items_count == 0) {
+ ring->start_addr = (void *)txdesc;
+ }
+ ring->pend_items_count++;
#endif
/* Form the Tx descriptor message buffer */
/* Common message hdr */
txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
- txdesc->cmn_hdr.request_id = htol32(pktid);
txdesc->cmn_hdr.if_id = ifidx;
+
txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
prio = (uint8)PKTPRIO(PKTBUF);
txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
txdesc->seg_cnt = 1;
- txdesc->data_len = htol16((uint16)pktlen);
- txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
- txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+ txdesc->data_len = htol16((uint16) pktlen);
+ txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
+ txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
/* Move data pointer to keep ether header in local PKTBUF for later reference */
PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
/* Handle Tx metadata */
headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
- if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
- DHD_ERROR(("%s: No headroom for Metadata tx %d %d\n", __FUNCTION__,
+ if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) {
+ DHD_ERROR(("No headroom for Metadata tx %d %d\n",
prot->tx_metadata_offset, headroom));
+ }
if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
- DHD_TRACE(("%s: Metadata in tx %d\n", __FUNCTION__, prot->tx_metadata_offset));
+ DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
/* Adjust the data pointer to account for meta data in DMA_MAP */
PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
- if (SECURE_DMA_ENAB(dhd->osh)) {
- meta_physaddr = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
- prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
- 0, msg_ring->secdma);
- } else
- meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
- prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
- if (PHYSADDRISZERO(meta_physaddr)) {
- DHD_ERROR(("%s: Something really bad, unless 0 is a valid phyaddr\n", __FUNCTION__));
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+ prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
+ 0, ring->dma_buf.secdma);
+ } else {
+ meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+ prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+ }
+
+ if (PHYSADDRISZERO(meta_pa)) {
+ DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
ASSERT(0);
}
PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
txdesc->metadata_buf_len = prot->tx_metadata_offset;
- txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr));
- txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr));
- }
- else {
+ txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
+ txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
+ } else {
txdesc->metadata_buf_len = htol16(0);
txdesc->metadata_buf_addr.high_addr = 0;
txdesc->metadata_buf_addr.low_addr = 0;
}
+#if defined(DHD_PKTID_AUDIT_RING)
+ DHD_PKTID_AUDIT(dhd, prot->pktid_map_handle, pktid,
+ DHD_DUPLICATE_ALLOC);
+#endif /* DHD_PKTID_AUDIT_RING */
+
+ txdesc->cmn_hdr.request_id = htol32(pktid);
- DHD_TRACE(("%s: txpost: data_len %d, pktid 0x%04x\n", __FUNCTION__, txdesc->data_len,
+ DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
txdesc->cmn_hdr.request_id));
/* Update the write pointer in TCM & ring bell */
#ifdef TXP_FLUSH_NITEMS
/* Flush if we have either hit the txp_threshold or if this msg is */
/* occupying the last slot in the flow_ring - before wrap around. */
- if ((msg_ring->pend_items_count == prot->txp_threshold) ||
- ((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) {
+ if ((ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
}
#else
- prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
#endif
prot->active_tx_count++;
+ /*
+	 * Take a wake lock; do not sleep while at least one packet
+	 * remains to be completed.
+ */
+ if (prot->active_tx_count == 1)
+ DHD_OS_WAKE_LOCK(dhd);
+
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_OK;
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_NORESOURCE;
-
-}
+} /* dhd_prot_txdata */
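/*
 * Illustrative sketch (not part of the patch) of the pktid map that
 * dhd_prot_txdata() relies on above: a native packet pointer plus its DMA
 * metadata is parked under a 32-bit id, the id travels to the dongle in
 * cmn_hdr.request_id, and the tx-status handler later redeems the id to unmap
 * and free the packet. This fixed-size array version is purely illustrative.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_INVALID_PKTID 0u
#define DEMO_MAX_PKTIDS    8u

struct demo_pktid_slot {
	void	*pkt;	/* native packet pointer */
	uint64_t pa;	/* DMA address it was mapped to */
	uint32_t len;
	int	 used;
};

static struct demo_pktid_slot demo_map[DEMO_MAX_PKTIDS];

/* reserve an id for pkt; 0 means the pool is depleted */
static uint32_t demo_pktid_alloc(void *pkt, uint64_t pa, uint32_t len)
{
	for (uint32_t i = 0; i < DEMO_MAX_PKTIDS; i++) {
		if (!demo_map[i].used) {
			demo_map[i] = (struct demo_pktid_slot){ pkt, pa, len, 1 };
			return i + 1;	/* ids are 1-based, 0 stays invalid */
		}
	}
	return DEMO_INVALID_PKTID;
}

/* redeem an id on completion: returns the native pointer and releases the slot */
static void *demo_pktid_release(uint32_t pktid, uint64_t *pa, uint32_t *len)
{
	if (pktid == DEMO_INVALID_PKTID || pktid > DEMO_MAX_PKTIDS || !demo_map[pktid - 1].used)
		return NULL;
	struct demo_pktid_slot *s = &demo_map[pktid - 1];
	*pa = s->pa;
	*len = s->len;
	s->used = 0;
	return s->pkt;
}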
/* called with a lock */
+/** optimization to write "n" tx items at a time to ring */
void BCMFASTPATH
dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
{
unsigned long flags = 0;
flow_ring_table_t *flow_ring_table;
flow_ring_node_t *flow_ring_node;
- msgbuf_ring_t *msg_ring;
+ msgbuf_ring_t *ring;
- if (!dhd->flow_ring_table)
+ if (dhd->flow_ring_table == NULL) {
return;
+ }
if (!in_lock) {
DHD_GENERAL_LOCK(dhd, flags);
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
- msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+ ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
- /* Update the write pointer in TCM & ring bell */
- if (msg_ring->pend_items_count) {
- prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr,
- msg_ring->pend_items_count);
- msg_ring->pend_items_count = 0;
- msg_ring->start_addr = NULL;
+ if (ring->pend_items_count) {
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
+ ring->pend_items_count);
+ ring->pend_items_count = 0;
+ ring->start_addr = NULL;
}
if (!in_lock) {
}
#undef PKTBUF /* Only defined in the above routine */
+
int BCMFASTPATH
dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
{
return 0;
}
+/** post a set of receive buffers to the dongle */
static void BCMFASTPATH
-dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt)
+dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
{
dhd_prot_t *prot = dhd->prot;
+#if defined(DHD_LB_RXC)
+ int elem_ix;
+ uint32 *elem;
+ bcm_workq_t *workq;
+
+ workq = &prot->rx_compl_prod;
+
+ /* Produce the work item */
+ elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
+ if (elem_ix == BCM_RING_FULL) {
+ DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
+ ASSERT(0);
+ return;
+ }
+
+ elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
+ *elem = pktid;
+
+ smp_wmb();
+
+ /* Sync WR index to consumer if the SYNC threshold has been reached */
+ if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
+ bcm_workq_prod_sync(workq);
+ prot->rx_compl_prod_sync = 0;
+ }
+
+ DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
+ __FUNCTION__, pktid, prot->rx_compl_prod_sync));
+
+#endif /* DHD_LB_RXC */
+
if (prot->rxbufpost >= rxcnt) {
prot->rxbufpost -= rxcnt;
prot->rxbufpost = 0;
}
- if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
- dhd_msgbuf_rxbuf_post(dhd);
-
- return;
+#if !defined(DHD_LB_RXC)
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
+ }
+#endif /* !DHD_LB_RXC */
}
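/*
 * Illustrative sketch (not part of the patch) of the rx-buffer accounting in
 * dhd_prot_return_rxbuf() above: the host keeps a count of receive buffers
 * posted to the dongle, decrements it as completions arrive, and only refills
 * once the level has dropped a full threshold below the target, so buffers are
 * reposted in batches. Constants and the refill helper are hypothetical.
 */
#include <stdint.h>

#define DEMO_MAX_RXBUFPOST   256
#define DEMO_RXBUF_THRESHOLD 32

struct demo_rx_accounting {
	int posted;	/* buffers currently owned by the dongle */
};

/* hypothetical refill routine: posts up to 'want' fresh buffers, returns count posted */
extern int demo_post_rxbufs(struct demo_rx_accounting *a, int want);

static void demo_return_rxbuf(struct demo_rx_accounting *a, int rxcnt)
{
	if (a->posted >= rxcnt)
		a->posted -= rxcnt;
	else
		a->posted = 0;	/* never go negative on miscounted completions */

	/* refill lazily, in batches, rather than one buffer per completion */
	if (a->posted <= DEMO_MAX_RXBUFPOST - DEMO_RXBUF_THRESHOLD)
		a->posted += demo_post_rxbufs(a, DEMO_MAX_RXBUFPOST - a->posted);
}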
+/* called before an ioctl is sent to the dongle */
+static void
+dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
+ int slen = 0;
+ pcie_bus_tput_params_t *tput_params;
+ slen = strlen("pcie_bus_tput") + 1;
+ tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
+ bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
+ sizeof(tput_params->host_buf_addr));
+ tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
+ }
+}
-/* Use protocol to issue ioctl to dongle */
+
+/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
{
- dhd_prot_t *prot = dhd->prot;
int ret = -1;
uint8 action;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- ASSERT(len <= WLC_IOCTL_MAXLEN);
+ if (ioc->cmd == WLC_SET_PM) {
+ DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
+ }
- if (len > WLC_IOCTL_MAXLEN)
- goto done;
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
- if (prot->pending == TRUE) {
- DHD_ERROR(("%s: packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
- __FUNCTION__,
- ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
- (unsigned long)prot->lastcmd));
- if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
- DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
- }
+ if (len > WLC_IOCTL_MAXLEN) {
goto done;
}
- prot->pending = TRUE;
- prot->lastcmd = ioc->cmd;
action = ioc->set;
+ dhd_prot_wlioctl_intercept(dhd, ioc, buf);
if (action & WL_IOCTL_ACTION_SET) {
ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
} else {
- ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
- if (ret > 0)
+ ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0) {
ioc->used = ret;
+ }
}
+
/* Too many programs assume ioctl() returns 0 on success */
- if (ret >= 0)
+ if (ret >= 0) {
ret = 0;
- else {
+ } else {
DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
dhd->dongle_error = ret;
}
- /* Intercept the wme_dp ioctl here */
- if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
- int slen, val = 0;
-
- slen = strlen("wme_dp") + 1;
- if (len >= (int)(slen + sizeof(int)))
- bcopy(((char *)buf + slen), &val, sizeof(int));
- dhd->wme_dp = (uint8) ltoh32(val);
- }
+ if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
+ /* Intercept the wme_dp ioctl here */
+ if (!strcmp(buf, "wme_dp")) {
+ int slen, val = 0;
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int))) {
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ }
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
- prot->pending = FALSE;
+ }
done:
return ret;
-}
+} /* dhd_prot_ioctl */
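/*
 * Illustrative sketch (not part of the patch) of the iovar buffer layout that
 * the wme_dp / pcie_bus_tput intercepts above depend on: a WLC_SET_VAR payload
 * is the NUL-terminated iovar name immediately followed by its value, so the
 * host can peek at (or rewrite) the value in place around the ioctl. The
 * helper name is hypothetical.
 */
#include <string.h>
#include <stdint.h>

/* returns 0 and fills *val when buf holds "<name>\0<int32 value>", else -1 */
static int demo_iovar_get_int(const void *buf, int len, const char *name, int32_t *val)
{
	int slen = (int)strlen(name) + 1;	/* name plus its terminating NUL */

	if (buf == NULL || len < (int)(slen + sizeof(int32_t)))
		return -1;
	if (strcmp((const char *)buf, name) != 0)
		return -1;

	memcpy(val, (const char *)buf + slen, sizeof(*val));	/* value follows the name */
	return 0;	/* caller still owes a letoh32() on big-endian hosts */
}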
+
+/** test / loopback */
int
dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
uint16 msglen = len + hdrlen;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
-
- if (msglen > MSGBUF_MAX_MSG_SIZE)
- msglen = MSGBUF_MAX_MSG_SIZE;
-
- msglen = align(msglen, DMA_ALIGN_LEN);
+ msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
+ msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
DHD_GENERAL_LOCK(dhd, flags);
- ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ ioct_rqst = (ioct_reqst_hdr_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (ioct_rqst == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
}
}
-
/* Common msg buf hdr */
+ ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
+
ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
ioct_rqst->msg.if_id = 0;
bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
- /* Update the write pointer in TCM & ring bell */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return 0;
}
-void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma)
+/** test / loopback */
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
{
- if (dma == NULL)
+ if (dmaxfer == NULL) {
return;
-
- if (dma->srcmem.va) {
- DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
- dma->len, dma->srcmem.pa, dma->srcmem.dmah);
- dma->srcmem.va = NULL;
- }
- if (dma->destmem.va) {
- DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va,
- dma->len + 8, dma->destmem.pa, dma->destmem.dmah);
- dma->destmem.va = NULL;
}
+
+ dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
+ dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
}
+/** test / loopback */
int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
- uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma)
+ uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
{
uint i;
-
- if (!dma)
+ if (!dmaxfer) {
return BCME_ERROR;
+ }
- /* First free up exisiting buffers */
- dmaxfer_free_dmaaddr(dhd, dma);
+ /* First free up existing buffers */
+ dmaxfer_free_dmaaddr(dhd, dmaxfer);
- dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN,
- &i, &dma->srcmem.pa, &dma->srcmem.dmah);
- if (dma->srcmem.va == NULL) {
+ if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
return BCME_NOMEM;
}
- /* Populate source with a pattern */
- for (i = 0; i < len; i++) {
- ((uint8*)dma->srcmem.va)[i] = i % 256;
- }
- OSL_CACHE_FLUSH(dma->srcmem.va, len);
-
- dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN,
- &i, &dma->destmem.pa, &dma->destmem.dmah);
- if (dma->destmem.va == NULL) {
- DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
- dma->len, dma->srcmem.pa, dma->srcmem.dmah);
- dma->srcmem.va = NULL;
+ if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
+ dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
return BCME_NOMEM;
}
+ dmaxfer->len = len;
- /* Clear the destination buffer */
- bzero(dma->destmem.va, len +8);
- OSL_CACHE_FLUSH(dma->destmem.va, len+8);
+ /* Populate source with a pattern */
+ for (i = 0; i < dmaxfer->len; i++) {
+ ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
+ }
+ OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
- dma->len = len;
- dma->srcdelay = srcdelay;
- dma->destdelay = destdelay;
+ dmaxfer->srcdelay = srcdelay;
+ dmaxfer->destdelay = destdelay;
return BCME_OK;
-}
+} /* dmaxfer_prepare_dmaaddr */
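/*
 * Illustrative sketch (not part of the patch) of the loopback DMA test logic
 * around dmaxfer_prepare_dmaaddr() / dhd_msgbuf_dmaxfer_process() above: the
 * source buffer is stamped with a known byte pattern, the destination is
 * zeroed, and after the dongle copies source to destination the two are
 * compared. Helper names are illustrative.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void demo_dmaxfer_prepare(uint8_t *src, uint8_t *dst, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++)
		src[i] = (uint8_t)(i % 256);	/* deterministic, position-dependent pattern */
	memset(dst, 0, len);			/* any stale match would be caught */
}

static int demo_dmaxfer_check(const uint8_t *src, const uint8_t *dst, uint32_t len)
{
	if (memcmp(src, dst, len) != 0) {
		fprintf(stderr, "DMA loopback mismatch over %u bytes\n", len);
		return -1;
	}
	return 0;				/* transfer verified */
}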
static void
-dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen)
+dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
{
dhd_prot_t *prot = dhd->prot;
- OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len);
- if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) {
+ OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
if (memcmp(prot->dmaxfer.srcmem.va,
- prot->dmaxfer.destmem.va,
- prot->dmaxfer.len)) {
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
bcm_print_bytes("XFER SRC: ",
- prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
- bcm_print_bytes("XFER DEST: ",
- prot->dmaxfer.destmem.va, prot->dmaxfer.len);
- }
- else {
- DHD_INFO(("%s: DMA successful\n", __FUNCTION__));
+ prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+ bcm_print_bytes("XFER DST: ",
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ } else {
+ DHD_INFO(("DMA successful\n"));
}
}
dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
- dhd->prot->dmaxfer_in_progress = FALSE;
+ dhd->prot->dmaxfer.in_progress = FALSE;
}
+/** Test functionality.
+ * Transfers bytes from host to dongle and back to the host using DMA.
+ * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
+ * by a spinlock.
+ */
int
dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
{
int ret = BCME_OK;
dhd_prot_t *prot = dhd->prot;
pcie_dma_xfer_params_t *dmap;
- uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len;
- uint16 msglen = sizeof(pcie_dma_xfer_params_t);
+ uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- if (prot->dmaxfer_in_progress) {
- DHD_ERROR(("%s: DMA is in progress...\n", __FUNCTION__));
+ if (prot->dmaxfer.in_progress) {
+ DHD_ERROR(("DMA is in progress...\n"));
return ret;
}
- prot->dmaxfer_in_progress = TRUE;
+
+ prot->dmaxfer.in_progress = TRUE;
if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
- &prot->dmaxfer)) != BCME_OK) {
- prot->dmaxfer_in_progress = FALSE;
+ &prot->dmaxfer)) != BCME_OK) {
+ prot->dmaxfer.in_progress = FALSE;
return ret;
}
-
- if (msglen > MSGBUF_MAX_MSG_SIZE)
- msglen = MSGBUF_MAX_MSG_SIZE;
-
- msglen = align(msglen, DMA_ALIGN_LEN);
-
DHD_GENERAL_LOCK(dhd, flags);
- dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ dmap = (pcie_dma_xfer_params_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (dmap == NULL) {
dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
- prot->dmaxfer_in_progress = FALSE;
+ prot->dmaxfer.in_progress = FALSE;
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_NOMEM;
}
/* Common msg buf hdr */
dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
- dmap->cmn_hdr.request_id = 0x1234;
+ dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
+ dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
- dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa));
- dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa));
+ dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
+ dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
dmap->xfer_len = htol32(prot->dmaxfer.len);
dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
dmap->destdelay = htol32(prot->dmaxfer.destdelay);
- /* Update the write pointer in TCM & ring bell */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("%s: DMA Started...\n", __FUNCTION__));
+ DHD_ERROR(("DMA Started...\n"));
return BCME_OK;
-}
+} /* dhdmsgbuf_dmaxfer_req */
+/** Called in the process of submitting an ioctl to the dongle */
static int
-dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
- dhd_prot_t *prot = dhd->prot;
-
int ret = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
}
}
- ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
- if (ret < 0) {
- DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret));
- return ret;
- }
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
- DHD_INFO(("%s: ACTION %d ifdix %d cmd %d len %d \n", __FUNCTION__,
+	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
action, ifidx, cmd, len));
- /* wait for interrupt and get first fragment */
- ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+ /* wait for IOCTL completion message from dongle and get first fragment */
+ ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
done:
return ret;
}
+
+/**
+ * Waits for IOCTL completion message from the dongle, copies this into caller
+ * provided parameter 'buf'.
+ */
static int
-dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf)
+dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
{
dhd_prot_t *prot = dhd->prot;
- ioctl_comp_resp_msg_t ioct_resp;
- void* pkt;
- int retlen;
- int msgbuf_len = 0;
- int post_cnt = 0;
+ int timeleft;
unsigned long flags;
- bool zero_posted = FALSE;
+ int ret = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- if (dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
- return -1;
+
+ if (dhd->dongle_reset) {
+ ret = -EIO;
+ goto out;
}
- if (prot->cur_ioctlresp_bufs_posted)
+ if (prot->cur_ioctlresp_bufs_posted) {
prot->cur_ioctlresp_bufs_posted--;
- else
- zero_posted = TRUE;
-
- post_cnt = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
- if (zero_posted && (post_cnt <= 0)) {
- return -1;
}
- memset(&ioct_resp, 0, sizeof(ioctl_comp_resp_msg_t));
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
- retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
- if (retlen <= 0) {
- DHD_ERROR(("%s: IOCTL request failed with error code %d\n", __FUNCTION__, retlen));
- return retlen;
- }
- DHD_INFO(("%s: ioctl resp retlen %d status %d, resp_len %d, pktid %d\n", __FUNCTION__,
- retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
- ioct_resp.cmn_hdr.request_id));
- if (ioct_resp.resp_len != 0) {
- DHD_GENERAL_LOCK(dhd, flags);
- pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id);
- DHD_GENERAL_UNLOCK(dhd, flags);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
+ if (timeleft == 0) {
+ dhd->rxcnt_timeout++;
+ dhd->rx_ctlerrs++;
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n",
+ __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
+ prot->ioctl_trans_id, prot->ioctl_state,
+ dhd->busstate, prot->ioctl_received));
- DHD_INFO(("%s: ioctl ret buf %p retlen %d status %x\n", __FUNCTION__, pkt, retlen,
- ioct_resp.compl_hdr.status));
- /* get ret buf */
- if ((buf) && (pkt)) {
- /* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */
- /* ioct_resp.resp_len could have been changed to make it > 8 bytes */
- bcopy(PKTDATA(dhd->osh, pkt), buf, len);
+ dhd_prot_debug_info_print(dhd);
+
+#ifdef DHD_FW_COREDUMP
+ /* As soon as FW TRAP occurs, FW dump will be collected from dhdpcie_checkdied */
+ if (dhd->memdump_enabled && !dhd->dongle_trap_occured) {
+ /* collect core dump */
+ dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
+ dhd_bus_mem_dump(dhd);
}
- if (pkt) {
- PKTFREE(dhd->osh, pkt, FALSE);
+#endif /* DHD_FW_COREDUMP */
+ if (dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+			DHD_ERROR(("%s: timeout > MAX_CNTL_RX_TIMEOUT\n", __FUNCTION__));
}
+ ret = -ETIMEDOUT;
+ goto out;
} else {
- DHD_GENERAL_LOCK(dhd, flags);
- dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id);
- DHD_GENERAL_UNLOCK(dhd, flags);
+ if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
+ DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
+ __FUNCTION__, prot->ioctl_received));
+ ret = -ECONNABORTED;
+ goto out;
+ }
+ dhd->rxcnt_timeout = 0;
+ dhd->rx_ctlpkts++;
+ DHD_CTL(("%s: ioctl resp resumed, got %d\n",
+ __FUNCTION__, prot->ioctl_resplen));
}
- return (int)(ioct_resp.compl_hdr.status);
-}
+ if (dhd->dongle_trap_occured) {
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ DHD_ERROR(("%s: TRAP occurred!!\n", __FUNCTION__));
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
+ if (dhd->prot->ioctl_resplen > len) {
+ dhd->prot->ioctl_resplen = (uint16)len;
+ }
+ if (buf) {
+ bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
+ }
+
+ ret = (int)(dhd->prot->ioctl_status);
+out:
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->prot->ioctl_state = 0;
+ dhd->prot->ioctl_resplen = 0;
+ dhd->prot->ioctl_received = IOCTL_WAIT;
+ dhd->prot->curr_ioctl_cmd = 0;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return ret;
+} /* dhd_msgbuf_wait_ioctl_cmplt */
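/*
 * Illustrative sketch (not part of the patch) of the outcome mapping in
 * dhd_msgbuf_wait_ioctl_cmplt() above: a timed wait on the completion event,
 * with distinct error codes for a timeout, an aborted wait, and a firmware
 * trap, and the response clamped to the caller's buffer length on success.
 * The context struct and wait helper are hypothetical stand-ins.
 */
#include <errno.h>
#include <string.h>
#include <stdint.h>

enum demo_ioctl_event { DEMO_IOCTL_WAIT = 0, DEMO_IOCTL_SUCCESS, DEMO_IOCTL_ABORTED };

struct demo_ioctl_ctx {
	enum demo_ioctl_event received;
	int      trap_occurred;
	uint8_t  retbuf[2048];	/* response staged by the completion handler */
	uint32_t resplen;
	int      status;
};

/* hypothetical: blocks up to a timeout, returns 0 on timeout, >0 otherwise */
extern int demo_wait_for_completion(struct demo_ioctl_ctx *ctx);

static int demo_wait_ioctl(struct demo_ioctl_ctx *ctx, void *buf, uint32_t len)
{
	if (demo_wait_for_completion(ctx) == 0)
		return -ETIMEDOUT;		/* dongle never answered */
	if (ctx->received != DEMO_IOCTL_SUCCESS)
		return -ECONNABORTED;		/* wait was torn down */
	if (ctx->trap_occurred)
		return -EREMOTEIO;		/* firmware trapped meanwhile */

	if (ctx->resplen > len)
		ctx->resplen = len;		/* never overflow the caller's buffer */
	if (buf)
		memcpy(buf, ctx->retbuf, ctx->resplen);
	return ctx->status;
}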
+
static int
dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
- dhd_prot_t *prot = dhd->prot;
-
int ret = 0;
DHD_TRACE(("%s: Enter \n", __FUNCTION__));
- DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
}
/* Fill up msgbuf for ioctl req */
- ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
- if (ret < 0) {
- DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n", __FUNCTION__, ret));
- return ret;
- }
+ ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
- DHD_INFO(("%s: ACTIOn %d ifdix %d cmd %d len %d \n", __FUNCTION__,
+	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
action, ifidx, cmd, len));
- ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+ ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
return ret;
}
-/* Handles a protocol control response asynchronously */
+
+/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
int dhd_prot_ctl_complete(dhd_pub_t *dhd)
{
return 0;
}
-/* Check for and handle local prot-specific iovar commands */
+/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
- void *params, int plen, void *arg, int len, bool set)
+ void *params, int plen, void *arg, int len, bool set)
{
return BCME_UNSUPPORTED;
}
-/* Add prot dump output to a buffer */
-void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+/** Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
+
#if defined(PCIE_D2H_SYNC)
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
- bcm_bprintf(strbuf, "\nd2h_sync: SEQNUM:");
+ bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
- bcm_bprintf(strbuf, "\nd2h_sync: XORCSUM:");
+ bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
else
- bcm_bprintf(strbuf, "\nd2h_sync: NONE:");
- bcm_bprintf(strbuf, " d2h_sync_wait max<%lu> tot<%lu>\n",
- dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
+ bcm_bprintf(b, "\nd2h_sync: NONE:");
+ bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
+ dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
#endif /* PCIE_D2H_SYNC */
+
+ bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
+ DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support),
+ DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support),
+ dhd->prot->rw_index_sz);
}
/* Update local copy of dongle statistics */
void dhd_prot_dstats(dhd_pub_t *dhd)
{
- return;
+ return;
}
+/** Called by upper DHD layer */
int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
uint reorder_info_len, void **pkt, uint32 *free_buf_count)
{
return 0;
}
-/* post a dummy message to interrupt dongle */
-/* used to process cons commands */
+
+/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
int
dhd_post_dummy_msg(dhd_pub_t *dhd)
{
uint16 alloced = 0;
dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
DHD_GENERAL_LOCK(dhd, flags);
- hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ hevent = (hostevent_hdr_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (hevent == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
}
/* CMN msg header */
+ hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
hevent->msg.if_id = 0;
/* Since, we are filling the data directly into the bufptr obtained
* from the msgbuf, we can directly call the write_complete
*/
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return 0;
}
+/**
+ * If exactly_nitems is true, this function will allocate space for nitems or fail
+ * If exactly_nitems is false, this function will allocate space for nitems or less
+ */
static void * BCMFASTPATH
-dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint16 nitems, uint16 * alloced, bool exactly_nitems)
{
void * ret_buf;
- uint16 r_index = 0;
/* Alloc space for nitems in the ring */
- ret_buf = prot_get_ring_space(ring, nitems, alloced);
+ ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
/* if alloc failed , invalidate cached read ptr */
if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
- r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
- ring->ringstate->r_offset = r_index;
- } else
- dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)),
- RING_READ_PTR, ring->idx);
+ ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+ }
/* Try allocating once more */
- ret_buf = prot_get_ring_space(ring, nitems, alloced);
+ ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
- DHD_INFO(("%s: RING space not available on ring %s for %d items \n", __FUNCTION__,
- ring->name, nitems));
- DHD_INFO(("%s: write %d read %d \n\n", __FUNCTION__, RING_WRITE_PTR(ring),
- RING_READ_PTR(ring)));
+ DHD_INFO(("%s: Ring space not available \n", ring->name));
return NULL;
}
}
return ret_buf;
}
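/*
 * Illustrative sketch (not part of the patch) of the retry performed by
 * dhd_prot_alloc_ring_space() above: the host works from a cached copy of the
 * dongle's read index, so when the ring looks full it re-reads the live index
 * (DMA'd index block or TCM) and tries once more before giving up. The ring
 * struct and the shared-index helper below are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct demo_h2d_ring {
	uint16_t wr, rd;	/* cached indices, in items */
	uint16_t max_items;
	uint16_t item_len;
	uint8_t *base;		/* ring DMA buffer, max_items * item_len bytes */
};

/* hypothetical: fetches the dongle's current read index for this ring */
extern uint16_t demo_read_shared_rd_index(struct demo_h2d_ring *ring);

static void *demo_try_alloc(struct demo_h2d_ring *ring)
{
	uint16_t next = (uint16_t)((ring->wr + 1) % ring->max_items);

	if (next == ring->rd)			/* looks full with the cached rd */
		return NULL;
	void *slot = ring->base + (size_t)ring->wr * ring->item_len;
	ring->wr = next;
	return slot;
}

static void *demo_alloc_ring_space(struct demo_h2d_ring *ring)
{
	void *slot = demo_try_alloc(ring);

	if (slot == NULL) {
		ring->rd = demo_read_shared_rd_index(ring);	/* refresh the stale cache */
		slot = demo_try_alloc(ring);			/* one more attempt */
	}
	return slot;
}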
-#define DHD_IOCTL_REQ_PKTID 0xFFFE
-
-/* Non inline ioct request */
-/* Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer */
-/* Form a separate request buffer where a 4 byte cmn header is added in the front */
-/* buf contents from parent function is copied to remaining section of this buffer */
+/**
+ * Non inline ioct request.
+ * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
+ * Form a separate request buffer where a 4 byte cmn header is added in the front
+ * buf contents from parent function is copied to remaining section of this buffer
+ */
static int
-dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
{
dhd_prot_t *prot = dhd->prot;
ioctl_req_msg_t *ioct_rqst;
uint16 rqstlen, resplen;
unsigned long flags;
uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
rqstlen = len;
resplen = len;
rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
DHD_GENERAL_LOCK(dhd, flags);
+
+ if (prot->ioctl_state) {
+ DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return BCME_BUSY;
+ } else {
+ prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
+ }
+
/* Request for cbuf space */
- ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ ioct_rqst = (ioctl_req_msg_t*)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (ioct_rqst == NULL) {
- DHD_ERROR(("%s: couldn't allocate space on msgring to send ioctl request\n", __FUNCTION__));
+ DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+ prot->ioctl_state = 0;
+ prot->curr_ioctl_cmd = 0;
+ prot->ioctl_received = IOCTL_WAIT;
DHD_GENERAL_UNLOCK(dhd, flags);
return -1;
}
ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
ioct_rqst->cmn_hdr.flags = 0;
- ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID;
+ ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
+ ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
ioct_rqst->cmd = htol32(cmd);
+ prot->curr_ioctl_cmd = cmd;
ioct_rqst->output_buf_len = htol16(resplen);
- ioct_rqst->trans_id = prot->ioctl_trans_id ++;
+ prot->ioctl_trans_id++;
+ ioct_rqst->trans_id = prot->ioctl_trans_id;
/* populate ioctl buffer info */
ioct_rqst->input_buf_len = htol16(rqstlen);
/* copy ioct payload */
ioct_buf = (void *) prot->ioctbuf.va;
- if (buf)
+ if (buf) {
memcpy(ioct_buf, buf, len);
+ }
OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
- if ((ulong)ioct_buf % DMA_ALIGN_LEN)
- DHD_ERROR(("%s: host ioct address unaligned !!!!! \n", __FUNCTION__));
+ if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) {
+ DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+ }
- DHD_CTL(("%s: submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
- __FUNCTION__,
+ DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
ioct_rqst->trans_id));
- /* upd wrt ptr and raise interrupt */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return 0;
-}
+} /* dhd_fillup_ioct_reqst */
-/* Packet to PacketID mapper */
-typedef struct {
- ulong native;
- dmaaddr_t pa;
- uint32 pa_len;
- uchar dma;
-} pktid_t;
-typedef struct {
- void *osh;
- void *mwbmap_hdl;
- pktid_t *pktid_list;
- uint32 count;
-} pktid_map_t;
+/**
+ * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
+ * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
+ * information is posted to the dongle.
+ *
+ * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
+ * each flowring in pool of flowrings.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
+ uint16 max_items, uint16 item_len, uint16 ringid)
+{
+ int dma_buf_alloced = BCME_NOMEM;
+ uint32 dma_buf_len = max_items * item_len;
+ dhd_prot_t *prot = dhd->prot;
+ ASSERT(ring);
+ ASSERT(name);
+ ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
-void *pktid_map_init(void *osh, uint32 count)
-{
- pktid_map_t *handle;
+ /* Init name */
+ strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
+ ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
- handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t));
- if (handle == NULL) {
- printf("%s:%d: MALLOC failed for size %d\n",
- __FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t));
- return NULL;
- }
- handle->osh = osh;
- handle->count = count;
- handle->mwbmap_hdl = bcm_mwbmap_init(osh, count);
- if (handle->mwbmap_hdl == NULL) {
- printf("%s:%d: bcm_mwbmap_init failed for count %d\n",
- __FUNCTION__, __LINE__, count);
- MFREE(osh, handle, sizeof(pktid_map_t));
- return NULL;
- }
+ ring->idx = ringid;
- handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1));
- if (handle->pktid_list == NULL) {
- printf("%s:%d: MALLOC failed for count %d / total = %d\n",
- __FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count);
- bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
- MFREE(osh, handle, sizeof(pktid_map_t));
- return NULL;
- }
+ ring->max_items = max_items;
+ ring->item_len = item_len;
- return handle;
-}
+ /* A contiguous space may be reserved for all flowrings */
+ if (DHD_IS_FLOWRING(ringid) && (prot->flowrings_dma_buf.va)) {
+ /* Carve out from the contiguous DMA-able flowring buffer */
+ uint16 flowid;
+ uint32 base_offset;
-void
-pktid_map_uninit(void *pktid_map_handle)
-{
- pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
- uint32 ix;
+ dhd_dma_buf_t *dma_buf = &ring->dma_buf;
+ dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
- if (handle != NULL) {
- void *osh = handle->osh;
- for (ix = 0; ix < MAX_PKTID_ITEMS; ix++)
- {
- if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) {
- /* Mark the slot as free */
- bcm_mwbmap_free(handle->mwbmap_hdl, ix);
- /*
- Here we can do dma unmapping for 32 bit also.
- Since this in removal path, it will not affect performance
- */
- DMA_UNMAP(osh, handle->pktid_list[ix+1].pa,
- (uint) handle->pktid_list[ix+1].pa_len,
- handle->pktid_list[ix+1].dma, 0, 0);
- PKTFREE(osh, (unsigned long*)handle->pktid_list[ix+1].native, TRUE);
- }
+ flowid = DHD_RINGID_TO_FLOWID(ringid);
+ base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
+
+ ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
+
+ dma_buf->len = dma_buf_len;
+ dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
+ PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
+ PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
+
+ /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
+ ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
+
+ dma_buf->dmah = rsv_buf->dmah;
+ dma_buf->secdma = rsv_buf->secdma;
+
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+ } else {
+ /* Allocate a dhd_dma_buf */
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+ if (dma_buf_alloced != BCME_OK) {
+ return BCME_NOMEM;
}
- bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
- MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1));
- MFREE(osh, handle, sizeof(pktid_map_t));
}
- return;
-}
-uint32 BCMFASTPATH
-pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma)
-{
- uint32 id;
- pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+ /* CAUTION: Save ring::base_addr in little endian format! */
+ dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
- if (handle == NULL) {
- printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n",
- __FUNCTION__, __LINE__);
- return 0;
+#ifdef BCM_SECURE_DMA
+ if (SECURE_DMA_ENAB(prot->osh)) {
+ ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
+ if (ring->dma_buf.secdma == NULL) {
+ goto free_dma_buf;
+ }
}
- id = bcm_mwbmap_alloc(handle->mwbmap_hdl);
- if (id == BCM_MWBMAP_INVALID_IDX) {
- printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n",
- __FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl));
- return 0;
+#endif /* BCM_SECURE_DMA */
+
+ DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+ "ring start %p buf phys addr %x:%x \n",
+ ring->name, ring->max_items, ring->item_len,
+ dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr)));
+
+ return BCME_OK;
+
+#ifdef BCM_SECURE_DMA
+free_dma_buf:
+ if (dma_buf_alloced == BCME_OK) {
+ dhd_dma_buf_free(dhd, &ring->dma_buf);
}
+#endif /* BCM_SECURE_DMA */
+
+ return BCME_NOMEM;
+
+} /* dhd_prot_ring_attach */
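/*
 * A minimal illustrative sketch (not driver code) of the carve-out arithmetic
 * used above when a flowring's DMA buffer is taken from the contiguous
 * flowrings_dma_buf region. The item count/size values are assumptions chosen
 * only for illustration, and BCMPCIE_H2D_COMMON_MSGRINGS is assumed to be 2.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t h2d_common_msgrings = 2;  /* BCMPCIE_H2D_COMMON_MSGRINGS (assumed) */
	const uint32_t max_items = 512;          /* example H2DRING_TXPOST_MAX_ITEM */
	const uint32_t item_len = 48;            /* example H2DRING_TXPOST_ITEMSIZE */
	const uint32_t dma_buf_len = max_items * item_len;
	uint32_t flowid;

	/* Each flowring is carved at (flowid - common rings) * dma_buf_len */
	for (flowid = h2d_common_msgrings; flowid < h2d_common_msgrings + 3; flowid++) {
		uint32_t base_offset = (flowid - h2d_common_msgrings) * dma_buf_len;
		printf("flowid %u -> carved at byte offset %u of flowrings_dma_buf\n",
		       (unsigned)flowid, (unsigned)base_offset);
	}
	return 0;
}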
+
+
+/**
+ * dhd_prot_ring_init - Post the common ring information to dongle.
+ *
+ * Used only for common rings.
+ *
+ * The flowrings information is passed via the create flowring control message
+ * (tx_flowring_create_request_t) sent over the H2D control submission common
+ * ring.
+ */
+static void
+dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->curr_rd = 0;
+
+ /* CAUTION: ring::base_addr already in Little Endian */
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
+ sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
+ sizeof(uint16), RING_MAX_ITEMS, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
+ sizeof(uint16), RING_ITEM_LEN, ring->idx);
+
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+ sizeof(uint16), RING_RD_UPD, ring->idx);
+
+ /* ring inited */
+ ring->inited = TRUE;
+
+} /* dhd_prot_ring_init */
+
+
+/**
+ * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and flush it from the cache.
+ * Reset WR and RD indices to 0.
+ */
+static void
+dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ DHD_TRACE(("%s\n", __FUNCTION__));
- /* id=0 is invalid as we use this for error checking in the dongle */
- id += 1;
- handle->pktid_list[id].native = (ulong) pkt;
- handle->pktid_list[id].pa = physaddr;
- handle->pktid_list[id].pa_len = (uint32) physlen;
- handle->pktid_list[id].dma = (uchar)dma;
+ dhd_dma_buf_reset(dhd, &ring->dma_buf);
- return id;
+ ring->rd = ring->wr = 0;
+ ring->curr_rd = 0;
}
-void * BCMFASTPATH
-pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen)
+
+/**
+ * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
+ * hanging off the msgbuf_ring.
+ */
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
- void *native = NULL;
- pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
- if (handle == NULL) {
- printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n",
- __FUNCTION__, __LINE__);
- return NULL;
+ dhd_prot_t *prot = dhd->prot;
+ ASSERT(ring);
+
+ ring->inited = FALSE;
+ /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
+
+#ifdef BCM_SECURE_DMA
+ if (SECURE_DMA_ENAB(prot->osh)) {
+ SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
+ if (ring->dma_buf.secdma) {
+ MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
+ }
+ ring->dma_buf.secdma = NULL;
}
+#endif /* BCM_SECURE_DMA */
- /* Debug check */
- if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) {
- printf("%s:%d: Error !!!. slot (%d/0x%04x) free but the app is using it.\n",
- __FUNCTION__, __LINE__, (id-1), (id-1));
- return NULL;
+ /* If the DMA-able buffer was carved out of a pre-reserved contiguous
+ * memory, then simply stop using it.
+ */
+ if (DHD_IS_FLOWRING(ring->idx) && (prot->flowrings_dma_buf.va)) {
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+ memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
+ } else {
+ dhd_dma_buf_free(dhd, &ring->dma_buf);
}
- native = (void *) handle->pktid_list[id].native;
- *physaddr = handle->pktid_list[id].pa;
- *physlen = (uint32) handle->pktid_list[id].pa_len;
+} /* dhd_prot_ring_detach */
- /* Mark the slot as free */
- bcm_mwbmap_free(handle->mwbmap_hdl, (id-1));
- return native;
-}
-static msgbuf_ring_t*
-prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid)
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
+ * state as-if upon an attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Fetch number of H2D flowrings given the total number of h2d rings */
+#define DHD_FLOWRINGS_POOL_TOTAL(h2d_rings_total) \
+ ((h2d_rings_total) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < (prot)->h2d_rings_total; \
+ (flowid)++, (ring)++)
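/*
 * A minimal illustrative sketch of the pool indexing performed by the macros
 * above, written as a standalone program. The ring total is an example value;
 * DHD_FLOWRING_START_FLOWID is assumed to equal BCMPCIE_H2D_COMMON_MSGRINGS (2).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t h2d_common_msgrings = 2;  /* BCMPCIE_H2D_COMMON_MSGRINGS (assumed) */
	const uint16_t h2d_rings_total = 18;     /* example total advertized by the dongle */
	uint16_t flowid;

	/* DHD_FLOWRINGS_POOL_TOTAL: the pool excludes the H2D common rings */
	printf("pool holds %u flowrings\n", (unsigned)(h2d_rings_total - h2d_common_msgrings));

	/* FOREACH_RING_IN_FLOWRINGS_POOL walks flowids [start, h2d_rings_total) */
	for (flowid = h2d_common_msgrings; flowid < h2d_rings_total; flowid++) {
		/* DHD_FLOWRINGS_POOL_OFFSET maps a flowid onto a pool array index */
		printf("flowid %u -> h2d_flowrings_pool[%u]\n",
		       (unsigned)flowid, (unsigned)(flowid - h2d_common_msgrings));
	}
	return 0;
}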
+
+/**
+ * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
+ *
+ * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
+ * Dongle includes common rings when it advertizes the number of H2D rings.
+ * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
+ * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
+ *
+ * dhd_prot_ring_attach is invoked to perform the actual initialization and
+ * attaching the DMA-able buffer.
+ *
+ * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
+ * initialized msgbuf_ring_t object.
+ *
+ * returns BCME_OK=0 on success
+ * returns non-zero negative error value on failure.
+ */
+static int
+dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
{
- uint alloced = 0;
+ uint16 flowid;
msgbuf_ring_t *ring;
- dmaaddr_t physaddr;
- uint16 size;
-
- ASSERT(name);
- BCM_REFERENCE(physaddr);
+ uint16 h2d_flowrings_total; /* exclude H2D common rings */
+ dhd_prot_t *prot = dhd->prot;
+ char ring_name[RING_NAME_MAX_LENGTH];
- /* allocate ring info */
- ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t));
- if (ring == NULL) {
- ASSERT(0);
- return NULL;
+ if (prot->h2d_flowrings_pool != NULL) {
+		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
}
- bzero(ring, sizeof(*ring));
- /* Init name */
- strncpy(ring->name, name, sizeof(ring->name) - 1);
+ ASSERT(prot->h2d_rings_total == 0);
- /* Ringid in the order given in bcmpcie.h */
- ring->idx = ringid;
+ /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
+ prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
- /* init ringmem */
- ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t));
- if (ring->ringmem == NULL)
- goto fail;
- bzero(ring->ringmem, sizeof(*ring->ringmem));
-
- ring->ringmem->max_item = max_item;
- ring->ringmem->len_items = len_item;
- size = max_item * len_item;
-
- /* Ring Memmory allocation */
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
- if (RING_IS_FLOWRING(ring)) {
- ring->ring_base.va = DMA_ALLOC_CONSISTENT_STATIC(prot->osh,
- size, DMA_ALIGN_LEN, &alloced, &ring->ring_base.pa,
- &ring->ring_base.dmah, ringid);
- } else
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
- ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
- &alloced, &ring->ring_base.pa, &ring->ring_base.dmah);
+ if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
+ DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
+ __FUNCTION__, prot->h2d_rings_total));
+ return BCME_ERROR;
+ }
- if (ring->ring_base.va == NULL)
- goto fail;
- ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa));
- ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa));
+ /* Subtract number of H2D common rings, to determine number of flowrings */
+ h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
- ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
- bzero(ring->ring_base.va, size);
+ DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
- OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+ /* Allocate pool of msgbuf_ring_t objects for all flowrings */
+ prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
+ (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
- /* Ring state init */
- ring->ringstate = MALLOC(prot->osh, sizeof(ring_state_t));
- if (ring->ringstate == NULL)
+ if (prot->h2d_flowrings_pool == NULL) {
+ DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
+ __FUNCTION__, h2d_flowrings_total));
goto fail;
- bzero(ring->ringstate, sizeof(*ring->ringstate));
+ }
-#ifdef BCM_SECURE_DMA
- if (SECURE_DMA_ENAB(prot->osh)) {
- ring->secdma = MALLOC(prot->osh, sizeof(sec_cma_info_t));
- bzero(ring->secdma, sizeof(sec_cma_info_t));
- if (ring->secdma == NULL) {
- DHD_ERROR(("%s: MALLOC failure for secdma\n", __FUNCTION__));
- goto fail;
+ /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
+ ring_name[RING_NAME_MAX_LENGTH - 1] = '\0';
+ if (dhd_prot_ring_attach(dhd, ring, ring_name,
+ H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+ DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
+ goto attach_fail;
}
}
-#endif
- DHD_INFO(("%s: RING_ATTACH : %s Max item %d len item %d total size %d "
- "ring start %p buf phys addr %x:%x \n", __FUNCTION__,
- ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
- size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
- ring->ringmem->base_addr.low_addr));
- return ring;
+
+ return BCME_OK;
+
+attach_fail:
+ dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
+
fail:
- if (ring->ring_base.va && ring->ringmem) {
- PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
- PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
- size = ring->ringmem->max_item * ring->ringmem->len_items;
- DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
- ring->ring_base.va = NULL;
- }
- if (ring->ringmem)
- MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
- MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
- ASSERT(0);
- return NULL;
-}
+ prot->h2d_rings_total = 0;
+ return BCME_NOMEM;
+
+} /* dhd_prot_flowrings_pool_attach */
+
+
+/**
+ * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
+ * Invokes dhd_prot_ring_reset to perform the actual reset.
+ *
+ * The DMA-able buffer is not freed during reset and neither is the flowring
+ * pool freed.
+ *
+ * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
+ * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
+ * from a previous flowring pool instantiation will be reused.
+ *
+ * This will avoid a fragmented DMA-able memory condition, if multiple
+ * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
+ * cycle.
+ */
static void
-dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
{
- /* update buffer address of ring */
- dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr,
- sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx);
+ uint16 flowid;
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
- /* Update max items possible in ring */
- dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item,
- sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx);
+ if (prot->h2d_flowrings_pool == NULL) {
+ ASSERT(prot->h2d_rings_total == 0);
+ return;
+ }
- /* Update length of each item in the ring */
- dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items,
- sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx);
+ /* Reset each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ dhd_prot_ring_reset(dhd, ring);
+ ring->inited = FALSE;
+ }
- /* ring inited */
- ring->inited = TRUE;
+ /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
}
+
+
+/**
+ * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
+ * DMA-able buffers for flowrings.
+ * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
+ * de-initialization of each msgbuf_ring_t.
+ */
static void
-dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
{
- dmaaddr_t phyaddr;
- uint16 size;
+ int flowid;
+ msgbuf_ring_t *ring;
+ int h2d_flowrings_total; /* exclude H2D common rings */
dhd_prot_t *prot = dhd->prot;
- BCM_REFERENCE(phyaddr);
-
- if (ring == NULL)
+ if (prot->h2d_flowrings_pool == NULL) {
+ ASSERT(prot->h2d_rings_total == 0);
return;
+ }
-
- if (ring->ringmem == NULL) {
- DHD_ERROR(("%s: ring->ringmem is NULL\n", __FUNCTION__));
- return;
+ /* Detach the DMA-able buffer for each flowring in the flowring pool */
+ FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid) {
+ dhd_prot_ring_detach(dhd, ring);
}
- ring->inited = FALSE;
+ h2d_flowrings_total = DHD_FLOWRINGS_POOL_TOTAL(prot->h2d_rings_total);
- PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr);
- PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr);
- size = ring->ringmem->max_item * ring->ringmem->len_items;
- /* Free up ring */
- if (ring->ring_base.va) {
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
- if (RING_IS_FLOWRING(ring)) {
- DMA_FREE_CONSISTENT_STATIC(prot->osh, ring->ring_base.va, size,
- ring->ring_base.pa, ring->ring_base.dmah, ring->idx);
- } else
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
- DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa,
- ring->ring_base.dmah);
- ring->ring_base.va = NULL;
- }
+ MFREE(prot->osh, prot->h2d_flowrings_pool,
+ (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
- /* Free up ring mem space */
- if (ring->ringmem) {
- MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
- ring->ringmem = NULL;
- }
+ prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
+ prot->h2d_rings_total = 0;
- /* Free up ring state info */
- if (ring->ringstate) {
- MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t));
- ring->ringstate = NULL;
- }
-#ifdef BCM_SECURE_DMA
- if (SECURE_DMA_ENAB(prot->osh)) {
- DHD_ERROR(("%s:free secdma\n", __FUNCTION__));
- SECURE_DMA_UNMAP_ALL(prot->osh, ring->secdma);
- MFREE(prot->osh, ring->secdma, sizeof(sec_cma_info_t));
- }
-#endif
+} /* dhd_prot_flowrings_pool_detach */
+
+
+/**
+ * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
+ * msgbuf_ring from the flowring pool, and assign it.
+ *
+ * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
+ * ring information to the dongle, a flowring's information is passed via a
+ * flowring create control message.
+ *
+ * Only the ring state (WR, RD) index are initialized.
+ */
+static msgbuf_ring_t *
+dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
+{
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+ ASSERT(flowid < prot->h2d_rings_total);
+ ASSERT(prot->h2d_flowrings_pool != NULL);
+
+ ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+ /* ASSERT flow_ring->inited == FALSE */
+
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->curr_rd = 0;
+ ring->inited = TRUE;
+
+ return ring;
+}
+
+
+/**
+ * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
+ * msgbuf_ring back to the flow_ring pool.
+ */
+void
+dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
+{
+ msgbuf_ring_t *ring;
+ dhd_prot_t *prot = dhd->prot;
+
+ ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
+ ASSERT(flowid < prot->h2d_rings_total);
+ ASSERT(prot->h2d_flowrings_pool != NULL);
+
+ ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
+
+ ASSERT(ring == (msgbuf_ring_t*)flow_ring);
+ /* ASSERT flow_ring->inited == TRUE */
- /* free up ring info */
- MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+ (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
+
+ ring->wr = 0;
+ ring->rd = 0;
+ ring->inited = FALSE;
+
+ ring->curr_rd = 0;
}
-/* Assumes only one index is updated ata time */
+
+
+/* Assumes only one index is updated at a time */
+/* If exactly_nitems is true, this function will allocate space for nitems or fail, */
+/* except when a wrap-around is encountered (within the last nitems of the ring buffer), */
+/* to prevent a hang. If exactly_nitems is false, it will allocate space for nitems or fewer. */
static void *BCMFASTPATH
-prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
+ bool exactly_nitems)
{
void *ret_ptr = NULL;
uint16 ring_avail_cnt;
- ASSERT(nitems <= RING_MAX_ITEM(ring));
+ ASSERT(nitems <= ring->max_items);
- ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring),
- RING_MAX_ITEM(ring));
+ ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
- if (ring_avail_cnt == 0) {
+ if ((ring_avail_cnt == 0) ||
+ (exactly_nitems && (ring_avail_cnt < nitems) &&
+ ((ring->max_items - ring->wr) >= nitems))) {
+ DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
+ ring->name, nitems, ring->wr, ring->rd));
return NULL;
}
*alloced = MIN(nitems, ring_avail_cnt);
/* Return next available space */
- ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring));
-
- /* Update write pointer */
- if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring))
- RING_WRITE_PTR(ring) = 0;
- else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring))
- RING_WRITE_PTR(ring) += *alloced;
- else {
+ ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
+
+ /* Update write index */
+ if ((ring->wr + *alloced) == ring->max_items) {
+ ring->wr = 0;
+ } else if ((ring->wr + *alloced) < ring->max_items) {
+ ring->wr += *alloced;
+ } else {
/* Should never hit this */
ASSERT(0);
return NULL;
}
- return ret_ptr;
-}
+ return ret_ptr;
+} /* dhd_prot_get_ring_space */
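/*
 * A minimal illustrative sketch of the circular write-space and write-index
 * arithmetic in dhd_prot_get_ring_space() above. The free-space formula below
 * (one slot kept unused to distinguish full from empty) is only an assumption
 * for illustration; the driver's CHECK_WRITE_SPACE macro is authoritative.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t write_space(uint16_t rd, uint16_t wr, uint16_t max_items)
{
	return (rd > wr) ? (uint16_t)(rd - wr - 1) : (uint16_t)(max_items - wr + rd - 1);
}

int main(void)
{
	uint16_t max_items = 8, rd = 5, wr = 2, nitems = 3, alloced;
	uint16_t avail = write_space(rd, wr, max_items);

	alloced = (nitems < avail) ? nitems : avail;  /* MIN(nitems, ring_avail_cnt) */
	printf("avail %u, alloced %u\n", (unsigned)avail, (unsigned)alloced);

	/* Update the write index the same way dhd_prot_get_ring_space() does */
	if ((uint16_t)(wr + alloced) == max_items)
		wr = 0;
	else if ((uint16_t)(wr + alloced) < max_items)
		wr = (uint16_t)(wr + alloced);
	printf("new wr %u\n", (unsigned)wr);
	return 0;
}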
+
+
+/**
+ * dhd_prot_ring_write_complete - Host updates the new WR index on producing
+ * new messages in a H2D ring. The messages are flushed from cache prior to
+ * posting the new WR index. The new WR index will be updated in the DMA index
+ * array or directly in the dongle's ring state memory.
+ * A PCIE doorbell will be generated to wake up the dongle.
+ */
+static void BCMFASTPATH
+dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+ uint16 nitems)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* cache flush */
+ OSL_CACHE_FLUSH(p, ring->item_len * nitems);
+
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ dhd_prot_dma_indx_set(dhd, ring->wr,
+ H2D_DMA_INDX_WR_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ }
+
+ /* raise h2d interrupt */
+ prot->mb_ring_fn(dhd->bus, ring->wr);
+}
+
+
+/**
+ * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
+ * from a D2H ring. The new RD index will be updated in the DMA Index array or
+ * directly in dongle's ring state memory.
+ */
+static void
+dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+	/* Update the read index.
+	 * If DMA'ing of H2D indices is supported, update the RD index in
+	 * host memory; otherwise update it in TCM.
+	 */
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ dhd_prot_dma_indx_set(dhd, ring->rd,
+ D2H_DMA_INDX_RD_UPD, ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+ sizeof(uint16), RING_RD_UPD, ring->idx);
+ }
+}
+
+
+/**
+ * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
+ * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
+ * See dhd_prot_dma_indx_init()
+ */
+static void
+dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
+{
+ uint8 *ptr;
+ uint16 offset;
+ dhd_prot_t *prot = dhd->prot;
+
+ switch (type) {
+ case H2D_DMA_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+ offset = DHD_H2D_RING_OFFSET(ringid);
+ break;
+
+ case D2H_DMA_INDX_RD_UPD:
+ ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+ offset = DHD_D2H_RING_OFFSET(ringid);
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+ __FUNCTION__));
+ return;
+ }
+
+ ASSERT(prot->rw_index_sz != 0);
+ ptr += offset * prot->rw_index_sz;
+
+ *(uint16*)ptr = htol16(new_index);
-static void BCMFASTPATH
-prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems)
-{
- dhd_prot_t *prot = dhd->prot;
+ OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
- /* cache flush */
- OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems);
+ DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+ __FUNCTION__, new_index, type, ringid, ptr, offset));
- /* update write pointer */
- /* If dma'ing h2d indices are supported
- * update the values in the host memory
- * o/w update the values in TCM
- */
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
- dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
- ring->idx, (uint16)RING_WRITE_PTR(ring));
- else
- dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)),
- sizeof(uint16), RING_WRITE_PTR, ring->idx);
+} /* dhd_prot_dma_indx_set */
- /* raise h2d interrupt */
- prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring));
-}
-/* If dma'ing h2d indices are supported
- * this function updates the indices in
- * the host memory
+/**
+ * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
+ * array.
+ * Dongle DMAes an entire array to host memory (if the feature is enabled).
+ * See dhd_prot_dma_indx_init()
*/
-static void
-dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index)
+static uint16
+dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
{
+ uint8 *ptr;
+ uint16 data;
+ uint16 offset;
dhd_prot_t *prot = dhd->prot;
- uint32 *ptr = NULL;
- uint16 offset = 0;
-
switch (type) {
- case H2D_DMA_WRITEINDX:
- ptr = (uint32 *)(prot->h2d_dma_writeindx_buf.va);
-
- /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
- * but in host memory their indices start
- * after H2D Common Rings
- */
- if (ringid >= BCMPCIE_COMMON_MSGRINGS)
- offset = ringid - BCMPCIE_COMMON_MSGRINGS +
- BCMPCIE_H2D_COMMON_MSGRINGS;
- else
- offset = ringid;
- ptr += offset;
-
- *ptr = htol16(new_index);
-
- /* cache flush */
- OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
- prot->h2d_dma_writeindx_buf_len);
-
+ case H2D_DMA_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
+ offset = DHD_H2D_RING_OFFSET(ringid);
break;
- case D2H_DMA_READINDX:
- ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va);
-
- /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
- offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
- ptr += offset;
+ case H2D_DMA_INDX_RD_UPD:
+ ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
+ offset = DHD_H2D_RING_OFFSET(ringid);
+ break;
- *ptr = htol16(new_index);
- /* cache flush */
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
- prot->d2h_dma_readindx_buf_len);
+ case D2H_DMA_INDX_WR_UPD:
+ ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
+ offset = DHD_D2H_RING_OFFSET(ringid);
+ break;
+ case D2H_DMA_INDX_RD_UPD:
+ ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
+ offset = DHD_D2H_RING_OFFSET(ringid);
break;
default:
DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
__FUNCTION__));
-
- break;
+ return 0;
}
- DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n",
- __FUNCTION__, ptr, ringid, new_index));
-}
+ ASSERT(prot->rw_index_sz != 0);
+ ptr += offset * prot->rw_index_sz;
-static uint16
-dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+ OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
+
+ data = LTOH16(*((uint16*)ptr));
+
+ DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
+ __FUNCTION__, data, type, ringid, ptr, offset));
+
+ return (data);
+
+} /* dhd_prot_dma_indx_get */
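/*
 * A minimal illustrative sketch of the byte-offset arithmetic into the
 * host-resident DMA index arrays used by dhd_prot_dma_indx_set/get() above.
 * The ringid-to-slot mapping mirrors the replaced dhd_set_dmaed_index() logic
 * (H2D common rings first, then flowrings); the concrete ring ids, the common
 * ring counts and the 2-byte index width are assumptions for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t h2d_common = 2;   /* BCMPCIE_H2D_COMMON_MSGRINGS (assumed) */
	const uint16_t all_common = 5;   /* BCMPCIE_COMMON_MSGRINGS: 2 H2D + 3 D2H (assumed) */
	const uint16_t rw_index_sz = 2;  /* dongle-advertized index width (2B or 4B) */
	const uint16_t h2d_ringids[] = { 0, 1, 5, 6, 7 };  /* ctrl subn, rxpost subn, flowrings */
	size_t i;

	for (i = 0; i < sizeof(h2d_ringids) / sizeof(h2d_ringids[0]); i++) {
		uint16_t ringid = h2d_ringids[i];
		uint16_t slot = (ringid >= all_common) ?
			(uint16_t)(ringid - all_common + h2d_common) : ringid;
		printf("H2D ringid %u -> slot %u -> byte offset %u in h2d_dma_indx_wr_buf\n",
		       (unsigned)ringid, (unsigned)slot, (unsigned)(slot * rw_index_sz));
	}
	return 0;
}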
+
+/**
+ * An array of DMA read/write indices, containing information about host rings, can be maintained
+ * either in host memory or in device memory, depending on preprocessor options. Depending on these
+ * options, this function is called during driver initialization. It reserves and initializes
+ * blocks of DMA-able host memory containing an array of DMA read or DMA write indices. The physical
+ * addresses of these host memory blocks are communicated to the dongle later on. By reading this
+ * host memory, the dongle learns about the state of the host rings.
+ */
+
+static INLINE int
+dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
+ dhd_dma_buf_t *dma_buf, uint32 bufsz)
{
- uint32 *ptr = NULL;
- uint16 data = 0;
- uint16 offset = 0;
+ int rc;
- switch (type) {
- case H2D_DMA_WRITEINDX:
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va,
- dhd->prot->h2d_dma_writeindx_buf_len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va);
-
- /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
- * but in host memory their indices start
- * after H2D Common Rings
- */
- if (ringid >= BCMPCIE_COMMON_MSGRINGS)
- offset = ringid - BCMPCIE_COMMON_MSGRINGS +
- BCMPCIE_H2D_COMMON_MSGRINGS;
- else
- offset = ringid;
- ptr += offset;
+ if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
+ return BCME_OK;
- data = LTOH16((uint16)*ptr);
- break;
+ rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
- case H2D_DMA_READINDX:
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
- dhd->prot->h2d_dma_readindx_buf_len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+ return rc;
+}
- /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
- * but in host memory their indices start
- * after H2D Common Rings
- */
- if (ringid >= BCMPCIE_COMMON_MSGRINGS)
- offset = ringid - BCMPCIE_COMMON_MSGRINGS +
- BCMPCIE_H2D_COMMON_MSGRINGS;
- else
- offset = ringid;
- ptr += offset;
+int
+dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
+{
+ uint32 bufsz;
+ dhd_prot_t *prot = dhd->prot;
+ dhd_dma_buf_t *dma_buf;
- data = LTOH16((uint16)*ptr);
- break;
+ if (prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
- case D2H_DMA_WRITEINDX:
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
- dhd->prot->d2h_dma_writeindx_buf_len);
- ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+ /* Dongle advertizes 2B or 4B RW index size */
+ ASSERT(rw_index_sz != 0);
+ prot->rw_index_sz = rw_index_sz;
- /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
- offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
- ptr += offset;
+ bufsz = rw_index_sz * length;
- data = LTOH16((uint16)*ptr);
+ switch (type) {
+ case H2D_DMA_INDX_WR_BUF:
+ dma_buf = &prot->h2d_dma_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ goto ret_no_mem;
+ }
+ DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
break;
- case D2H_DMA_READINDX:
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va,
- dhd->prot->d2h_dma_readindx_buf_len);
- ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va);
+ case H2D_DMA_INDX_RD_BUF:
+ dma_buf = &prot->h2d_dma_indx_rd_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ goto ret_no_mem;
+ }
+ DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
- /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
- offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
- ptr += offset;
+ case D2H_DMA_INDX_WR_BUF:
+ dma_buf = &prot->d2h_dma_indx_wr_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ goto ret_no_mem;
+ }
+ DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
+ break;
- data = LTOH16((uint16)*ptr);
+ case D2H_DMA_INDX_RD_BUF:
+ dma_buf = &prot->d2h_dma_indx_rd_buf;
+ if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) {
+ goto ret_no_mem;
+ }
+ DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
+ dma_buf->len, rw_index_sz, length));
break;
default:
- DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
- __FUNCTION__));
-
- break;
+ DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+ return BCME_BADOPTION;
}
- DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data));
- return (data);
-}
-/* D2H dircetion: get next space to read from */
+ return BCME_OK;
+
+ret_no_mem:
+ DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
+ __FUNCTION__, type, bufsz));
+ return BCME_NOMEM;
+
+} /* dhd_prot_dma_indx_init */
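/*
 * A minimal sketch of how dhd_prot_dma_indx_init() above could be invoked once
 * the dongle has advertized its ring counts and index width. The wrapper name,
 * its caller and the length arguments are assumptions for illustration only;
 * in the driver this happens from the PCIe bus layer during initialization.
 */
static int example_dma_indx_setup(dhd_pub_t *dhd, uint32 rw_index_sz,
	uint32 h2d_rings, uint32 d2h_rings)
{
	/* One slot per H2D ring: host-updated WR and dongle-updated RD indices */
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_WR_BUF, h2d_rings) != BCME_OK)
		return BCME_NOMEM;
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_RD_BUF, h2d_rings) != BCME_OK)
		return BCME_NOMEM;

	/* One slot per D2H ring: dongle-updated WR and host-updated RD indices */
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_WR_BUF, d2h_rings) != BCME_OK)
		return BCME_NOMEM;
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_RD_BUF, d2h_rings) != BCME_OK)
		return BCME_NOMEM;

	return BCME_OK;
}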
+
+
+/**
+ * Called when checking for 'completion' messages from the dongle. Returns the next host buffer to
+ * read from, or NULL if there are no more messages to read.
+ */
static uint8*
-prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len)
+dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
{
- uint16 w_ptr;
- uint16 r_ptr;
+ uint16 wr;
+ uint16 rd;
uint16 depth;
- void* ret_addr = NULL;
- uint16 d2h_w_index = 0;
-
- DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n",
- __FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va),
- (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va)));
+ uint16 items;
+ void *read_addr = NULL; /* address of next msg to be read in ring */
+ uint16 d2h_wr = 0;
+
+ DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
+ __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
+ (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
+
+	/* Remember the read index in a variable.
+	 * This is because ring->rd gets updated at the end of this function,
+	 * so without it we could not report the exact read index from which
+	 * a message was read.
+	 */
+ ring->curr_rd = ring->rd;
/* update write pointer */
if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
/* DMAing write/read indices supported */
- d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx);
- ring->ringstate->w_offset = d2h_w_index;
- } else
- dhd_bus_cmn_readshared(dhd->bus,
- &(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx);
+ d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
+ ring->wr = d2h_wr;
+ } else {
+ dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
+ }
- w_ptr = ring->ringstate->w_offset;
- r_ptr = ring->ringstate->r_offset;
- depth = ring->ringmem->max_item;
+ wr = ring->wr;
+ rd = ring->rd;
+ depth = ring->max_items;
- /* check for avail space */
- *available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth);
- if (*available_len == 0)
+ /* check for avail space, in number of ring items */
+ items = READ_AVAIL_SPACE(wr, rd, depth);
+ if (items == 0) {
return NULL;
+ }
+
+ ASSERT(items < ring->max_items);
- if (*available_len > ring->ringmem->max_item) {
- DHD_ERROR(("%s: *available_len %d, ring->ringmem->max_item %d\n",
- __FUNCTION__, *available_len, ring->ringmem->max_item));
+	/*
+	 * Note that there are builds where ASSERT translates to just a printk,
+	 * so even if we hit this condition we would never halt, and
+	 * dhd_prot_process_msgtype could then get into a big loop.
+	 */
+ if (items >= ring->max_items) {
+ DHD_ERROR(("\r\n======================= \r\n"));
+ DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
+ __FUNCTION__, ring, ring->name, ring->max_items, items));
+ DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
+ DHD_ERROR(("dhd->busstate %d bus->suspended %d bus->wait_for_d3_ack %d \r\n",
+ dhd->busstate, dhd->bus->suspended, dhd->bus->wait_for_d3_ack));
+ DHD_ERROR(("\r\n======================= \r\n"));
+
+ *available_len = 0;
return NULL;
}
- /* if space available, calculate address to be read */
- ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);
+ /* if space is available, calculate address to be read */
+ read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
/* update read pointer */
- if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item)
- ring->ringstate->r_offset = 0;
- else
- ring->ringstate->r_offset += *available_len;
+ if ((ring->rd + items) >= ring->max_items) {
+ ring->rd = 0;
+ } else {
+ ring->rd += items;
+ }
- ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item);
+ ASSERT(ring->rd < ring->max_items);
- /* convert index to bytes */
- *available_len = *available_len * ring->ringmem->len_items;
+ /* convert items to bytes : available_len must be 32bits */
+ *available_len = (uint32)(items * ring->item_len);
- /* Cache invalidate */
- OSL_CACHE_INV((void *) ret_addr, *available_len);
+ OSL_CACHE_INV(read_addr, *available_len);
/* return read address */
- return ret_addr;
-}
-static void
-prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
-{
- /* update read index */
- /* If dma'ing h2d indices supported
- * update the r -indices in the
- * host memory o/w in TCM
- */
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
- dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
- ring->idx, (uint16)RING_READ_PTR(ring));
- else
- dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)),
- sizeof(uint16), RING_READ_PTR, ring->idx);
-}
-
-static void
-prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
-{
- dhd_prot_t *prot;
+ return read_addr;
- if (!dhd || !dhd->prot)
- return;
-
- prot = dhd->prot;
- prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring);
-}
-
-static void
-prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
-{
- dhd_prot_t *prot;
-
- if (!dhd || !dhd->prot)
- return;
-
- prot = dhd->prot;
-
- if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring))
- return;
-
- if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring))
- prot->rx_cpln_early_upd_idx = 0;
-
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
- dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
- ring->idx, (uint16)prot->rx_cpln_early_upd_idx);
- else
- dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx),
- sizeof(uint16), RING_READ_PTR, ring->idx);
-}
+} /* dhd_prot_get_read_addr */
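/*
 * A minimal sketch of the general D2H consumption pattern built on
 * dhd_prot_get_read_addr() and dhd_prot_upd_read_idx() above. The wrapper name
 * is hypothetical and per-message dispatch is elided; the real driver walks the
 * returned buffer in dhd_prot_process_msgtype() and handles each message type.
 */
static void example_consume_d2h_ring(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	uint8 *msg_addr;
	uint32 msg_len = 0;

	/* Claim all currently posted completion messages (advances ring->rd) */
	msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
	if (msg_addr == NULL)
		return;

	/* ... process msg_len bytes of messages starting at msg_addr ... */

	/* Publish the new RD index back to the dongle (DMA index array or TCM) */
	dhd_prot_upd_read_idx(dhd, ring);
}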
+/** Creates a flow ring and informs dongle of this event */
int
dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
tx_flowring_create_request_t *flow_create_rqst;
- msgbuf_ring_t *msgbuf_flow_info;
+ msgbuf_ring_t *flow_ring;
dhd_prot_t *prot = dhd->prot;
- uint16 hdrlen = sizeof(tx_flowring_create_request_t);
- uint16 msglen = hdrlen;
unsigned long flags;
- char eabuf[ETHER_ADDR_STR_LEN];
uint16 alloced = 0;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
- if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr",
- H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
- BCMPCIE_H2D_TXFLOWRINGID +
- (flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) {
- DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__));
+ /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
+ flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
+ if (flow_ring == NULL) {
+ DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
+ __FUNCTION__, flow_ring_node->flowid));
return BCME_NOMEM;
}
- /* Clear write pointer of the ring */
- flow_ring_node->prot_info = (void *)msgbuf_flow_info;
-
- /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
- msglen = align(msglen, DMA_ALIGN_LEN);
-
DHD_GENERAL_LOCK(dhd, flags);
- /* Request for ring buffer space */
- flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ /* Request for ctrl_ring buffer space */
+ flow_create_rqst = (tx_flowring_create_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
if (flow_create_rqst == NULL) {
- DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__));
+ dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
+ DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
+ __FUNCTION__, flow_ring_node->flowid));
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_NOMEM;
}
- msgbuf_flow_info->inited = TRUE;
+
+ flow_ring_node->prot_info = (void *)flow_ring;
/* Common msg buf hdr */
flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
- flow_create_rqst->msg.request_id = htol16(0); /* TBD */
+ flow_create_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
/* Update flow create message */
flow_create_rqst->tid = flow_ring_node->flow_info.tid;
flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
- flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr;
- flow_create_rqst->flow_ring_ptr.high_addr = msgbuf_flow_info->ringmem->base_addr.high_addr;
+ /* CAUTION: ring::base_addr already in Little Endian */
+ flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
+ flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
- bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
- DHD_ERROR(("%s Send Flow create Req msglen flow ID %d for peer %s prio %d ifindex %d\n",
- __FUNCTION__, flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
+ DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
+ " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
+ MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
flow_ring_node->flow_info.ifindex));
- /* upd wrt ptr and raise interrupt */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* Update the flow_ring's WRITE index */
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ dhd_prot_dma_indx_set(dhd, flow_ring->wr,
+ H2D_DMA_INDX_WR_UPD, flow_ring->idx);
+ } else {
+ dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
+ sizeof(uint16), RING_WR_UPD, flow_ring->idx);
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
- /* If dma'ing indices supported
- * update the w-index in host memory o/w in TCM
- */
- if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
- dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
- msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info));
- else
- dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)),
- sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx);
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_OK;
-}
+} /* dhd_prot_flow_ring_create */
+/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
static void
-dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
{
- tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf;
+ tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
- DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__,
- flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id));
+ DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
+ ltoh16(flow_create_resp->cmplt.status),
+ ltoh16(flow_create_resp->cmplt.flow_ring_id)));
- dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id,
- flow_create_resp->cmplt.status);
+ dhd_bus_flow_ring_create_response(dhd->bus,
+ ltoh16(flow_create_resp->cmplt.flow_ring_id),
+ ltoh16(flow_create_resp->cmplt.status));
}
+/** called on e.g. flow ring delete */
void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
{
msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
}
void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
- struct bcmstrbuf *strbuf)
+ struct bcmstrbuf *strbuf, const char * fmt)
{
+ const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d\n";
msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
- uint16 rd, wrt;
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_READ_PTR, flow_ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx);
- bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt);
+ uint16 rd, wr;
+ uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
+
+ if (fmt == NULL) {
+ fmt = default_fmt;
+ }
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
+ bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
+ ltoh32(flow_ring->base_addr.high_addr),
+ ltoh32(flow_ring->base_addr.low_addr), dma_buf_len);
}
void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
+ dhd_prot_t *prot = dhd->prot;
bcm_bprintf(strbuf, "CtrlPost: ");
- dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_ctrl_subn, strbuf);
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, NULL);
bcm_bprintf(strbuf, "CtrlCpl: ");
- dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf);
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, NULL);
+
bcm_bprintf(strbuf, "RxPost: ");
- bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost);
- dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf);
+ bcm_bprintf(strbuf, "RBP %d ", prot->rxbufpost);
+ dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, NULL);
bcm_bprintf(strbuf, "RxCpl: ");
- dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf);
- if (dhd_bus_is_txmode_push(dhd->bus)) {
- bcm_bprintf(strbuf, "TxPost: ");
- dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf);
- }
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, NULL);
+
bcm_bprintf(strbuf, "TxCpl: ");
- dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf);
+ dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, NULL);
bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n",
dhd->prot->active_tx_count,
- dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle));
+ DHD_PKTID_AVAIL(dhd->prot->pktid_map_handle));
}
int
dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
tx_flowring_delete_request_t *flow_delete_rqst;
dhd_prot_t *prot = dhd->prot;
- uint16 msglen = sizeof(tx_flowring_delete_request_t);
unsigned long flags;
- char eabuf[ETHER_ADDR_STR_LEN];
uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
- msglen = align(msglen, DMA_ALIGN_LEN);
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ring buffer space */
- DHD_GENERAL_LOCK(dhd, flags);
- flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ flow_delete_rqst = (tx_flowring_delete_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_delete_rqst == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen));
+ DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
return BCME_NOMEM;
}
/* Common msg buf hdr */
flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
- flow_delete_rqst->msg.request_id = htol16(0); /* TBD */
+ flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
/* Update Delete info */
flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
flow_delete_rqst->reason = htol16(BCME_OK);
- bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
- DHD_ERROR(("%s sending FLOW RING ID %d for peer %s prio %d ifindex %d"
- " Delete req msglen %d\n", __FUNCTION__,
- flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
- flow_ring_node->flow_info.ifindex, msglen));
+ DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
+ " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
+ MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
+ flow_ring_node->flow_info.ifindex));
- /* upd wrt ptr and raise interrupt */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_OK;
}
static void
-dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
{
- tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf;
+ tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
- DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__,
- flow_delete_resp->cmplt.status));
+ DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
+ flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
-#ifdef PCIE_TX_DEFERRAL
- if (flow_delete_resp->cmplt.status != BCME_OK) {
- DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
- __FUNCTION__, flow_delete_resp->cmplt.status));
- return;
- }
- set_bit(flow_delete_resp->cmplt.flow_ring_id, dhd->bus->delete_flow_map);
- queue_work(dhd->bus->tx_wq, &dhd->bus->delete_flow_work);
-#else
dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
flow_delete_resp->cmplt.status);
-#endif /* PCIE_TX_DEFERRAL */
}
int
dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
tx_flowring_flush_request_t *flow_flush_rqst;
dhd_prot_t *prot = dhd->prot;
- uint16 msglen = sizeof(tx_flowring_flush_request_t);
unsigned long flags;
uint16 alloced = 0;
+ msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */
- msglen = align(msglen, DMA_ALIGN_LEN);
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ring buffer space */
- DHD_GENERAL_LOCK(dhd, flags);
- flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd,
- prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ flow_flush_rqst = (tx_flowring_flush_request_t *)
+ dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_flush_rqst == NULL) {
DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen));
+ DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
return BCME_NOMEM;
}
/* Common msg buf hdr */
flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
- flow_flush_rqst->msg.request_id = htol16(0); /* TBD */
+ flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
+
+ flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
+ ring->seqnum++;
flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
flow_flush_rqst->reason = htol16(BCME_OK);
- DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen));
+ DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
- /* upd wrt ptr and raise interrupt */
- prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_flush_rqst,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ /* update ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
DHD_GENERAL_UNLOCK(dhd, flags);
return BCME_OK;
-}
+} /* dhd_prot_flow_ring_flush */
static void
-dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
{
- tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf;
+ tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
- DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__,
+ DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
flow_flush_resp->cmplt.status));
dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
flow_flush_resp->cmplt.status);
}
+/**
+ * Request dongle to configure soft doorbells for D2H rings. Host populated soft
+ * doorbell information is transferred to dongle via the d2h ring config control
+ * message.
+ */
+void
+dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
+{
+#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
+ uint16 ring_idx;
+ uint8 *msg_next;
+ void *msg_start;
+ uint16 alloced = 0;
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ ring_config_req_t *ring_config_req;
+ bcmpcie_soft_doorbell_t *soft_doorbell;
+ msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
+ const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+
+ /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
+ DHD_GENERAL_LOCK(dhd, flags);
+ msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
+
+ if (msg_start == NULL) {
+ DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
+ __FUNCTION__, d2h_rings));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return;
+ }
+
+ msg_next = (uint8*)msg_start;
+
+ for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
+
+ /* position the ring_config_req into the ctrl subm ring */
+ ring_config_req = (ring_config_req_t *)msg_next;
+
+ /* Common msg header */
+ ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
+ ring_config_req->msg.if_id = 0;
+ ring_config_req->msg.flags = 0;
+
+ ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
+ ctrl_ring->seqnum++;
+
+ ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
+
+ /* Ring Config subtype and d2h ring_id */
+ ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
+ ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
+
+ /* Host soft doorbell configuration */
+ soft_doorbell = &prot->soft_doorbell[ring_idx];
+
+ ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
+ ring_config_req->soft_doorbell.haddr.high =
+ htol32(soft_doorbell->haddr.high);
+ ring_config_req->soft_doorbell.haddr.low =
+ htol32(soft_doorbell->haddr.low);
+ ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
+ ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
+
+ DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
+ __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
+ ring_config_req->soft_doorbell.haddr.low,
+ ring_config_req->soft_doorbell.value));
+
+ msg_next = msg_next + ctrl_ring->item_len;
+ }
+
+ /* update control subn ring's WR index and ring doorbell to dongle */
+ dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
+}
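
/*
 * Illustrative sketch (an assumption, not part of this patch): how the host
 * side might populate one soft doorbell entry before the request above is
 * sent. The field names match bcmpcie_soft_doorbell_t as used in this
 * function; the concrete values below are hypothetical.
 *
 *   bcmpcie_soft_doorbell_t *db = &dhd->prot->soft_doorbell[0];
 *   db->haddr.high = 0;            // high 32 bits of the host doorbell address
 *   db->haddr.low  = 0x1f000000;   // low 32 bits (hypothetical address)
 *   db->value      = 0x1;          // value the dongle writes to that address
 *   db->items      = 1;            // number of completions to coalesce
 *   db->msecs      = 0;            // coalescing timeout in milliseconds
 *   dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
 */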
+
+static void
+dhd_prot_d2h_ring_config_cmplt_process(dhd_pub_t *dhd, void *msg)
+{
+ DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
+ __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
+ ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
+}
+
+int
+dhd_prot_debug_info_print(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring;
+ uint16 rd, wr;
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ uint32 mbintstatus = 0;
+ uint32 d2h_mb_data = 0;
+ uint32 dma_buf_len;
+
+ DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
+
+ ring = &prot->h2dring_ctrl_subn;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+
+ ring = &prot->d2hring_ctrl_cpln;
+ dma_buf_len = ring->max_items * ring->item_len;
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
+ ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
+ DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum));
+
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+ mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+ DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n,",
+ intstatus, intmask, mbintstatus));
+ DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, dhd->bus->def_intmask));
+
+ return 0;
+}
+
int
dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
uint32 *ptr;
uint32 value;
uint32 i;
- uint8 txpush = 0;
- uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush);
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
- dhd->prot->d2h_dma_writeindx_buf_len);
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+ dhd->prot->d2h_dma_indx_wr_buf.len);
- ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
- bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush);
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
value = ltoh32(*ptr);
value = ltoh32(*ptr);
bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
- if (txpush) {
- ptr++;
+ ptr++;
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value);
- }
- else {
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
ptr++;
- bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
- for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
- ptr++;
- }
}
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
- dhd->prot->h2d_dma_readindx_buf_len);
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+ dhd->prot->h2d_dma_indx_rd_buf.len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
value = ltoh32(*ptr);
return 0;
}
+uint32
+dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
+{
+ dhd_prot_t *prot = dhd->prot;
+#if DHD_DBG_SHOW_METADATA
+ prot->metadata_dbg = val;
+#endif
+ return (uint32)prot->metadata_dbg;
+}
+
+uint32
+dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ return (uint32)prot->metadata_dbg;
+}
+
uint32
dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
{
return prot->tx_metadata_offset;
}
+/** optimization to write "n" tx items at a time to ring */
uint32
dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
{
}
#ifdef DHD_RX_CHAINING
+
static INLINE void BCMFASTPATH
dhd_rxchain_reset(rxchain_info_t *rxchain)
{
dhd_prot_t *prot = dhd->prot;
rxchain_info_t *rxchain = &prot->rxchain;
+ ASSERT(!PKTISCHAINED(pkt));
+ ASSERT(PKTCLINK(pkt) == NULL);
+ ASSERT(PKTCGETATTR(pkt) == 0);
+
eh = PKTDATA(dhd->osh, pkt);
prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+ if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+ rxchain->h_da, rxchain->h_prio))) {
+ /* Different flow - First release the existing chain */
+ dhd_rxchain_commit(dhd);
+ }
+
/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
/* so that the chain can be handed off to CTF bridge as is. */
if (rxchain->pkt_count == 0) {
rxchain->ifidx = ifidx;
rxchain->pkt_count++;
} else {
- if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
- rxchain->h_da, rxchain->h_prio)) {
- /* Same flow - keep chaining */
- PKTSETCLINK(rxchain->pkttail, pkt);
- rxchain->pkttail = pkt;
- rxchain->pkt_count++;
- } else {
- /* Different flow - First release the existing chain */
- dhd_rxchain_commit(dhd);
-
- /* Create a new chain */
- rxchain->pkthead = rxchain->pkttail = pkt;
-
- /* Keep a copy of ptr to ether_da, ether_sa and prio */
- rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
- rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
- rxchain->h_prio = prio;
- rxchain->ifidx = ifidx;
- rxchain->pkt_count++;
- }
+ /* Same flow - keep chaining */
+ PKTSETCLINK(rxchain->pkttail, pkt);
+ rxchain->pkttail = pkt;
+ rxchain->pkt_count++;
}
if ((!ETHER_ISMULTI(rxchain->h_da)) &&
/* Reset the chain */
dhd_rxchain_reset(rxchain);
}
-#endif /* DHD_RX_CHAINING */
-
-static void
-dhd_prot_ring_clear(msgbuf_ring_t* ring)
-{
- uint16 size;
-
- DHD_TRACE(("%s\n", __FUNCTION__));
-
- size = ring->ringmem->max_item * ring->ringmem->len_items;
- ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
- OSL_CACHE_INV((void *) ring->ring_base.va, size);
- bzero(ring->ring_base.va, size);
-
- OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
-
- bzero(ring->ringstate, sizeof(*ring->ringstate));
-}
-
-void
-dhd_prot_clear(dhd_pub_t *dhd)
-{
- struct dhd_prot *prot = dhd->prot;
-
- DHD_TRACE(("%s\n", __FUNCTION__));
-
- if (prot == NULL)
- return;
-
- if (prot->h2dring_txp_subn)
- dhd_prot_ring_clear(prot->h2dring_txp_subn);
- if (prot->h2dring_rxp_subn)
- dhd_prot_ring_clear(prot->h2dring_rxp_subn);
- if (prot->h2dring_ctrl_subn)
- dhd_prot_ring_clear(prot->h2dring_ctrl_subn);
- if (prot->d2hring_tx_cpln)
- dhd_prot_ring_clear(prot->d2hring_tx_cpln);
- if (prot->d2hring_rx_cpln)
- dhd_prot_ring_clear(prot->d2hring_rx_cpln);
- if (prot->d2hring_ctrl_cpln)
- dhd_prot_ring_clear(prot->d2hring_ctrl_cpln);
-
- if (prot->retbuf.va) {
- OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
- bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
- OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
- }
-
- if (prot->ioctbuf.va) {
- OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
- bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
- OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
- }
-
- if (prot->d2h_dma_scratch_buf.va) {
- OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
- bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
- }
-
- if (prot->h2d_dma_readindx_buf.va) {
- OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va,
- prot->h2d_dma_readindx_buf_len);
- bzero(prot->h2d_dma_readindx_buf.va,
- prot->h2d_dma_readindx_buf_len);
- OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va,
- prot->h2d_dma_readindx_buf_len);
- }
-
- if (prot->h2d_dma_writeindx_buf.va) {
- OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va,
- prot->h2d_dma_writeindx_buf_len);
- bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len);
- OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
- prot->h2d_dma_writeindx_buf_len);
- }
-
- if (prot->d2h_dma_readindx_buf.va) {
- OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va,
- prot->d2h_dma_readindx_buf_len);
- bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
- prot->d2h_dma_readindx_buf_len);
- }
-
- if (prot->d2h_dma_writeindx_buf.va) {
- OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va,
- prot->d2h_dma_writeindx_buf_len);
- bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len);
- OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va,
- prot->d2h_dma_writeindx_buf_len);
- }
- prot->rx_metadata_offset = 0;
- prot->tx_metadata_offset = 0;
-
- prot->rxbufpost = 0;
- prot->cur_event_bufs_posted = 0;
- prot->cur_ioctlresp_bufs_posted = 0;
-
- prot->active_tx_count = 0;
- prot->data_seq_no = 0;
- prot->ioctl_seq_no = 0;
- prot->pending = 0;
- prot->lastcmd = 0;
-
- prot->ioctl_trans_id = 1;
-
- /* dhd_flow_rings_init is located at dhd_bus_start,
- * so when stopping bus, flowrings shall be deleted
- */
- dhd_flow_rings_deinit(dhd);
- NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle);
-}
+#endif /* DHD_RX_CHAINING */
/*
* DHD Bus Module for PCIE
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_pcie.c 506043 2014-10-02 12:29:45Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie.c 609007 2015-12-30 07:44:52Z $
*/
#include <hndsoc.h>
#include <hndpmu.h>
#include <sbchipc.h>
-#if defined(DHD_DEBUG)
#include <hnd_armtrap.h>
+#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <dngl_stats.h>
#include BCMEMBEDIMAGE
#endif /* BCMEMBEDIMAGE */
+#ifdef PCIE_OOB
+#include "ftdi_sio_external.h"
+#endif /* PCIE_OOB */
+
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
-#define MAX_NVRAMBUF_SIZE 6144 /* max nvram buf size */
+#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
-#ifdef DHD_DEBUG
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+#ifdef DHD_DEBUG
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
-#endif
+#endif /* DHD_DEBUG */
+#if defined(DHD_FW_COREDUMP)
+static int dhdpcie_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
+
static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
const char *name, void *params,
static int dhdpcie_readshared(dhd_bus_t *bus);
static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
-static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
-#ifdef CONFIG_ARCH_MSM8994
-static void dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data);
-static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset);
-#endif
static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
#endif /* BCMEMBEDIMAGE */
+#ifdef EXYNOS_PCIE_DEBUG
+extern void exynos_pcie_register_dump(int ch_num);
+#endif /* EXYNOS_PCIE_DEBUG */
#define PCI_VENDOR_ID_BROADCOM 0x14e4
+static void dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
+extern void wl_nddbg_wpp_log(const char *format, ...);
+#ifdef PCIE_OOB
+static void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
+
+#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
+static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
+
+#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
+#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
+#define BIT_WL_REG_ON 6
+#define BIT_BT_REG_ON 7
+
+int gpio_handle_val = 0;
+unsigned char gpio_port = 0;
+unsigned char gpio_direction = 0;
+#define OOB_PORT "ttyUSB0"
+#endif /* PCIE_OOB */
+static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
+
/* IOVar table */
enum {
IOV_INTR = 1,
IOV_SBREG,
IOV_DONGLEISOLATION,
IOV_LTRSLEEPON_UNLOOAD,
+ IOV_METADATA_DBG,
IOV_RX_METADATALEN,
IOV_TX_METADATALEN,
IOV_TXP_THRESHOLD,
IOV_DMA_RINGINDICES,
IOV_DB1_FOR_MB,
IOV_FLOW_PRIO_MAP,
+#ifdef DHD_PCIE_RUNTIMEPM
+ IOV_IDLETIME,
+#endif /* DHD_PCIE_RUNTIMEPM */
IOV_RXBOUND,
- IOV_TXBOUND
+ IOV_TXBOUND,
+ IOV_HANGREPORT,
+#ifdef PCIE_OOB
+ IOV_OOB_BT_REG_ON,
+ IOV_OOB_ENABLE
+#endif /* PCIE_OOB */
};
{"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
{"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
{"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) },
- {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
{"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) },
{"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 },
+#ifdef PCIE_OOB
+ {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, IOVT_UINT32, 0 },
+ {"oob_enable", IOV_OOB_ENABLE, 0, IOVT_UINT32, 0 },
+#endif /* PCIE_OOB */
{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 },
{"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 },
{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 },
{"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0},
+ {"metadata_dbg", IOV_METADATA_DBG, 0, IOVT_BOOL, 0 },
{"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 },
{"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 },
{"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 },
{"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 },
{"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 },
{"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_RUNTIMEPM
+ {"idletime", IOV_IDLETIME, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_RUNTIMEPM */
{"rxbound", IOV_RXBOUND, 0, IOVT_UINT32, 0 },
{"txbound", IOV_TXBOUND, 0, IOVT_UINT32, 0 },
+ {"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
{NULL, 0, 0, 0, 0 }
};
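
/*
 * Hedged usage note (assumption, not stated in this patch): entries in the
 * iovar table above are reachable from userspace through the dhd utility's
 * iovar mechanism, so the newly added iovars would typically be exercised
 * with something along the lines of:
 *
 *   dhd -i wlan0 metadata_dbg 1    # enable metadata debug prints
 *   dhd -i wlan0 idletime          # query the runtime-PM idle time
 *
 * The exact interface name and utility invocation depend on the platform.
 */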
+
#define MAX_READ_TIMEOUT 5 * 1000 * 1000
#ifndef DHD_RXBOUND
*
* 'tcm' is the *host* virtual address at which tcm is mapped.
*/
-dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm, uint32 tcm_size)
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
+ volatile char *regs, volatile char *tcm, void *pci_dev)
{
dhd_bus_t *bus;
DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
do {
- if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
break;
}
- bzero(bus, sizeof(dhd_bus_t));
+
bus->regs = regs;
bus->tcm = tcm;
- bus->tcm_size = tcm_size;
bus->osh = osh;
+ /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
+ bus->dev = (struct pci_dev *)pci_dev;
dll_init(&bus->const_flowring);
/* Attach pcie shared structure */
- bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
- if (!bus->pcie_sh) {
+ if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
break;
}
/* dhd_common_init(osh); */
+
if (dhdpcie_dongle_attach(bus)) {
DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
break;
}
bus->dhd->busstate = DHD_BUS_DOWN;
bus->db1_for_mb = TRUE;
- bus->dhd->hang_report = TRUE;
+ bus->dhd->hang_report = TRUE;
+ bus->irq_registered = FALSE;
+
+ bus->d3_ack_war_cnt = 0;
DHD_TRACE(("%s: EXIT SUCCESS\n",
__FUNCTION__));
DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
- if (bus && bus->pcie_sh)
+ if (bus && bus->pcie_sh) {
MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+ }
- if (bus)
+ if (bus) {
MFREE(osh, bus, sizeof(dhd_bus_t));
-
+ }
return NULL;
}
return &bus->txq;
}
-/* Get Chip ID version */
+/** Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chip;
}
-/* Get Chip Rev ID version */
+/** Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chiprev;
}
-/* Get Chip Pkg ID version */
+/** Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
return bus->sih->chippkg;
}
+/** Read and clear intstatus. This should be called with interrupts disabled or inside the isr */
+uint32
+dhdpcie_bus_intstatus(dhd_bus_t *bus)
+{
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
-/*
-
-Name: dhdpcie_bus_isr
-
-Parametrs:
-
-1: IN int irq -- interrupt vector
-2: IN void *arg -- handle to private data structure
+ if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+ (bus->sih->buscorerev == 2)) {
+ intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
+ intstatus &= I_MB;
+ } else {
+ /* this is a PCIE core register..not a config register... */
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
-Return value:
+ /* this is a PCIE core register..not a config register... */
+ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
-Status (TRUE or FALSE)
+ /*
+ * The fourth argument to si_corereg is the "mask" of register fields to update
+ * and the fifth argument is the "value" to write. If we are interested in only
+ * a few fields of the "mask" bitmap, we should not write back everything we read;
+ * doing so might clear/ack interrupts that have not been handled yet.
+ */
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
+ intstatus);
+
+ intstatus &= intmask;
+
+ /* Has the device been removed? intstatus & intmask read back 0xffffffff */
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s: !!!!!!Device Removed or dead chip.\n", __FUNCTION__));
+ intstatus = 0;
+#ifdef CUSTOMER_HW4_DEBUG
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(bus->dhd);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+#endif /* CUSTOMER_HW4_DEBUG */
+ }
-Description:
-Interrupt Service routine checks for the status register,
-disable interrupt and queue DPC if mail box interrupts are raised.
-*/
+ intstatus &= bus->def_intmask;
+ }
+ return intstatus;
+}
+/**
+ * Name: dhdpcie_bus_isr
+ * Parameters:
+ * 1: IN int irq -- interrupt vector
+ * 2: IN void *arg -- handle to private data structure
+ * Return value:
+ * Status (TRUE or FALSE)
+ *
+ * Description:
+ * The interrupt service routine checks the status register, disables the
+ * interrupt, and queues the DPC if mailbox interrupts are raised.
+ */
int32
dhdpcie_bus_isr(dhd_bus_t *bus)
{
+ uint32 intstatus = 0;
do {
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* verify argument */
- if (!bus) {
- DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
- break;
- }
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ /* verify argument */
+ if (!bus) {
+ DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
+ break;
+ }
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
- DHD_TRACE(("%s : bus is down. we have nothing to do\n",
- __FUNCTION__));
- break;
- }
+ if (bus->dhd->dongle_reset) {
+ break;
+ }
- /* Overall operation:
- * - Mask further interrupts
- * - Read/ack intstatus
- * - Take action based on bits and state
- * - Reenable interrupts (as per state)
- */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: BUS is down, not processing the interrupt \r\n",
+ __FUNCTION__));
+ break;
+ }
+
+ intstatus = dhdpcie_bus_intstatus(bus);
+
+ /* Check if the interrupt is ours or not */
+ if (intstatus == 0) {
+ break;
+ }
- /* Count the interrupt call */
- bus->intrcount++;
+ /* save the intstatus */
+ bus->intstatus = intstatus;
- /* read interrupt status register!! Status bits will be cleared in DPC !! */
- bus->ipend = TRUE;
- dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
- bus->intdis = TRUE;
+ /* Overall operation:
+ * - Mask further interrupts
+ * - Read/ack intstatus
+ * - Take action based on bits and state
+ * - Reenable interrupts (as per state)
+ */
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+
+ /* read interrupt status register!! Status bits will be cleared in DPC !! */
+ bus->ipend = TRUE;
+ dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
+ bus->intdis = TRUE;
#if defined(PCIE_ISR_THREAD)
- DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
- DHD_OS_WAKE_LOCK(bus->dhd);
- while (dhd_bus_dpc(bus));
- DHD_OS_WAKE_UNLOCK(bus->dhd);
+ DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ while (dhd_bus_dpc(bus));
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
- bus->dpc_sched = TRUE;
- dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
#endif /* defined(SDIO_ISR_THREAD) */
- DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
- return TRUE;
+ DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+ return TRUE;
} while (0);
return FALSE;
}
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+dhd_pub_t *link_recovery = NULL;
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{
uint32 val;
sbpcieregs_t *sbpcieregs;
- DHD_TRACE(("%s: ENTER\n",
- __FUNCTION__));
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+ link_recovery = bus->dhd;
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
bus->alp_only = TRUE;
bus->sih = NULL;
/* Set bar0 window to si_enum_base */
dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
-#ifdef CONFIG_ARCH_MSM8994
- /* Read bar1 window */
- bus->bar1_win_base = OSL_PCI_READ_CONFIG(bus->osh, PCI_BAR1_WIN, 4);
- DHD_ERROR(("%s: PCI_BAR1_WIN = %x\n", __FUNCTION__, bus->bar1_win_base));
-#endif
+ /* Check the PCIe bus status by reading the configuration space */
+ val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
+ if ((val & 0xFFFF) != VENDOR_BROADCOM) {
+ DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
+ goto fail;
+ }
/* si_attach() will provide an SI handle and scan the backplane */
if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
/* WAR where the BAR1 window may not be sized properly */
W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
val = R_REG(osh, &sbpcieregs->configdata);
-#ifdef CONFIG_ARCH_MSM8994
- bus->bar1_win_mask = 0xffffffff - (bus->tcm_size - 1);
- DHD_ERROR(("%s: BAR1 window val=%d mask=%x\n", __FUNCTION__, val, bus->bar1_win_mask));
-#endif
W_REG(osh, &sbpcieregs->configdata, val);
/* Get info on the ARM and SOCRAM cores... */
/* Should really be qualified by device id */
if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
bus->armrev = si_corerev(bus->sih);
} else {
DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
goto fail;
}
- if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+ if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ /* also populate base address */
+ bus->dongle_ram_base = CA7_4365_RAM_BASE;
+ } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
goto fail;
case BCM4360_CHIP_ID:
bus->dongle_ram_base = CR4_4360_RAM_BASE;
break;
- case BCM4345_CHIP_ID:
+ CASE_BCM4345_CHIP:
bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
break;
- case BCM43602_CHIP_ID:
+ CASE_BCM43602_CHIP:
bus->dongle_ram_base = CR4_43602_RAM_BASE;
break;
case BCM4349_CHIP_GRPID:
- bus->dongle_ram_base = CR4_4349_RAM_BASE;
+ /* RAM base changed from 4349c0(revid=9) onwards */
+ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+ CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
break;
default:
bus->dongle_ram_base = 0;
bus->wait_for_d3_ack = 1;
bus->suspended = FALSE;
- DHD_TRACE(("%s: EXIT: SUCCESS\n",
- __FUNCTION__));
+
+#ifdef PCIE_OOB
+ gpio_handle_val = get_handle(OOB_PORT);
+ if (gpio_handle_val < 0)
+ {
+ DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
+ ASSERT(FALSE);
+ }
+
+ gpio_direction = 0;
+ ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
+
+ /* Note BT core is also enabled here */
+ gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ gpio_write_port(gpio_handle_val, gpio_port);
+
+ gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
+
+ bus->oob_enabled = TRUE;
+
+ /* drive the Device_Wake GPIO low on startup */
+ bus->device_wake_state = TRUE;
+ dhd_bus_set_device_wake(bus, FALSE);
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif /* PCIE_OOB */
+
+ DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
return 0;
fail:
- if (bus->sih != NULL)
+ if (bus->sih != NULL) {
si_detach(bus->sih);
- DHD_TRACE(("%s: EXIT: FAILURE\n",
- __FUNCTION__));
+ bus->sih = NULL;
+ }
+ DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
return -1;
}
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
DHD_TRACE(("%s: enable interrupts\n", __FUNCTION__));
-
- if (!bus || !bus->sih)
- return;
-
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- dhpcie_bus_unmask_interrupt(bus);
- }
- else if (bus->sih) {
+ if (bus && bus->sih && !bus->is_linkdown) {
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ } else {
si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
bus->def_intmask, bus->def_intmask);
+ }
+ } else {
+ DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__));
+ DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n",
+ bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1));
}
}
DHD_TRACE(("%s Enter\n", __FUNCTION__));
- if (!bus || !bus->sih)
- return;
-
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- dhpcie_bus_mask_interrupt(bus);
- }
- else if (bus->sih) {
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
- bus->def_intmask, 0);
+ if (bus && bus->sih && !bus->is_linkdown) {
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_mask_interrupt(bus);
+ } else {
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+ bus->def_intmask, 0);
+ }
+ } else {
+ DHD_ERROR(("****** %s: failed ******\n", __FUNCTION__));
+ DHD_ERROR(("bus: %p sih: %p bus->is_linkdown %d\n",
+ bus, bus ? bus->sih : NULL, bus ? bus->is_linkdown: -1));
}
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
-void
+/*
+ * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress to the
+ * other bus user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for those
+ * contexts to exit gracefully. Before marking the bus busy, each such context
+ * checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
+ * it exits right there without marking dhd_bus_busy_state as BUSY.
+ */
+static void
+dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
+{
+ unsigned long flags;
+ int timeleft;
+
+ DHD_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+
+ timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
+ if (timeleft == 0) {
+ DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
+ __FUNCTION__, dhdp->dhd_bus_busy_state));
+ BUG_ON(1);
+ }
+
+ return;
+}
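
/*
 * Minimal sketch of the check-then-mark-busy handshake described above, as it
 * appears in the watchdog context later in this patch (dhd_bus_watchdog).
 * Other bus-user contexts follow the same pattern with their own busy bit:
 *
 *   DHD_GENERAL_LOCK(dhd, flags);
 *   if (dhd->busstate == DHD_BUS_DOWN ||
 *       dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
 *       DHD_GENERAL_UNLOCK(dhd, flags);
 *       return FALSE;              // bail out without marking the bus busy
 *   }
 *   dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD;
 *   DHD_GENERAL_UNLOCK(dhd, flags);
 *   ... do the work ...
 *   DHD_GENERAL_LOCK(dhd, flags);
 *   dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD;
 *   DHD_GENERAL_UNLOCK(dhd, flags);
 */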
+
+static void
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
{
+ unsigned long flags;
DHD_TRACE(("%s Enter\n", __FUNCTION__));
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
dhd_os_sdlock(bus->dhd);
- bus->dhd->busstate = DHD_BUS_DOWN;
dhdpcie_bus_intr_disable(bus);
- // terence 20150406: fix for null pointer handle
- if (bus->sih)
+ if (!bus->dhd->dongle_isolation) {
pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+ }
dhd_os_sdunlock(bus->dhd);
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
-
-/* Detach and free everything */
+/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
ASSERT(osh);
if (bus->dhd) {
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
dongle_isolation = bus->dhd->dongle_isolation;
+ dhdpcie_bus_remove_prep(bus);
+
if (bus->intr) {
dhdpcie_bus_intr_disable(bus);
dhdpcie_free_irq(bus);
}
- dhd_detach(bus->dhd);
dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_detach(bus->dhd);
dhd_free(bus->dhd);
bus->dhd = NULL;
}
bus->regs = NULL;
}
if (bus->tcm) {
- dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, bus->tcm_size);
+ dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
bus->tcm = NULL;
}
dhdpcie_bus_release_malloc(bus, osh);
/* Detach pcie shared structure */
- if (bus->pcie_sh)
+ if (bus->pcie_sh) {
MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+ bus->pcie_sh = NULL;
+ }
#ifdef DHD_DEBUG
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
-
-}
+} /* dhdpcie_bus_release */
void
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
-
DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
bus->dhd, bus->dhd->dongle_reset));
si_corereg(bus->sih, bus->sih->buscoreidx,
OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
}
- si_detach(bus->sih);
- // terence 20150420: fix for sih incorrectly handled in other function
- bus->sih = NULL;
+
+ if (bus->sih->buscorerev == 13)
+ pcie_serdes_iddqdisable(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+
+ if (bus->sih != NULL) {
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
if (bus->vars && bus->varsz)
MFREE(osh, bus->vars, bus->varsz);
bus->vars = NULL;
return data;
}
-/* 32 bit config write */
+/** 32 bit config write */
void
dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
{
OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
}
-#ifdef CONFIG_ARCH_MSM8994
-void
-dhdpcie_bus_cfg_set_bar1_win(dhd_bus_t *bus, uint32 data)
-{
- OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN, 4, data);
-}
-#endif
-
void
dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
{
}
-/* Stop bus module: clear pending frames, disable data flow */
+/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
uint32 status;
+ unsigned long flags;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
goto done;
}
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
dhdpcie_bus_intr_disable(bus);
status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
- if (!dhd_download_fw_on_driverload)
+
+ if (!dhd_download_fw_on_driverload) {
dhd_dpc_kill(bus->dhd);
+ }
/* Clear rx control and wake any waiters */
- bus->rxlen = 0;
- dhd_os_ioctl_resp_wake(bus->dhd);
+ dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
done:
return;
}
-/* Watchdog timer function */
+/** Watchdog timer function */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
{
+ unsigned long flags;
#ifdef DHD_DEBUG
dhd_bus_t *bus;
bus = dhd->bus;
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (dhd->busstate == DHD_BUS_DOWN ||
+ dhd->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+ }
+ dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
/* Poll for console output periodically */
}
#endif /* DHD_DEBUG */
- return FALSE;
+#ifdef PCIE_OOB
+ /* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
+ if (dhd_doorbell_timeout != 0 && !(bus->dhd->busstate == DHD_BUS_SUSPEND) &&
+ dhd_timeout_expired(&bus->doorbell_timer)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+#endif /* PCIE_OOB */
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return TRUE;
+} /* dhd_bus_watchdog */
+
+
+#define DEADBEEF_PATTERN 0xADDEADDE // "DeadDead"
+#define MEMCHECKINFO "/data/.memcheck.info"
+
+static int
+dhd_get_memcheck_info(void)
+{
+ struct file *fp = NULL;
+ uint32 mem_val = 0;
+ int ret = 0;
+ char *filepath = MEMCHECKINFO;
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("[WIFI_SEC] %s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto done;
+ } else {
+ ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+ if (ret < 0) {
+ DHD_ERROR(("[WIFI_SEC] %s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ goto done;
+ }
+
+ mem_val = bcm_atoi((char *)&mem_val);
+
+ DHD_ERROR(("[WIFI_SEC]%s: MEMCHECK ENABLED = %d\n", __FUNCTION__, mem_val));
+ filp_close(fp, NULL);
+ }
+done:
+ return mem_val;
}
+static int
+dhdpcie_mem_check(struct dhd_bus *bus)
+{
+ int bcmerror = BCME_OK;
+ int offset = 0;
+ int len = 0;
+ uint8 *memblock = NULL, *memptr;
+ int size = bus->ramsize;
+ int i;
+ uint32 memcheck_enabled;
+
+ /* Read memcheck info from the file */
+ /* 0 : Disable */
+ /* 1 : "Dead Beef" pattern write */
+ /* 2 : "Dead Beef" pattern write and checking the pattern value */
+
+ memcheck_enabled = dhd_get_memcheck_info();
+
+ DHD_ERROR(("%s: memcheck_enabled: %d \n", __FUNCTION__, memcheck_enabled));
+
+ if (memcheck_enabled == 0) {
+ return bcmerror;
+ }
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+
+ if ((ulong)memblock % DHD_SDALIGN) {
+ memptr += (DHD_SDALIGN - ((ulong)memblock % DHD_SDALIGN));
+ }
+
+ for (i = 0; i < MEMBLOCK; i = i + 4) {
+ *(ulong*)(memptr + i) = DEADBEEF_PATTERN;
+ }
+
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ if (offset == 0) {
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+
+ /* Write "DeadBeef" pattern with MEMBLOCK size */
+ while (size) {
+ len = MIN(MEMBLOCK, size);
+
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ if (memcheck_enabled == 2) {
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, (uint8 *)memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on read %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ } else {
+ for (i = 0; i < len; i = i+4) {
+ if ((*(uint32*)(memptr + i)) != DEADBEEF_PATTERN) {
+ DHD_ERROR(("%s: error on reading pattern at "
+ "0x%08x\n", __FUNCTION__, (offset + i)));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ }
+ }
+ }
+ offset += MEMBLOCK;
+ size -= MEMBLOCK;
+ }
+
+ DHD_ERROR(("%s: Writing the Dead Beef pattern is Done \n", __FUNCTION__));
+err:
+ if (memblock) {
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+ }
+
+ return bcmerror;
+}
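
/*
 * Illustrative usage (an assumption, not part of this patch): the memcheck
 * level is read from MEMCHECKINFO as an ASCII digit and converted with
 * bcm_atoi(), so it can be toggled from userspace before driver load, e.g.:
 *
 *   echo 1 > /data/.memcheck.info   # write the 0xADDEADDE pattern only
 *   echo 2 > /data/.memcheck.info   # write the pattern and verify each word
 *   echo 0 > /data/.memcheck.info   # disable (default when the file is absent)
 */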
/* Download firmware image and nvram image */
int
bus->nv_path = pnv_path;
bus->dhd->conf_path = pconf_path;
+ DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
+ __FUNCTION__, bus->fw_path, bus->nv_path));
+
+ dhdpcie_mem_check(bus);
+
ret = dhdpcie_download_firmware(bus, osh);
return ret;
dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
{
int ret = 0;
-
- DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
- __FUNCTION__, bus->fw_path, bus->nv_path));
+#if defined(BCM_REQUEST_FW)
+ uint chipid = bus->sih->chip;
+ uint revid = bus->sih->chiprev;
+ char fw_path[64] = "/lib/firmware/brcm/bcm"; /* path to firmware image */
+ char nv_path[64]; /* path to nvram vars file */
+ bus->fw_path = fw_path;
+ bus->nv_path = nv_path;
+ switch (chipid) {
+ case BCM43570_CHIP_ID:
+ bcmstrncat(fw_path, "43570", 5);
+ switch (revid) {
+ case 0:
+ bcmstrncat(fw_path, "a0", 2);
+ break;
+ case 2:
+ bcmstrncat(fw_path, "a2", 2);
+ break;
+ default:
+ DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
+ revid));
+ break;
+ }
+ break;
+ default:
+ DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
+ chipid));
+ return 0;
+ }
+ /* load board specific nvram file */
+ snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
+ /* load firmware */
+ snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
+#endif /* BCM_REQUEST_FW */
DHD_OS_WAKE_LOCK(bus->dhd);
static int
dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
- int bcmerror = -1;
+ int bcmerror = BCME_ERROR;
int offset = 0;
- int len;
- void *image = NULL;
+ int len = 0;
+ char *imgbuf = NULL;
uint8 *memblock = NULL, *memptr;
+ uint8 *memptr_tmp = NULL; // terence: used to read back and verify the downloaded firmware
+
+ int offset_end = bus->ramsize;
DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
/* Should succeed in opening image if it is actually given through registry
* entry or in module param.
*/
- image = dhd_os_open_image(pfw_path);
- if (image == NULL) {
+ imgbuf = dhd_os_open_image(pfw_path);
+ if (imgbuf == NULL) {
printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
goto err;
}
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
goto err;
}
+ if (dhd_msg_level & DHD_TRACE_VAL) {
+ memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memptr_tmp == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ }
if ((uint32)(uintptr)memblock % DHD_SDALIGN)
memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
- /* Download image */
- while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ DHD_INFO_HW4(("%s: dongle_ram_base: 0x%x ramsize: 0x%x tcm: %p\n",
+ __FUNCTION__, bus->dongle_ram_base, bus->ramsize, bus->tcm));
+ /* Download image with MEMBLOCK size */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
if (len < 0) {
DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
bcmerror = BCME_ERROR;
goto err;
}
- /* check if CR4 */
- if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* check if CR4/CA7 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
/* if address is 0, store the reset instruction to be written in 0 */
-
if (offset == 0) {
bus->resetinstr = *(((uint32*)memptr));
/* Add start of RAM address to the address given by user */
offset += bus->dongle_ram_base;
+ offset_end += offset;
}
}
-
- bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
if (bcmerror) {
DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
- __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
goto err;
}
+ if (dhd_msg_level & DHD_TRACE_VAL) {
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ if (memcmp(memptr_tmp, memptr, len)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
+ goto err;
+ } else
+ DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
+ }
offset += MEMBLOCK;
+
+ if (offset >= offset_end) {
+ DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
+ __FUNCTION__, offset, offset_end));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
}
err:
if (memblock)
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+ if (dhd_msg_level & DHD_TRACE_VAL) {
+ if (memptr_tmp)
+ MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
+ }
- if (image)
- dhd_os_close_image(image);
+ if (imgbuf)
+ dhd_os_close_image(imgbuf);
return bcmerror;
-}
+} /* dhdpcie_download_code_file */
+#ifdef CUSTOMER_HW4_DEBUG
+#define MIN_NVRAMVARS_SIZE 128
+#endif /* CUSTOMER_HW4_DEBUG */
static int
dhdpcie_download_nvram(struct dhd_bus *bus)
{
- int bcmerror = -1;
+ int bcmerror = BCME_ERROR;
uint len;
- void * image = NULL;
char * memblock = NULL;
char *bufp;
char *pnv_path;
bool nvram_file_exists;
-
+ bool nvram_uefi_exists = FALSE;
+ bool local_alloc = FALSE;
pnv_path = bus->nv_path;
nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
- if (!nvram_file_exists && (bus->nvram_params == NULL))
- return (0);
- if (nvram_file_exists) {
- image = dhd_os_open_image(pnv_path);
- if (image == NULL) {
- printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
- goto err;
+ /* First try UEFI */
+ len = MAX_NVRAMBUF_SIZE;
+ dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, &len);
+
+ /* If UEFI empty, then read from file system */
+ if ((len == 0) || (memblock[0] == '\0')) {
+
+ if (nvram_file_exists) {
+ len = MAX_NVRAMBUF_SIZE;
+ dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, &len);
+ if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
+ goto err;
+ }
+ }
+ else {
+ /* For SROM OTP no external file or UEFI required */
+ bcmerror = BCME_OK;
}
+ } else {
+ nvram_uefi_exists = TRUE;
}
- memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
- if (memblock == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
- __FUNCTION__, MAX_NVRAMBUF_SIZE));
- goto err;
- }
+ DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
- /* Download variables */
- if (nvram_file_exists) {
- len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
- }
- else {
+ if (len > 0 && len <= MAX_NVRAMBUF_SIZE) {
+ bufp = (char *) memblock;
- /* nvram is string with null terminated. cannot use strlen */
- len = bus->nvram_params_len;
- ASSERT(len <= MAX_NVRAMBUF_SIZE);
- memcpy(memblock, bus->nvram_params, len);
- }
- if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
- bufp = (char *)memblock;
- bufp[len] = 0;
+#ifdef CACHE_FW_IMAGES
+ if (bus->processed_nvram_params_len) {
+ len = bus->processed_nvram_params_len;
+ }
- if (nvram_file_exists)
- len = process_nvram_vars(bufp, len);
+ if (!bus->processed_nvram_params_len) {
+ bufp[len] = 0;
+ if (nvram_uefi_exists || nvram_file_exists) {
+ len = process_nvram_vars(bufp, len);
+ bus->processed_nvram_params_len = len;
+ }
+ } else
+#else
+ {
+ bufp[len] = 0;
+ if (nvram_uefi_exists || nvram_file_exists) {
+ len = process_nvram_vars(bufp, len);
+ }
+ }
+#endif /* CACHE_FW_IMAGES */
+
+ DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+#ifdef CUSTOMER_HW4_DEBUG
+ if (len < MIN_NVRAMVARS_SIZE) {
+ DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
if (len % 4) {
len += 4 - (len % 4);
bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
if (bcmerror) {
DHD_ERROR(("%s: error downloading vars: %d\n",
- __FUNCTION__, bcmerror));
+ __FUNCTION__, bcmerror));
}
}
- else {
- DHD_ERROR(("%s: error reading nvram file: %d\n",
- __FUNCTION__, len));
- bcmerror = BCME_ERROR;
- }
-err:
- if (memblock)
- MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
- if (image)
- dhd_os_close_image(image);
+err:
+ if (memblock) {
+ if (local_alloc) {
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+ } else {
+ dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
+ }
+ }
return bcmerror;
}
len = remaining_len;
memcpy(memptr, (p_dlarray + downloded_len), len);
- /* check if CR4 */
- if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* check if CR4/CA7 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
/* if address is 0, store the reset instruction to be written in 0 */
if (offset == 0) {
bus->resetinstr = *(((uint32*)memptr));
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
return bcmerror;
-}
+} /* dhdpcie_download_code_array */
#endif /* BCMEMBEDIMAGE */
#else
goto err;
#endif
- }
- else {
+ } else {
embed = FALSE;
dlok = TRUE;
}
if (dhdpcie_download_code_array(bus)) {
DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
goto err;
- }
- else {
+ } else {
dlok = TRUE;
}
}
err:
return bcmerror;
-}
-
-int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
-{
- int timeleft;
- uint rxlen = 0;
- bool pending;
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- if (bus->dhd->dongle_reset)
- return -EIO;
-
- /* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
- rxlen = bus->rxlen;
- bcopy(&bus->ioct_resp, msg, MIN(rxlen, sizeof(ioctl_comp_resp_msg_t)));
- bus->rxlen = 0;
-
- if (rxlen) {
- DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
- } else if (timeleft == 0) {
- DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
- bus->ioct_resp.cmn_hdr.request_id = 0;
- bus->ioct_resp.compl_hdr.status = 0xffff;
- bus->dhd->rxcnt_timeout++;
- DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
- } else if (pending == TRUE) {
- DHD_CTL(("%s: canceled\n", __FUNCTION__));
- return -ERESTARTSYS;
- } else {
- DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
- }
-
- if (timeleft != 0)
- bus->dhd->rxcnt_timeout = 0;
-
- if (rxlen)
- bus->dhd->rx_ctlpkts++;
- else
- bus->dhd->rx_ctlerrs++;
-
- if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
- return -ETIMEDOUT;
-
- if (bus->dhd->dongle_trap_occured)
- return -EREMOTEIO;
-
- return rxlen ? (int)rxlen : -EIO;
-
-}
+} /* _dhdpcie_download_firmware */
#define CONSOLE_LINE_MAX 192
n--;
line[n] = 0;
printf("CONSOLE: %s\n", line);
+
}
}
break2:
return BCME_OK;
-}
+} /* dhdpcie_bus_readconsole */
+#endif /* DHD_DEBUG */
static int
dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- if (DHD_NOCHECKDIED_ON())
+ if (DHD_NOCHECKDIED_ON()) {
return 0;
+ }
if (data == NULL) {
/*
goto done;
}
- if ((bcmerror = dhdpcie_readshared(bus)) < 0)
+ if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
goto done;
+ }
bcm_binit(&strbuf, data, size);
if (bus->pcie_sh->assert_exp_addr != 0) {
str[0] = '\0';
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
- bus->pcie_sh->assert_exp_addr,
- (uint8 *)str, maxstrlen)) < 0)
+ bus->pcie_sh->assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0) {
goto done;
+ }
str[maxstrlen - 1] = '\0';
bcm_bprintf(&strbuf, " expr \"%s\"", str);
if (bus->pcie_sh->assert_file_addr != 0) {
str[0] = '\0';
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
- bus->pcie_sh->assert_file_addr,
- (uint8 *)str, maxstrlen)) < 0)
+ bus->pcie_sh->assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0) {
goto done;
+ }
str[maxstrlen - 1] = '\0';
bcm_bprintf(&strbuf, " file \"%s\"", str);
if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
bus->dhd->dongle_trap_occured = TRUE;
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
- bus->pcie_sh->trap_addr,
- (uint8*)&tr, sizeof(trap_t))) < 0)
+ bus->pcie_sh->trap_addr, (uint8*)&tr, sizeof(trap_t))) < 0) {
goto done;
+ }
bcm_bprintf(&strbuf,
- "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
- "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
- "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ " lp 0x%x, rpc 0x%x"
+ "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
- (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
goto printbuf;
+ }
addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
- (uint8 *)&console_size, sizeof(console_size))) < 0)
+ (uint8 *)&console_size, sizeof(console_size))) < 0) {
goto printbuf;
+ }
addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
- (uint8 *)&console_index, sizeof(console_index))) < 0)
+ (uint8 *)&console_index, sizeof(console_index))) < 0) {
goto printbuf;
+ }
console_ptr = ltoh32(console_ptr);
console_size = ltoh32(console_size);
console_index = ltoh32(console_index);
if (console_size > CONSOLE_BUFFER_MAX ||
- !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
goto printbuf;
+ }
if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
- (uint8 *)console_buffer, console_size)) < 0)
+ (uint8 *)console_buffer, console_size)) < 0) {
goto printbuf;
+ }
for (i = 0, n = 0; i < console_size; i += n + 1) {
for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
* will truncate a lot of the printfs
*/
- if (dhd_msg_level & DHD_ERROR_VAL)
- printf("CONSOLE: %s\n", line);
+ printf("CONSOLE: %s\n", line);
}
}
}
printbuf:
if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
- DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+
+ /* wake up IOCTL wait event */
+ dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
+
+#if defined(DHD_FW_COREDUMP)
+ /* save core dump or write to a file */
+ if (bus->dhd->memdump_enabled) {
+ bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
+ dhdpcie_mem_dump(bus);
+ }
+#endif /* DHD_FW_COREDUMP */
+
+
}
done:
MFREE(bus->dhd->osh, console_buffer, console_size);
return bcmerror;
+} /* dhdpcie_checkdied */
+
+
+/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
+void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *databuf = buf;
+
+ if (bus == NULL) {
+ return;
+ }
+
+ start = bus->dongle_ram_base;
+ /* Get full mem size */
+ size = bus->ramsize;
+ /* Read mem content */
+ while (size)
+ {
+ read_size = MIN(MEMBLOCK, size);
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
+ return;
+ }
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+ bus->dhd->soc_ram = buf;
+ bus->dhd->soc_ram_length = bus->ramsize;
+ return;
}
-#endif /* DHD_DEBUG */
+#if defined(DHD_FW_COREDUMP)
+static int
+dhdpcie_mem_dump(dhd_bus_t *bus)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start = bus->dongle_ram_base; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *buf = NULL, *databuf = NULL;
+
+#ifdef EXYNOS_PCIE_DEBUG
+ exynos_pcie_register_dump(1);
+#endif /* EXYNOS_PCIE_DEBUG */
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down so skip\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+ /* Get full mem size */
+ size = bus->ramsize;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ buf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_MEMDUMP_BUF, size);
+ bzero(buf, size);
+#else
+ buf = MALLOC(bus->dhd->osh, size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ if (!buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+ return BCME_ERROR;
+ }
+
+ /* Read mem content */
+ DHD_TRACE_HW4(("Dump dongle memory"));
+ databuf = buf;
+ while (size)
+ {
+ read_size = MIN(MEMBLOCK, size);
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
+ {
+ DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+ if (buf) {
+ MFREE(bus->dhd->osh, buf, size);
+ }
+ return BCME_ERROR;
+ }
+ DHD_TRACE(("."));
+
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+
+ DHD_TRACE_HW4(("%s FUNC: Copy fw image to the embedded buffer \n", __FUNCTION__));
+
+ dhd_save_fwdump(bus->dhd, buf, bus->ramsize);
+ dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
+
+ return ret;
+}
+
+int
+dhd_bus_mem_dump(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+
+ if (bus->suspended) {
+ DHD_ERROR(("%s: Bus is suspend so skip\n", __FUNCTION__));
+ return 0;
+ }
+
+ return dhdpcie_mem_dump(bus);
+}
+#endif /* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t *bus)
+{
+#if defined(DHD_FW_COREDUMP)
+ return (dhdpcie_mem_dump(bus));
+#else
+ return -1;
+#endif
+}
+
/**
* Transfers bytes from host to dongle using pio mode.
* Parameter 'address' is a backplane address.
static int
dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
{
- int bcmerror = 0;
uint dsize;
int detect_endian_flag = 0x01;
bool little_endian;
-#ifdef CONFIG_ARCH_MSM8994
- bool is_64bit_unaligned;
-#endif
+
+ if (write && bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
/* Detect endianness. */
little_endian = *(char *)&detect_endian_flag;
-#ifdef CONFIG_ARCH_MSM8994
- /* Check 64bit aligned or not. */
- is_64bit_unaligned = (address & 0x7);
-#endif
/* In remap mode, adjust address beyond socram and redirect
* to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
* is not backplane accessible
dsize = sizeof(uint64);
/* Do the transfer(s) */
+ DHD_INFO(("%s: %s %d bytes in window 0x%08x\n",
+ __FUNCTION__, (write ? "write" : "read"), size, address));
if (write) {
while (size) {
- if (size >= sizeof(uint64) && little_endian) {
-#ifdef CONFIG_ARCH_MSM8994
- if (is_64bit_unaligned) {
- DHD_INFO(("%s: write unaligned %lx\n",
- __FUNCTION__, address));
- dhdpcie_bus_wtcm32(bus, address, *((uint32 *)data));
- data += 4;
- size -= 4;
- address += 4;
- is_64bit_unaligned = (address & 0x7);
- continue;
- }
- else
-#endif
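+			/* Use a 64-bit TCM write only for little-endian hosts and, on
+			 * 64-bit kernels, only when the backplane address is 8-byte
+			 * aligned; all other cases fall back to byte accesses below.
+			 */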
+ if (size >= sizeof(uint64) && little_endian &&
+#ifdef CONFIG_64BIT
+ !(address % 8) &&
+#endif /* CONFIG_64BIT */
+ 1) {
dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
} else {
dsize = sizeof(uint8);
}
} else {
while (size) {
- if (size >= sizeof(uint64) && little_endian) {
-#ifdef CONFIG_ARCH_MSM8994
- if (is_64bit_unaligned) {
- DHD_INFO(("%s: read unaligned %lx\n",
- __FUNCTION__, address));
- *(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
- data += 4;
- size -= 4;
- address += 4;
- is_64bit_unaligned = (address & 0x7);
- continue;
- }
- else
-#endif
+ if (size >= sizeof(uint64) && little_endian &&
+#ifdef CONFIG_64BIT
+ !(address % 8) &&
+#endif /* CONFIG_64BIT */
+ 1) {
*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
} else {
dsize = sizeof(uint8);
}
}
}
- return bcmerror;
-}
+ return BCME_OK;
+} /* dhdpcie_bus_membytes */
+/**
+ * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
+ * to the (non flow controlled) flow ring.
+ */
int BCMFASTPATH
dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
{
flow_ring_node_t *flow_ring_node;
int ret = BCME_OK;
-
+#ifdef DHD_LOSSLESS_ROAMING
+ dhd_pub_t *dhdp = bus->dhd;
+#endif
DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+
/* ASSERT on flow_id */
if (flow_id >= bus->max_sub_queues) {
DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+#ifdef DHD_LOSSLESS_ROAMING
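+	/* Lossless roaming: only TIDs enabled in dequeue_prec_map may be scheduled;
+	 * packets for other TIDs are left in their flow queue for now.
+	 */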
+ if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
+ DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
+ __FUNCTION__, flow_ring_node->flow_info.tid));
+ return BCME_OK;
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
{
unsigned long flags;
void *txp = NULL;
flow_queue_t *queue;
+#ifdef DHD_LOSSLESS_ROAMING
+ struct ether_header *eh;
+ uint8 *pktdata;
+#endif /* DHD_LOSSLESS_ROAMING */
queue = &flow_ring_node->queue; /* queue associated with flow ring */
while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTORPHAN(txp);
+			/*
+			 * Modifying the packet length caused P2P cert failures.
+			 * Specifically, on test cases where a 52-byte packet was
+			 * injected, the sniffer capture showed 62 bytes, which made
+			 * the cert tests fail. So the change below is made
+			 * Router-specific only.
+			 */
+
#ifdef DHDTCPACK_SUPPRESS
- if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
- dhd_tcpack_check_xmit(bus->dhd, txp);
- }
+ if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
+ ret = dhd_tcpack_check_xmit(bus->dhd, txp);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
+ __FUNCTION__));
+ }
+ }
#endif /* DHDTCPACK_SUPPRESS */
- /* Attempt to transfer packet over flow ring */
+#ifdef DHD_LOSSLESS_ROAMING
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
+ eh = (struct ether_header *) pktdata;
+ if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ uint8 prio = (uint8)PKTPRIO(txp);
+
+ /* Restore to original priority for 802.1X packet */
+ if (prio == PRIO_8021D_NC) {
+ PKTSETPRIO(txp, PRIO_8021D_BE);
+ }
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+ /* Attempt to transfer packet over flow ring */
ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
if (ret != BCME_OK) { /* may not have resources in flow ring */
			DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
}
return ret;
-}
+} /* dhd_bus_schedule_queue */
-#ifndef PCIE_TX_DEFERRAL
-/* Send a data frame to the dongle. Callee disposes of txp. */
+/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
int BCMFASTPATH
dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
{
- unsigned long flags;
- int ret = BCME_OK;
- void *txp_pend = NULL;
- if (!bus->txmode_push) {
- uint16 flowid;
- flow_queue_t *queue;
- flow_ring_node_t *flow_ring_node;
- if (!bus->dhd->flowid_allocator) {
- DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
- goto toss;
- }
-
- flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
-
- flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
-
- DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
- __FUNCTION__, flowid, flow_ring_node->status,
- flow_ring_node->active));
-
- if ((flowid >= bus->dhd->num_flow_rings) ||
- (!flow_ring_node->active) ||
- (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
- DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
- __FUNCTION__, flowid, flow_ring_node->status,
- flow_ring_node->active));
- ret = BCME_ERROR;
- goto toss;
- }
-
- queue = &flow_ring_node->queue; /* queue associated with flow ring */
-
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
-
- if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
- txp_pend = txp;
-
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
-
- if (flow_ring_node->status) {
- DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
- __FUNCTION__, flowid, flow_ring_node->status,
- flow_ring_node->active));
- if (txp_pend) {
- txp = txp_pend;
- goto toss;
- }
- return BCME_OK;
- }
- ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
-
- /* If we have anything pending, try to push into q */
- if (txp_pend) {
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
-
- if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- txp = txp_pend;
- goto toss;
- }
-
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- }
-
- return ret;
-
- } else { /* bus->txmode_push */
- return dhd_prot_txdata(bus->dhd, txp, ifidx);
- }
-
-toss:
- DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
- PKTCFREE(bus->dhd->osh, txp, TRUE);
- return ret;
-}
-#else /* PCIE_TX_DEFERRAL */
-int BCMFASTPATH
-dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
-{
- unsigned long flags;
- int ret = BCME_OK;
uint16 flowid;
flow_queue_t *queue;
flow_ring_node_t *flow_ring_node;
- uint8 *pktdata = (uint8 *)PKTDATA(bus->dhd->osh, txp);
- struct ether_header *eh = (struct ether_header *)pktdata;
+ unsigned long flags;
+ int ret = BCME_OK;
+ void *txp_pend = NULL;
if (!bus->dhd->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not inited yet\n", __FUNCTION__));
goto toss;
}
- flowid = dhd_flowid_find(bus->dhd, ifidx,
- bus->dhd->flow_prio_map[(PKTPRIO(txp))],
- eh->ether_shost, eh->ether_dhost);
- if (flowid == FLOWID_INVALID) {
- DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
- skb_queue_tail(&bus->orphan_list, txp);
- queue_work(bus->tx_wq, &bus->create_flow_work);
- return BCME_OK;
- }
+ flowid = DHD_PKT_GET_FLOWID(txp);
- DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), flowid);
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- queue = &flow_ring_node->queue; /* queue associated with flow ring */
- DHD_DATA(("%s: pkt flowid %d, status %d active %d\n",
+ DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
__FUNCTION__, flowid, flow_ring_node->status,
flow_ring_node->active));
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
if ((flowid >= bus->dhd->num_flow_rings) ||
(!flow_ring_node->active) ||
- (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+ (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
+ (flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING)) {
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- DHD_DATA(("%s: Dropping pkt flowid %d, status %d active %d\n",
+ DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
__FUNCTION__, flowid, flow_ring_node->status,
flow_ring_node->active));
ret = BCME_ERROR;
goto toss;
}
- if (flow_ring_node->status == FLOW_RING_STATUS_PENDING) {
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- DHD_PKTTAG_SET_FLOWID((dhd_pkttag_fr_t *)PKTTAG(txp), ifidx);
- skb_queue_tail(&bus->orphan_list, txp);
- queue_work(bus->tx_wq, &bus->create_flow_work);
- return BCME_OK;
- }
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
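+	/* Try to enqueue the packet; if the queue is full, remember it in txp_pend
+	 * and retry after the queue has been drained into the flow ring below.
+	 */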
if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK) {
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- goto toss;
+ txp_pend = txp;
}
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
+ if (flow_ring_node->status) {
+ DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ if (txp_pend) {
+ txp = txp_pend;
+ goto toss;
+ }
+ return BCME_OK;
+ }
+ ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
+
+ /* If we have anything pending, try to push into q */
+ if (txp_pend) {
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ txp = txp_pend;
+ goto toss;
+ }
+
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ }
return ret;
toss:
- DHD_DATA(("%s: Toss %d\n", __FUNCTION__, ret));
+ DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
PKTCFREE(bus->dhd->osh, txp, TRUE);
return ret;
-}
-#endif /* !PCIE_TX_DEFERRAL */
+} /* dhd_bus_txdata */
void
bus->bus_flowctrl = TRUE;
}
-void
-dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
- uint32 resp_len)
-{
- bus->rxlen = retlen;
- bus->ioct_resp.cmn_hdr.request_id = pkt_id;
- bus->ioct_resp.compl_hdr.status = status;
- bus->ioct_resp.resp_len = (uint16)resp_len;
-}
-
#if defined(DHD_DEBUG)
/* Device console input function */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
- /* generate an interurpt to dongle to indicate that it needs to process cons command */
+ /* generate an interrupt to dongle to indicate that it needs to process cons command */
dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
done:
return rv;
-}
+} /* dhd_bus_console_in */
#endif /* defined(DHD_DEBUG) */
-/* Process rx frame , Send up the layer to netif */
+/**
+ * Called on frame reception; the frame was received from the dongle on interface 'ifidx' and is
+ * contained in 'pkt'. Processes the rx frame and forwards it up to netif.
+ */
void BCMFASTPATH
dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
{
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
-#ifdef CONFIG_ARCH_MSM8994
-static ulong dhd_bus_cmn_check_offset(dhd_bus_t *bus, ulong offset)
-{
- uint new_bar1_wbase = 0;
- ulong address = 0;
-
- new_bar1_wbase = (uint)offset & bus->bar1_win_mask;
- if (bus->bar1_win_base != new_bar1_wbase) {
- bus->bar1_win_base = new_bar1_wbase;
- dhdpcie_bus_cfg_set_bar1_win(bus, bus->bar1_win_base);
- DHD_ERROR(("%s: offset=%lx, switch bar1_win_base to %x\n",
- __FUNCTION__, offset, bus->bar1_win_base));
- }
-
- address = offset - bus->bar1_win_base;
-
- return address;
-}
-#else
-#define dhd_bus_cmn_check_offset(x, y) y
-#endif /* CONFIG_ARCH_MSM8994 */
-
/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
- *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint8)data;
+ *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
}
uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
volatile uint8 data;
-#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh,
- (volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
-#else
- data = *(volatile uint8 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
-#endif
+
+ data = *(volatile uint8 *)(bus->tcm + offset);
+
return data;
}
void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
- *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint32)data;
+ *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
}
void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
- *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint16)data;
+ *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
}
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
- *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)) = (uint64)data;
+ *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
}
uint16
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
volatile uint16 data;
-#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh,
- (volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
-#else
- data = *(volatile uint16 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
-#endif
+
+ data = *(volatile uint16 *)(bus->tcm + offset);
+
return data;
}
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
volatile uint32 data;
-#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh,
- (volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
-#else
- data = *(volatile uint32 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
-#endif
+
+ data = *(volatile uint32 *)(bus->tcm + offset);
+
return data;
}
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
volatile uint64 data;
-#ifdef BCM47XX_ACP_WAR
- data = R_REG(bus->dhd->osh,
- (volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset)));
-#else
- data = *(volatile uint64 *)(bus->tcm + dhd_bus_cmn_check_offset(bus, offset));
-#endif
+
+ data = *(volatile uint64 *)(bus->tcm + offset);
+
return data;
}
+/** A snippet of dongle memory is shared between host and dongle */
void
-dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
{
uint64 long_data;
ulong tcm_offset;
- pciedev_shared_t *sh;
- pciedev_shared_t *shmem = NULL;
- sh = (pciedev_shared_t*)bus->shared_addr;
+ DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
- DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
switch (type) {
- case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
+ case D2H_DMA_SCRATCH_BUF:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
long_data = HTOL64(*(uint64 *)data);
tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
prhex(__FUNCTION__, data, len);
break;
+ }
- case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
+ case D2H_DMA_SCRATCH_BUF_LEN:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
prhex(__FUNCTION__, data, len);
break;
+ }
- case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
- /* ring_info_ptr stored in pcie_sh */
- shmem = (pciedev_shared_t *)bus->pcie_sh;
+ case H2D_DMA_INDX_WR_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
tcm_offset = (ulong)shmem->rings_info_ptr;
dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
prhex(__FUNCTION__, data, len);
break;
+ }
- case HOST_TO_DNGL_DMA_READINDX_BUFFER:
- /* ring_info_ptr stored in pcie_sh */
- shmem = (pciedev_shared_t *)bus->pcie_sh;
-
+ case H2D_DMA_INDX_RD_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
tcm_offset = (ulong)shmem->rings_info_ptr;
tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
prhex(__FUNCTION__, data, len);
break;
+ }
- case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
- /* ring_info_ptr stored in pcie_sh */
- shmem = (pciedev_shared_t *)bus->pcie_sh;
-
+ case D2H_DMA_INDX_WR_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
tcm_offset = (ulong)shmem->rings_info_ptr;
tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
prhex(__FUNCTION__, data, len);
break;
+ }
- case DNGL_TO_HOST_DMA_READINDX_BUFFER:
- /* ring_info_ptr stored in pcie_sh */
- shmem = (pciedev_shared_t *)bus->pcie_sh;
-
+ case D2H_DMA_INDX_RD_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
tcm_offset = (ulong)shmem->rings_info_ptr;
tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
prhex(__FUNCTION__, data, len);
break;
+ }
- case RING_LEN_ITEMS :
+ case RING_ITEM_LEN:
tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
tcm_offset += OFFSETOF(ring_mem_t, len_items);
dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
- case RING_MAX_ITEM :
+ case RING_MAX_ITEMS:
tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
tcm_offset += OFFSETOF(ring_mem_t, max_item);
dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
- case RING_BUF_ADDR :
+ case RING_BUF_ADDR:
long_data = HTOL64(*(uint64 *)data);
tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
tcm_offset += OFFSETOF(ring_mem_t, base_addr);
prhex(__FUNCTION__, data, len);
break;
- case RING_WRITE_PTR :
+ case RING_WR_UPD:
tcm_offset = bus->ring_sh[ringid].ring_state_w;
dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
- case RING_READ_PTR :
+
+ case RING_RD_UPD:
tcm_offset = bus->ring_sh[ringid].ring_state_r;
dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
- case DTOH_MB_DATA:
+ case D2H_MB_DATA:
dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
(uint32) HTOL32(*(uint32 *)data));
break;
- case HTOD_MB_DATA:
+ case H2D_MB_DATA:
dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
(uint32) HTOL32(*(uint32 *)data));
break;
+
default:
break;
}
-}
-
+} /* dhd_bus_cmn_writeshared */
+/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
{
- pciedev_shared_t *sh;
ulong tcm_offset;
- sh = (pciedev_shared_t*)bus->shared_addr;
-
switch (type) {
- case RING_WRITE_PTR :
+ case RING_WR_UPD:
tcm_offset = bus->ring_sh[ringid].ring_state_w;
*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
break;
- case RING_READ_PTR :
+ case RING_RD_UPD:
tcm_offset = bus->ring_sh[ringid].ring_state_r;
*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
break;
- case TOTAL_LFRAG_PACKET_CNT :
+ case TOTAL_LFRAG_PACKET_CNT:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
(ulong) &sh->total_lfrag_pkt_cnt));
break;
- case HTOD_MB_DATA:
+ }
+ case H2D_MB_DATA:
*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
break;
- case DTOH_MB_DATA:
+ case D2H_MB_DATA:
*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
break;
- case MAX_HOST_RXBUFS :
+ case MAX_HOST_RXBUFS:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
(ulong) &sh->max_host_rxbufs));
break;
+ }
default :
break;
}
exit:
return bcmerror;
-}
+} /* dhd_bus_iovar_op */
#ifdef BCM_BUZZZ
#include <bcm_buzzz.h>
-int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+int
+dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
+ const int num_counters)
{
int bytes = 0;
- uint32 ctr, curr[3], prev[3], delta[3];
+ uint32 ctr;
+ uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
+ uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
/* Compute elapsed counter values per counter event type */
- for (ctr = 0U; ctr < 3; ctr++) {
+ for (ctr = 0U; ctr < num_counters; ctr++) {
prev[ctr] = core[ctr];
curr[ctr] = *log++;
core[ctr] = curr[ctr]; /* saved for next log */
else
delta[ctr] = (curr[ctr] - prev[ctr]);
- /* Adjust for instrumentation overhead */
- if (delta[ctr] >= ovhd[ctr])
- delta[ctr] -= ovhd[ctr];
- else
- delta[ctr] = 0;
-
bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
}
};
} cm3_cnts_t;
-int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+int
+dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
{
int bytes = 0;
delta = curr + (~0U - prev);
else
delta = (curr - prev);
- if (delta >= ovhd[0])
- delta -= ovhd[0];
- else
- delta = 0;
bytes += sprintf(p + bytes, "%12u ", delta);
cyccnt = delta;
delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
else
delta.u8[i] = (curr.u8[i] - prev.u8[i]);
- if (delta.u8[i] >= ovhd[i + 1])
- delta.u8[i] -= ovhd[i + 1];
- else
- delta.u8[i] = 0;
bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
}
cm3_cnts.u32 = delta.u32;
{ /* Extract the foldcnt from arg0 */
uint8 curr, prev, delta, max8 = ~0;
- buzzz_arg0_t arg0; arg0.u32 = *log;
+ bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
if (curr < prev)
delta = curr + (max8 - prev);
else
delta = (curr - prev);
- if (delta >= ovhd[5])
- delta -= ovhd[5];
- else
- delta = 0;
bytes += sprintf(p + bytes, "%4u ", delta);
foldcnt = delta;
}
return bytes;
}
-int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
+int
+dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
{
int bytes = 0;
- buzzz_arg0_t arg0;
- static uint8 * fmt[] = BUZZZ_FMT_STRINGS;
+ bcm_buzzz_arg0_t arg0;
+ static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
if (buzzz->counters == 6) {
- bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
+ bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
} else {
- bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
- log += 3; /* (3 x 32bit) CR4 */
+ bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
+ log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
}
/* Dump the logged arguments using the registered formats */
bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
break;
}
+ case 2:
+ {
+ uint32 arg1, arg2;
+ arg1 = *log++; arg2 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
+ break;
+ }
+ case 3:
+ {
+ uint32 arg1, arg2, arg3;
+ arg1 = *log++; arg2 = *log++; arg3 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
+ break;
+ }
+ case 4:
+ {
+ uint32 arg1, arg2, arg3, arg4;
+ arg1 = *log++; arg2 = *log++;
+ arg3 = *log++; arg4 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
+ break;
+ }
default:
			printf("%s: Maximum of 4 arguments supported\n", __FUNCTION__);
break;
}
+
bytes += sprintf(p + bytes, "\n");
return bytes;
}
-void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
+void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
{
int i;
- uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
+ uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
void * log;
- for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
+ for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
core[i] = 0;
+ }
log_sz = buzzz_p->log_sz;
if (buzzz_p->wrap == TRUE) {
part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
- total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+ total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
} else {
part2 = 0U;
total = buzzz_p->count;
}
if (total == 0U) {
- printf("%s: buzzz_dump total<%u> done\n", __FUNCTION__, total);
+ printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
return;
} else {
- printf("%s: buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
+ printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
total, part2, part1);
}
log = (void*)((size_t)log + buzzz_p->log_sz);
}
- printf("%s: buzzz_dump done.\n", __FUNCTION__);
+ printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
}
int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
{
- buzzz_t * buzzz_p = NULL;
+ bcm_buzzz_t * buzzz_p = NULL;
void * buffer_p = NULL;
char * page_p = NULL;
pciedev_shared_t *sh;
printf("%s: Page memory allocation failure\n", __FUNCTION__);
goto done;
}
- if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
- printf("%s: Buzzz memory allocation failure\n", __FUNCTION__);
+ if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
+ printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
goto done;
}
DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */
+
dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
- (uint8 *)buzzz_p, sizeof(buzzz_t));
+ (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
+
+ printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
+ "count<%u> status<%u> wrap<%u>\n"
+ "cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
+ (int)sh->buzzz,
+ (int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
+ buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
+ buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
+ buzzz_p->buffer_sz, buzzz_p->log_sz);
+
if (buzzz_p->count == 0) {
printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
goto done;
}
- if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
- printf("%s: Counters<%u> mismatch\n", __FUNCTION__, buzzz_p->counters);
- goto done;
- }
+
/* Allocate memory for trace buffer and format strings */
buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
if (buffer_p == NULL) {
printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
goto done;
}
- /* Fetch the trace and format strings */
+
+ /* Fetch the trace. format strings are exported via bcm_buzzz.h */
dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
(uint8 *)buffer_p, buzzz_p->buffer_sz);
+
/* Process and display the trace using formatted output */
- printf("%s: <#cycle> <#instruction> <#ctr3> <event information>\n", __FUNCTION__);
+
+ {
+ int ctr;
+ for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
+ printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
+ }
+ printf("<code execution point>\n");
+ }
+
dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
- printf("%s: ----- End of dongle BUZZZ Trace -----\n\n", __FUNCTION__);
+
+ printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
+
MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
}
done:
if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
- if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
+ if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
return BCME_OK;
}
-static int
-pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
- bool slave_bypass)
-{
- uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
- uint32 reg32;
-
- pcie2_mdiosetblock(bus, physmedia);
-
- /* enable mdio access to SERDES */
- mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
- mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
-
- if (slave_bypass)
- mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
-
- if (!write)
- mdio_ctrl |= MDIOCTL2_READ;
-
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
-
- if (write) {
- reg32 = PCIE2_MDIO_WR_DATA;
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
- *val | MDIODATA2_DONE);
- }
- else
- reg32 = PCIE2_MDIO_RD_DATA;
-
- /* retry till the transaction is complete */
- while (i < pcie_serdes_spinwait) {
- uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
- if (!(done_val & MDIODATA2_DONE)) {
- if (!write) {
- *val = si_corereg(bus->sih, bus->sih->buscoreidx,
- PCIE2_MDIO_RD_DATA, 0, 0);
- *val = *val & MDIODATA2_MASK;
- }
- return 0;
- }
- OSL_DELAY(1000);
- i++;
- }
- return -1;
-}
-
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
dhd_bus_t *bus = dhdp->bus;
int bcmerror = 0;
+ unsigned long flags;
#ifdef CONFIG_ARCH_MSM
int retry = POWERUP_MAX_RETRY;
#endif /* CONFIG_ARCH_MSM */
if (flag == TRUE) { /* Turn off WLAN */
/* Removing Power */
DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+
bus->dhd->up = FALSE;
+
if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
if (bus->intr) {
dhdpcie_bus_intr_disable(bus);
dhdpcie_free_irq(bus);
#endif /* BCMPCIE_OOB_HOST_WAKE */
dhd_os_wd_timer(dhdp, 0);
dhd_bus_stop(bus, TRUE);
- dhd_prot_clear(dhdp);
+ dhd_prot_reset(dhdp);
dhd_clear(dhdp);
dhd_bus_release_dongle(bus);
dhdpcie_bus_free_resource(bus);
goto done;
}
#endif /* CONFIG_ARCH_MSM */
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
} else {
if (bus->intr) {
- dhdpcie_bus_intr_disable(bus);
dhdpcie_free_irq(bus);
}
#ifdef BCMPCIE_OOB_HOST_WAKE
dhd_bus_oob_intr_set(bus->dhd, FALSE);
dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
- dhd_prot_clear(dhdp);
+ dhd_prot_reset(dhdp);
dhd_clear(dhdp);
dhd_bus_release_dongle(bus);
dhdpcie_bus_free_resource(bus);
DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
__FUNCTION__));
break;
- }
- else
+ } else {
OSL_SLEEP(10);
+ }
}
if (bcmerror && !retry) {
goto done;
}
#endif /* CONFIG_ARCH_MSM */
+ bus->is_linkdown = 0;
+ bus->pci_d3hot_done = 0;
bcmerror = dhdpcie_bus_enable_device(bus);
if (bcmerror) {
DHD_ERROR(("%s: host configuration restore failed: %d\n",
}
}
}
+
done:
- if (bcmerror)
+ if (bcmerror) {
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
return bcmerror;
}
+static int
+pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
+ bool slave_bypass)
+{
+ uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
+ uint32 reg32;
+
+ pcie2_mdiosetblock(bus, physmedia);
+
+ /* enable mdio access to SERDES */
+ mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
+ mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
+
+ if (slave_bypass)
+ mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
+
+ if (!write)
+ mdio_ctrl |= MDIOCTL2_READ;
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
+
+ if (write) {
+ reg32 = PCIE2_MDIO_WR_DATA;
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
+ *val | MDIODATA2_DONE);
+ } else
+ reg32 = PCIE2_MDIO_RD_DATA;
+
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
+ if (!(done_val & MDIODATA2_DONE)) {
+ if (!write) {
+ *val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ PCIE2_MDIO_RD_DATA, 0, 0);
+ *val = *val & MDIODATA2_MASK;
+ }
+ return 0;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+ return -1;
+}
+
static int
dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
int_val);
int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
OFFSETOF(sbpcieregs_t, configdata), 0, 0);
- bcopy(&int_val, arg, val_size);
+ bcopy(&int_val, arg, sizeof(int_val));
break;
+ case IOV_SVAL(IOV_PCIECOREREG):
+ si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+ break;
case IOV_GVAL(IOV_BAR0_SECWIN_REG):
{
- uint32 cur_base, base;
- uchar *bar0;
- volatile uint32 *offset;
- /* set the bar0 secondary window to this */
- /* write the register value */
- cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
- base = int_val & 0xFFFFF000;
- dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
- bar0 = (uchar *)bus->regs;
- offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
- int_val = *offset;
- bcopy(&int_val, arg, val_size);
- dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
- }
+ sdreg_t sdreg;
+ uint32 addr, size;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+
+ if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ bcopy(&int_val, arg, sizeof(int32));
break;
+ }
+
case IOV_SVAL(IOV_BAR0_SECWIN_REG):
{
- uint32 cur_base, base;
- uchar *bar0;
- volatile uint32 *offset;
- /* set the bar0 secondary window to this */
- /* write the register value */
- cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
- base = int_val & 0xFFFFF000;
- dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
- bar0 = (uchar *)bus->regs;
- offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
- *offset = int_val2;
- bcopy(&int_val2, arg, val_size);
- dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
- }
- break;
+ sdreg_t sdreg;
+ uint32 addr, size;
- case IOV_SVAL(IOV_PCIECOREREG):
- si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ size = sdreg.func;
+ if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
break;
+ }
+
case IOV_GVAL(IOV_SBREG):
{
sdreg_t sdreg;
- uint32 addr, coreidx;
+ uint32 addr, size;
bcopy(params, &sdreg, sizeof(sdreg));
- addr = sdreg.offset;
- coreidx = (addr & 0xF000) >> 12;
+ addr = sdreg.offset | SI_ENUM_BASE;
+ size = sdreg.func;
- int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
+ if (si_backplane_access(bus->sih, addr, size, &int_val, TRUE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
bcopy(&int_val, arg, sizeof(int32));
break;
}
case IOV_SVAL(IOV_SBREG):
{
sdreg_t sdreg;
- uint32 addr, coreidx;
+ uint32 addr, size;
bcopy(params, &sdreg, sizeof(sdreg));
- addr = sdreg.offset;
- coreidx = (addr & 0xF000) >> 12;
-
- si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
-
+ addr = sdreg.offset | SI_ENUM_BASE;
+ size = sdreg.func;
+ if (si_backplane_access(bus->sih, addr, size, &sdreg.value, FALSE) != BCME_OK) {
+ DHD_ERROR(("Invalid size/addr combination \n"));
+ bcmerror = BCME_ERROR;
+ }
break;
}
bcmerror = BCME_ERROR;
break;
}
+
if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
bcopy(&val, arg, sizeof(int32));
- }
- else {
+ } else {
DHD_ERROR(("%s: pcie2_mdioop failed.\n", __FUNCTION__));
bcmerror = BCME_ERROR;
}
break;
}
+
case IOV_SVAL(IOV_PCIESERDESREG):
if (!PCIE_GEN2(bus->sih)) {
DHD_ERROR(("%s: supported only in pcie gen2\n", __FUNCTION__));
break;
case IOV_GVAL(IOV_PCIECOREREG):
int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
- bcopy(&int_val, arg, val_size);
+ bcopy(&int_val, arg, sizeof(int_val));
break;
case IOV_SVAL(IOV_PCIECFGREG):
case IOV_GVAL(IOV_PCIECFGREG):
int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
- bcopy(&int_val, arg, val_size);
+ bcopy(&int_val, arg, sizeof(int_val));
break;
case IOV_SVAL(IOV_PCIE_LPBK):
(set ? "write" : "read"), size, address, dsize));
/* check if CR4 */
- if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
/* if address is 0, store the reset instruction to be written in 0 */
if (set && address == bus->dongle_ram_base) {
bus->resetinstr = *(((uint32*)params) + 2);
}
#ifdef BCM_BUZZZ
+ /* Dump dongle side buzzz trace to console */
case IOV_GVAL(IOV_BUZZZ_DUMP):
bcmerror = dhd_buzzz_dump_dngl(bus);
break;
d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
int_val = d2h_support | (h2d_support << 1);
- bcopy(&int_val, arg, val_size);
+ bcopy(&int_val, arg, sizeof(int_val));
break;
}
case IOV_SVAL(IOV_DMA_RINGINDICES):
}
break;
+ case IOV_GVAL(IOV_METADATA_DBG):
+ int_val = dhd_prot_metadata_dbg_get(bus->dhd);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_METADATA_DBG):
+ dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
+ break;
+
case IOV_GVAL(IOV_RX_METADATALEN):
int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
bcopy(&int_val, arg, val_size);
break;
- case IOV_SVAL(IOV_RX_METADATALEN):
+ case IOV_SVAL(IOV_RX_METADATALEN):
if (int_val > 64) {
bcmerror = BCME_BUFTOOLONG;
break;
dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
break;
+ case IOV_SVAL(IOV_DEVRESET):
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
+ break;
+
case IOV_GVAL(IOV_FLOW_PRIO_MAP):
int_val = bus->dhd->flow_prio_map_type;
bcopy(&int_val, arg, val_size);
bcopy(&int_val, arg, val_size);
break;
+#ifdef DHD_PCIE_RUNTIMEPM
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ }
+ break;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
case IOV_GVAL(IOV_TXBOUND):
int_val = (int32)dhd_txbound;
bcopy(&int_val, arg, val_size);
dhd_rxbound = (uint)int_val;
break;
+ case IOV_SVAL(IOV_HANGREPORT):
+ bus->dhd->hang_report = bool_val;
+ DHD_ERROR(("%s: Set hang_report as %d\n",
+ __FUNCTION__, bus->dhd->hang_report));
+ break;
+
+ case IOV_GVAL(IOV_HANGREPORT):
+ int_val = (int32)bus->dhd->hang_report;
+ bcopy(&int_val, arg, val_size);
+ break;
+
default:
bcmerror = BCME_UNSUPPORTED;
break;
exit:
return bcmerror;
-}
+} /* dhdpcie_bus_doiovar */
-/* Transfers bytes from host to dongle using pio mode */
+/** Transfers bytes from host to dongle using pio mode */
static int
dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
{
return 0;
}
-void
-dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
-{
- struct dhd_bus *bus = dhdp->bus;
- if (bus) {
- dhdpcie_bus_suspend(bus, state);
- }
-}
-
int
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
{
-
int timeleft;
- bool pending;
+ unsigned long flags;
int rc = 0;
if (bus->dhd == NULL) {
DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
return BCME_ERROR;
}
+ DHD_GENERAL_LOCK(bus->dhd, flags);
if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s: not in a ready state; LPBK is not inited\n", __FUNCTION__));
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
return BCME_ERROR;
}
- if (bus->dhd->dongle_reset)
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ if (bus->dhd->dongle_reset) {
+ DHD_ERROR(("Dongle is in reset state.\n"));
return -EIO;
+ }
- if (bus->suspended == state) /* Set to same state */
+ if (bus->suspended == state) { /* Set to same state */
+		DHD_ERROR(("Bus is already in the requested state.\n"));
return BCME_OK;
+ }
if (state) {
+ int idle_retry = 0;
+ int active;
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down, state=%d\n",
+ __FUNCTION__, state));
+ return BCME_ERROR;
+ }
+
+ /* Suspend */
+ DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
bus->wait_for_d3_ack = 0;
bus->suspended = TRUE;
+
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ /* stop all interface network queue. */
+ dhd_bus_stop_queue(bus);
bus->dhd->busstate = DHD_BUS_SUSPEND;
+ if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX) {
+			DHD_ERROR(("Tx request still in progress, aborting suspend\n"));
+ bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ bus->suspended = FALSE;
+ return -EBUSY;
+ }
+
+ bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SUSPEND;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
- dhd_os_set_ioctl_resp_timeout(DEFAULT_IOCTL_RESP_TIMEOUT);
+ dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
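+		/* Ask the dongle to enter D3; its D3 ACK mailbox reply is awaited below */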
dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
+ timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+
+ {
+ uint32 d2h_mb_data = 0;
+ uint32 zero = 0;
+
+ /* If wait_for_d3_ack was not updated because D2H MB was not received */
+ if (bus->wait_for_d3_ack == 0) {
+ /* Read the Mb data to see if the Dongle has actually sent D3 ACK */
+ dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+ if (d2h_mb_data & D2H_DEV_D3_ACK) {
+ DHD_ERROR(("*** D3 WAR for missing interrupt ***\r\n"));
+ /* Clear the MB Data */
+ dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32),
+ D2H_MB_DATA, 0);
+
+ /* Consider that D3 ACK is received */
+ bus->wait_for_d3_ack = 1;
+ bus->d3_ack_war_cnt++;
+
+ } /* d2h_mb_data & D2H_DEV_D3_ACK */
+ } /* bus->wait_for_d3_ack was 0 */
+ }
+
+		/* Allow threads that were pre-empted while holding wakelocks to complete */
+ while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
+ (idle_retry < MAX_WKLK_IDLE_CHECK)) {
+ msleep(1);
+ idle_retry++;
+ }
+
if (bus->wait_for_d3_ack) {
+ DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
/* Got D3 Ack. Suspend the bus */
- if (dhd_os_check_wakelock_all(bus->dhd)) {
- DHD_ERROR(("%s: Suspend failed because of wakelock\n", __FUNCTION__));
- bus->dev->current_state = PCI_D3hot;
- pci_set_master(bus->dev);
- rc = pci_set_power_state(bus->dev, PCI_D0);
- if (rc) {
- DHD_ERROR(("%s: pci_set_power_state failed:"
- " current_state[%d], ret[%d]\n",
- __FUNCTION__, bus->dev->current_state, rc));
- }
+ if (active) {
+				DHD_ERROR(("%s(): Suspend failed because of wakelock; restoring "
+					"Dongle to D0\n", __FUNCTION__));
+
+				/*
+				 * The dongle still thinks it has to stay in D3 until it
+				 * gets a D0 Inform, but we are backing off from suspend,
+				 * so ensure the dongle is brought back to D0.
+				 *
+				 * Bringing the dongle back from the D3 Ack state to D0 is a
+				 * two-step process: the dongle expects the D0 Inform to be
+				 * signalled as a mailbox interrupt, so both the
+				 * D0_INFORM_IN_USE and D0_INFORM messages are sent below.
+				 */
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus,
+ (H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+
bus->suspended = FALSE;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
rc = BCME_ERROR;
} else {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
dhdpcie_bus_intr_disable(bus);
rc = dhdpcie_pci_suspend_resume(bus, state);
+ dhd_bus_set_device_wake(bus, FALSE);
}
+ bus->dhd->d3ackcnt_timeout = 0;
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhdpcie_oob_intr_set(bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
} else if (timeleft == 0) {
- DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
- bus->dev->current_state = PCI_D3hot;
- pci_set_master(bus->dev);
- rc = pci_set_power_state(bus->dev, PCI_D0);
- if (rc) {
- DHD_ERROR(("%s: pci_set_power_state failed:"
- " current_state[%d], ret[%d]\n",
- __FUNCTION__, bus->dev->current_state, rc));
+ bus->dhd->d3ackcnt_timeout++;
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
+ __FUNCTION__, bus->dhd->d3ackcnt_timeout));
+ dhd_prot_debug_info_print(bus->dhd);
+#ifdef DHD_FW_COREDUMP
+ if (bus->dhd->memdump_enabled) {
+ /* write core dump to file */
+ bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
+ dhdpcie_mem_dump(bus);
}
+#endif /* DHD_FW_COREDUMP */
bus->suspended = FALSE;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DATA;
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ if (bus->dhd->d3ackcnt_timeout >= MAX_CNTL_D3ACK_TIMEOUT) {
+ DHD_ERROR(("%s: Event HANG send up "
+ "due to PCIe linkdown\n", __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
+ }
rc = -ETIMEDOUT;
+
}
+
bus->wait_for_d3_ack = 1;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SUSPEND;
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
} else {
/* Resume */
-#ifdef BCMPCIE_OOB_HOST_WAKE
+#if defined(BCMPCIE_OOB_HOST_WAKE)
DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_RESUME;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
rc = dhdpcie_pci_suspend_resume(bus, state);
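+		/* If the dongle was left in D3 by a prior suspend, send D0 Inform and
+		 * re-assert DEVICE_WAKE so it resumes normal operation.
+		 */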
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ dhd_bus_set_device_wake(bus, TRUE);
+ }
bus->suspended = FALSE;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DATA;
+ bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_RESUME;
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) {
+ bus->bus_wake = 1;
+ OSL_SMP_WMB();
+ wake_up_interruptible(&bus->rpm_queue);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+ /* resume all interface network queue. */
+ dhd_bus_start_queue(bus);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
dhdpcie_bus_intr_enable(bus);
}
return rc;
}
-/* Transfers bytes from host to dongle and to host again using DMA */
+/** Transfers bytes from host to dongle and to host again using DMA */
static int
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
{
int bcmerror = 0;
uint32 *cr4_regs;
- if (!bus->sih)
+ if (!bus->sih) {
+ DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
return BCME_ERROR;
+ }
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
*/
cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
- !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
- if (cr4_regs == NULL) { /* no CR4 present on chip */
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ si_core_reset(bus->sih, 0, 0);
+		/* Clear the last 4 bytes of RAM; they will hold the shared area address */
+ dhdpcie_init_shared_addr(bus);
+ } else if (cr4_regs == NULL) { /* no CR4 present on chip */
si_core_disable(bus->sih, 0);
if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
si_core_reset(bus->sih, 0, 0);
-
/* Clear the top bit of memory */
if (bus->ramsize) {
uint32 zeros = 0;
*/
/* Halt ARM & remove reset */
si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
- if (bus->sih->chip == BCM43602_CHIP_ID) {
+ if (BCM43602_CHIP(bus->sih->chip)) {
W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
dhdpcie_init_shared_addr(bus);
}
} else {
- if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
+ /* write vars */
+ if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ /* write address 0 with reset instruction */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+ /* now remove reset and halt and continue to run CA7 */
+ } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
-
/* Enable remap before ARM reset but after vars.
* No backplane access in remap mode
*/
-
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
goto fail;
}
} else {
- if (bus->sih->chip == BCM43602_CHIP_ID) {
+ if (BCM43602_CHIP(bus->sih->chip)) {
/* Firmware crashes on SOCSRAM access when core is in reset */
if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
goto fail;
}
-
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
goto fail;
}
- /* write address 0 with reset instruction */
- bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
- (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
-
+ /* write address 0 with reset instruction */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
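+			/* Read back address 0 to verify that the reset vector was written */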
+ if (bcmerror == BCME_OK) {
+ uint32 tmp;
+
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
+ (uint8 *)&tmp, sizeof(tmp));
+
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+ __FUNCTION__, bus->resetinstr));
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+ __FUNCTION__, tmp));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+
/* now remove reset and halt and continue to run CR4 */
}
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
return bcmerror;
-}
+} /* dhdpcie_bus_download_state */
static int
dhdpcie_bus_write_vars(dhd_bus_t *bus)
bzero(vbuffer, varsize);
bcopy(bus->vars, vbuffer, bus->varsz);
/* Write the vars list */
+ DHD_INFO_HW4(("%s: tcm: %p varaddr: 0x%x varsize: %d\n",
+ __FUNCTION__, bus->tcm, varaddr, varsize));
bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
/* Implement read back and verify later */
DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
/* Write the length token to the last word */
+ DHD_INFO_HW4(("%s: tcm: %p phys_size: 0x%x varsizew: %x\n",
+ __FUNCTION__, bus->tcm, phys_size, varsizew));
bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
(uint8*)&varsizew, 4);
return bcmerror;
-}
+} /* dhdpcie_bus_write_vars */
int
dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
/* Copy the passed variables, which should include the terminating double-null */
bcopy(arg, bus->vars, bus->varsz);
+
+
err:
return bcmerror;
}
OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
}
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-/* Add bus dump output to a buffer */
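+/** Returns TRUE if the device reports PME support (PME_CAP_PM_STATES bits) in its PM capability */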
+bool
+dhdpcie_pme_cap(osl_t *osh)
+{
+ uint8 cap_ptr;
+ uint32 pme_cap;
+
+ cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
+
+ if (!cap_ptr) {
+ DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
+
+ DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
+
+ return ((pme_cap & PME_CAP_PM_STATES) != 0);
+}
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+
+void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ uint32 mbintstatus = 0;
+ uint32 d2h_mb_data = 0;
+
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+ mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+ bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
+ intstatus, intmask, mbintstatus);
+ bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
+ d2h_mb_data, dhd->bus->def_intmask);
+}
+
+/** Add bus dump output to a buffer */
void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
uint16 flowid;
+ int ix = 0;
flow_ring_node_t *flow_ring_node;
+ flow_info_t *flow_info;
+ char eabuf[ETHER_ADDR_STR_LEN];
+
+ if (dhdp->busstate != DHD_BUS_DATA)
+ return;
dhd_prot_print_info(dhdp, strbuf);
+ dhd_dump_intr_registers(dhdp, strbuf);
+ bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
+ dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
+ bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
+ bcm_bprintf(strbuf,
+ "%s %4s %2s %4s %17s %4s %4s %10s %4s %4s ",
+ "Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen",
+ "Overflows", "RD", "WR");
+ bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
+
for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
if (flow_ring_node->active) {
- bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d Qlen %d ",
- flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
- flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
- dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
+ flow_info = &flow_ring_node->flow_info;
+ bcm_bprintf(strbuf,
+ "%3d. %4d %2d %4d %17s %4d %4d %10u ", ix++,
+ flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
+ bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
+ DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
+ DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
+ DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
+ dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
+ "%4d %4d ");
+ bcm_bprintf(strbuf,
+ "%5s %6s %5s\n", "NA", "NA", "NA");
}
}
+ bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
+ bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
+ bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
+ bcm_bprintf(strbuf, "D3 Ack WAR cnt %d\n", dhdp->bus->d3_ack_war_cnt);
}
+/**
+ * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
+ * flow queue to their flow ring.
+ */
static void
dhd_update_txflowrings(dhd_pub_t *dhd)
{
+ unsigned long flags;
dll_t *item, *next;
flow_ring_node_t *flow_ring_node;
struct dhd_bus *bus = dhd->bus;
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
for (item = dll_head_p(&bus->const_flowring);
- !dll_end(&bus->const_flowring, item); item = next) {
- next = dll_next_p(item);
+ (!dhd_is_device_removed(dhd) && !dll_end(&bus->const_flowring, item));
+ item = next) {
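+		/* Stop walking the flow ring list once a HANG has been sent up */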
+ if (dhd->hang_was_sent) {
+ break;
+ }
+ next = dll_next_p(item);
flow_ring_node = dhd_constlist_to_flowring(item);
+
+ /* Ensure that flow_ring_node in the list is Not Null */
+ ASSERT(flow_ring_node != NULL);
+
+ /* Ensure that the flowring node has valid contents */
+ ASSERT(flow_ring_node->prot_info != NULL);
+
dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
}
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
}
-
-/* Mailbox ringbell Function */
+/** Mailbox ringbell Function */
static void
dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
{
return;
}
if (bus->db1_for_mb) {
- /* this is a pcie core register, not the config regsiter */
+ /* this is a pcie core register, not the config register */
DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
- }
- else {
+ } else {
DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
}
}
-/* doorbell ring Function */
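+/**
+ * Asserts or de-asserts the DEVICE_WAKE out-of-band GPIO towards the dongle
+ * (effective only in PCIE_OOB builds); the current state is cached in
+ * bus->device_wake_state to avoid redundant GPIO writes.
+ */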
+static void
+dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
+{
+ if (bus->device_wake_state != val)
+ {
+ DHD_INFO(("Set Device_Wake to %d\n", val));
+#ifdef PCIE_OOB
+ if (bus->oob_enabled)
+ {
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << DEVICE_WAKE);
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ }
+ }
+#endif /* PCIE_OOB */
+ bus->device_wake_state = val;
+ }
+}
+
+#ifdef PCIE_OOB
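+/** Sets or clears the BT_REG_ON bit on the out-of-band GPIO port */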
+void
+dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
+{
+	DHD_INFO(("Set BT_REG_ON to %d\n", val));
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
+ gpio_write_port(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
+ gpio_write_port(gpio_handle_val, gpio_port);
+ }
+}
+
+int
+dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
+{
+ int ret;
+ uint8 val;
+ ret = gpio_read_port(gpio_handle_val, &val);
+
+ if (ret < 0) {
+ DHD_ERROR(("gpio_read_port returns %d\n", ret));
+ return ret;
+ }
+
+ if (val & (1 << BIT_BT_REG_ON))
+ {
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void
+dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
+{
+ if (dhd_doorbell_timeout)
+ dhd_timeout_start(&bus->doorbell_timer,
+ (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
+ else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND))
+ dhd_bus_set_device_wake(bus, FALSE);
+}
+#endif /* PCIE_OOB */
+
+/** mailbox doorbell ring function */
void
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
{
}
}
-static void
-dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+void
+dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
{
+#ifdef PCIE_OOB
+ dhd_bus_set_device_wake(bus, TRUE);
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}
PCIH2D_MailBox);
if (bus->pcie_mb_intr_addr) {
bus->pcie_mb_intr_osh = si_osh(bus->sih);
- return dhd_bus_ringbell_fast;
+ return dhdpcie_bus_ringbell_fast;
}
}
return dhd_bus_ringbell;
bool BCMFASTPATH
dhd_bus_dpc(struct dhd_bus *bus)
{
- uint32 intstatus = 0;
- uint32 newstatus = 0;
bool resched = FALSE; /* Flag indicating resched wanted */
+ unsigned long flags;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+	/* Check only for DHD_BUS_DOWN, not DHD_BUS_DOWN_IN_PROGRESS: if an ioctl is
+	 * waiting for its response while rmmod runs in parallel, the bus state becomes
+	 * DHD_BUS_DOWN_IN_PROGRESS, and returning here in that case would leave the
+	 * ioctl response unhandled ("IOCTL Resumed On timeout").
+	 */
if (bus->dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
bus->intstatus = 0;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
return 0;
}
+ bus->dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
- intstatus = bus->intstatus;
-
- if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
- (bus->sih->buscorerev == 2)) {
- newstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
- /* Merge new bits with previous */
- intstatus |= newstatus;
- bus->intstatus = 0;
- if (intstatus & I_MB) {
- resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
- }
- } else {
- /* this is a PCIE core register..not a config register... */
- newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
- intstatus |= (newstatus & bus->def_intmask);
- si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, newstatus, newstatus);
- if (intstatus & bus->def_intmask) {
- resched = dhdpcie_bus_process_mailbox_intr(bus, intstatus);
- intstatus &= ~bus->def_intmask;
- }
- }
-
+ resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
if (!resched) {
- // terence 20150420: no need to enable interrupt if busstate is down
- if (bus->dhd->busstate) {
+ bus->intstatus = 0;
+ if (!bus->pci_d3hot_done) {
dhdpcie_bus_intr_enable(bus);
+ } else {
+ DHD_ERROR(("%s: dhdpcie_bus_intr_enable skip in pci D3hot state \n",
+ __FUNCTION__));
}
}
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC;
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
return resched;
}
{
uint32 cur_h2d_mb_data = 0;
- dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+ DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return;
+ }
+
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
if (cur_h2d_mb_data != 0) {
uint32 i = 0;
DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
while ((i++ < 100) && cur_h2d_mb_data) {
OSL_DELAY(10);
- dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
+ }
+ if (i >= 100) {
+ DHD_ERROR(("%s : waited 1ms for the dngl "
+ "to ack the previous mb transaction\n", __FUNCTION__));
+ DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
+ __FUNCTION__, cur_h2d_mb_data));
}
- if (i >= 100)
- DHD_ERROR(("%s: waited 1ms for the dngl to ack the previous mb transaction\n", __FUNCTION__));
}
- dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
+ dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
dhd_bus_gen_devmb_intr(bus);
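+	/* Book-keeping only: the counters below feed the "D3 inform cnt" / "D0 inform cnt"
+	 * lines printed in the flow ring dump above.
+	 */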
- if (h2d_mb_data == H2D_HOST_D3_INFORM)
+ if (h2d_mb_data == H2D_HOST_D3_INFORM) {
DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+ bus->d3_inform_cnt++;
+ }
+ if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
+ DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
+ bus->d0_inform_in_use_cnt++;
+ }
+ if (h2d_mb_data == H2D_HOST_D0_INFORM) {
+ DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
+ bus->d0_inform_cnt++;
+ }
}
static void
{
uint32 d2h_mb_data = 0;
uint32 zero = 0;
- dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
- if (!d2h_mb_data)
+ dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
+ if (!d2h_mb_data) {
+ DHD_INFO_HW4(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
+ __FUNCTION__, d2h_mb_data));
return;
+ }
- dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
+ dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
- DHD_INFO(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
+ DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%08x\n", __FUNCTION__, d2h_mb_data));
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
+ DHD_ERROR(("FW trap has happened\n"));
+ dhdpcie_checkdied(bus, NULL, 0);
+ /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ return;
+ }
if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
/* what should we do */
DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
}
if (d2h_mb_data & D2H_DEV_D3_ACK) {
/* what should we do */
- DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__));
+ DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
if (!bus->wait_for_d3_ack) {
bus->wait_for_d3_ack = 1;
- dhd_os_ioctl_resp_wake(bus->dhd);
+ dhd_os_d3ack_wake(bus->dhd);
}
}
- if (d2h_mb_data & D2H_DEV_FWHALT) {
- DHD_INFO(("%s: FW trap has happened\n", __FUNCTION__));
-#ifdef DHD_DEBUG
- dhdpcie_checkdied(bus, NULL, 0);
-#endif
- bus->dhd->busstate = DHD_BUS_DOWN;
- }
+}
+
+/* Inform Dongle to print HW Registers for Livelock Debug */
+void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus)
+{
+ dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
}
static bool
} else if (intstatus & I_BIT0) {
/* do nothing for Now */
}
- }
- else {
+ } else {
if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
dhdpcie_handle_mb_data(bus);
resched = dhdpci_bus_read_frames(bus);
}
}
+
exit:
return resched;
}
-/* Decode dongle to host message stream */
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
bool more = FALSE;
/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
- DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
+ DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
dhd_prot_process_ctrlbuf(bus->dhd);
/* Unlock to give chance for resp to be handled */
- DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+ DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
- DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
+ DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
/* update the flow ring cpls */
dhd_update_txflowrings(bus->dhd);
* processing RX frames without RX bound
*/
more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
- DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (bus->dhd->hang_was_sent) {
+ more = FALSE;
+ }
+ DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
return more;
}
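+/**
+ * Sanity check of the dongle TCM: re-reads the pciedev_shared pointer from the last
+ * word of dongle RAM and compares a field of the structure it points at against the
+ * copy cached at init time. Returns FALSE when the contents look invalid.
+ */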
+bool
+dhdpcie_tcm_valid(dhd_bus_t *bus)
+{
+ uint32 addr = 0;
+ int rv;
+ uint32 shaddr = 0;
+ pciedev_shared_t sh;
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+
+ /* Read last word in memory to determine address of pciedev_shared structure */
+ addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+
+ if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+ (addr > shaddr)) {
+		DHD_ERROR(("%s: invalid pciedev_shared address (0x%08x)\n",
+ __FUNCTION__, addr));
+ return FALSE;
+ }
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
+ sizeof(pciedev_shared_t))) < 0) {
+ DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
+ return FALSE;
+ }
+
+ /* Compare any field in pciedev_shared_t */
+ if (sh.console_addr != bus->pcie_sh->console_addr) {
+		DHD_ERROR(("Contents of pciedev_shared_t structure do not match.\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
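+/**
+ * Returns TRUE when the firmware's PCIe shared-structure API revision can be driven
+ * by this host: any revision not newer than the host's is accepted, and revisions 5
+ * and 6 are additionally treated as compatible with each other.
+ */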
+static bool
+dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
+{
+ DHD_INFO(("firmware api revision %d, host api revision %d\n",
+ firmware_api_version, host_api_version));
+ if (firmware_api_version <= host_api_version)
+ return TRUE;
+ if ((firmware_api_version == 6) && (host_api_version == 5))
+ return TRUE;
+ if ((firmware_api_version == 5) && (host_api_version == 6))
+ return TRUE;
+ return FALSE;
+}
+
static int
dhdpcie_readshared(dhd_bus_t *bus)
{
uint32 addr = 0;
- int rv, w_init, r_init;
+ int rv, dma_indx_wr_buf, dma_indx_rd_buf;
uint32 shaddr = 0;
pciedev_shared_t *sh = bus->pcie_sh;
dhd_timeout_t tmo;
shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+
+ DHD_INFO_HW4(("%s: ram_base: 0x%x ramsize 0x%x tcm: %p shaddr: 0x%x nvram_csm: 0x%x\n",
+ __FUNCTION__, bus->dongle_ram_base, bus->ramsize,
+ bus->tcm, shaddr, bus->nvram_csm));
/* start a timer for 5 seconds */
dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
- /* Read last word in memory to determine address of sdpcm_shared structure */
+ /* Read last word in memory to determine address of pciedev_shared structure */
addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
}
return BCME_ERROR;
} else {
bus->shared_addr = (ulong)addr;
- DHD_ERROR(("%s: PCIe shared addr read took %u usec "
- "before dongle is ready\n", __FUNCTION__, tmo.elapsed));
+ DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
+ "before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
}
/* Read hndrte_shared structure */
if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
sizeof(pciedev_shared_t))) < 0) {
- DHD_ERROR(("%s: Failed to read PCIe shared struct,"
- "size read %d < %d\n", __FUNCTION__, rv, (int)sizeof(pciedev_shared_t)));
+ DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
return rv;
}
sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
- /* load bus console address */
#ifdef DHD_DEBUG
+ /* load bus console address */
bus->console_addr = sh->console_addr;
#endif
DHD_ERROR(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
- if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
+ if (!(dhdpcie_check_firmware_compatible(sh->flags & PCIE_SHARED_VERSION_MASK,
+ PCIE_SHARED_VERSION)))
+ {
DHD_ERROR(("%s: pcie_shared version %d in dhd "
"is older than pciedev_shared version %d in dongle\n",
__FUNCTION__, PCIE_SHARED_VERSION,
sh->flags & PCIE_SHARED_VERSION_MASK));
return BCME_ERROR;
}
- if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
- if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
-#ifdef DHDTCPACK_SUPPRESS
- /* Do not use tcpack suppress as packets don't stay in queue */
- dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
-#endif
- bus->txmode_push = TRUE;
- } else
- bus->txmode_push = FALSE;
- }
- DHD_ERROR(("%s: bus->txmode_push is set to %d\n", __FUNCTION__, bus->txmode_push));
+
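+	/* The dongle may advertise 16-bit ring read/write indices (PCIE_SHARED_2BYTE_INDICES);
+	 * the ring state pointers are strided by rw_index_sz accordingly (see
+	 * dhd_fillup_ring_sharedptr_info below).
+	 */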
+ bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
+ sizeof(uint16) : sizeof(uint32);
+	DHD_ERROR(("%s: Dongle advertises %d-byte ring indices\n",
+ __FUNCTION__, bus->rw_index_sz));
/* Does the FW support DMA'ing r/w indices */
if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+
DHD_ERROR(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
__FUNCTION__,
(DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
* The max_sub_queues is read from FW initialized ring_info
*/
if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
- w_init = dhd_prot_init_index_dma_block(bus->dhd,
- HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
- bus->max_sub_queues);
- r_init = dhd_prot_init_index_dma_block(bus->dhd,
- DNGL_TO_HOST_DMA_READINDX_BUFFER,
- BCMPCIE_D2H_COMMON_MSGRINGS);
-
- if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+ dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ H2D_DMA_INDX_WR_BUF, bus->max_sub_queues);
+ dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ D2H_DMA_INDX_RD_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
+
+ if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
- "Host will use w/r indices in TCM\n",
- __FUNCTION__));
+ "Host will use w/r indices in TCM\n",
+ __FUNCTION__));
bus->dhd->dma_h2d_ring_upd_support = FALSE;
}
}
if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
- w_init = dhd_prot_init_index_dma_block(bus->dhd,
- DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
- BCMPCIE_D2H_COMMON_MSGRINGS);
- r_init = dhd_prot_init_index_dma_block(bus->dhd,
- HOST_TO_DNGL_DMA_READINDX_BUFFER,
- bus->max_sub_queues);
-
- if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+ dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ D2H_DMA_INDX_WR_BUF, BCMPCIE_D2H_COMMON_MSGRINGS);
+ dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
+ H2D_DMA_INDX_RD_BUF, bus->max_sub_queues);
+
+ if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
- "Host will use w/r indices in TCM\n",
- __FUNCTION__));
+ "Host will use w/r indices in TCM\n",
+ __FUNCTION__));
bus->dhd->dma_d2h_ring_upd_support = FALSE;
}
}
bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
DHD_INFO(("%s: ring_info\n", __FUNCTION__));
- DHD_ERROR(("%s: max H2D queues %d\n", __FUNCTION__, ltoh16(ring_info.max_sub_queues)));
+ DHD_ERROR(("%s: max H2D queues %d\n",
+ __FUNCTION__, ltoh16(ring_info.max_sub_queues)));
- DHD_INFO(("%s: mail box address\n", __FUNCTION__));
- DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->h2d_mb_data_ptr_addr));
- DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n", __FUNCTION__, bus->d2h_mb_data_ptr_addr));
+ DHD_INFO(("mail box address\n"));
+ DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
+ __FUNCTION__, bus->h2d_mb_data_ptr_addr));
+ DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
+ __FUNCTION__, bus->d2h_mb_data_ptr_addr));
}
bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
- DHD_INFO(("%s: d2h_sync_mode 0x%08x\n", __FUNCTION__, bus->dhd->d2h_sync_mode));
+ DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
+ __FUNCTION__, bus->dhd->d2h_sync_mode));
return BCME_OK;
-}
-/* Read ring mem and ring state ptr info from shared are in TCM */
+} /* dhdpcie_readshared */
+
+/** Read ring mem and ring state ptr info from shared memory area in device memory */
static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
{
D2H_MSGRING_CONTROL_COMPLETE 2
D2H_MSGRING_TX_COMPLETE 3
D2H_MSGRING_RX_COMPLETE 4
- TX_FLOW_RING 5
*/
{
DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
i, bus->ring_sh[i].ring_mem_addr));
}
-
- /* Tx flow Ring */
- if (bus->txmode_push) {
- bus->ring_sh[i].ring_mem_addr = tcm_memloc;
- DHD_INFO(("%s: TX ring ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
- i, bus->ring_sh[i].ring_mem_addr));
- }
}
/* Ring state mem ptr info */
d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+
/* Store h2d common ring write/read pointers */
for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
/* update mem block */
- h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
- h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+ h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+ h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
}
+
/* Store d2h common ring write/read pointers */
for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
/* update mem block */
- d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
- d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
+ d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
+ d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
}
/* Store txflow ring write/read pointers */
- if (bus->txmode_push) {
+ for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
+ i++, j++)
+ {
bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
- DHD_INFO(("%s: txflow : idx %d write %x read %x \n", __FUNCTION__, i,
- bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
- } else {
- for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
- i++, j++)
- {
- bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
- bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
-
- /* update mem block */
- h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
- h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+ /* update mem block */
+ h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
+ h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
- DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
- __FUNCTION__, i,
- bus->ring_sh[i].ring_state_w,
- bus->ring_sh[i].ring_state_r));
- }
+ DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
+ __FUNCTION__, i,
+ bus->ring_sh[i].ring_state_w,
+ bus->ring_sh[i].ring_state_r));
}
}
-}
+} /* dhd_fillup_ring_sharedptr_info */
-/* Initialize bus module: prepare for communication w/dongle */
+/**
+ * Initialize bus module: prepare for communication with the dongle. Called after downloading
+ * firmware into the dongle.
+ */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
dhd_bus_t *bus = dhdp->bus;
return ret;
}
-
/* Make sure we're talking to the core. */
bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
ASSERT(bus->reg != NULL);
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
+ if (!dhd_download_fw_on_driverload)
+ dhd_dpc_enable(bus->dhd);
+
/* Enable the interrupt after device is up */
dhdpcie_bus_intr_enable(bus);
/* bcmsdh_intr_unmask(bus->sdh); */
- return ret;
+#ifdef DHD_PCIE_RUNTIMEPM
+ bus->idlecount = 0;
+ bus->idletime = (int32)MAX_IDLE_COUNT;
+ init_waitqueue_head(&bus->rpm_queue);
+ mutex_init(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
-}
+ bus->d3_ack_war_cnt = 0;
+ return ret;
+}
static void
dhdpcie_init_shared_addr(dhd_bus_t *bus)
uint32 addr = 0;
uint32 val = 0;
addr = bus->dongle_ram_base + bus->ramsize - 4;
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+ DHD_INFO_HW4(("%s: tcm: %p, addr: 0x%x val: 0x%x\n", __FUNCTION__, bus->tcm, addr, val));
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
}
}
if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
- (device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
+ (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
+ (device == BCM43569_CHIP_ID))
return 0;
if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
return 0;
if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
- (device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
+ (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
return 0;
if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
return 0;
if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
- (device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
+ (device == BCM4358_D11AC5G_ID))
return 0;
if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
return 0;
+
if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
return 0;
+
if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
- (device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
+ (device == BCM4359_D11AC5G_ID))
return 0;
-
- DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
- return (-ENODEV);
-}
+ if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
+ (device == BCM43596_D11AC5G_ID))
+ return 0;
-/*
+ if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
+ (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
+ return 0;
-Name: dhdpcie_cc_nvmshadow
+ if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
+ (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
+ return 0;
-Description:
-A shadow of OTP/SPROM exists in ChipCommon Region
-betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
-Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
-can also be read from ChipCommon Registers.
-*/
+ DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+ return (-ENODEV);
+} /* dhdpcie_chipmatch */
+/**
+ * Name: dhdpcie_cc_nvmshadow
+ *
+ * Description:
+ * A shadow of OTP/SPROM exists in ChipCommon Region
+ * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
+ * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
+ * can also be read from ChipCommon Registers.
+ */
static int
dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
{
uint chipc_corerev;
chipcregs_t *chipcregs;
-
/* Save the current core */
cur_coreid = si_coreid(bus->sih);
/* Switch to ChipC */
chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+ ASSERT(chipcregs != NULL);
+
chipc_corerev = si_corerev(bus->sih);
/* Check ChipcommonCore Rev */
}
/* Check ChipID */
- if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
- ((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip)) {
DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
__FUNCTION__));
return BCME_UNSUPPORTED;
/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
/* dump_size in 16bit words */
dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
- }
- else {
+ } else {
DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
__FUNCTION__));
return BCME_NOTFOUND;
si_setcore(bus->sih, cur_coreid, 0);
return BCME_OK;
-}
-
-
-uint8 BCMFASTPATH
-dhd_bus_is_txmode_push(dhd_bus_t *bus)
-{
- return bus->txmode_push;
-}
+} /* dhdpcie_cc_nvmshadow */
+/** Flow rings are dynamically created and destroyed */
void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
void *pkt;
while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTFREE(bus->dhd->osh, pkt, TRUE);
}
- ASSERT(flow_queue_empty(queue));
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
flow_ring_node->active = FALSE;
- dll_delete(&flow_ring_node->list);
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- /* Call Flow ring clean up */
- dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
- dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
- flow_ring_node->flowid);
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ dll_delete(&flow_ring_node->list);
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ /* Release the flowring object back into the pool */
+ dhd_prot_flowrings_pool_release(bus->dhd,
+ flow_ring_node->flowid, flow_ring_node->prot_info);
+ /* Free the flowid back to the flowid allocator */
+ dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+ flow_ring_node->flowid);
}
-/*
+/**
* Allocate a Flow ring buffer,
- * Init Ring buffer,
- * Send Msg to device about flow ring creation
+ * Init Ring buffer, send Msg to device about flow ring creation
*/
int
dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
return BCME_OK;
}
+/** Handle response from dongle on a 'flow ring create' request */
void
dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
{
flow_ring_node->status = FLOW_RING_STATUS_OPEN;
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- dhd_bus_schedule_queue(bus, flowid, FALSE);
+	/* Now add the flow ring node to the active list.
+	 * This code used to live in dhd_flowid_lookup, but the node's contents are only
+	 * filled in later by dhd_prot_flow_ring_create. If a D2H interrupt arrived after
+	 * the node was added to the active list but before it was populated, the bottom
+	 * half would call dhd_update_txflowrings, which walks the active flow ring list
+	 * and operates on its nodes; since dhd_prot_flow_ring_create had not finished yet,
+	 * the flow_ring_node contents could still be NULL, leading to crashes. Hence the
+	 * flow_ring_node is added to the active list only after it is truly created, i.e.
+	 * after the flow ring create response has been received from the dongle.
+	 */
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ dll_prepend(&bus->const_flowring, &flow_ring_node->list);
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+ dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
return;
}
flow_ring_node = (flow_ring_node_t *)arg;
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
+ if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
+		DHD_ERROR(("%s: Delete Pending Flow %d\n",
+ __FUNCTION__, flow_ring_node->flowid));
return BCME_ERROR;
}
flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTFREE(bus->dhd->osh, pkt, TRUE);
}
- ASSERT(flow_queue_empty(queue));
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
+/** This function is not called. Obsolete? */
int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
{
void *pkt;
*/
dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
+
/* Flush all pending packets in the queue, if any */
while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTFREE(bus->dhd->osh, pkt, TRUE);
}
- ASSERT(flow_queue_empty(queue));
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
uint32
-dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
+dhd_bus_max_h2d_queues(struct dhd_bus *bus)
{
- if (bus->txmode_push)
- *txpush = 1;
- else
- *txpush = 0;
return bus->max_sub_queues;
}
+/* To be symmetric with SDIO */
+void
+dhd_bus_pktq_flush(dhd_pub_t *dhdp)
+{
+ return;
+}
+
+void
+dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
+{
+ dhdp->bus->is_linkdown = val;
+}
+
int
dhdpcie_bus_clock_start(struct dhd_bus *bus)
{
dhd_bus_release_dongle(struct dhd_bus *bus)
{
bool dongle_isolation;
- osl_t *osh;
+ osl_t *osh;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
}
#ifdef BCMPCIE_OOB_HOST_WAKE
-int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
+int
+dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
{
return dhdpcie_oob_intr_register(dhdp->bus);
}
-void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+void
+dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
dhdpcie_oob_intr_unregister(dhdp->bus);
}
-void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+void
+dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
dhdpcie_oob_intr_set(dhdp->bus, enable);
}
/*
* Linux DHD Bus Module for PCIE
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_pcie.h 506084 2014-10-02 15:34:59Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie.h 607608 2015-12-21 13:14:19Z $
*/
#include <hnd_cons.h>
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
-#ifdef CONFIG_ARCH_MSM8994
+#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#else
#include <mach/msm_pcie.h>
-#endif
+#endif /* CONFIG_PCI_MSM */
#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+#include <linux/exynos-pci-noti.h>
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
+#ifdef DHD_PCIE_RUNTIMEPM
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
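+/*
+ * Runtime PM tunables: CUSTOM_DHD_RUNTIME_MS is the runtime-PM tick in milliseconds,
+ * MAX_IDLE_COUNT initialises bus->idletime in dhd_bus_init (the idle threshold used by
+ * the runtime-suspend path), and MAX_RESUME_WAIT presumably bounds the wait on
+ * rpm_queue for a runtime resume to finish.
+ */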
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 16
+#endif /* MAX_IDLE_COUNT */
+
+#ifndef MAX_RESUME_WAIT
+#define MAX_RESUME_WAIT 100
+#endif /* MAX_RESUME_WAIT */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
/* defines */
#define PCMSGBUF_HDRLEN 0
#define REMAP_ENAB(bus) ((bus)->remap)
#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
-#define MAX_DHD_TX_FLOWS 256
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify struct msm_pcie_notify
+#define struct_pcie_register_event struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+#define struct_pcie_notify struct exynos_pcie_notify
+#define struct_pcie_register_event struct exynos_pcie_register_event
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+/*
+ * Router with 4366 can have 128 stations and 16 BSS,
+ * hence (128 stations x 4 access categories for ucast) + 16 bc/mc flowrings
+ */
+#define MAX_DHD_TX_FLOWS 320
/* user defined data structures */
-#ifdef DHD_DEBUG
/* Device console log buffer state */
#define CONSOLE_LINE_MAX 192
-#define CONSOLE_BUFFER_MAX 2024
+#define CONSOLE_BUFFER_MAX (8 * 1024)
+#ifndef MAX_CNTL_D3ACK_TIMEOUT
+#define MAX_CNTL_D3ACK_TIMEOUT 2
+#endif /* MAX_CNTL_D3ACK_TIMEOUT */
+
+#ifdef DHD_DEBUG
typedef struct dhd_console {
uint count; /* Poll interval msec counter */
uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
char *fw_path; /* module_param: path to firmware image */
char *nv_path; /* module_param: path to nvram vars file */
- char *nvram_params; /* user specified nvram params. */
- int nvram_params_len;
-
- struct pktq txq; /* Queue length used for flow-control */
+#ifdef CACHE_FW_IMAGES
+ int processed_nvram_params_len; /* Modified len of NVRAM info */
+#endif
- uint rxlen; /* Length of valid data in buffer */
+ struct pktq txq; /* Queue length used for flow-control */
bool intr; /* Use interrupts */
bool ipend; /* Device interrupt is pending */
ulong shared_addr;
pciedev_shared_t *pcie_sh;
bool bus_flowctrl;
- ioctl_comp_resp_msg_t ioct_resp;
uint32 dma_rxoffset;
volatile char *regs; /* pci device memory va */
volatile char *tcm; /* pci device memory va */
- uint32 tcm_size;
-#ifdef CONFIG_ARCH_MSM8994
- uint32 bar1_win_base;
- uint32 bar1_win_mask;
-#endif
osl_t *osh;
uint32 nvram_csm; /* Nvram checksum */
uint16 pollrate;
uint32 def_intmask;
bool ltrsleep_on_unload;
uint wait_for_d3_ack;
- uint8 txmode_push;
uint32 max_sub_queues;
+ uint32 rw_index_sz;
bool db1_for_mb;
bool suspended;
+
+ dhd_timeout_t doorbell_timer;
+ bool device_wake_state;
+ bool irq_registered;
+#ifdef PCIE_OOB
+ bool oob_enabled;
+#endif /* PCIE_OOB */
#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+ defined(CONFIG_SOC_EXYNOS8890))
#ifdef CONFIG_ARCH_MSM
- struct msm_pcie_register_event pcie_event;
- bool islinkdown;
+ uint8 no_cfg_restore;
#endif /* CONFIG_ARCH_MSM */
+ struct_pcie_register_event pcie_event;
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
-#ifdef PCIE_TX_DEFERRAL
- struct workqueue_struct *tx_wq;
- struct work_struct create_flow_work;
- struct work_struct delete_flow_work;
- unsigned long *delete_flow_map;
- struct sk_buff_head orphan_list;
-#endif /* PCIE_TX_DEFERRAL */
- bool irq_registered;
+#ifdef DHD_PCIE_RUNTIMEPM
+ int32 idlecount; /* Activity timeout counter */
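+/**
+ * Re-arms the doorbell idle timer when a doorbell timeout is configured; otherwise,
+ * if the bus is not suspended, drops the DEVICE_WAKE signal.
+ */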
+ int32 idletime; /* Control for activity timeout */
+ int32 bus_wake; /* For wake up the bus */
+ bool runtime_resume_done; /* For check runtime suspend end */
+ struct mutex pm_lock; /* Synchronize for system PM & runtime PM */
+ wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */
+#endif /* DHD_PCIE_RUNTIMEPM */
+ uint32 d3_inform_cnt;
+ uint32 d0_inform_cnt;
+ uint32 d0_inform_in_use_cnt;
+ uint8 force_suspend;
+ uint32 d3_ack_war_cnt;
+ uint8 is_linkdown;
+ uint32 pci_d3hot_done;
} dhd_bus_t;
/* function declarations */
extern void dhdpcie_bus_unregister(void);
extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
-extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs,
- volatile char* tcm, uint32 tcm_size);
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh,
+ volatile char *regs, volatile char *tcm, void *pci_dev);
extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
-extern void dhdpcie_bus_remove_prep(struct dhd_bus *bus);
extern void dhdpcie_bus_release(struct dhd_bus *bus);
extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
-extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
+extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
+extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
+extern void dhdpcie_bus_dongle_print_hwregs(struct dhd_bus *bus);
#ifndef BCMPCIE_OOB_HOST_WAKE
extern void dhdpcie_pme_active(osl_t *osh, bool enable);
#endif /* !BCMPCIE_OOB_HOST_WAKE */
+extern bool dhdpcie_pme_cap(osl_t *osh);
extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_disable_device(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_OOB
+extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
+extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
+#endif /* PCIE_OOB */
+
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+#if defined(CONFIG_MACH_UNIVERSAL5433)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
+#define SAMSUNG_PCIE_CH_NUM
+#elif defined(CONFIG_MACH_UNIVERSAL7420)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa575
+#define SAMSUNG_PCIE_CH_NUM 1
+#elif defined(CONFIG_SOC_EXYNOS8890)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa544
+#define SAMSUNG_PCIE_CH_NUM 0
+#else
+#error "Not supported platform"
+#endif
+#ifdef CONFIG_MACH_UNIVERSAL5433
+extern int exynos_pcie_pm_suspend(void);
+extern int exynos_pcie_pm_resume(void);
+#else
+extern int exynos_pcie_pm_suspend(int ch_num);
+extern int exynos_pcie_pm_resume(int ch_num);
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
#endif /* dhd_pcie_h */
/*
* Linux DHD Bus Module for PCIE
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_pcie_linux.c 506043 2014-10-02 12:29:45Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pcie_linux.c 610267 2016-01-06 16:03:53Z $
*/
#include <dhd_pcie.h>
#include <dhd_linux.h>
#ifdef CONFIG_ARCH_MSM
-#ifdef CONFIG_ARCH_MSM8994
+#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#else
#include <mach/msm_pcie.h>
-#endif
+#endif /* CONFIG_PCI_MSM */
#endif /* CONFIG_ARCH_MSM */
-#define PCI_CFG_RETRY 10
+#define PCI_CFG_RETRY 10
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
uint16 last_intrstatus; /* to cache intrstatus */
int irq;
char pciname[32];
+ struct pci_saved_state* default_state;
struct pci_saved_state* state;
#ifdef BCMPCIE_OOB_HOST_WAKE
void *os_cxt; /* Pointer to per-OS private data */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
-static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state);
-static int dhdpcie_pci_resume(struct pci_dev *dev);
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
static int dhdpcie_resume_dev(struct pci_dev *dev);
static int dhdpcie_suspend_dev(struct pci_dev *dev);
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev);
+static int dhdpcie_pm_prepare(struct device *dev);
+static int dhdpcie_pm_resume(struct device *dev);
+static void dhdpcie_pm_complete(struct device *dev);
+#else
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+#endif /* DHD_PCIE_RUNTIMEPM */
static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
{ vendor: 0x14e4,
device: PCI_ANY_ID,
};
MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+/* Power Management Hooks */
+#ifdef DHD_PCIE_RUNTIMEPM
+static const struct dev_pm_ops dhd_pcie_pm_ops = {
+ .prepare = dhdpcie_pm_prepare,
+ .suspend = dhdpcie_pm_suspend,
+ .resume = dhdpcie_pm_resume,
+ .complete = dhdpcie_pm_complete,
+};
+#endif /* DHD_PCIE_RUNTIMEPM */
+
static struct pci_driver dhdpcie_driver = {
node: {},
name: "pcieh",
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
save_state: NULL,
#endif
+#ifdef DHD_PCIE_RUNTIMEPM
+ .driver.pm = &dhd_pcie_pm_ops,
+#else
suspend: dhdpcie_pci_suspend,
resume: dhdpcie_pci_resume,
+#endif /* DHD_PCIE_RUNTIMEPM */
};
int dhdpcie_init_succeeded = FALSE;
-static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev)
{
- int ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return dhdpcie_set_suspend_resume(pdev, TRUE);
+}
+
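+/* System-suspend bracket: DHD runtime PM is disabled in .prepare and re-enabled in
+ * .complete, keeping the runtime path out of the way of the system suspend sequence.
+ */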
+static int dhdpcie_pm_prepare(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
dhdpcie_info_t *pch = pci_get_drvdata(pdev);
dhd_bus_t *bus = NULL;
if (pch) {
bus = pch->bus;
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
}
- /* When firmware is not loaded do the PCI bus */
- /* suspend/resume only */
- if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
-#ifdef CONFIG_MACH_UNIVERSAL5433
- /* RB:34285 check_rev() : return 1 - new rev., 0 - old rev. */
- (!check_rev() || (check_rev() && !bus->dhd->dongle_reset)))
-#else
- !bus->dhd->dongle_reset)
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
- {
- ret = dhdpcie_pci_suspend_resume(bus, state);
- return ret;
- }
+ return 0;
+}
- if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
- (bus->dhd->busstate == DHD_BUS_DATA)) &&
- (bus->suspended != state)) {
+static int dhdpcie_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return dhdpcie_set_suspend_resume(pdev, FALSE);
+}
- ret = dhdpcie_bus_suspend(bus, state);
+static void dhdpcie_pm_complete(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+
+ if (pch) {
+ bus = pch->bus;
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
}
- return ret;
-}
+ return;
+}
+#else
static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
{
BCM_REFERENCE(state);
return dhdpcie_set_suspend_resume(pdev, FALSE);
}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+
+ if (pch) {
+ bus = pch->bus;
+ }
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (bus && !bus->dhd->dongle_reset) {
+ /* if wakelock is held during suspend, return failed */
+ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
+ return -EBUSY;
+ }
+
+ mutex_lock(&bus->pm_lock);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+	/* When firmware is not loaded, do only the PCI bus suspend/resume */
+ if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
+ !bus->dhd->dongle_reset) {
+ ret = dhdpcie_pci_suspend_resume(bus, state);
+#ifdef DHD_PCIE_RUNTIMEPM
+ mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ return ret;
+ }
+
+ if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
+ (bus->dhd->busstate == DHD_BUS_DATA)) &&
+ (bus->suspended != state)) {
+ ret = dhdpcie_bus_suspend(bus, state);
+ }
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (bus && !bus->dhd->dongle_reset) {
+ mutex_unlock(&bus->pm_lock);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+ return ret;
+}
+
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
int ret;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+ dhd_bus_t *bus = pch->bus;
+
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ bus->pci_d3hot_done = 1;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_save_state(dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ pch->state = pci_store_saved_state(dev);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_enable_wake(dev, PCI_D0, TRUE);
- pci_disable_device(dev);
+ if (pci_is_enabled(dev)) {
+ pci_disable_device(dev);
+ }
ret = pci_set_power_state(dev, PCI_D3hot);
if (ret) {
DHD_ERROR(("%s: pci_set_power_state error %d\n",
__FUNCTION__, ret));
}
+ disable_irq(dev->irq);
return ret;
}
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
int err = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
+ dhd_bus_t *bus = pch->bus;
+ pci_load_and_free_saved_state(dev, &pch->state);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ bus->pci_d3hot_done = 0;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
- return err;
+ goto out;
}
pci_set_master(dev);
err = pci_set_power_state(dev, PCI_D0);
if (err) {
printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
- return err;
+ goto out;
}
+
+out:
+ enable_irq(dev->irq);
return err;
}
-int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state)
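+/**
+ * Root-complex side suspend/resume helpers: on Exynos platforms
+ * (USE_EXYNOS_PCIE_RC_PMPATCH) the RC is driven through the Exynos PCIe PM API, while
+ * on MSM platforms the host PCIe clock is started/stopped. A failed RC resume marks
+ * the PCIe link as down.
+ */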
+static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+ bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
+#ifdef CONFIG_ARCH_MSM
+ bcmerror = dhdpcie_start_host_pcieclock(bus);
+#endif /* CONFIG_ARCH_MSM */
+ if (bcmerror < 0) {
+ DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
+ __FUNCTION__, bcmerror));
+ bus->is_linkdown = 1;
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ }
+
+ return bcmerror;
+}
+
+static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+ struct pci_dev *rc_pci_dev;
+ rc_pci_dev = pci_get_device(0x144d, SAMSUNG_PCIE_DEVICE_ID, NULL);
+ if (rc_pci_dev) {
+ pci_save_state(rc_pci_dev);
+ }
+ exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
+#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
+#ifdef CONFIG_ARCH_MSM
+ bcmerror = dhdpcie_stop_host_pcieclock(bus);
+#endif /* CONFIG_ARCH_MSM */
+ return bcmerror;
+}
+
+int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
int rc;
+
struct pci_dev *dev = bus->dev;
if (state) {
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
#ifndef BCMPCIE_OOB_HOST_WAKE
dhdpcie_pme_active(bus->osh, state);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
rc = dhdpcie_suspend_dev(dev);
+ if (!rc) {
+ dhdpcie_suspend_host_dev(bus);
+ }
} else {
+ dhdpcie_resume_host_dev(bus);
rc = dhdpcie_resume_dev(dev);
-#ifndef BCMPCIE_OOB_HOST_WAKE
+#ifndef BCMPCIE_OOB_HOST_WAKE
dhdpcie_pme_active(bus->osh, state);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ if (bus->is_linkdown) {
+ bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
+ dhd_os_send_hang_message(bus->dhd);
+ }
+#endif
}
return rc;
}
dhdpcie_detach(dhdpcie_info_t *pch)
{
if (pch) {
- osl_t *osh = pch->osh;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (!dhd_download_fw_on_driverload)
- pci_load_and_free_saved_state(pch->dev, &pch->state);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- MFREE(osh, pch, sizeof(dhdpcie_info_t));
+ if (!dhd_download_fw_on_driverload) {
+ pci_load_and_free_saved_state(pch->dev, &pch->default_state);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
}
return 0;
}
osl_t *osh = NULL;
dhdpcie_info_t *pch = NULL;
dhd_bus_t *bus = NULL;
-#ifdef PCIE_TX_DEFERRAL
- struct sk_buff *skb;
-#endif
DHD_TRACE(("%s Enter\n", __FUNCTION__));
bus = pch->bus;
osh = pch->osh;
-#ifdef PCIE_TX_DEFERRAL
- if (bus->tx_wq)
- destroy_workqueue(bus->tx_wq);
- skb = skb_dequeue(&bus->orphan_list);
- while (skb) {
- PKTCFREE(osh, skb, TRUE);
- skb = skb_dequeue(&bus->orphan_list);
- }
-#endif
-
#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus) {
#ifdef CONFIG_ARCH_MSM
- if (bus)
msm_pcie_deregister_event(&bus->pcie_event);
#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+ exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+ }
#endif /* SUPPORT_LINKDOWN_RECOVERY */
-
- dhdpcie_bus_remove_prep(bus);
dhdpcie_bus_release(bus);
pci_disable_device(pdev);
#ifdef BCMPCIE_OOB_HOST_WAKE
dhd_bus_t *bus = dhdpcie_info->bus;
struct pci_dev *pdev = dhdpcie_info->bus->dev;
- snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
- "dhdpcie:%s", pci_name(pdev));
- if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
- dhdpcie_info->pciname, bus) < 0) {
+ if (!bus->irq_registered) {
+ snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+ "dhdpcie:%s", pci_name(pdev));
+ if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+ dhdpcie_info->pciname, bus) < 0) {
DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
return -1;
+ } else {
+ bus->irq_registered = TRUE;
+ }
+ } else {
+ DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
}
- bus->irq_registered = TRUE;
DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
}
dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
- dhdpcie_info->tcm_size =
- (bar1_size < DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+ dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
* in case of built in driver
*/
pci_save_state(pdev);
- dhdpcie_info->state = pci_store_saved_state(pdev);
+ dhdpcie_info->default_state = pci_store_saved_state(pdev);
- if (dhdpcie_info->state == NULL) {
+ if (dhdpcie_info->default_state == NULL) {
DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
__FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
}
#ifdef SUPPORT_LINKDOWN_RECOVERY
-#ifdef CONFIG_ARCH_MSM
-void dhdpcie_linkdown_cb(struct msm_pcie_notify *noti)
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+ defined(CONFIG_SOC_EXYNOS8890))
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
{
struct pci_dev *pdev = (struct pci_dev *)noti->user;
dhdpcie_info_t *pch = NULL;
DHD_ERROR(("%s: Event HANG send up "
"due to PCIe linkdown\n",
__FUNCTION__));
- bus->islinkdown = TRUE;
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ bus->is_linkdown = 1;
DHD_OS_WAKE_LOCK(dhd);
- dhd_os_check_hang(dhd, 0, -ETIMEDOUT);
+ dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(dhd);
}
}
}
}
}
-#endif /* CONFIG_ARCH_MSM */
-#endif /* SUPPORT_LINKDOWN_RECOVERY */
-
-#ifdef PCIE_TX_DEFERRAL
-static void dhd_pcie_create_flow_worker(struct work_struct *worker)
-{
- dhd_bus_t *bus;
- struct sk_buff *skb;
- uint16 ifidx, flowid;
- flow_queue_t *queue;
- flow_ring_node_t *flow_ring_node;
- unsigned long flags;
-
- bus = container_of(worker, dhd_bus_t, create_flow_work);
- skb = skb_dequeue(&bus->orphan_list);
- while (skb) {
- ifidx = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb));
- if (BCME_OK != dhd_flowid_update(bus->dhd, ifidx,
- bus->dhd->flow_prio_map[(PKTPRIO(skb))], skb)) {
- PKTCFREE(bus->dhd->osh, skb, TRUE);
- skb = skb_dequeue(&bus->orphan_list);
- continue;
- }
- flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(skb));
- flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- queue = &flow_ring_node->queue;
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- if ((flowid >= bus->dhd->num_flow_rings) ||
- (!flow_ring_node->active) ||
- (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- DHD_ERROR(("%s: Dropping pkt flowid %d, status %d active %d\n",
- __FUNCTION__, flowid, flow_ring_node->status,
- flow_ring_node->active));
- PKTCFREE(bus->dhd->osh, skb, TRUE);
- skb = skb_dequeue(&bus->orphan_list);
- continue;
- }
- if (BCME_OK != dhd_flow_queue_enqueue(bus->dhd, queue, skb)) {
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- PKTCFREE(bus->dhd->osh, skb, TRUE);
- skb = skb_dequeue(&bus->orphan_list);
- continue;
- }
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
-
- if (flow_ring_node->status == FLOW_RING_STATUS_OPEN)
- dhd_bus_schedule_queue(bus, flowid, FALSE);
-
- skb = skb_dequeue(&bus->orphan_list);
- }
-}
-
-static void dhd_pcie_delete_flow_worker(struct work_struct *worker)
-{
- dhd_bus_t *bus;
- uint16 flowid;
-
- bus = container_of(worker, dhd_bus_t, delete_flow_work);
- for_each_set_bit(flowid, bus->delete_flow_map, bus->dhd->num_flow_rings) {
- clear_bit(flowid, bus->delete_flow_map);
- dhd_bus_flow_ring_delete_response(bus, flowid, BCME_OK);
- }
-}
-
-#endif /* PCIE_TX_DEFERRAL */
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY && CONFIG_SOC_EXYNOS8890) */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#if defined(MULTIPLE_SUPPLICANT)
extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
osl_static_mem_init(osh, adapter);
+ /* Set ACP coherence flag */
+ if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
+ osl_flag_set(osh, OSL_ACP_COHERENCE);
+
/* allocate linux spcific pcie structure here */
if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
}
/* Bus initialization */
- bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs,
- dhdpcie_info->tcm, dhdpcie_info->tcm_size);
+ bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
if (!bus) {
DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
break;
}
dhdpcie_info->bus = bus;
- dhdpcie_info->bus->dev = pdev;
-
+ bus->is_linkdown = 0;
+ bus->pci_d3hot_done = 0;
+#ifdef DONGLE_ENABLE_ISOLATION
+ bus->dhd->dongle_isolation = TRUE;
+#endif /* DONGLE_ENABLE_ISOLATION */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
bus->pcie_event.callback = dhdpcie_linkdown_cb;
bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
msm_pcie_register_event(&bus->pcie_event);
- bus->islinkdown = FALSE;
+ bus->no_cfg_restore = 0;
#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+ bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ exynos_pcie_register_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (bus->intr) {
"due to polling mode\n", __FUNCTION__));
}
-#if 0 // terence 20150325: fix for WPA/WPA2 4-way handshake fail in hostapd
- if (dhd_download_fw_on_driverload) {
- if (dhd_bus_start(bus->dhd)) {
- DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
- break;
- }
+#if defined(BCM_REQUEST_FW)
+ if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
+ DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
}
-#endif
+ bus->nv_path = NULL;
+ bus->fw_path = NULL;
+#endif /* BCM_REQUEST_FW */
/* set private data for pci_dev */
pci_set_drvdata(pdev, dhdpcie_info);
-#ifdef PCIE_TX_DEFERRAL
- bus->tx_wq = create_singlethread_workqueue("bcmdhd_tx");
- if (bus->tx_wq == NULL) {
- DHD_ERROR(("%s workqueue creation failed\n", __FUNCTION__));
- break;
+ if (dhd_download_fw_on_driverload) {
+ if (dhd_bus_start(bus->dhd)) {
+ DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+ if (!allow_delay_fwdl)
+ break;
+ }
+ } else {
+ /* Set random MAC address during boot time */
+ get_random_bytes(&bus->dhd->mac.octet[3], 3);
+ /* Adding BRCM OUI */
+ bus->dhd->mac.octet[0] = 0;
+ bus->dhd->mac.octet[1] = 0x90;
+ bus->dhd->mac.octet[2] = 0x4C;
}
- INIT_WORK(&bus->create_flow_work, dhd_pcie_create_flow_worker);
- INIT_WORK(&bus->delete_flow_work, dhd_pcie_delete_flow_worker);
- skb_queue_head_init(&bus->orphan_list);
-#endif /* PCIE_TX_DEFERRAL */
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
dhdpcie_bus_release(bus);
#ifdef BCMPCIE_OOB_HOST_WAKE
- if (dhdpcie_osinfo)
+ if (dhdpcie_osinfo) {
MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
+ }
#endif /* BCMPCIE_OOB_HOST_WAKE */
if (dhdpcie_info)
struct pci_dev *pdev = NULL;
DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
- if (bus && bus->irq_registered) {
+ if (!bus) {
+ return;
+ }
+
+ if (bus->irq_registered) {
pdev = bus->dev;
free_irq(pdev->irq, bus);
bus->irq_registered = FALSE;
+ } else {
+ DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
return;
#endif /* CONFIG_ARCH_MSM */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
- if (bus == NULL)
+ if (bus == NULL) {
return BCME_ERROR;
+ }
- if (bus->dev == NULL)
+ if (bus->dev == NULL) {
return BCME_ERROR;
+ }
#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
- if (bus->islinkdown) {
+ if (bus->no_cfg_restore) {
options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
}
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
bus->dev, NULL, options);
- if (bus->islinkdown && !ret) {
+ if (bus->no_cfg_restore && !ret) {
msm_pcie_recover_config(bus->dev);
- if (bus->dhd)
- DHD_OS_WAKE_UNLOCK(bus->dhd);
- bus->islinkdown = FALSE;
+ bus->no_cfg_restore = 0;
}
#else
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
{
int ret = 0;
-
#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
int options = 0;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* CONFIG_ARCH_MSM */
+
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
- if (bus == NULL)
+ if (bus == NULL) {
return BCME_ERROR;
+ }
- if (bus->dev == NULL)
+ if (bus->dev == NULL) {
return BCME_ERROR;
+ }
#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
- if (bus->islinkdown)
+ if (bus->no_cfg_restore) {
options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+ }
- ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
bus->dev, NULL, options);
#else
- ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
bus->dev, NULL, 0);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (ret) {
int
dhdpcie_disable_device(dhd_bus_t *bus)
{
- if (bus == NULL)
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL) {
return BCME_ERROR;
+ }
- if (bus->dev == NULL)
+ if (bus->dev == NULL) {
return BCME_ERROR;
+ }
pci_disable_device(bus->dev);
dhdpcie_enable_device(dhd_bus_t *bus)
{
int ret = BCME_ERROR;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhdpcie_info_t *pch;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
- if (bus == NULL)
+ if (bus == NULL) {
return BCME_ERROR;
+ }
- if (bus->dev == NULL)
+ if (bus->dev == NULL) {
return BCME_ERROR;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
pch = pci_get_drvdata(bus->dev);
- if (pch == NULL)
+ if (pch == NULL) {
return BCME_ERROR;
+ }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
/* Updated with pci_load_and_free_saved_state to compatible
* with kernel 3.14 or higher
*/
- if (pci_load_and_free_saved_state(bus->dev, &pch->state))
- pci_disable_device(bus->dev);
- else
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)))
- if (pci_load_saved_state(bus->dev, pch->state))
- pci_disable_device(bus->dev);
- else
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and
- * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
- * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
- */
- {
- pci_restore_state(bus->dev);
- ret = pci_enable_device(bus->dev);
- if (!ret)
- pci_set_master(bus->dev);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) and
- * (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
- * (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
- */
+ pci_load_and_free_saved_state(bus->dev, &pch->default_state);
+ pch->default_state = pci_store_saved_state(bus->dev);
+#else
+ pci_load_saved_state(bus->dev, pch->default_state);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && !CONFIG_SOC_EXYNOS8890 */
+
+ pci_restore_state(bus->dev);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
- if (ret)
+ ret = pci_enable_device(bus->dev);
+ if (ret) {
pci_disable_device(bus->dev);
+ } else {
+ pci_set_master(bus->dev);
+ }
return ret;
}
}
bus->regs = dhdpcie_info->regs;
- dhdpcie_info->tcm_size =
- (bar1_size < DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+ dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
if (!dhdpcie_info->tcm) {
DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
}
bus->tcm = dhdpcie_info->tcm;
- bus->tcm_size = dhdpcie_info->tcm_size;
DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
__FUNCTION__, dhdpcie_info->regs, bar0_addr));
spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
(dhdpcie_osinfo->oob_irq_num > 0)) {
- if (enable)
+ if (enable) {
enable_irq(dhdpcie_osinfo->oob_irq_num);
- else
+ } else {
disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
+ }
dhdpcie_osinfo->oob_irq_enabled = enable;
}
spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
dhd_bus_t *bus;
DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
bus = (dhd_bus_t *)data;
+ dhdpcie_oob_intr_set(bus, FALSE);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
+#endif /* DHD_PCIE_RUNTIMEPM */
if (bus->dhd->up && bus->suspended) {
DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
}
return err;
}
err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
- if (!err)
+ if (!err) {
dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
+ }
dhdpcie_osinfo->oob_irq_enabled = TRUE;
}
if (dhdpcie_osinfo->oob_irq_num > 0) {
if (dhdpcie_osinfo->oob_irq_wake_enabled) {
err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
- if (!err)
+ if (!err) {
dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
+ }
}
if (dhdpcie_osinfo->oob_irq_enabled) {
disable_irq(dhdpcie_osinfo->oob_irq_num);
dhdpcie_osinfo->oob_irq_registered = FALSE;
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+#ifdef DHD_PCIE_RUNTIMEPM
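+/*
+ * dhd_runtimepm_state
+ * Runtime-PM idle tick: bump the idle counter and, once bus->idletime ticks
+ * pass with the bus idle, suspend the PCIe link and sleep on rpm_queue until
+ * dhd_runtime_bus_wake() requests a resume. Returns TRUE only when a full
+ * suspend/resume cycle was executed.
+ */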
+bool dhd_runtimepm_state(dhd_pub_t *dhd)
+{
+ dhd_bus_t *bus;
+ unsigned long flags;
+ bus = dhd->bus;
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ if (bus->suspended == TRUE) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_INFO(("Bus is already suspended by system PM: %d\n", bus->suspended));
+ return FALSE;
+ }
+
+ bus->idlecount++;
+
+ DHD_TRACE(("%s : Enter \n", __FUNCTION__));
+ if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+ bus->idlecount = 0;
+ if (dhd->dhd_bus_busy_state == 0 && dhd->busstate != DHD_BUS_DOWN &&
+ dhd->busstate != DHD_BUS_DOWN_IN_PROGRESS) {
+ bus->bus_wake = 0;
+ dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS;
+ bus->runtime_resume_done = FALSE;
+ /* stop all interface network queues */
+ dhd_bus_stop_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
+ __FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+ /* If RPM suspend fails, return FALSE so the caller retries later */
+ if (dhdpcie_set_suspend_resume(bus->dev, TRUE)) {
+ DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS;
+ bus->runtime_resume_done = TRUE;
+ /* Without the call below the NET TX queue can get stuck */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS;
+ dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE;
+ /* Make sure the NET TX queue stays active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ wait_event_interruptible(bus->rpm_queue, bus->bus_wake);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE;
+ dhd->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ dhdpcie_set_suspend_resume(bus->dev, FALSE);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS;
+ /* Inform the wake up context that Resume is over */
+ bus->runtime_resume_done = TRUE;
+ /* Make sure the NET TX queue stays active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ DHD_ERROR(("%s : runtime resume ended\n", __FUNCTION__));
+ return TRUE;
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ /* One of the contexts (TX, IOVAR or RX) is busy,
+ * so we should not suspend
+ */
+ DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, dhd->dhd_bus_busy_state));
+ return FALSE;
+ }
+ }
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+} /* dhd_runtimepm_state */
+
+/*
+ * dhd_runtime_bus_wake
+ * Returns TRUE when the wake-up is handled by the runtime PM context,
+ * FALSE when runtime PM is not involved.
+ */
+bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
+{
+ unsigned long flags;
+ bus->idlecount = 0;
+ DHD_TRACE(("%s : enter\n", __FUNCTION__));
+ if (bus->dhd->up == FALSE) {
+ DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL) {
+ /* Wake up the RPM state thread if suspend is in progress or already done */
+ if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS ||
+ bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE) {
+ bus->bus_wake = 1;
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ /* No need to wake up the RPM state thread */
+ } else if (bus->dhd->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS) {
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+
+ /* If wait is TRUE, block here until the resume has completed */
+ if (wait) {
+ wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
+ } else {
+ DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
+ }
+ /* If it is called from RPM context, it returns TRUE */
+ return TRUE;
+ }
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return FALSE;
+}
+
+bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return dhd_runtime_bus_wake(bus, wait, func_addr);
+}
+
+void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bus->idletime = 0;
+}
+
+bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->runtime_resume_done;
+}
+#endif /* DHD_PCIE_RUNTIMEPM */
* Broadcom Dongle Host Driver (DHD)
* Preferred Network Offload and Wi-Fi Location Service (WLS) code.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pno.c 606280 2015-12-15 05:28:25Z $
*/
+
+#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
+#error "GSCAN needs PNO to be enabled!"
+#endif
+
#ifdef PNO_SUPPORT
#include <typedefs.h>
#include <osl.h>
#include <dhd.h>
#include <dhd_pno.h>
#include <dhd_dbg.h>
+#ifdef GSCAN_SUPPORT
+#include <linux/gcd.h>
+#endif /* GSCAN_SUPPORT */
#ifdef __BIG_ENDIAN
#include <bcmendian.h>
#define PNO_ON 1
#define PNO_OFF 0
#define CHANNEL_2G_MAX 14
+#define CHANNEL_5G_MAX 165
#define MAX_NODE_CNT 5
#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \
- (uint32)(timestamp2/1000)))
+#define TIME_DIFF_MS(timestamp1, timestamp2) (abs((uint32)(timestamp1) \
+ - (uint32)(timestamp2)))
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+ (ts).tv_nsec / NSEC_PER_USEC)
#define ENTRY_OVERHEAD strlen("bssid=\nssid=\nfreq=\nlevel=\nage=\ndist=\ndistSd=\n====")
#define TIME_MIN_DIFF 5
+static wlc_ssid_ext_t * dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd,
+ dhd_pno_status_info_t *pno_state);
+#ifdef GSCAN_SUPPORT
+static wl_pfn_gscan_channel_bucket_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
+uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
+#endif /* GSCAN_SUPPORT */
+
static inline bool
is_dfs(uint16 channel)
{
return err;
}
+bool
+dhd_is_pno_supported(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
+ return FALSE;
+ }
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ return WLS_SUPPORTED(_pno_state);
+}
+
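+/*
+ * dhd_pno_set_mac_oui
+ * Store the (unicast) OUI to be used for the PNO scan MAC address; it is
+ * pushed to firmware when the PFN parameters are programmed (pfn_macaddr iovar).
+ */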
+int
+dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+
+ if (!dhd || !dhd->pno_state) {
+ DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ if (ETHER_ISMULTI(oui)) {
+ DHD_ERROR(("Expected unicast OUI\n"));
+ err = BCME_ERROR;
+ } else {
+ memcpy(_pno_state->pno_oui, oui, DOT11_OUI_LEN);
+ DHD_PNO(("PNO mac oui to be used - %02x:%02x:%02x\n", _pno_state->pno_oui[0],
+ _pno_state->pno_oui[1], _pno_state->pno_oui[2]));
+ }
+
+ return err;
+}
+
+#ifdef GSCAN_SUPPORT
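+/* Convert a firmware-relative age (ms) into an absolute host boottime timestamp in us */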
+static uint64
+convert_fw_rel_time_to_systime(uint32 fw_ts_ms)
+{
+ struct timespec ts;
+
+ get_monotonic_boottime(&ts);
+ return ((uint64)(TIMESPEC_TO_US(ts)) - (uint64)(fw_ts_ms * 1000));
+}
+
+static int
+_dhd_pno_gscan_cfg(dhd_pub_t *dhd, wl_pfn_gscan_cfg_t *pfncfg_gscan_param, int size)
+{
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ err = dhd_iovar(dhd, 0, "pfn_gscan_cfg", (char *)pfncfg_gscan_param, size, 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfncfg_gscan_param\n", __FUNCTION__));
+ goto exit;
+ }
+exit:
+ return err;
+}
+
+static bool
+is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
+{
+ smp_rmb();
+ return (gscan_params->get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE);
+}
+#endif /* GSCAN_SUPPORT */
+
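+/*
+ * dhd_pno_set_mac_addr
+ * Program the PFN MAC address via the "pfn_macaddr" iovar; a null address
+ * clears the flags, otherwise the OUI-only and unassoc-scan flags are set.
+ */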
+static int
+dhd_pno_set_mac_addr(dhd_pub_t *dhd, struct ether_addr *macaddr)
+{
+ int err;
+ wl_pfn_macaddr_cfg_t cfg;
+
+ cfg.version = WL_PFN_MACADDR_CFG_VER;
+ if (ETHER_ISNULLADDR(macaddr)) {
+ cfg.flags = 0;
+ } else {
+ cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+ }
+ memcpy(&cfg.macaddr, macaddr, ETHER_ADDR_LEN);
+
+ err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&cfg, sizeof(cfg), 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_macaddr\n", __FUNCTION__));
+ }
+
+ return err;
+}
+
static int
_dhd_pno_suspend(dhd_pub_t *dhd)
{
}
if (enable) {
if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
- dhd_is_associated(dhd, NULL, NULL)) {
+ dhd_is_associated(dhd, 0, NULL)) {
DHD_ERROR(("%s Legacy PNO mode cannot be enabled "
"in assoc mode , ignore it\n", __FUNCTION__));
err = BCME_BADOPTION;
/* Enable/Disable PNO */
err = dhd_iovar(dhd, 0, "pfn", (char *)&enable, sizeof(enable), 1);
if (err < 0) {
- DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to execute pfn_set - %d\n", __FUNCTION__, err));
goto exit;
}
_pno_state->pno_status = (enable)?
dhd_pno_params_t *_params;
dhd_pno_status_info_t *_pno_state;
bool combined_scan = FALSE;
+ struct ether_addr macaddr;
DHD_PNO(("%s enter\n", __FUNCTION__));
NULL_CHECK(dhd, "dhd is NULL", err);
mode |= DHD_PNO_HOTLIST_MODE;
combined_scan = TRUE;
}
+#ifdef GSCAN_SUPPORT
+ else if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ DHD_PNO(("will enable combined scan with GSCAN SCAN MODE\n"));
+ mode |= DHD_PNO_GSCAN_MODE;
+ }
+#endif /* GSCAN_SUPPORT */
}
if (mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
/* Scan frequency of 30 sec */
/* slow adapt scan is off by default */
pfn_param.slow_freq = htod32(0);
/* RSSI margin of 30 dBm */
- pfn_param.rssi_margin = htod16(30);
+ pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
/* Network timeout 60 sec */
pfn_param.lost_network_timeout = htod32(60);
/* best n = 2 by default */
}
}
}
- if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
- pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
- DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
- __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
- err = BCME_BADARG;
+#ifdef GSCAN_SUPPORT
+ if (mode & DHD_PNO_GSCAN_MODE) {
+ uint32 lost_network_timeout;
+
+ pfn_param.scan_freq = htod32(pno_params->params_gscan.scan_fr);
+ if (pno_params->params_gscan.mscan) {
+ pfn_param.bestn = pno_params->params_gscan.bestn;
+ pfn_param.mscan = pno_params->params_gscan.mscan;
+ pfn_param.flags |= (ENABLE << ENABLE_BD_SCAN_BIT);
+ }
+ /* RSSI margin of 30 dBm */
+ pfn_param.rssi_margin = htod16(PNO_RSSI_MARGIN_DBM);
+ /* ADAPTIVE turned off */
+ pfn_param.flags &= ~(htod16(ENABLE << ENABLE_ADAPTSCAN_BIT));
+ pfn_param.repeat = 0;
+ pfn_param.exp = 0;
+ pfn_param.slow_freq = 0;
+
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ dhd_pno_params_t *_params;
+
+ _params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
+
+ pfn_param.scan_freq = htod32(MIN(pno_params->params_gscan.scan_fr,
+ _params->params_legacy.scan_fr));
+ }
+
+ lost_network_timeout = (pno_params->params_gscan.max_ch_bucket_freq *
+ pfn_param.scan_freq *
+ pno_params->params_gscan.lost_ap_window);
+ if (lost_network_timeout) {
+ pfn_param.lost_network_timeout = htod32(MIN(lost_network_timeout,
+ GSCAN_MIN_BSSID_TIMEOUT));
+ } else {
+ pfn_param.lost_network_timeout = htod32(GSCAN_MIN_BSSID_TIMEOUT);
+ }
+ } else
+#endif /* GSCAN_SUPPORT */
+ {
+ if (pfn_param.scan_freq < htod32(PNO_SCAN_MIN_FW_SEC) ||
+ pfn_param.scan_freq > htod32(PNO_SCAN_MAX_FW_SEC)) {
+ DHD_ERROR(("%s pno freq(%d sec) is not valid \n",
+ __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ }
+
+ memset(&macaddr, 0, ETHER_ADDR_LEN);
+ memcpy(&macaddr, _pno_state->pno_oui, DOT11_OUI_LEN);
+
+ DHD_PNO(("Setting mac oui to FW - %02x:%02x:%02x\n", _pno_state->pno_oui[0],
+ _pno_state->pno_oui[1], _pno_state->pno_oui[2]));
+ err = dhd_pno_set_mac_addr(dhd, &macaddr);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set pno mac address, error - %d\n", __FUNCTION__, err));
goto exit;
}
- if (mode == DHD_PNO_BATCH_MODE) {
+
+
+#ifdef GSCAN_SUPPORT
+ if (mode == DHD_PNO_BATCH_MODE ||
+ ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan)) {
+#else
+ if (mode == DHD_PNO_BATCH_MODE) {
+#endif /* GSCAN_SUPPORT */
+
int _tmp = pfn_param.bestn;
/* set bestn to calculate the max mscan which firmware supports */
err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1);
}
err = dhd_iovar(dhd, 0, "pfn_set", (char *)&pfn_param, sizeof(pfn_param), 1);
if (err < 0) {
- DHD_ERROR(("%s : failed to execute pfn_set\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to execute pfn_set %d\n", __FUNCTION__, err));
goto exit;
}
/* need to return mscan if this is for batch scan instead of err */
return err;
}
static int
-_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssids_list, int nssid)
+_dhd_pno_add_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssids_list, int nssid)
{
int err = BCME_OK;
int i = 0;
{
int j;
for (j = 0; j < nssid; j++) {
- DHD_PNO(("%d: scan for %s size = %d\n", j,
- ssids_list[j].SSID, ssids_list[j].SSID_len));
+ DHD_PNO(("%d: scan for %s size = %d hidden = %d\n", j,
+ ssids_list[j].SSID, ssids_list[j].SSID_len, ssids_list[j].hidden));
}
}
/* Check for broadcast ssid */
pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
pfn_element.wsec = htod32(0);
pfn_element.infra = htod32(1);
- pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ if (ssids_list[i].hidden) {
+ pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
+ } else {
+ pfn_element.flags = 0;
+ }
memcpy((char *)pfn_element.ssid.SSID, ssids_list[i].SSID,
ssids_list[i].SSID_len);
pfn_element.ssid.SSID_len = ssids_list[i].SSID_len;
if (skip_dfs && is_dfs(dtoh32(list->element[i])))
continue;
- } else { /* All channels */
- if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+
+ } else if (band == WLC_BAND_AUTO) {
+ if (skip_dfs || !is_dfs(dtoh32(list->element[i])))
+ continue;
+ } else { /* All channels */
+ if (skip_dfs && is_dfs(dtoh32(list->element[i])))
continue;
}
- d_chan_list[j++] = dtoh32(list->element[i]);
+ if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) {
+ d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
+ } else {
+ err = BCME_BADCHAN;
+ goto exit;
+ }
}
*nchan = j;
exit:
if (nbssid) {
NULL_CHECK(p_pfn_bssid, "bssid list is NULL", err);
}
- err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)&p_pfn_bssid,
+ err = dhd_iovar(dhd, 0, "pfn_add_bssid", (char *)p_pfn_bssid,
sizeof(wl_pfn_bssid_t) * nbssid, 1);
if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_cfg\n", __FUNCTION__));
exit:
return err;
}
+
+#ifdef GSCAN_SUPPORT
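+/* Download the significant-change (SWC) BSSID list via the "pfn_add_swc_bssid" iovar */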
+static int
+_dhd_pno_add_significant_bssid(dhd_pub_t *dhd,
+ wl_pfn_significant_bssid_t *p_pfn_significant_bssid, int nbssid)
+{
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ if (!nbssid) {
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ NULL_CHECK(p_pfn_significant_bssid, "bssid list is NULL", err);
+
+ err = dhd_iovar(dhd, 0, "pfn_add_swc_bssid", (char *)p_pfn_significant_bssid,
+ sizeof(wl_pfn_significant_bssid_t) * nbssid, 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_significant_bssid %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+exit:
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
int
dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
{
uint32 mode = 0;
dhd_pno_status_info_t *_pno_state;
dhd_pno_params_t *_params;
- wl_pfn_bssid_t *p_pfn_bssid;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
NULL_CHECK(dhd, "dev is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
_pno_state = PNO_GET_PNOSTATE(dhd);
}
DHD_PNO(("%s enter\n", __FUNCTION__));
_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ struct dhd_pno_gscan_params *gscan_params;
+
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &_params->params_gscan;
+
+ if (gscan_params->mscan)
+ dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
+ /* save current pno_mode before calling dhd_pno_clean */
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ /* restore previous pno_mode */
+ _pno_state->pno_mode = mode;
+ /* Restart gscan */
+ err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+ goto exit;
+ }
+#endif /* GSCAN_SUPPORT */
/* restart Batch mode if the batch mode is on */
if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
/* retrieve the batching data from firmware into host */
}
}
exit:
+ kfree(p_pfn_bssid);
return err;
}
return (_dhd_pno_enable(dhd, enable));
}
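+/*
+ * Build a kzalloc'd wlc_ssid_ext_t array from the cached legacy PNO SSID list;
+ * the caller is responsible for kfree()ing the returned array.
+ */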
+static wlc_ssid_ext_t *
+dhd_pno_get_legacy_pno_ssid(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state)
+{
+ int err = BCME_OK;
+ int i;
+ struct dhd_pno_ssid *iter, *next;
+ dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ wlc_ssid_ext_t *p_ssid_list;
+
+ p_ssid_list = kzalloc(sizeof(wlc_ssid_ext_t) *
+ _params1->params_legacy.nssid, GFP_KERNEL);
+ if (p_ssid_list == NULL) {
+ DHD_ERROR(("%s : failed to allocate wlc_ssid_ext_t array (count: %d)",
+ __FUNCTION__, _params1->params_legacy.nssid));
+ err = BCME_ERROR;
+ pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ goto exit;
+ }
+ i = 0;
+ /* convert dhd_pno_ssid to wlc_ssid_ext_t */
+ list_for_each_entry_safe(iter, next, &_params1->params_legacy.ssid_list, list) {
+ p_ssid_list[i].SSID_len = iter->SSID_len;
+ p_ssid_list[i].hidden = iter->hidden;
+ memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
+ i++;
+ }
+exit:
+ return p_ssid_list;
+}
+
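+/* Validate SSID lengths and append copies of the given SSIDs to the legacy PNO ssid_list */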
+static int
+dhd_pno_add_to_ssid_list(dhd_pno_params_t *params, wlc_ssid_ext_t *ssid_list,
+ int nssid)
+{
+ int ret = 0;
+ int i;
+ struct dhd_pno_ssid *_pno_ssid;
+
+ for (i = 0; i < nssid; i++) {
+ if (ssid_list[i].SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s : Invalid SSID length %d\n",
+ __FUNCTION__, ssid_list[i].SSID_len));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
+ if (_pno_ssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
+ __FUNCTION__));
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ _pno_ssid->SSID_len = ssid_list[i].SSID_len;
+ _pno_ssid->hidden = ssid_list[i].hidden;
+ memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
+ list_add_tail(&_pno_ssid->list, ¶ms->params_legacy.ssid_list);
+ }
+
+exit:
+ return ret;
+}
+
int
-dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
- struct dhd_pno_ssid *_pno_ssid;
dhd_pno_params_t *_params;
dhd_pno_params_t *_params2;
dhd_pno_status_info_t *_pno_state;
if (!dhd_support_sta_mode(dhd)) {
err = BCME_BADOPTION;
- goto exit;
+ goto exit_no_clear;
}
DHD_PNO(("%s enter : scan_fr :%d, pno_repeat :%d,"
"pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
scan_fr, pno_repeat, pno_freq_expo_max, nchan));
_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
- if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ /* If GSCAN is also ON will handle this down below */
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE &&
+ !(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+#else
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+#endif /* GSCAN_SUPPORT */
DHD_ERROR(("%s : Legacy PNO mode was already started, "
"will disable previous one to start new one\n", __FUNCTION__));
err = dhd_pno_stop_for_ssid(dhd);
if (err < 0) {
DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
__FUNCTION__, err));
- goto exit;
+ goto exit_no_clear;
}
}
_pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
if (err < 0) {
DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
__FUNCTION__, err));
- goto exit;
+ goto exit_no_clear;
}
memset(_chan_list, 0, sizeof(_chan_list));
- tot_nchan = nchan;
+ tot_nchan = MIN(nchan, WL_NUMCHANNELS);
if (tot_nchan > 0 && channel_list) {
- for (i = 0; i < nchan; i++)
+ for (i = 0; i < tot_nchan; i++)
_params->params_legacy.chan_list[i] = _chan_list[i] = channel_list[i];
}
+#ifdef GSCAN_SUPPORT
+ else {
+ tot_nchan = WL_NUMCHANNELS;
+ err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan,
+ (WLC_BAND_2G | WLC_BAND_5G), TRUE);
+ if (err < 0) {
+ tot_nchan = 0;
+ DHD_PNO(("Could not get channel list for PNO SSID\n"));
+ } else {
+ for (i = 0; i < tot_nchan; i++)
+ _params->params_legacy.chan_list[i] = _chan_list[i];
+ }
+ }
+#endif /* GSCAN_SUPPORT */
+
if (_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_HOTLIST_MODE)) {
DHD_PNO(("BATCH SCAN is in progress in firmware\n"));
/* retrieve the batching data from firmware into host */
err = _dhd_pno_enable(dhd, PNO_OFF);
if (err < 0) {
DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
- goto exit;
+ goto exit_no_clear;
}
/* restore the previous mode */
_pno_state->pno_mode = mode;
/* use superset of channel list between two mode */
if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
_params2 = &(_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS]);
- if (_params2->params_batch.nchan > 0 && nchan > 0) {
+ if (_params2->params_batch.nchan > 0 && tot_nchan > 0) {
err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
&_params2->params_batch.chan_list[0],
_params2->params_batch.nchan,
- &channel_list[0], nchan);
+ &channel_list[0], tot_nchan);
if (err < 0) {
DHD_ERROR(("%s : failed to merge channel list"
" between legacy and batch\n",
__FUNCTION__));
- goto exit;
+ goto exit_no_clear;
}
} else {
DHD_PNO(("superset channel will use"
}
} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
_params2 = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
- if (_params2->params_hotlist.nchan > 0 && nchan > 0) {
+ if (_params2->params_hotlist.nchan > 0 && tot_nchan > 0) {
err = _dhd_pno_chan_merge(_chan_list, &tot_nchan,
&_params2->params_hotlist.chan_list[0],
_params2->params_hotlist.nchan,
- &channel_list[0], nchan);
+ &channel_list[0], tot_nchan);
if (err < 0) {
DHD_ERROR(("%s : failed to merge channel list"
" between legacy and hotlist\n",
__FUNCTION__));
- goto exit;
+ goto exit_no_clear;
}
}
}
_params->params_legacy.scan_fr = scan_fr;
_params->params_legacy.pno_repeat = pno_repeat;
_params->params_legacy.pno_freq_expo_max = pno_freq_expo_max;
- _params->params_legacy.nchan = nchan;
+ _params->params_legacy.nchan = tot_nchan;
_params->params_legacy.nssid = nssid;
INIT_LIST_HEAD(&_params->params_legacy.ssid_list);
+#ifdef GSCAN_SUPPORT
+ /* dhd_pno_initiate_gscan_request will handle simultaneous Legacy PNO and GSCAN */
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) {
+ err = BCME_ERROR;
+ goto exit;
+ }
+ DHD_PNO(("GSCAN mode is ON! Will restart GSCAN+Legacy PNO\n"));
+ err = dhd_pno_initiate_gscan_request(dhd, 1, 0);
+ goto exit;
+ }
+#endif /* GSCAN_SUPPORT */
if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_LEGACY_MODE)) < 0) {
DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
goto exit;
DHD_ERROR(("failed to add ssid list(err %d), %d in firmware\n", err, nssid));
goto exit;
}
- for (i = 0; i < nssid; i++) {
- _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
- if (_pno_ssid == NULL) {
- DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
- __FUNCTION__));
- goto exit;
- }
- _pno_ssid->SSID_len = ssid_list[i].SSID_len;
- memcpy(_pno_ssid->SSID, ssid_list[i].SSID, _pno_ssid->SSID_len);
- list_add_tail(&_pno_ssid->list, &_params->params_legacy.ssid_list);
-
+ if (dhd_pno_add_to_ssid_list(_params, ssid_list, nssid) < 0) {
+ err = BCME_ERROR;
+ goto exit;
}
if (tot_nchan > 0) {
if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
DHD_ERROR(("%s : failed to enable PNO\n", __FUNCTION__));
}
exit:
+ if (err < 0) {
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ }
+exit_no_clear:
/* clear mode in case of error */
- if (err < 0)
- _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ if (err < 0) {
+ int ret = dhd_pno_clean(dhd);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, ret));
+ } else {
+ _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ }
+ }
return err;
}
int
uint16 _chan_list[WL_NUMCHANNELS];
int rem_nchan = 0, tot_nchan = 0;
int mode = 0, mscan = 0;
- int i = 0;
dhd_pno_params_t *_params;
dhd_pno_params_t *_params2;
dhd_pno_status_info_t *_pno_state;
- wlc_ssid_t *p_ssid_list = NULL;
+ wlc_ssid_ext_t *p_ssid_list = NULL;
NULL_CHECK(dhd, "dhd is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
NULL_CHECK(batch_params, "batch_params is NULL", err);
tot_nchan = _params->params_batch.nchan;
}
if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
- struct dhd_pno_ssid *iter, *next;
DHD_PNO(("Legacy PNO SSID scan is in progress in firmware\n"));
/* store current pno_mode before disabling pno */
mode = _pno_state->pno_mode;
} else {
DHD_PNO(("superset channel will use all channels in firmware\n"));
}
- p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
- _params2->params_legacy.nssid, GFP_KERNEL);
- if (p_ssid_list == NULL) {
- DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
- __FUNCTION__, _params2->params_legacy.nssid));
- err = BCME_ERROR;
- _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+ if (!p_ssid_list) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
goto exit;
}
- i = 0;
- /* convert dhd_pno_ssid to dhd_pno_ssid */
- list_for_each_entry_safe(iter, next, &_params2->params_legacy.ssid_list, list) {
- p_ssid_list[i].SSID_len = iter->SSID_len;
- memcpy(p_ssid_list->SSID, iter->SSID, p_ssid_list[i].SSID_len);
- i++;
- }
if ((err = _dhd_pno_add_ssid(dhd, p_ssid_list,
_params2->params_legacy.nssid)) < 0) {
DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
return err;
}
-static int
-_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+
+#ifdef GSCAN_SUPPORT
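+/*
+ * Flush the cached gscan configuration selected by 'flags': scan/bucket
+ * settings, the hotlist BSSID list and/or the significant-change BSSID list.
+ */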
+static void
+dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params,
+ dhd_pno_status_info_t *_pno_state, uint8 flags)
{
- int err = BCME_OK;
- int i, j;
- uint32 timestamp = 0;
- dhd_pno_params_t *_params = NULL;
- dhd_pno_status_info_t *_pno_state = NULL;
- wl_pfn_lscanresults_t *plbestnet = NULL;
- wl_pfn_lnet_info_t *plnetinfo;
- dhd_pno_bestnet_entry_t *pbestnet_entry;
- dhd_pno_best_header_t *pbestnetheader = NULL;
- dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
- bool allocate_header = FALSE;
- NULL_CHECK(dhd, "dhd is NULL", err);
- NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
- if (!dhd_support_sta_mode(dhd)) {
- err = BCME_BADOPTION;
- goto exit;
- }
DHD_PNO(("%s enter\n", __FUNCTION__));
- _pno_state = PNO_GET_PNOSTATE(dhd);
- if (!WLS_SUPPORTED(_pno_state)) {
- DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
- err = BCME_UNSUPPORTED;
- goto exit;
+ if (flags & GSCAN_FLUSH_SCAN_CFG) {
+ _params->params_gscan.bestn = 0;
+ _params->params_gscan.mscan = 0;
+ _params->params_gscan.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+ _params->params_gscan.scan_fr = 0;
+ _params->params_gscan.send_all_results_flag = 0;
+ memset(_params->params_gscan.channel_bucket, 0,
+ _params->params_gscan.nchannel_buckets *
+ sizeof(struct dhd_pno_gscan_channel_bucket));
+ _params->params_gscan.nchannel_buckets = 0;
+ DHD_PNO(("Flush Scan config\n"));
+ }
+ if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
+ struct dhd_pno_bssid *iter, *next;
+ if (_params->params_gscan.nbssid_hotlist > 0) {
+ list_for_each_entry_safe(iter, next,
+ &_params->params_gscan.hotlist_bssid_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ _params->params_gscan.nbssid_hotlist = 0;
+ DHD_PNO(("Flush Hotlist Config\n"));
}
- if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
- DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
- goto exit;
+ if (flags & GSCAN_FLUSH_SIGNIFICANT_CFG) {
+ dhd_pno_significant_bssid_t *iter, *next;
+
+ if (_params->params_gscan.nbssid_significant_change > 0) {
+ list_for_each_entry_safe(iter, next,
+ &_params->params_gscan.significant_bssid_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ _params->params_gscan.nbssid_significant_change = 0;
+ DHD_PNO(("Flush Significant Change Config\n"));
}
+
+ return;
+}
+
+void
+dhd_pno_lock_batch_results(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ _pno_state = PNO_GET_PNOSTATE(dhd);
mutex_lock(&_pno_state->pno_mutex);
- _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
- if (buf && bufsize) {
+ return;
+}
+
+void
+dhd_pno_unlock_batch_results(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ mutex_unlock(&_pno_state->pno_mutex);
+ return;
+}
+
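+/*
+ * Wait for an in-progress batch retrieval to finish; if nothing is cached,
+ * kick off a fresh retrieval from firmware and wait for it to complete.
+ */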
+void
+dhd_wait_batch_results_complete(dhd_pub_t *dhd)
+{
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ /* Has the workqueue finished its job already?? */
+ if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_IN_PROGRESS) {
+ DHD_PNO(("%s: Waiting to complete retrieval..\n", __FUNCTION__));
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(&_params->params_gscan),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ } else { /* GSCAN_BATCH_RETRIEVAL_COMPLETE */
+ gscan_results_cache_t *iter;
+ uint16 num_results = 0;
+ int err;
+
+ mutex_lock(&_pno_state->pno_mutex);
+ iter = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ iter = iter->next;
+ }
+ mutex_unlock(&_pno_state->pno_mutex);
+
+ /* All results consumed/No results cached??
+ * Get fresh results from FW
+ */
+ if (!num_results) {
+ DHD_PNO(("%s: No results cached, getting from FW..\n", __FUNCTION__));
+ err = dhd_retreive_batch_scan_results(dhd);
+ if (err == BCME_OK) {
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(&_params->params_gscan),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ }
+ }
+ }
+ DHD_PNO(("%s: Wait complete\n", __FUNCTION__));
+
+ return;
+}
+
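+/*
+ * Return the cached gscan batch results; *len packs the pending result count
+ * in the upper 16 bits and the number of scan ids in the lower 16 bits.
+ */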
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+ gscan_results_cache_t *iter, *results;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ uint16 num_scan_ids = 0, num_results = 0;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ iter = results = _params->params_gscan.gscan_batch_cache;
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ num_scan_ids++;
+ iter = iter->next;
+ }
+
+ *len = ((num_results << 16) | (num_scan_ids));
+ return results;
+}
+
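+/*
+ * Gscan "get" handler: returns capabilities, the cached batch results or the
+ * valid channel list (converted to MHz), depending on 'type'.
+ */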
+void *
+dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
+{
+ void *ret = NULL;
+ dhd_pno_gscan_capabilities_t *ptr;
+
+ if (!len) {
+ DHD_ERROR(("%s: len is NULL\n", __FUNCTION__));
+ return ret;
+ }
+
+ switch (type) {
+ case DHD_PNO_GET_CAPABILITIES:
+ ptr = (dhd_pno_gscan_capabilities_t *)
+ kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL);
+ if (!ptr)
+ break;
+ /* Hardcoding these values for now, need to get
+ * these values from FW, will change in a later check-in
+ */
+ ptr->max_scan_cache_size = 12;
+ ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
+ ptr->max_ap_cache_per_scan = 16;
+ ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
+ ptr->max_scan_reporting_threshold = 100;
+ ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
+ ptr->max_significant_wifi_change_aps = PFN_SWC_MAX_NUM_APS;
+ ret = (void *)ptr;
+ *len = sizeof(dhd_pno_gscan_capabilities_t);
+ break;
+
+ case DHD_PNO_GET_BATCH_RESULTS:
+ ret = dhd_get_gscan_batch_results(dhd, len);
+ break;
+ case DHD_PNO_GET_CHANNEL_LIST:
+ if (info) {
+ uint16 ch_list[WL_NUMCHANNELS];
+ uint32 *ptr, mem_needed, i;
+ int32 err, nchan = WL_NUMCHANNELS;
+ uint32 *gscan_band = (uint32 *) info;
+ uint8 band = 0;
+
+ /* No band specified? Nothing to do */
+ if ((*gscan_band & GSCAN_BAND_MASK) == 0) {
+ DHD_PNO(("No band specified\n"));
+ *len = 0;
+ break;
+ }
+
+ /* HAL and DHD use different bits for 2.4G and
+ * 5G in bitmap. Hence translating it here...
+ */
+ if (*gscan_band & GSCAN_BG_BAND_MASK) {
+ band |= WLC_BAND_2G;
+ }
+ if (*gscan_band & GSCAN_A_BAND_MASK) {
+ band |= WLC_BAND_5G;
+ }
+
+ err = _dhd_pno_get_channels(dhd, ch_list, &nchan,
+ (band & GSCAN_ABG_BAND_MASK),
+ !(*gscan_band & GSCAN_DFS_MASK));
+
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list\n",
+ __FUNCTION__));
+ *len = 0;
+ } else {
+ mem_needed = sizeof(uint32) * nchan;
+ ptr = (uint32 *) kmalloc(mem_needed, GFP_KERNEL);
+ if (!ptr) {
+ DHD_ERROR(("%s: Unable to malloc %d bytes\n",
+ __FUNCTION__, mem_needed));
+ break;
+ }
+ for (i = 0; i < nchan; i++) {
+ ptr[i] = wf_channel2mhz(ch_list[i],
+ (ch_list[i] <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ }
+ ret = ptr;
+ *len = mem_needed;
+ }
+ } else {
+ *len = 0;
+ DHD_ERROR(("%s: info buffer is NULL\n", __FUNCTION__));
+ }
+ break;
+
+ default:
+ DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+ break;
+ }
+
+ return ret;
+
+}
+
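+/*
+ * Cache gscan configuration from the caller under pno_mutex: batch parameters,
+ * hotlist and significant-change BSSID lists, and channel buckets (bucket
+ * frequencies are normalised to multiples of the base scan period).
+ */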
+int
+dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, uint8 flush)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *_params;
+ int i;
+ dhd_pno_status_info_t *_pno_state;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ mutex_lock(&_pno_state->pno_mutex);
+
+ switch (type) {
+ case DHD_PNO_BATCH_SCAN_CFG_ID:
+ {
+ gscan_batch_params_t *ptr = (gscan_batch_params_t *)buf;
+ _params->params_gscan.bestn = ptr->bestn;
+ _params->params_gscan.mscan = ptr->mscan;
+ _params->params_gscan.buffer_threshold = ptr->buffer_threshold;
+ break;
+ }
+ case DHD_PNO_GEOFENCE_SCAN_CFG_ID:
+ {
+ gscan_hotlist_scan_params_t *ptr = (gscan_hotlist_scan_params_t *)buf;
+ struct dhd_pno_bssid *_pno_bssid;
+ struct bssid_t *bssid_ptr;
+ int8 flags;
+
+ if (flush) {
+ dhd_pno_reset_cfg_gscan(_params, _pno_state,
+ GSCAN_FLUSH_HOTLIST_CFG);
+ }
+
+ if (!ptr->nbssid) {
+ break;
+ }
+ if (!_params->params_gscan.nbssid_hotlist) {
+ INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
+ }
+ if ((_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+ DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
+ (_params->params_gscan.nbssid_hotlist +
+ ptr->nbssid)));
+ err = BCME_RANGE;
+ goto exit;
+ }
+
+ for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
+ _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+
+ if (!_pno_bssid) {
+ DHD_ERROR(("_pno_bssid is NULL, cannot kzalloc %zd bytes",
+ sizeof(struct dhd_pno_bssid)));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ memcpy(&_pno_bssid->macaddr, &bssid_ptr->macaddr, ETHER_ADDR_LEN);
+
+ flags = (int8) bssid_ptr->rssi_reporting_threshold;
+ _pno_bssid->flags = flags << WL_PFN_RSSI_SHIFT;
+ list_add_tail(&_pno_bssid->list,
+ &_params->params_gscan.hotlist_bssid_list);
+ }
+
+ _params->params_gscan.nbssid_hotlist += ptr->nbssid;
+ _params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+ break;
+ }
+ case DHD_PNO_SIGNIFICANT_SCAN_CFG_ID:
+ {
+ gscan_swc_params_t *ptr = (gscan_swc_params_t *)buf;
+ dhd_pno_significant_bssid_t *_pno_significant_change_bssid;
+ wl_pfn_significant_bssid_t *significant_bssid_ptr;
+
+ if (flush) {
+ dhd_pno_reset_cfg_gscan(_params, _pno_state,
+ GSCAN_FLUSH_SIGNIFICANT_CFG);
+ }
+
+ if (!ptr->nbssid) {
+ break;
+ }
+ if (!_params->params_gscan.nbssid_significant_change) {
+ INIT_LIST_HEAD(&_params->params_gscan.significant_bssid_list);
+ }
+ if ((_params->params_gscan.nbssid_significant_change +
+ ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
+ DHD_ERROR(("Excessive number of SWC APs programmed %d\n",
+ (_params->params_gscan.nbssid_significant_change +
+ ptr->nbssid)));
+ err = BCME_RANGE;
+ goto exit;
+ }
+
+ for (i = 0, significant_bssid_ptr = ptr->bssid_elem_list;
+ i < ptr->nbssid; i++, significant_bssid_ptr++) {
+ _pno_significant_change_bssid =
+ kzalloc(sizeof(dhd_pno_significant_bssid_t),
+ GFP_KERNEL);
+
+ if (!_pno_significant_change_bssid) {
+ DHD_ERROR(("SWC bssidptr is NULL, cannot kzalloc %zd bytes",
+ sizeof(dhd_pno_significant_bssid_t)));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ memcpy(&_pno_significant_change_bssid->BSSID,
+ &significant_bssid_ptr->macaddr, ETHER_ADDR_LEN);
+ _pno_significant_change_bssid->rssi_low_threshold =
+ significant_bssid_ptr->rssi_low_threshold;
+ _pno_significant_change_bssid->rssi_high_threshold =
+ significant_bssid_ptr->rssi_high_threshold;
+ list_add_tail(&_pno_significant_change_bssid->list,
+ &_params->params_gscan.significant_bssid_list);
+ }
+
+ _params->params_gscan.swc_nbssid_threshold = ptr->swc_threshold;
+ _params->params_gscan.swc_rssi_window_size = ptr->rssi_window;
+ _params->params_gscan.lost_ap_window = ptr->lost_ap_window;
+ _params->params_gscan.nbssid_significant_change += ptr->nbssid;
+ break;
+ }
+ case DHD_PNO_SCAN_CFG_ID:
+ {
+ int i, k, valid = 0;
+ uint16 band, min;
+ gscan_scan_params_t *ptr = (gscan_scan_params_t *)buf;
+ struct dhd_pno_gscan_channel_bucket *ch_bucket;
+
+ if (ptr->nchannel_buckets <= GSCAN_MAX_CH_BUCKETS) {
+ _params->params_gscan.nchannel_buckets = ptr->nchannel_buckets;
+
+ memcpy(_params->params_gscan.channel_bucket, ptr->channel_bucket,
+ _params->params_gscan.nchannel_buckets *
+ sizeof(struct dhd_pno_gscan_channel_bucket));
+ min = ptr->channel_bucket[0].bucket_freq_multiple;
+ ch_bucket = _params->params_gscan.channel_bucket;
+
+ for (i = 0; i < ptr->nchannel_buckets; i++) {
+ band = ch_bucket[i].band;
+ for (k = 0; k < ptr->channel_bucket[i].num_channels; k++) {
+ ch_bucket[i].chan_list[k] =
+ wf_mhz2channel(ptr->channel_bucket[i].chan_list[k],
+ 0);
+ }
+ ch_bucket[i].band = 0;
+ /* HAL and DHD use different bits for 2.4G and
+ * 5G in bitmap. Hence translating it here...
+ */
+ if (band & GSCAN_BG_BAND_MASK)
+ ch_bucket[i].band |= WLC_BAND_2G;
+
+ if (band & GSCAN_A_BAND_MASK)
+ ch_bucket[i].band |= WLC_BAND_5G;
+
+ if (band & GSCAN_DFS_MASK)
+ ch_bucket[i].band |= GSCAN_DFS_MASK;
+ if (ptr->scan_fr ==
+ ptr->channel_bucket[i].bucket_freq_multiple) {
+ valid = 1;
+ }
+ if (ptr->channel_bucket[i].bucket_freq_multiple < min)
+ min = ptr->channel_bucket[i].bucket_freq_multiple;
+
+ DHD_PNO(("band %d report_flag %d\n", ch_bucket[i].band,
+ ch_bucket[i].report_flag));
+ }
+ if (!valid)
+ ptr->scan_fr = min;
+
+ for (i = 0; i < ptr->nchannel_buckets; i++) {
+ ch_bucket[i].bucket_freq_multiple =
+ ch_bucket[i].bucket_freq_multiple/ptr->scan_fr;
+ }
+ _params->params_gscan.scan_fr = ptr->scan_fr;
+
+ DHD_PNO(("num_buckets %d scan_fr %d\n", ptr->nchannel_buckets,
+ _params->params_gscan.scan_fr));
+ } else {
+ err = BCME_BADARG;
+ }
+ break;
+ }
+ default:
+ err = BCME_BADARG;
+ DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+ break;
+ }
+exit:
+ mutex_unlock(&_pno_state->pno_mutex);
+ return err;
+
+}
+
+
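+/* Sanity-check gscan parameters: non-zero scan frequency and bucket count,
+ * and explicit channel numbers no higher than CHANNEL_5G_MAX.
+ */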
+static bool
+validate_gscan_params(struct dhd_pno_gscan_params *gscan_params)
+{
+ unsigned int i, k;
+
+ if (!gscan_params->scan_fr || !gscan_params->nchannel_buckets) {
+ DHD_ERROR(("%s : Scan freq - %d or number of channel buckets - %d is empty\n",
+ __FUNCTION__, gscan_params->scan_fr, gscan_params->nchannel_buckets));
+ return false;
+ }
+
+ for (i = 0; i < gscan_params->nchannel_buckets; i++) {
+ if (!gscan_params->channel_bucket[i].band) {
+ for (k = 0; k < gscan_params->channel_bucket[i].num_channels; k++) {
+ if (gscan_params->channel_bucket[i].chan_list[k] > CHANNEL_5G_MAX) {
+ DHD_ERROR(("%s : Unknown channel %d\n", __FUNCTION__,
+ gscan_params->channel_bucket[i].chan_list[k]));
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
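+/*
+ * Program gscan into firmware: build the channel list from the buckets, clean
+ * and re-set PFN (re-adding legacy PNO SSIDs if that mode is also active),
+ * download the gscan config plus any significant-change and hotlist BSSIDs,
+ * then enable PNO.
+ */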
+static int
+dhd_pno_set_for_gscan(dhd_pub_t *dhd, struct dhd_pno_gscan_params *gscan_params)
+{
+ int err = BCME_OK;
+ int mode, i = 0, k;
+ uint16 _chan_list[WL_NUMCHANNELS];
+ int tot_nchan = 0;
+ int num_buckets_to_fw, tot_num_buckets, gscan_param_size;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ wl_pfn_gscan_channel_bucket_t *ch_bucket = NULL;
+ wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+ wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
+ wlc_ssid_ext_t *pssid_list = NULL;
+ dhd_pno_params_t *params_legacy;
+ dhd_pno_params_t *_params;
+
+ params_legacy = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ NULL_CHECK(gscan_params, "gscan_params is NULL", err);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!validate_gscan_params(gscan_params)) {
+ DHD_ERROR(("%s : Cannot start gscan - bad params\n", __FUNCTION__));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ /* Create channel list based on channel buckets */
+ if (!(ch_bucket = dhd_pno_gscan_create_channel_list(dhd, _pno_state,
+ _chan_list, &tot_num_buckets, &num_buckets_to_fw))) {
+ goto exit;
+ }
+
+ if (_pno_state->pno_mode & (DHD_PNO_GSCAN_MODE | DHD_PNO_LEGACY_MODE)) {
+ /* store current pno_mode before disabling pno */
+ mode = _pno_state->pno_mode;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to disable PNO\n", __FUNCTION__));
+ goto exit;
+ }
+ /* restore the previous mode */
+ _pno_state->pno_mode = mode;
+ }
+
+ _pno_state->pno_mode |= DHD_PNO_GSCAN_MODE;
+
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+
+ if (!pssid_list) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+ goto exit;
+ }
+
+ if ((err = _dhd_pno_add_ssid(dhd, pssid_list,
+ params_legacy->params_legacy.nssid)) < 0) {
+ DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+ goto exit;
+ }
+ }
+
+ if ((err = _dhd_pno_set(dhd, _params, DHD_PNO_GSCAN_MODE)) < 0) {
+ DHD_ERROR(("failed to set call pno_set (err %d) in firmware\n", err));
+ goto exit;
+ }
+
+ gscan_param_size = sizeof(wl_pfn_gscan_cfg_t) +
+ (num_buckets_to_fw - 1) * sizeof(wl_pfn_gscan_channel_bucket_t);
+ pfn_gscan_cfg_t = (wl_pfn_gscan_cfg_t *) MALLOC(dhd->osh, gscan_param_size);
+
+ if (!pfn_gscan_cfg_t) {
+ DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+ __FUNCTION__, gscan_param_size));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+
+ if (gscan_params->mscan) {
+ pfn_gscan_cfg_t->buffer_threshold = gscan_params->buffer_threshold;
+ } else {
+ pfn_gscan_cfg_t->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+ }
+ if (gscan_params->nbssid_significant_change) {
+ pfn_gscan_cfg_t->swc_nbssid_threshold = gscan_params->swc_nbssid_threshold;
+ pfn_gscan_cfg_t->swc_rssi_window_size = gscan_params->swc_rssi_window_size;
+ pfn_gscan_cfg_t->lost_ap_window = gscan_params->lost_ap_window;
+ } else {
+ pfn_gscan_cfg_t->swc_nbssid_threshold = 0;
+ pfn_gscan_cfg_t->swc_rssi_window_size = 0;
+ pfn_gscan_cfg_t->lost_ap_window = 0;
+ }
+
+ pfn_gscan_cfg_t->flags =
+ (gscan_params->send_all_results_flag & GSCAN_SEND_ALL_RESULTS_MASK);
+ pfn_gscan_cfg_t->count_of_channel_buckets = num_buckets_to_fw;
+
+
+ for (i = 0, k = 0; i < tot_num_buckets; i++) {
+ if (ch_bucket[i].bucket_end_index != CHANNEL_BUCKET_EMPTY_INDEX) {
+ pfn_gscan_cfg_t->channel_bucket[k].bucket_end_index =
+ ch_bucket[i].bucket_end_index;
+ pfn_gscan_cfg_t->channel_bucket[k].bucket_freq_multiple =
+ ch_bucket[i].bucket_freq_multiple;
+ pfn_gscan_cfg_t->channel_bucket[k].report_flag =
+ ch_bucket[i].report_flag;
+ k++;
+ }
+ }
+
+ tot_nchan = pfn_gscan_cfg_t->channel_bucket[num_buckets_to_fw - 1].bucket_end_index + 1;
+ DHD_PNO(("Total channel num %d total ch_buckets %d ch_buckets_to_fw %d \n", tot_nchan,
+ tot_num_buckets, num_buckets_to_fw));
+
+ if ((err = _dhd_pno_cfg(dhd, _chan_list, tot_nchan)) < 0) {
+ DHD_ERROR(("%s : failed to set call pno_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ if ((err = _dhd_pno_gscan_cfg(dhd, pfn_gscan_cfg_t, gscan_param_size)) < 0) {
+ DHD_ERROR(("%s : failed to set call pno_gscan_cfg (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ if (gscan_params->nbssid_significant_change) {
+ dhd_pno_significant_bssid_t *iter, *next;
+
+
+ p_pfn_significant_bssid = kzalloc(sizeof(wl_pfn_significant_bssid_t) *
+ gscan_params->nbssid_significant_change, GFP_KERNEL);
+ if (p_pfn_significant_bssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate memory %zd\n",
+ __FUNCTION__,
+ sizeof(wl_pfn_significant_bssid_t) *
+ gscan_params->nbssid_significant_change));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+ i = 0;
+ /* convert dhd_pno_significant_bssid_t to wl_pfn_significant_bssid_t */
+ list_for_each_entry_safe(iter, next, &gscan_params->significant_bssid_list, list) {
+ p_pfn_significant_bssid[i].rssi_low_threshold = iter->rssi_low_threshold;
+ p_pfn_significant_bssid[i].rssi_high_threshold = iter->rssi_high_threshold;
+ memcpy(&p_pfn_significant_bssid[i].macaddr, &iter->BSSID, ETHER_ADDR_LEN);
+ i++;
+ }
+ DHD_PNO(("nbssid_significant_change %d \n",
+ gscan_params->nbssid_significant_change));
+ err = _dhd_pno_add_significant_bssid(dhd, p_pfn_significant_bssid,
+ gscan_params->nbssid_significant_change);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_add_significant_bssid(err :%d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+
+ if (gscan_params->nbssid_hotlist) {
+ struct dhd_pno_bssid *iter, *next;
+ wl_pfn_bssid_t *ptr;
+ p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) *
+ gscan_params->nbssid_hotlist, GFP_KERNEL);
+ if (p_pfn_bssid == NULL) {
+ DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
+ " (count: %d)",
+ __FUNCTION__, gscan_params->nbssid_hotlist));
+ err = BCME_NOMEM;
+ _pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
+ goto exit;
+ }
+ ptr = p_pfn_bssid;
+ /* convert dhd_pno_bssid to wl_pfn_bssid */
+ DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
+ list_for_each_entry_safe(iter, next,
+ &gscan_params->hotlist_bssid_list, list) {
+ memcpy(&ptr->macaddr,
+ &iter->macaddr, ETHER_ADDR_LEN);
+ ptr->flags = iter->flags;
+ ptr++;
+ }
+
+ err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call _dhd_pno_add_bssid(err :%d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+ }
+
+ if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) {
+ DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
+ }
+
+exit:
+ /* clear mode in case of error */
+ if (err < 0) {
+ int ret = dhd_pno_clean(dhd);
+
+ if (ret < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, ret));
+ } else {
+ _pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
+ }
+ }
+ kfree(pssid_list);
+ kfree(p_pfn_significant_bssid);
+ kfree(p_pfn_bssid);
+ if (pfn_gscan_cfg_t) {
+ MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
+ }
+ if (ch_bucket) {
+ MFREE(dhd->osh, ch_bucket,
+ (tot_num_buckets * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ }
+ return err;
+
+}
+
+
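+/*
+ * Fold the legacy PNO channels into the gscan bucket list: the legacy bucket
+ * is appended last, bucket multiples are rebased to the faster scan period,
+ * and channels already covered by a faster gscan bucket are removed from the
+ * legacy bucket.
+ */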
+static void
+dhd_pno_merge_gscan_pno_channels(dhd_pno_status_info_t *pno_state,
+ uint16 *chan_list,
+ uint8 *ch_scratch_pad,
+ wl_pfn_gscan_channel_bucket_t *ch_bucket,
+ uint32 *num_buckets_to_fw,
+ int num_channels)
+{
+ uint16 chan_buf[WL_NUMCHANNELS];
+ int i, j = 0, ch_bucket_idx = 0;
+ dhd_pno_params_t *_params = &pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ dhd_pno_params_t *_params1 = &pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ uint16 *legacy_chan_list = _params1->params_legacy.chan_list;
+ bool is_legacy_scan_freq_higher;
+ uint8 report_flag = CH_BUCKET_REPORT_REGULAR;
+
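+ /* Legacy scan_fr is used as a divisor below when rescaling bucket multiples; make sure it is non-zero */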
+ if (!_params1->params_legacy.scan_fr)
+ _params1->params_legacy.scan_fr = PNO_SCAN_MIN_FW_SEC;
+
+ is_legacy_scan_freq_higher =
+ _params->params_gscan.scan_fr < _params1->params_legacy.scan_fr;
+
+ /* Calculate new Legacy scan multiple of base scan_freq
+ * The legacy PNO channel bucket is added at the end of the
+ * channel bucket list.
+ */
+ if (is_legacy_scan_freq_higher) {
+ ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple =
+ _params1->params_legacy.scan_fr/_params->params_gscan.scan_fr;
+
+ } else {
+ uint16 max = 0;
+
+ /* Calculate new multiple of base scan_freq for gscan buckets */
+ ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple = 1;
+ for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+ ch_bucket[i].bucket_freq_multiple *= _params->params_gscan.scan_fr;
+ ch_bucket[i].bucket_freq_multiple /= _params1->params_legacy.scan_fr;
+ if (max < ch_bucket[i].bucket_freq_multiple)
+ max = ch_bucket[i].bucket_freq_multiple;
+ }
+ _params->params_gscan.max_ch_bucket_freq = max;
+ }
+
+ /* Off to remove duplicates!!
+ * Find channels that are already being serviced by gscan before legacy bucket
+ * These have to be removed from legacy bucket.
+ * !!Assuming chan_list channels are validated list of channels!!
+ * ch_scratch_pad is 1 at gscan bucket locations see dhd_pno_gscan_create_channel_list()
+ */
+ for (i = 0; i < _params1->params_legacy.nchan; i++)
+ ch_scratch_pad[legacy_chan_list[i]] += 2;
+
+ ch_bucket_idx = 0;
+ memcpy(chan_buf, chan_list, num_channels * sizeof(uint16));
+
+ /* Finally create channel list and bucket
+ * At this point ch_scratch_pad can have 4 values:
+ * 0 - Channel not present in either Gscan or Legacy PNO bucket
+ * 1 - Channel present only in Gscan bucket
+ * 2 - Channel present only in Legacy PNO bucket
+ * 3 - Channel present in both Gscan and Legacy PNO buckets
+ * Thus Gscan buckets can have values 1 or 3 and Legacy 2 or 3
+ * For channel buckets with scan_freq < legacy accept all
+ * channels i.e. ch_scratch_pad = 1 and 3
+ * else accept only ch_scratch_pad = 1 and mark rejects as
+ * ch_scratch_pad = 4 so that they go in legacy
+ */
+ for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+ if (ch_bucket[i].bucket_freq_multiple <=
+ ch_bucket[_params->params_gscan.nchannel_buckets].bucket_freq_multiple) {
+ for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++, j++)
+ chan_list[j] = chan_buf[ch_bucket_idx];
+
+ ch_bucket[i].bucket_end_index = j - 1;
+ } else {
+ num_channels = 0;
+ for (; ch_bucket_idx <= ch_bucket[i].bucket_end_index; ch_bucket_idx++) {
+ if (ch_scratch_pad[chan_buf[ch_bucket_idx]] == 1) {
+ chan_list[j] = chan_buf[ch_bucket_idx];
+ j++;
+ num_channels++;
+ } else {
+ ch_scratch_pad[chan_buf[ch_bucket_idx]] = 4;
+ /* If Gscan channel is merged off to legacy bucket and
+ * if the gscan channel bucket has a report flag > 0
+ * use the same for legacy
+ */
+ if (report_flag < ch_bucket[i].report_flag)
+ report_flag = ch_bucket[i].report_flag;
+ }
+ }
+
+ if (num_channels) {
+ ch_bucket[i].bucket_end_index = j - 1;
+ } else {
+ ch_bucket[i].bucket_end_index = CHANNEL_BUCKET_EMPTY_INDEX;
+ *num_buckets_to_fw = *num_buckets_to_fw - 1;
+ }
+ }
+
+ }
+
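+ /* The legacy bucket inherits the highest report flag of any gscan channel merged into it */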
+ num_channels = 0;
+ ch_bucket[_params->params_gscan.nchannel_buckets].report_flag = report_flag;
+ /* Now add channels to the legacy scan bucket.
+ * ch_scratch_pad is 0 to 4 at this point; legacy channels are 2, 3 or 4.
+ * 2 means exclusively legacy, so add it to the bucket. 4 means it was
+ * rejected from a gscan bucket and must go into the legacy bucket.
+ * Reject 3, since those channels are already serviced by gscan.
+ */
+ for (i = 0; i < _params1->params_legacy.nchan; i++) {
+ if (ch_scratch_pad[legacy_chan_list[i]] != 3) {
+ chan_list[j] = legacy_chan_list[i];
+ j++;
+ num_channels++;
+ }
+ }
+ if (num_channels) {
+ ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index = j - 1;
+ }
+ else {
+ ch_bucket[_params->params_gscan.nchannel_buckets].bucket_end_index =
+ CHANNEL_BUCKET_EMPTY_INDEX;
+ *num_buckets_to_fw = *num_buckets_to_fw - 1;
+ }
+
+ return;
+}
+static wl_pfn_gscan_channel_bucket_t *
+dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd,
+ dhd_pno_status_info_t *_pno_state,
+ uint16 *chan_list,
+ uint32 *num_buckets,
+ uint32 *num_buckets_to_fw)
+{
+ int i, num_channels, err, nchan = WL_NUMCHANNELS;
+ uint16 *ptr = chan_list, max;
+ uint8 *ch_scratch_pad;
+ wl_pfn_gscan_channel_bucket_t *ch_bucket;
+ dhd_pno_params_t *_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ bool is_pno_legacy_running = _pno_state->pno_mode & DHD_PNO_LEGACY_MODE;
+ dhd_pno_gscan_channel_bucket_t *gscan_buckets = _params->params_gscan.channel_bucket;
+
+ if (is_pno_legacy_running)
+ *num_buckets = _params->params_gscan.nchannel_buckets + 1;
+ else
+ *num_buckets = _params->params_gscan.nchannel_buckets;
+
+
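+ /* May be decremented later if merging with legacy PNO leaves a bucket empty */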
+ *num_buckets_to_fw = *num_buckets;
+
+
+ ch_bucket = (wl_pfn_gscan_channel_bucket_t *) MALLOC(dhd->osh,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+
+ if (!ch_bucket) {
+ DHD_ERROR(("%s: failed to malloc memory of size %zd\n",
+ __FUNCTION__, (*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ max = gscan_buckets[0].bucket_freq_multiple;
+ num_channels = 0;
+ for (i = 0; i < _params->params_gscan.nchannel_buckets; i++) {
+ if (!gscan_buckets[i].band) {
+ num_channels += gscan_buckets[i].num_channels;
+ memcpy(ptr, gscan_buckets[i].chan_list,
+ gscan_buckets[i].num_channels * sizeof(uint16));
+ ptr = ptr + gscan_buckets[i].num_channels;
+ } else {
+ /* get a valid channel list based on band B or A */
+ err = _dhd_pno_get_channels(dhd, ptr,
+ &nchan, (gscan_buckets[i].band & GSCAN_ABG_BAND_MASK),
+ !(gscan_buckets[i].band & GSCAN_DFS_MASK));
+
+ if (err < 0) {
+ DHD_ERROR(("%s: failed to get valid channel list(band : %d)\n",
+ __FUNCTION__, gscan_buckets[i].band));
+ MFREE(dhd->osh, ch_bucket,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ num_channels += nchan;
+ ptr = ptr + nchan;
+ }
+
+ ch_bucket[i].bucket_end_index = num_channels - 1;
+ ch_bucket[i].bucket_freq_multiple = gscan_buckets[i].bucket_freq_multiple;
+ ch_bucket[i].report_flag = gscan_buckets[i].report_flag;
+ if (max < gscan_buckets[i].bucket_freq_multiple)
+ max = gscan_buckets[i].bucket_freq_multiple;
+ nchan = WL_NUMCHANNELS - num_channels;
+ DHD_PNO(("end_idx %d freq_mult - %d\n",
+ ch_bucket[i].bucket_end_index, ch_bucket[i].bucket_freq_multiple));
+ }
+
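+ /* One byte per channel number: used to detect duplicates and, later, to track gscan/legacy membership during the merge */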
+ ch_scratch_pad = (uint8 *) kzalloc(CHANNEL_5G_MAX, GFP_KERNEL);
+ if (!ch_scratch_pad) {
+ DHD_ERROR(("%s: failed to malloc memory of size %d\n",
+ __FUNCTION__, CHANNEL_5G_MAX));
+ MFREE(dhd->osh, ch_bucket,
+ ((*num_buckets) * sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ return NULL;
+ }
+
+ /* Need to look for duplicates in gscan buckets if the framework programmed
+ * the gscan buckets badly, for now return error if there are duplicates.
+ * Plus as an added bonus, we get all channels in Gscan bucket
+ * set to 1 for dhd_pno_merge_gscan_pno_channels()
+ */
+ for (i = 0; i < num_channels; i++) {
+ if (!ch_scratch_pad[chan_list[i]]) {
+ ch_scratch_pad[chan_list[i]] = 1;
+ } else {
+ DHD_ERROR(("%s: Duplicate channel - %d programmed in channel bucket\n",
+ __FUNCTION__, chan_list[i]));
+ MFREE(dhd->osh, ch_bucket, ((*num_buckets) *
+ sizeof(wl_pfn_gscan_channel_bucket_t)));
+ *num_buckets_to_fw = *num_buckets = 0;
+ kfree(ch_scratch_pad);
+ return NULL;
+ }
+ }
+ _params->params_gscan.max_ch_bucket_freq = max;
+ /* Legacy PNO may be running, which means we need to create a legacy PNO bucket
+ * Plus need to remove duplicates as the legacy PNO chan_list may have common channels
+ * If channel is to be scanned more frequently as per gscan requirements
+ * remove from legacy PNO ch_bucket. Similarly, if legacy wants a channel scanned
+ * more often, it is removed from the Gscan channel bucket.
+ * In the end both are satisfied.
+ */
+ if (is_pno_legacy_running)
+ dhd_pno_merge_gscan_pno_channels(_pno_state, chan_list,
+ ch_scratch_pad, ch_bucket, num_buckets_to_fw, num_channels);
+
+ kfree(ch_scratch_pad);
+ return ch_bucket;
+}
+
+static int
+dhd_pno_stop_for_gscan(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int mode;
+ wlc_ssid_ext_t *pssid_list = NULL;
+ dhd_pno_status_info_t *_pno_state;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit;
+ }
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n",
+ __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+ DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+ goto exit;
+ }
+ mutex_lock(&_pno_state->pno_mutex);
+ mode = _pno_state->pno_mode & ~DHD_PNO_GSCAN_MODE;
+ err = dhd_pno_clean(dhd);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
+ __FUNCTION__, err));
+ mutex_unlock(&_pno_state->pno_mutex);
+ return err;
+ }
+ _pno_state->pno_mode = mode;
+ mutex_unlock(&_pno_state->pno_mutex);
+
+ /* Reprogram Legacy PNO if it was running */
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ struct dhd_pno_legacy_params *params_legacy;
+ uint16 chan_list[WL_NUMCHANNELS];
+
+ params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+ _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ pssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+ if (!pssid_list) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
+ goto exit;
+ }
+
+ DHD_PNO(("Restarting Legacy PNO SSID scan...\n"));
+ memcpy(chan_list, params_legacy->chan_list,
+ (params_legacy->nchan * sizeof(uint16)));
+ err = dhd_pno_set_for_ssid(dhd, pssid_list, params_legacy->nssid,
+ params_legacy->scan_fr, params_legacy->pno_repeat,
+ params_legacy->pno_freq_expo_max, chan_list,
+ params_legacy->nchan);
+ if (err < 0) {
+ _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
+
+ }
+
+exit:
+ kfree(pssid_list);
+ return err;
+}
+
+int
+dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ struct dhd_pno_gscan_params *gscan_params;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ DHD_PNO(("%s enter - run %d flush %d\n", __FUNCTION__, run, flush));
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+
+ if (run) {
+ err = dhd_pno_set_for_gscan(dhd, gscan_params);
+ } else {
+ if (flush) {
+ mutex_lock(&_pno_state->pno_mutex);
+ dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ mutex_unlock(&_pno_state->pno_mutex);
+ }
+ /* Need to stop all gscan */
+ err = dhd_pno_stop_for_gscan(dhd);
+ }
+
+ return err;
+}
+
+int
+dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag)
+{
+ int err = BCME_OK;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ struct dhd_pno_gscan_params *gscan_params;
+ uint8 old_flag;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+
+ mutex_lock(&_pno_state->pno_mutex);
+
+ old_flag = gscan_params->send_all_results_flag;
+ gscan_params->send_all_results_flag = (uint8) real_time_flag;
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ if (old_flag != gscan_params->send_all_results_flag) {
+ wl_pfn_gscan_cfg_t gscan_cfg;
+ gscan_cfg.flags = (gscan_params->send_all_results_flag &
+ GSCAN_SEND_ALL_RESULTS_MASK);
+ gscan_cfg.flags |= GSCAN_CFG_FLAGS_ONLY_MASK;
+
+ if ((err = _dhd_pno_gscan_cfg(dhd, &gscan_cfg,
+ sizeof(wl_pfn_gscan_cfg_t))) < 0) {
+ DHD_ERROR(("%s : pno_gscan_cfg failed (err %d) in firmware\n",
+ __FUNCTION__, err));
+ goto exit_mutex_unlock;
+ }
+ } else {
+ DHD_PNO(("No change in flag - %d\n", old_flag));
+ }
+ } else {
+ DHD_PNO(("Gscan not started\n"));
+ }
+exit_mutex_unlock:
+ mutex_unlock(&_pno_state->pno_mutex);
+exit:
+ return err;
+}
+
+int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ dhd_pno_params_t *params;
+ struct dhd_pno_gscan_params *gscan_params;
+ dhd_pno_status_info_t *_pno_state;
+ gscan_results_cache_t *iter, *tmp;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ gscan_params = &params->params_gscan;
+ iter = gscan_params->gscan_batch_cache;
+
+ while (iter) {
+ if (iter->tot_consumed == iter->tot_count) {
+ tmp = iter->next;
+ kfree(iter);
+ iter = tmp;
+ } else
+ break;
+}
+ gscan_params->gscan_batch_cache = iter;
+ ret = (iter == NULL);
+ return ret;
+}
+
+static int
+_dhd_pno_get_gscan_batch_from_fw(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ uint32 timestamp = 0, ts = 0, i, j, timediff;
+ dhd_pno_params_t *params;
+ dhd_pno_status_info_t *_pno_state;
+ wl_pfn_lnet_info_t *plnetinfo;
+ struct dhd_pno_gscan_params *gscan_params;
+ wl_pfn_lscanresults_t *plbestnet = NULL;
+ gscan_results_cache_t *iter, *tail;
+ wifi_gscan_result_t *result;
+ uint8 *nAPs_per_scan = NULL;
+ uint8 num_scans_in_cur_iter;
+ uint16 count, scan_id = 0;
+
+ NULL_CHECK(dhd, "dhd is NULL\n", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit;
+ }
+
+ gscan_params = &params->params_gscan;
+ nAPs_per_scan = (uint8 *) MALLOC(dhd->osh, gscan_params->mscan);
+
+ if (!nAPs_per_scan) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__,
+ gscan_params->mscan));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ plbestnet = (wl_pfn_lscanresults_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+ if (!plbestnet) {
+ DHD_ERROR(("%s : Out of memory!! Can't malloc %d bytes\n", __FUNCTION__,
+ PNO_BESTNET_LEN));
+ err = BCME_NOMEM;
+ goto exit;
+ }
+
+ mutex_lock(&_pno_state->pno_mutex);
+ iter = gscan_params->gscan_batch_cache;
+ /* If a cache has not been consumed, just delete it */
+ while (iter) {
+ iter->tot_consumed = iter->tot_count;
+ iter = iter->next;
+ }
+ dhd_gscan_batch_cache_cleanup(dhd);
+
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE)) {
+ DHD_ERROR(("%s : GSCAN is not enabled\n", __FUNCTION__));
+ goto exit_mutex_unlock;
+ }
+
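+ /* Half the scan period in ms: timestamps further apart than this are treated as results from different scans */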
+ timediff = gscan_params->scan_fr * 1000;
+ timediff = timediff >> 1;
+
+ /* Ok, now lets start getting results from the FW */
+ plbestnet->status = PFN_INCOMPLETE;
+ tail = gscan_params->gscan_batch_cache;
+ while (plbestnet->status != PFN_COMPLETE) {
+ memset(plbestnet, 0, PNO_BESTNET_LEN);
+ err = dhd_iovar(dhd, 0, "pfnlbest", (char *)plbestnet, PNO_BESTNET_LEN, 0);
+ if (err < 0) {
+ DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n",
+ __FUNCTION__, err));
+ goto exit_mutex_unlock;
+ }
+ DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+ plbestnet->status, plbestnet->count));
+ if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+ err = BCME_VERSION;
+ DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+ plbestnet->version, PFN_SCANRESULT_VERSION));
+ goto exit_mutex_unlock;
+ }
+
+ num_scans_in_cur_iter = 0;
+ timestamp = plbestnet->netinfo[0].timestamp;
+ /* find out how many scans' results did we get in this batch of FW results */
+ for (i = 0, count = 0; i < plbestnet->count; i++, count++) {
+ plnetinfo = &plbestnet->netinfo[i];
+ /* Unlikely to happen, but just in case the results from
+ * FW don't make sense, assume they are part of one single scan
+ */
+ if (num_scans_in_cur_iter > gscan_params->mscan) {
+ num_scans_in_cur_iter = 0;
+ count = plbestnet->count;
+ break;
+ }
+ if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ count = 0;
+ num_scans_in_cur_iter++;
+ }
+ timestamp = plnetinfo->timestamp;
+ }
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ num_scans_in_cur_iter++;
+
+ DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+ plnetinfo = &plbestnet->netinfo[0];
+
+ for (i = 0; i < num_scans_in_cur_iter; i++) {
+ iter = (gscan_results_cache_t *)
+ kzalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) +
+ sizeof(gscan_results_cache_t), GFP_KERNEL);
+ if (!iter) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
+ __FUNCTION__, gscan_params->mscan));
+ err = BCME_NOMEM;
+ goto exit_mutex_unlock;
+ }
+ /* Need this check because the new set of results from FW
+ * may be a continuation of the previous sets' scan results
+ */
+ if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) {
+ iter->scan_id = ++scan_id;
+ } else {
+ iter->scan_id = scan_id;
+ }
+ DHD_PNO(("scan_id %d tot_count %d\n", scan_id, nAPs_per_scan[i]));
+ iter->tot_count = nAPs_per_scan[i];
+ iter->tot_consumed = 0;
+
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+ DHD_PNO(("This scan is aborted\n"));
+ iter->flag = (ENABLE << PNO_STATUS_ABORT);
+ } else if (gscan_params->reason) {
+ iter->flag = (ENABLE << gscan_params->reason);
+ }
+
+ if (!tail) {
+ gscan_params->gscan_batch_cache = iter;
+ } else {
+ tail->next = iter;
+ }
+ tail = iter;
+ iter->next = NULL;
+ for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
+ result = &iter->results[j];
+
+ result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+ (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ result->rssi = (int32) plnetinfo->RSSI;
+ /* Info not available & not expected */
+ result->beacon_period = 0;
+ result->capability = 0;
+ result->ie_length = 0;
+ result->rtt = (uint64) plnetinfo->rtt0;
+ result->rtt_sd = (uint64) plnetinfo->rtt1;
+ result->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp);
+ ts = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d\n",
+ __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ memcpy(result->ssid, plnetinfo->pfnsubnet.SSID,
+ plnetinfo->pfnsubnet.SSID_len);
+ result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+ memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID,
+ ETHER_ADDR_LEN);
+
+ DHD_PNO(("\tSSID : "));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ result->macaddr.octet[0],
+ result->macaddr.octet[1],
+ result->macaddr.octet[2],
+ result->macaddr.octet[3],
+ result->macaddr.octet[4],
+ result->macaddr.octet[5]));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo->pfnsubnet.channel,
+ plnetinfo->RSSI, plnetinfo->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+ plnetinfo->rtt0, plnetinfo->rtt1));
+
+ }
+ }
+ }
+exit_mutex_unlock:
+ mutex_unlock(&_pno_state->pno_mutex);
+exit:
+ params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_COMPLETE;
+ smp_wmb();
+ wake_up_interruptible(&_pno_state->batch_get_wait);
+ if (nAPs_per_scan) {
+ MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan);
+ }
+ if (plbestnet) {
+ MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
+ }
+ DHD_PNO(("Batch retrieval done!\n"));
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static int
+_dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
+{
+ int err = BCME_OK;
+ int i, j;
+ uint32 timestamp = 0;
+ dhd_pno_params_t *_params = NULL;
+ dhd_pno_status_info_t *_pno_state = NULL;
+ wl_pfn_lscanresults_t *plbestnet = NULL;
+ wl_pfn_lnet_info_t *plnetinfo;
+ dhd_pno_bestnet_entry_t *pbestnet_entry;
+ dhd_pno_best_header_t *pbestnetheader = NULL;
+ dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
+ bool allocate_header = FALSE;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+ if (!dhd_support_sta_mode(dhd)) {
+ err = BCME_BADOPTION;
+ goto exit_no_unlock;
+ }
+ DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+
+ if (!WLS_SUPPORTED(_pno_state)) {
+ DHD_ERROR(("%s : wifi location service is not supported\n", __FUNCTION__));
+ err = BCME_UNSUPPORTED;
+ goto exit_no_unlock;
+ }
+#ifdef GSCAN_SUPPORT
+ if (!(_pno_state->pno_mode & (DHD_PNO_BATCH_MODE | DHD_PNO_GSCAN_MODE))) {
+#else
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+#endif /* GSCAN_SUPPORT */
+ DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+ goto exit_no_unlock;
+ }
+ mutex_lock(&_pno_state->pno_mutex);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ if (buf && bufsize) {
if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
/* need to check whether we have cashed data or not */
DHD_PNO(("%s: have cashed batching data in Driver\n",
pbestnet_entry->rtt0 = plnetinfo->rtt0;
pbestnet_entry->rtt1 = plnetinfo->rtt1;
pbestnet_entry->timestamp = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n",
+ __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
pbestnet_entry->SSID_len);
_params->params_batch.get_batch.bytes_written = err;
}
mutex_unlock(&_pno_state->pno_mutex);
+exit_no_unlock:
if (waitqueue_active(&_pno_state->get_batch_done.wait))
complete(&_pno_state->get_batch_done);
return err;
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
return;
}
- params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
- _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
- params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ _dhd_pno_get_gscan_batch_from_fw(dhd);
+ return;
+ } else
+#endif /* GSCAN_SUPPORT */
+ {
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+
+ _dhd_pno_get_for_batch(dhd, params_batch->get_batch.buf,
+ params_batch->get_batch.bufsize, params_batch->get_batch.reason);
+ }
}
goto exit;
}
params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
- if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
- DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
- memset(pbuf, 0, bufsize);
- pbuf += sprintf(pbuf, "scancount=%d\n", 0);
- sprintf(pbuf, "%s", RESULTS_END_MARKER);
- err = strlen(buf);
- goto exit;
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan;
+ gscan_params->reason = reason;
+ err = dhd_retreive_batch_scan_results(dhd);
+ if (err == BCME_OK) {
+ wait_event_interruptible_timeout(_pno_state->batch_get_wait,
+ is_batch_retrieval_complete(gscan_params),
+ msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
+ }
+ } else
+#endif
+ {
+ if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
+ DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+ memset(pbuf, 0, bufsize);
+ pbuf += sprintf(pbuf, "scancount=%d\n", 0);
+ sprintf(pbuf, "%s", RESULTS_END_MARKER);
+ err = strlen(buf);
+ goto exit;
+ }
+ params_batch->get_batch.buf = buf;
+ params_batch->get_batch.bufsize = bufsize;
+ params_batch->get_batch.reason = reason;
+ params_batch->get_batch.bytes_written = 0;
+ schedule_work(&_pno_state->work);
+ wait_for_completion(&_pno_state->get_batch_done);
}
- params_batch->get_batch.buf = buf;
- params_batch->get_batch.bufsize = bufsize;
- params_batch->get_batch.reason = reason;
- params_batch->get_batch.bytes_written = 0;
- schedule_work(&_pno_state->work);
- wait_for_completion(&_pno_state->get_batch_done);
+
+#ifdef GSCAN_SUPPORT
+ if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
+#endif
err = params_batch->get_batch.bytes_written;
exit:
return err;
int i = 0;
dhd_pno_status_info_t *_pno_state;
dhd_pno_params_t *_params;
- wl_pfn_bssid_t *p_pfn_bssid;
- wlc_ssid_t *p_ssid_list = NULL;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
+ wlc_ssid_ext_t *p_ssid_list = NULL;
NULL_CHECK(dhd, "dhd is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
_pno_state = PNO_GET_PNOSTATE(dhd);
err = BCME_UNSUPPORTED;
goto exit;
}
+
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ DHD_PNO(("Gscan is ongoing, nothing to stop here\n"));
+ return err;
+ }
+#endif
+
if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
goto exit;
/* restart Legacy PNO if the Legacy PNO is on */
if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
struct dhd_pno_legacy_params *_params_legacy;
- struct dhd_pno_ssid *iter, *next;
_params_legacy =
&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
- p_ssid_list = kzalloc(sizeof(wlc_ssid_t) *
- _params_legacy->nssid, GFP_KERNEL);
- if (p_ssid_list == NULL) {
- DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
- __FUNCTION__, _params_legacy->nssid));
- err = BCME_ERROR;
- _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+ if (!p_ssid_list) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
goto exit;
}
- i = 0;
- /* convert dhd_pno_ssid to dhd_pno_ssid */
- list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
- p_ssid_list[i].SSID_len = iter->SSID_len;
- memcpy(p_ssid_list[i].SSID, iter->SSID, p_ssid_list[i].SSID_len);
- i++;
- }
err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
_params_legacy->scan_fr, _params_legacy->pno_repeat,
_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
exit:
_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
- if (p_ssid_list)
- kfree(p_ssid_list);
+ kfree(p_ssid_list);
+ kfree(p_pfn_bssid);
return err;
}
uint32 mode = 0;
dhd_pno_status_info_t *_pno_state;
dhd_pno_params_t *_params;
- wlc_ssid_t *p_ssid_list;
+ wlc_ssid_ext_t *p_ssid_list = NULL;
NULL_CHECK(dhd, "dhd is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
_pno_state = PNO_GET_PNOSTATE(dhd);
if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
/* restart Legacy PNO Scan */
struct dhd_pno_legacy_params *_params_legacy;
- struct dhd_pno_ssid *iter, *next;
_params_legacy =
&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
- p_ssid_list =
- kzalloc(sizeof(wlc_ssid_t) * _params_legacy->nssid, GFP_KERNEL);
- if (p_ssid_list == NULL) {
- DHD_ERROR(("%s : failed to allocate wlc_ssid_t array (count: %d)",
- __FUNCTION__, _params_legacy->nssid));
- err = BCME_ERROR;
- _pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
+ p_ssid_list = dhd_pno_get_legacy_pno_ssid(dhd, _pno_state);
+ if (!p_ssid_list) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to get Legacy PNO SSID list\n"));
goto exit;
}
- /* convert dhd_pno_ssid to dhd_pno_ssid */
- list_for_each_entry_safe(iter, next, &_params_legacy->ssid_list, list) {
- p_ssid_list->SSID_len = iter->SSID_len;
- memcpy(p_ssid_list->SSID, iter->SSID, p_ssid_list->SSID_len);
- p_ssid_list++;
- }
err = dhd_pno_set_for_ssid(dhd, p_ssid_list, _params_legacy->nssid,
_params_legacy->scan_fr, _params_legacy->pno_repeat,
_params_legacy->pno_freq_expo_max, _params_legacy->chan_list,
}
}
exit:
+ kfree(p_ssid_list);
+ return err;
+}
+
+#ifdef GSCAN_SUPPORT
+int
+dhd_retreive_batch_scan_results(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ struct dhd_pno_batch_params *params_batch;
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
+ if (_params->params_gscan.get_batch_flag == GSCAN_BATCH_RETRIEVAL_COMPLETE) {
+ DHD_PNO(("Retreive batch results\n"));
+ params_batch->get_batch.buf = NULL;
+ params_batch->get_batch.bufsize = 0;
+ params_batch->get_batch.reason = PNO_STATUS_EVENT;
+ _params->params_gscan.get_batch_flag = GSCAN_BATCH_RETRIEVAL_IN_PROGRESS;
+ schedule_work(&_pno_state->work);
+ } else {
+ DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING retrieval"
+ "already in progress, will skip\n", __FUNCTION__));
+ err = BCME_ERROR;
+ }
+
return err;
}
+/* Handle Significant WiFi Change (SWC) event from FW
+ * Send event to HAL when all results arrive from FW
+ */
+void *
+dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes)
+{
+ void *ptr = NULL;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ struct dhd_pno_swc_evt_param *params;
+ wl_pfn_swc_results_t *results = (wl_pfn_swc_results_t *)event_data;
+ wl_pfn_significant_net_t *change_array;
+ int i;
+
+
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+ params = &(gscan_params->param_significant);
+
+ if (!results->total_count) {
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
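+ /* First chunk of a new SWC event: allocate room for all expected results; later chunks are appended until total_count is reached */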
+ if (!params->results_rxed_so_far) {
+ if (!params->change_array) {
+ params->change_array = (wl_pfn_significant_net_t *)
+ kmalloc(sizeof(wl_pfn_significant_net_t) * results->total_count,
+ GFP_KERNEL);
+
+ if (!params->change_array) {
+ DHD_ERROR(("%s Cannot Malloc %zd bytes!!\n", __FUNCTION__,
+ sizeof(wl_pfn_significant_net_t) * results->total_count));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+ } else {
+ DHD_ERROR(("RX'ed WLC_E_PFN_SWC evt from FW, previous evt not complete!!"));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
+ }
+
+ DHD_PNO(("%s: pkt_count %d total_count %d\n", __FUNCTION__,
+ results->pkt_count, results->total_count));
+
+ for (i = 0; i < results->pkt_count; i++) {
+ DHD_PNO(("\t %02x:%02x:%02x:%02x:%02x:%02x\n",
+ results->list[i].BSSID.octet[0],
+ results->list[i].BSSID.octet[1],
+ results->list[i].BSSID.octet[2],
+ results->list[i].BSSID.octet[3],
+ results->list[i].BSSID.octet[4],
+ results->list[i].BSSID.octet[5]));
+ }
+
+ change_array = &params->change_array[params->results_rxed_so_far];
+ memcpy(change_array, results->list, sizeof(wl_pfn_significant_net_t) * results->pkt_count);
+ params->results_rxed_so_far += results->pkt_count;
+
+ if (params->results_rxed_so_far == results->total_count) {
+ params->results_rxed_so_far = 0;
+ *send_evt_bytes = sizeof(wl_pfn_significant_net_t) * results->total_count;
+ /* Pack up change buffer to send up and reset
+ * results_rxed_so_far, after its done.
+ */
+ ptr = (void *) params->change_array;
+ /* expecting the callee to free this mem chunk */
+ params->change_array = NULL;
+ }
+ else {
+ *send_evt_bytes = 0;
+ }
+
+ return ptr;
+}
+
+void
+dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type)
+{
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ gscan_results_cache_t *iter, *tmp;
+
+ if (!_pno_state) {
+ return;
+ }
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (type == HOTLIST_FOUND) {
+ iter = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = NULL;
+ } else {
+ iter = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = NULL;
+ }
+
+ while (iter) {
+ tmp = iter->next;
+ kfree(iter);
+ iter = tmp;
+ }
+
+ return;
+}
+
+void *
+dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *data, int *size)
+{
+ wl_bss_info_t *bi = NULL;
+ wl_gscan_result_t *gscan_result;
+ wifi_gscan_result_t *result = NULL;
+ u32 bi_length = 0;
+ uint8 channel;
+ uint32 mem_needed;
+
+ struct timespec ts;
+
+ *size = 0;
+
+ gscan_result = (wl_gscan_result_t *)data;
+
+ if (!gscan_result) {
+ DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
+ goto exit;
+ }
+ if (!gscan_result->bss_info) {
+ DHD_ERROR(("Invalid gscan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi = &gscan_result->bss_info[0].info;
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(gscan_result->buflen) -
+ WL_GSCAN_RESULTS_FIXED_SIZE - WL_GSCAN_INFO_FIXED_FIELD_SIZE)) {
+ DHD_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
+ if (bi->SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n", bi->SSID_len));
+ bi->SSID_len = DOT11_MAX_SSID_LEN;
+ }
+
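+ /* Fixed part of wifi_gscan_result_t plus the variable-length IE data that follows it */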
+ mem_needed = OFFSETOF(wifi_gscan_result_t, ie_data) + bi->ie_length;
+ result = kmalloc(mem_needed, GFP_KERNEL);
+
+ if (!result) {
+ DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n",
+ __FUNCTION__, mem_needed));
+ goto exit;
+ }
+
+ memcpy(result->ssid, bi->SSID, bi->SSID_len);
+ result->ssid[bi->SSID_len] = '\0';
+ channel = wf_chspec_ctlchan(bi->chanspec);
+ result->channel = wf_channel2mhz(channel,
+ (channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ result->rssi = (int32) bi->RSSI;
+ result->rtt = 0;
+ result->rtt_sd = 0;
+ get_monotonic_boottime(&ts);
+ result->ts = (uint64) TIMESPEC_TO_US(ts);
+ result->beacon_period = dtoh16(bi->beacon_period);
+ result->capability = dtoh16(bi->capability);
+ result->ie_length = dtoh32(bi->ie_length);
+ memcpy(&result->macaddr, &bi->BSSID, ETHER_ADDR_LEN);
+ memcpy(result->ie_data, ((uint8 *)bi + bi->ie_offset), bi->ie_length);
+ *size = mem_needed;
+exit:
+ return result;
+}
+
+void *
+dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+ int *send_evt_bytes, hotlist_type_t type)
+{
+ void *ptr = NULL;
+ dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ struct dhd_pno_gscan_params *gscan_params;
+ wl_pfn_scanresults_t *results = (wl_pfn_scanresults_t *)event_data;
+ wifi_gscan_result_t *hotlist_found_array;
+ wl_pfn_net_info_t *plnetinfo;
+ gscan_results_cache_t *gscan_hotlist_cache;
+ int malloc_size = 0, i, total = 0;
+
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
+
+ if (!results->count) {
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
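+ /* gscan_results_cache_t already embeds one wifi_gscan_result_t, hence (count - 1) extra entries */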
+ malloc_size = sizeof(gscan_results_cache_t) +
+ ((results->count - 1) * sizeof(wifi_gscan_result_t));
+ gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL);
+
+ if (!gscan_hotlist_cache) {
+ DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
+
+ if (type == HOTLIST_FOUND) {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count));
+ } else {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count));
+ }
+
+ gscan_hotlist_cache->tot_count = results->count;
+ gscan_hotlist_cache->tot_consumed = 0;
+ plnetinfo = results->netinfo;
+
+ for (i = 0; i < results->count; i++, plnetinfo++) {
+ hotlist_found_array = &gscan_hotlist_cache->results[i];
+ hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+ (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ hotlist_found_array->rssi = (int32) plnetinfo->RSSI;
+ /* Info not available & not expected */
+ hotlist_found_array->beacon_period = 0;
+ hotlist_found_array->capability = 0;
+ hotlist_found_array->ie_length = 0;
+
+ hotlist_found_array->ts = convert_fw_rel_time_to_systime(plnetinfo->timestamp);
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+ plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.SSID,
+ plnetinfo->pfnsubnet.SSID_len);
+ hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+
+ memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid,
+ hotlist_found_array->macaddr.octet[0],
+ hotlist_found_array->macaddr.octet[1],
+ hotlist_found_array->macaddr.octet[2],
+ hotlist_found_array->macaddr.octet[3],
+ hotlist_found_array->macaddr.octet[4],
+ hotlist_found_array->macaddr.octet[5],
+ hotlist_found_array->rssi));
+ }
+
+
+ if (results->status == PFN_COMPLETE) {
+ ptr = (void *) gscan_hotlist_cache;
+ while (gscan_hotlist_cache) {
+ total += gscan_hotlist_cache->tot_count;
+ gscan_hotlist_cache = gscan_hotlist_cache->next;
+ }
+ *send_evt_bytes = total * sizeof(wifi_gscan_result_t);
+ }
+
+ return ptr;
+}
+#endif /* GSCAN_SUPPORT */
int
dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
{
/* TODO : need to implement event logic using generic netlink */
break;
case WLC_E_PFN_BEST_BATCHING:
+#ifndef GSCAN_SUPPORT
{
struct dhd_pno_batch_params *params_batch;
params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
"will skip this event\n", __FUNCTION__));
break;
}
+#else
+ break;
+#endif /* !GSCAN_SUPPORT */
default:
DHD_ERROR(("unknown event : %d\n", event_type));
}
mutex_init(&_pno_state->pno_mutex);
INIT_WORK(&_pno_state->work, _dhd_pno_get_batch_handler);
init_completion(&_pno_state->get_batch_done);
+#ifdef GSCAN_SUPPORT
+ init_waitqueue_head(&_pno_state->batch_get_wait);
+#endif /* GSCAN_SUPPORT */
err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, 0);
if (err == BCME_UNSUPPORTED) {
_pno_state->wls_supported = FALSE;
DHD_INFO(("Current firmware doesn't support"
" Android Location Service\n"));
+ } else {
+ DHD_ERROR(("%s: Support Android Location Service\n",
+ __FUNCTION__));
}
exit:
return err;
}
+
int dhd_pno_deinit(dhd_pub_t *dhd)
{
int err = BCME_OK;
_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
}
+#ifdef GSCAN_SUPPORT
+ if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+ mutex_lock(&_pno_state->pno_mutex);
+ dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ mutex_unlock(&_pno_state->pno_mutex);
+ }
+#endif /* GSCAN_SUPPORT */
+
if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
/* clear resource if the BATCH MODE is on */
/*
* Header file of Broadcom Dongle Host Driver (DHD)
* Prefered Network Offload code and Wi-Fi Location Service(WLS) code.
- * $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_pno.h 591285 2015-10-07 11:56:29Z $
*/
#ifndef __DHD_PNO_H__
#define PNO_TLV_VERSION '1'
#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
#define PNO_TLV_RESERVED '0'
-
#define PNO_BATCHING_SET "SET"
#define PNO_BATCHING_GET "GET"
#define PNO_BATCHING_STOP "STOP"
-
#define PNO_PARAMS_DELIMETER " "
#define PNO_PARAM_CHANNEL_DELIMETER ","
#define PNO_PARAM_VALUE_DELLIMETER '='
#define RESULTS_END_MARKER "----\n"
#define SCAN_END_MARKER "####\n"
#define AP_END_MARKER "====\n"
+#define PNO_RSSI_MARGIN_DBM 30
+
+#ifdef GSCAN_SUPPORT
+
+#define GSCAN_MAX_CH_BUCKETS 8
+#define GSCAN_BG_BAND_MASK (1 << 0)
+#define GSCAN_A_BAND_MASK (1 << 1)
+#define GSCAN_DFS_MASK (1 << 2)
+#define GSCAN_ABG_BAND_MASK (GSCAN_A_BAND_MASK | GSCAN_BG_BAND_MASK)
+#define GSCAN_BAND_MASK (GSCAN_ABG_BAND_MASK | GSCAN_DFS_MASK)
+
+#define GSCAN_FLUSH_HOTLIST_CFG (1 << 0)
+#define GSCAN_FLUSH_SIGNIFICANT_CFG (1 << 1)
+#define GSCAN_FLUSH_SCAN_CFG (1 << 2)
+#define GSCAN_FLUSH_ALL_CFG (GSCAN_FLUSH_SCAN_CFG | \
+ GSCAN_FLUSH_SIGNIFICANT_CFG | \
+ GSCAN_FLUSH_HOTLIST_CFG)
+/* Do not change GSCAN_BATCH_RETRIEVAL_COMPLETE */
+#define GSCAN_BATCH_RETRIEVAL_COMPLETE 0
+#define GSCAN_BATCH_RETRIEVAL_IN_PROGRESS 1
+#define GSCAN_BATCH_NO_THR_SET 101
+#define GSCAN_LOST_AP_WINDOW_DEFAULT 4
+#define GSCAN_MIN_BSSID_TIMEOUT 90
+#define GSCAN_BATCH_GET_MAX_WAIT 500 /* ms */
+#define CHANNEL_BUCKET_EMPTY_INDEX 0xFFFF
+#define GSCAN_RETRY_THRESHOLD 3
+#endif /* GSCAN_SUPPORT */
enum scan_status {
/* SCAN ABORT by other scan */
INDEX_OF_LEGACY_PARAMS,
INDEX_OF_BATCH_PARAMS,
INDEX_OF_HOTLIST_PARAMS,
+ /* GSCAN includes hotlist scan and they do not run
+ * independent of each other
+ */
+#ifdef GSCAN_SUPPORT
+ INDEX_OF_GSCAN_PARAMS = INDEX_OF_HOTLIST_PARAMS,
+#endif /* GSCAN_SUPPORT */
INDEX_MODE_MAX
};
enum dhd_pno_status {
char subtype;
char reserved;
} cmd_tlv_t;
+#ifdef GSCAN_SUPPORT
+typedef enum {
+ WIFI_BAND_UNSPECIFIED,
+ WIFI_BAND_BG = 1, /* 2.4 GHz */
+ WIFI_BAND_A = 2, /* 5 GHz without DFS */
+ WIFI_BAND_A_DFS = 4, /* 5 GHz DFS only */
+ WIFI_BAND_A_WITH_DFS = 6, /* 5 GHz with DFS */
+ WIFI_BAND_ABG = 3, /* 2.4 GHz + 5 GHz; no DFS */
+ WIFI_BAND_ABG_WITH_DFS = 7, /* 2.4 GHz + 5 GHz with DFS */
+} gscan_wifi_band_t;
+
+typedef enum {
+ HOTLIST_LOST,
+ HOTLIST_FOUND
+} hotlist_type_t;
+
+typedef enum dhd_pno_gscan_cmd_cfg {
+ DHD_PNO_BATCH_SCAN_CFG_ID,
+ DHD_PNO_GEOFENCE_SCAN_CFG_ID,
+ DHD_PNO_SIGNIFICANT_SCAN_CFG_ID,
+ DHD_PNO_SCAN_CFG_ID,
+ DHD_PNO_GET_CAPABILITIES,
+ DHD_PNO_GET_BATCH_RESULTS,
+ DHD_PNO_GET_CHANNEL_LIST
+} dhd_pno_gscan_cmd_cfg_t;
+
typedef enum dhd_pno_mode {
/* Wi-Fi Legacy PNO Mode */
- DHD_PNO_NONE_MODE = 0,
+ DHD_PNO_NONE_MODE = 0,
+ DHD_PNO_LEGACY_MODE = (1 << (0)),
+ /* Wi-Fi Android BATCH SCAN Mode */
+ DHD_PNO_BATCH_MODE = (1 << (1)),
+ /* Wi-Fi Android Hotlist SCAN Mode */
+ DHD_PNO_HOTLIST_MODE = (1 << (2)),
+ /* Wi-Fi Google Android SCAN Mode */
+ DHD_PNO_GSCAN_MODE = (1 << (3))
+} dhd_pno_mode_t;
+#else
+typedef enum dhd_pno_mode {
+ /* Wi-Fi Legacy PNO Mode */
+ DHD_PNO_NONE_MODE = 0,
DHD_PNO_LEGACY_MODE = (1 << (0)),
/* Wi-Fi Android BATCH SCAN Mode */
DHD_PNO_BATCH_MODE = (1 << (1)),
/* Wi-Fi Android Hotlist SCAN Mode */
DHD_PNO_HOTLIST_MODE = (1 << (2))
} dhd_pno_mode_t;
+#endif /* GSCAN_SUPPORT */
struct dhd_pno_ssid {
+ bool hidden;
uint32 SSID_len;
uchar SSID[DOT11_MAX_SSID_LEN];
struct list_head list;
uint16 nbssid;
struct list_head bssid_list;
};
+#ifdef GSCAN_SUPPORT
+typedef struct dhd_pno_gscan_channel_bucket {
+ uint16 bucket_freq_multiple;
+ /* band = 1 All bg band channels,
+ * band = 2 All a band channels,
+ * band = 0 chan_list channels
+ */
+ uint16 band;
+ uint8 report_flag;
+ uint8 num_channels;
+ uint16 chan_list[GSCAN_MAX_CH_BUCKETS];
+} dhd_pno_gscan_channel_bucket_t;
+
+typedef struct dhd_pno_swc_evt_param {
+ uint16 results_rxed_so_far;
+ wl_pfn_significant_net_t *change_array;
+} dhd_pno_swc_evt_param_t;
+
+typedef struct wifi_gscan_result {
+ uint64 ts; /* Time of discovery */
+ char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
+ struct ether_addr macaddr; /* BSSID */
+ uint32 channel; /* channel frequency in MHz */
+ int32 rssi; /* in db */
+ uint64 rtt; /* in nanoseconds */
+ uint64 rtt_sd; /* standard deviation in rtt */
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
+ uint32 ie_length; /* byte length of Information Elements */
+ char ie_data[1]; /* IE data to follow */
+} wifi_gscan_result_t;
+
+typedef struct gscan_results_cache {
+ struct gscan_results_cache *next;
+ uint8 scan_id;
+ uint8 flag;
+ uint8 tot_count;
+ uint8 tot_consumed;
+ wifi_gscan_result_t results[1];
+} gscan_results_cache_t;
+
+typedef struct dhd_pno_gscan_capabilities {
+ int max_scan_cache_size;
+ int max_scan_buckets;
+ int max_ap_cache_per_scan;
+ int max_rssi_sample_size;
+ int max_scan_reporting_threshold;
+ int max_hotlist_aps;
+ int max_significant_wifi_change_aps;
+} dhd_pno_gscan_capabilities_t;
+
+struct dhd_pno_gscan_params {
+ int32 scan_fr;
+ uint8 bestn;
+ uint8 mscan;
+ uint8 buffer_threshold;
+ uint8 swc_nbssid_threshold;
+ uint8 swc_rssi_window_size;
+ uint8 lost_ap_window;
+ uint8 nchannel_buckets;
+ uint8 reason;
+ uint8 get_batch_flag;
+ uint8 send_all_results_flag;
+ uint16 max_ch_bucket_freq;
+ gscan_results_cache_t *gscan_batch_cache;
+ gscan_results_cache_t *gscan_hotlist_found;
+ gscan_results_cache_t *gscan_hotlist_lost;
+ uint16 nbssid_significant_change;
+ uint16 nbssid_hotlist;
+ struct dhd_pno_swc_evt_param param_significant;
+ struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+ struct list_head hotlist_bssid_list;
+ struct list_head significant_bssid_list;
+};
+
+typedef struct gscan_scan_params {
+ int32 scan_fr;
+ uint16 nchannel_buckets;
+ struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
+} gscan_scan_params_t;
+
+typedef struct gscan_batch_params {
+ uint8 bestn;
+ uint8 mscan;
+ uint8 buffer_threshold;
+} gscan_batch_params_t;
+
+struct bssid_t {
+ struct ether_addr macaddr;
+ int16 rssi_reporting_threshold; /* 0 -> no reporting threshold */
+};
+
+typedef struct gscan_hotlist_scan_params {
+ uint16 lost_ap_window; /* number of scans to declare LOST */
+ uint16 nbssid; /* number of bssids */
+ struct bssid_t bssid[1]; /* n bssids to follow */
+} gscan_hotlist_scan_params_t;
+
+/* SWC (Significant WiFi Change) params */
+typedef struct gscan_swc_params {
+ /* Rssi averaging window size */
+ uint8 rssi_window;
+ /* Number of scans that the AP has to be absent before
+ * being declared LOST
+ */
+ uint8 lost_ap_window;
+ /* if x Aps have a significant change generate an event. */
+ uint8 swc_threshold;
+ uint8 nbssid;
+ wl_pfn_significant_bssid_t bssid_elem_list[1];
+} gscan_swc_params_t;
+
+typedef struct dhd_pno_significant_bssid {
+ struct ether_addr BSSID;
+ int8 rssi_low_threshold;
+ int8 rssi_high_threshold;
+ struct list_head list;
+} dhd_pno_significant_bssid_t;
+#endif /* GSCAN_SUPPORT */
typedef union dhd_pno_params {
struct dhd_pno_legacy_params params_legacy;
struct dhd_pno_batch_params params_batch;
struct dhd_pno_hotlist_params params_hotlist;
+#ifdef GSCAN_SUPPORT
+ struct dhd_pno_gscan_params params_gscan;
+#endif /* GSCAN_SUPPORT */
} dhd_pno_params_t;
typedef struct dhd_pno_status_info {
+ uint8 pno_oui[DOT11_OUI_LEN];
dhd_pub_t *dhd;
struct work_struct work;
struct mutex pno_mutex;
+#ifdef GSCAN_SUPPORT
+ wait_queue_head_t batch_get_wait;
+#endif /* GSCAN_SUPPORT */
struct completion get_batch_done;
bool wls_supported; /* wifi location service supported or not */
enum dhd_pno_status pno_status;
dhd_dev_pno_stop_for_ssid(struct net_device *dev);
extern int
-dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
extern int
extern int
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
struct dhd_pno_hotlist_params *hotlist_params);
-
+extern int dhd_dev_pno_set_mac_oui(struct net_device *dev, uint8 *oui);
+#ifdef GSCAN_SUPPORT
+extern int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, uint8 flush);
+extern void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type, void *info,
+ uint32 *len);
+void dhd_dev_pno_lock_access_batch_results(struct net_device *dev);
+void dhd_dev_pno_unlock_access_batch_results(struct net_device *dev);
+extern int dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush);
+extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
+extern void * dhd_dev_swc_scan_event(struct net_device *dev, const void *data,
+ int *send_evt_bytes);
+int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
+extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type);
+void * dhd_dev_process_full_gscan_result(struct net_device *dev,
+ const void *data, int *send_evt_bytes);
+extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
+extern void dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type);
+extern void dhd_dev_wait_batch_results_complete(struct net_device *dev);
+#endif /* GSCAN_SUPPORT */
/* dhd pno fuctions */
extern int dhd_pno_stop_for_ssid(dhd_pub_t *dhd);
extern int dhd_pno_enable(dhd_pub_t *dhd, int enable);
-extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_t* ssid_list, int nssid,
+extern int dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan);
extern int dhd_pno_set_for_batch(dhd_pub_t *dhd, struct dhd_pno_batch_params *batch_params);
extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
extern int dhd_pno_init(dhd_pub_t *dhd);
extern int dhd_pno_deinit(dhd_pub_t *dhd);
+extern bool dhd_is_pno_supported(dhd_pub_t *dhd);
+extern int dhd_pno_set_mac_oui(dhd_pub_t *dhd, uint8 *oui);
+#ifdef GSCAN_SUPPORT
+extern int dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, uint8 flush);
+extern void * dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *info,
+ uint32 *len);
+extern void dhd_pno_lock_batch_results(dhd_pub_t *dhd);
+extern void dhd_pno_unlock_batch_results(dhd_pub_t *dhd);
+extern int dhd_pno_initiate_gscan_request(dhd_pub_t *dhd, bool run, bool flush);
+extern int dhd_pno_enable_full_scan_result(dhd_pub_t *dhd, bool real_time_flag);
+extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
+extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
+extern void *dhd_handle_swc_evt(dhd_pub_t *dhd, const void *event_data, int *send_evt_bytes);
+extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
+ int *send_evt_bytes, hotlist_type_t type);
+extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
+ int *send_evt_bytes);
+extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
+extern void dhd_gscan_hotlist_cache_cleanup(dhd_pub_t *dhd, hotlist_type_t type);
+extern void dhd_wait_batch_results_complete(dhd_pub_t *dhd);
+#endif /* GSCAN_SUPPORT */
#endif
-#if defined(NDISVER)
-#if defined(PNO_SUPPORT)
-#if (NDISVER >= 0x0630)
-extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
-extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
-extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
- ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
-extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
-extern int dhd_pno_clean(dhd_pub_t *dhd);
-#endif /* #if (NDISVER >= 0x0630) */
-#endif /* #if defined(PNO_SUPPORT) */
-#endif /* #if defined(NDISVER) */
#endif /* __DHD_PNO_H__ */
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_proto.h 499674 2014-08-29 21:56:23Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_proto.h 604483 2015-12-07 14:47:36Z $
*/
#ifndef _dhd_proto_h_
#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */
#endif /* MFG_IOCTL_RESP_TIMEOUT */
+#define DEFAULT_D3_ACK_RESP_TIMEOUT 4000
+#ifndef D3_ACK_RESP_TIMEOUT
+#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT
+#endif /* D3_ACK_RESP_TIMEOUT */
+
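+/* Bus-busy wait defaults to the ioctl response timeout plus an extra margin */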
+#define DEFAULT_DHD_BUS_BUSY_TIMEOUT (IOCTL_RESP_TIMEOUT + 1000)
+#ifndef DHD_BUS_BUSY_TIMEOUT
+#define DHD_BUS_BUSY_TIMEOUT DEFAULT_DHD_BUS_BUSY_TIMEOUT
+#endif /* DHD_BUS_BUSY_TIMEOUT */
+
+#define IOCTL_DISABLE_TIMEOUT 0
/*
* Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
*/
extern int dhd_prot_attach(dhd_pub_t *dhdp);
/* Initilizes the index block for dma'ing indices */
-extern int dhd_prot_init_index_dma_block(dhd_pub_t *dhdp, uint8 type, uint32 length);
+extern int dhd_prot_dma_indx_init(dhd_pub_t *dhdp, uint32 rw_index_sz,
+ uint8 type, uint32 length);
/* Unlink, frees allocated protocol memory (including dhd_prot) */
extern void dhd_prot_detach(dhd_pub_t *dhdp);
extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay);
+extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
+ void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
+extern void dhd_prot_flowrings_pool_release(dhd_pub_t *dhd,
+ uint16 flowid, void *msgbuf_ring);
extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
-extern void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info);
extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val);
+extern uint32 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd);
extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
- struct bcmstrbuf *strbuf);
+ struct bcmstrbuf *strbuf, const char * fmt);
extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
-extern void dhd_prot_clear(dhd_pub_t *dhd);
-
+extern void dhd_prot_reset(dhd_pub_t *dhd);
+#ifdef DHD_LB
+extern void dhd_lb_tx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_compl_handler(unsigned long data);
+extern void dhd_lb_rx_process_handler(unsigned long data);
+#endif /* DHD_LB */
+void dhd_prot_collect_memdump(dhd_pub_t *dhd);
#endif /* BCMPCIE */
-
/********************************
* For version-string expansion *
*/
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.c 606280 2015-12-15 05:28:25Z $
+ */
+#ifdef RTT_SUPPORT
+#include <typedefs.h>
+#include <osl.h>
+
+#include <epivers.h>
+#include <bcmutils.h>
+
+#include <bcmendian.h>
+#include <linuxver.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <dngl_stats.h>
+#include <wlioctl.h>
+
+#include <proto/bcmevent.h>
+#include <dhd.h>
+#include <dhd_rtt.h>
+#include <dhd_dbg.h>
+#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
+static DEFINE_SPINLOCK(noti_list_lock);
+#define NULL_CHECK(p, s, err) \
+ do { \
+ if (!(p)) { \
+ printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
+ err = BCME_ERROR; \
+ return err; \
+ } \
+ } while (0)
+
+#define RTT_TWO_SIDED(capability) \
+ do { \
+ if ((capability & RTT_CAP_ONE_WAY) == (uint8) (RTT_CAP_ONE_WAY)) \
+ return FALSE; \
+ else \
+ return TRUE; \
+ } while (0)
+#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
+ (ts).tv_nsec / NSEC_PER_USEC)
+struct rtt_noti_callback {
+ struct list_head list;
+ void *ctx;
+ dhd_rtt_compl_noti_fn noti_fn;
+};
+
+typedef struct rtt_status_info {
+ dhd_pub_t *dhd;
+ int8 status; /* current status for the current entry */
+ int8 cur_idx; /* current entry to do RTT */
+ int32 capability; /* rtt capability */
+ struct mutex rtt_mutex;
+ rtt_config_params_t rtt_config;
+ struct work_struct work;
+ struct list_head noti_fn_list;
+ struct list_head rtt_results_cache; /* store results for RTT */
+} rtt_status_info_t;
+
+static int dhd_rtt_start(dhd_pub_t *dhd);
+
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel)
+{
+ int bw;
+	/* set width to 20MHz for the 2.4GHz band */
+ if (channel.center_freq >= 2400 && channel.center_freq <= 2500) {
+ channel.width = WIFI_CHAN_WIDTH_20;
+ }
+ switch (channel.width) {
+ case WIFI_CHAN_WIDTH_20:
+ bw = WL_CHANSPEC_BW_20;
+ break;
+ case WIFI_CHAN_WIDTH_40:
+ bw = WL_CHANSPEC_BW_40;
+ break;
+ case WIFI_CHAN_WIDTH_80:
+ bw = WL_CHANSPEC_BW_80;
+ break;
+ case WIFI_CHAN_WIDTH_160:
+ bw = WL_CHANSPEC_BW_160;
+ break;
+ default:
+ DHD_ERROR(("doesn't support this bandwith : %d", channel.width));
+ bw = -1;
+ break;
+ }
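+	/* map the centre frequency to a channel number, then build the chanspec
+	 * with the resolved bandwidth
+	 */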
+ return wf_channel2chspec(wf_mhz2channel(channel.center_freq, 0), bw);
+}
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params)
+{
+ int err = BCME_OK;
+ int idx;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(params, "params is NULL", err);
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (rtt_status->capability == RTT_CAP_NONE) {
+ DHD_ERROR(("doesn't support RTT \n"));
+ return BCME_ERROR;
+ }
+ if (rtt_status->status == RTT_STARTED) {
+ DHD_ERROR(("rtt is already started\n"));
+ return BCME_BUSY;
+ }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ bcopy(params, &rtt_status->rtt_config, sizeof(rtt_config_params_t));
+ rtt_status->status = RTT_STARTED;
+	/* start to measure RTT from the 1st device */
+ /* find next target to trigger RTT */
+ for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+ if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+ DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
+ schedule_work(&rtt_status->work);
+ }
+ return err;
+}
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
+{
+ int err = BCME_OK;
+ int i = 0, j = 0;
+ rtt_status_info_t *rtt_status;
+
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (rtt_status->status == RTT_STOPPED) {
+ DHD_ERROR(("rtt is not started\n"));
+ return BCME_OK;
+ }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ mutex_lock(&rtt_status->rtt_mutex);
+ for (i = 0; i < mac_cnt; i++) {
+ for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+ if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+ ETHER_ADDR_LEN)) {
+ rtt_status->rtt_config.target_info[j].disable = TRUE;
+ }
+ }
+ }
+ mutex_unlock(&rtt_status->rtt_mutex);
+ return err;
+}
+
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ int mpc = 0;
+ int nss, mcs, bw;
+ uint32 rspec = 0;
+ int8 eabuf[ETHER_ADDR_STR_LEN];
+ int8 chanbuf[CHANSPEC_STR_LEN];
+ bool set_mpc = FALSE;
+ wl_proxd_iovar_t proxd_iovar;
+ wl_proxd_params_iovar_t proxd_params;
+ wl_proxd_params_iovar_t proxd_tune;
+ wl_proxd_params_tof_method_t *tof_params = &proxd_params.u.tof_params;
+ rtt_status_info_t *rtt_status;
+ rtt_target_info_t *rtt_target;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+	/* turn off mpc when not associated */
+ if (!dhd_is_associated(dhd, 0, NULL)) {
+ err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set proxd_tune\n", __FUNCTION__));
+ goto exit;
+ }
+ set_mpc = TRUE;
+ }
+
+ if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+ err = BCME_RANGE;
+ goto exit;
+ }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ bzero(&proxd_tune, sizeof(proxd_tune));
+ bzero(&proxd_params, sizeof(proxd_params));
+ mutex_lock(&rtt_status->rtt_mutex);
+ /* Get a target information */
+ rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ mutex_unlock(&rtt_status->rtt_mutex);
+ /* set role */
+ proxd_iovar.method = PROXD_TOF_METHOD;
+ proxd_iovar.mode = WL_PROXD_MODE_INITIATOR;
+
+	/* make sure that proxd is stopped */
+ /* dhd_iovar(dhd, 0, "proxd_stop", (char *)NULL, 0, 1); */
+
+ err = dhd_iovar(dhd, 0, "proxd", (char *)&proxd_iovar, sizeof(proxd_iovar), 1);
+ if (err < 0 && err != BCME_BUSY) {
+ DHD_ERROR(("%s : failed to set proxd %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+ if (err == BCME_BUSY) {
+ DHD_RTT(("BCME_BUSY occurred\n"));
+ }
+ /* mac address */
+ bcopy(&rtt_target->addr, &tof_params->tgt_mac, ETHER_ADDR_LEN);
+ /* frame count */
+ if (rtt_target->ftm_cnt > RTT_MAX_FRAME_CNT) {
+ rtt_target->ftm_cnt = RTT_MAX_FRAME_CNT;
+ }
+
+ if (rtt_target->ftm_cnt) {
+ tof_params->ftm_cnt = htol16(rtt_target->ftm_cnt);
+ } else {
+ tof_params->ftm_cnt = htol16(DEFAULT_FTM_CNT);
+ }
+
+ if (rtt_target->retry_cnt > RTT_MAX_RETRY_CNT) {
+ rtt_target->retry_cnt = RTT_MAX_RETRY_CNT;
+ }
+
+ /* retry count */
+ if (rtt_target->retry_cnt) {
+ tof_params->retry_cnt = htol16(rtt_target->retry_cnt);
+ } else {
+ tof_params->retry_cnt = htol16(DEFAULT_RETRY_CNT);
+ }
+
+ /* chanspec */
+ tof_params->chanspec = htol16(rtt_target->chanspec);
+ /* set parameter */
+ DHD_RTT(("Target addr(Idx %d) %s, Channel : %s for RTT (ftm_cnt %d, rety_cnt : %d)\n",
+ rtt_status->cur_idx,
+ bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr, eabuf),
+ wf_chspec_ntoa(rtt_target->chanspec, chanbuf), rtt_target->ftm_cnt,
+ rtt_target->retry_cnt));
+
+ if (rtt_target->type == RTT_ONE_WAY) {
+ proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_ONEWAY);
+ /* report RTT results for initiator */
+ proxd_tune.u.tof_tune.flags |= htol32(WL_PROXD_FLAG_INITIATOR_RPTRTT);
+ proxd_tune.u.tof_tune.vhtack = 0;
+ tof_params->tx_rate = htol16(WL_RATE_6M);
+ tof_params->vht_rate = htol16((WL_RATE_6M >> 16));
+ } else { /* RTT TWO WAY */
+ /* initiator will send the rtt result to the target */
+ proxd_tune.u.tof_tune.flags = htol32(WL_PROXD_FLAG_INITIATOR_REPORT);
+ tof_params->timeout = 10; /* 10ms for timeout */
+ rspec = WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+ nss = 1; /* default Nss = 1 */
+ mcs = 0; /* default MCS 0 */
+ rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+ bw = 0;
+ switch (CHSPEC_BW(rtt_target->chanspec)) {
+ case WL_CHANSPEC_BW_20:
+ bw = WL_RSPEC_BW_20MHZ;
+ break;
+ case WL_CHANSPEC_BW_40:
+ bw = WL_RSPEC_BW_40MHZ;
+ break;
+ case WL_CHANSPEC_BW_80:
+ bw = WL_RSPEC_BW_80MHZ;
+ break;
+ case WL_CHANSPEC_BW_160:
+ bw = WL_RSPEC_BW_160MHZ;
+ break;
+ default:
+ DHD_ERROR(("CHSPEC_BW not supported : %d",
+ CHSPEC_BW(rtt_target->chanspec)));
+ goto exit;
+ }
+ rspec |= bw;
+ tof_params->tx_rate = htol16(rspec & 0xffff);
+ tof_params->vht_rate = htol16(rspec >> 16);
+ }
+
+ /* Set Method to TOF */
+ proxd_tune.method = PROXD_TOF_METHOD;
+ err = dhd_iovar(dhd, 0, "proxd_tune", (char *)&proxd_tune, sizeof(proxd_tune), 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set proxd_tune %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+
+ /* Set Method to TOF */
+ proxd_params.method = PROXD_TOF_METHOD;
+ err = dhd_iovar(dhd, 0, "proxd_params", (char *)&proxd_params, sizeof(proxd_params), 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set proxd_params %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+ err = dhd_iovar(dhd, 0, "proxd_find", (char *)NULL, 0, 1);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to set proxd_find %d\n", __FUNCTION__, err));
+ goto exit;
+ }
+exit:
+ if (err < 0) {
+ rtt_status->status = RTT_STOPPED;
+ if (set_mpc) {
+ /* enable mpc again in case of error */
+ mpc = 1;
+ err = dhd_iovar(dhd, 0, "mpc", (char *)&mpc, sizeof(mpc), 1);
+ }
+ }
+ return err;
+}
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
+{
+ int err = BCME_OK;
+ struct rtt_noti_callback *cb = NULL, *iter;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ spin_lock_bh(¬i_list_lock);
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ if (iter->noti_fn == noti_fn) {
+ goto exit;
+ }
+ }
+ cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
+ if (!cb) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ cb->noti_fn = noti_fn;
+ cb->ctx = ctx;
+ list_add(&cb->list, &rtt_status->noti_fn_list);
+exit:
+ spin_unlock_bh(¬i_list_lock);
+ return err;
+}
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn)
+{
+ int err = BCME_OK;
+ struct rtt_noti_callback *cb = NULL, *iter;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ NULL_CHECK(noti_fn, "noti_fn is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ spin_lock_bh(¬i_list_lock);
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ if (iter->noti_fn == noti_fn) {
+ cb = iter;
+ list_del(&cb->list);
+ break;
+ }
+ }
+ spin_unlock_bh(¬i_list_lock);
+ if (cb) {
+ kfree(cb);
+ }
+ return err;
+}
+
+static int
+dhd_rtt_convert_to_host(rtt_result_t *rtt_results, const wl_proxd_event_data_t* evp)
+{
+ int err = BCME_OK;
+ int i;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ char diststr[40];
+ struct timespec ts;
+ NULL_CHECK(rtt_results, "rtt_results is NULL", err);
+ NULL_CHECK(evp, "evp is NULL", err);
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ rtt_results->distance = ntoh32(evp->distance);
+ rtt_results->sdrtt = ntoh32(evp->sdrtt);
+ rtt_results->ftm_cnt = ntoh16(evp->ftm_cnt);
+ rtt_results->avg_rssi = ntoh16(evp->avg_rssi);
+ rtt_results->validfrmcnt = ntoh16(evp->validfrmcnt);
+ rtt_results->meanrtt = ntoh32(evp->meanrtt);
+ rtt_results->modertt = ntoh32(evp->modertt);
+ rtt_results->medianrtt = ntoh32(evp->medianrtt);
+ rtt_results->err_code = evp->err_code;
+ rtt_results->tx_rate.preamble = (evp->OFDM_frame_type == TOF_FRAME_RATE_VHT)? 3 : 0;
+ rtt_results->tx_rate.nss = 0; /* 1 x 1 */
+ rtt_results->tx_rate.bw =
+ (evp->bandwidth == TOF_BW_80MHZ)? 2 : (evp->bandwidth == TOF_BW_40MHZ)? 1 : 0;
+ rtt_results->TOF_type = evp->TOF_type;
+ if (evp->TOF_type == TOF_TYPE_ONE_WAY) {
+ /* convert to 100kbps unit */
+ rtt_results->tx_rate.bitrate = WL_RATE_6M * 5;
+ rtt_results->tx_rate.rateMcsIdx = WL_RATE_6M;
+ } else {
+ rtt_results->tx_rate.bitrate = WL_RATE_6M * 5;
+ rtt_results->tx_rate.rateMcsIdx = 0; /* MCS 0 */
+ }
+ memset(diststr, 0, sizeof(diststr));
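+	/* distance is reported in 1/16 m units: whole metres = distance >> 4, and the
+	 * remaining 1/16ths are converted to mm ((frac * 1000) / 16 == (frac * 125) >> 1)
+	 */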
+ if (rtt_results->distance == 0xffffffff || rtt_results->distance == 0) {
+ sprintf(diststr, "distance=-1m\n");
+ } else {
+ sprintf(diststr, "distance=%d.%d m\n",
+ rtt_results->distance >> 4, ((rtt_results->distance & 0xf) * 125) >> 1);
+ }
+
+ if (ntoh32(evp->mode) == WL_PROXD_MODE_INITIATOR) {
+ DHD_RTT(("Target:(%s) %s;\n", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr));
+ DHD_RTT(("RTT : mean %d mode %d median %d\n", rtt_results->meanrtt,
+ rtt_results->modertt, rtt_results->medianrtt));
+ } else {
+ DHD_RTT(("Initiator:(%s) %s; ", bcm_ether_ntoa((&evp->peer_mac), eabuf), diststr));
+ }
+ if (rtt_results->sdrtt > 0) {
+ DHD_RTT(("sigma:%d.%d\n", rtt_results->sdrtt/10, rtt_results->sdrtt % 10));
+ } else {
+ DHD_RTT(("sigma:0\n"));
+ }
+
+ DHD_RTT(("rssi:%d validfrmcnt %d, err_code : %d\n", rtt_results->avg_rssi,
+ rtt_results->validfrmcnt, evp->err_code));
+
+ switch (evp->err_code) {
+ case TOF_REASON_OK:
+ rtt_results->err_code = RTT_REASON_SUCCESS;
+ break;
+ case TOF_REASON_TIMEOUT:
+ rtt_results->err_code = RTT_REASON_TIMEOUT;
+ break;
+ case TOF_REASON_NOACK:
+ rtt_results->err_code = RTT_REASON_NO_RSP;
+ break;
+ case TOF_REASON_ABORT:
+ rtt_results->err_code = RTT_REASON_ABORT;
+ break;
+ default:
+ rtt_results->err_code = RTT_REASON_FAILURE;
+ break;
+ }
+ rtt_results->peer_mac = evp->peer_mac;
+ /* get the time elapsed from boot time */
+ get_monotonic_boottime(&ts);
+ rtt_results->ts = (uint64) TIMESPEC_TO_US(ts);
+
+ for (i = 0; i < rtt_results->ftm_cnt; i++) {
+ rtt_results->ftm_buff[i].value = ltoh32(evp->ftm_buff[i].value);
+ rtt_results->ftm_buff[i].rssi = ltoh32(evp->ftm_buff[i].rssi);
+ }
+ return err;
+}
+
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
+{
+ int err = BCME_OK;
+ int len = 0;
+ int idx;
+ uint status, event_type, flags, reason, ftm_cnt;
+ rtt_status_info_t *rtt_status;
+ wl_proxd_event_data_t* evp;
+ struct rtt_noti_callback *iter;
+ rtt_result_t *rtt_result, *entry, *next;
+ gfp_t kflags;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ event_type = ntoh32_ua((void *)&event->event_type);
+ flags = ntoh16_ua((void *)&event->flags);
+ status = ntoh32_ua((void *)&event->status);
+ reason = ntoh32_ua((void *)&event->reason);
+
+ if (event_type != WLC_E_PROXD) {
+ goto exit;
+ }
+ kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+ evp = (wl_proxd_event_data_t*)event_data;
+ DHD_RTT(("%s enter : mode: %s, reason :%d \n", __FUNCTION__,
+ (ntoh16(evp->mode) == WL_PROXD_MODE_INITIATOR)?
+ "initiator":"target", reason));
+ switch (reason) {
+ case WLC_E_PROXD_STOP:
+ DHD_RTT(("WLC_E_PROXD_STOP\n"));
+ break;
+ case WLC_E_PROXD_ERROR:
+ case WLC_E_PROXD_COMPLETED:
+ if (reason == WLC_E_PROXD_ERROR) {
+ DHD_RTT(("WLC_E_PROXD_ERROR\n"));
+ } else {
+ DHD_RTT(("WLC_E_PROXD_COMPLETED\n"));
+ }
+
+ if (!in_atomic()) {
+ mutex_lock(&rtt_status->rtt_mutex);
+ }
+ ftm_cnt = ntoh16(evp->ftm_cnt);
+
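+		/* size the allocation so the trailing ftm_buff[] array can hold ftm_cnt samples */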
+ if (ftm_cnt > 0) {
+ len = OFFSETOF(rtt_result_t, ftm_buff);
+ } else {
+ len = sizeof(rtt_result_t);
+ }
+		/* check whether the result has already been reported */
+ list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+ if (!memcmp(&entry->peer_mac, &evp->peer_mac, ETHER_ADDR_LEN)) {
+ if (!in_atomic()) {
+ mutex_unlock(&rtt_status->rtt_mutex);
+ }
+ goto exit;
+ }
+ }
+ rtt_result = kzalloc(len + sizeof(ftm_sample_t) * ftm_cnt, kflags);
+ if (!rtt_result) {
+ if (!in_atomic()) {
+ mutex_unlock(&rtt_status->rtt_mutex);
+ }
+ err = -ENOMEM;
+ goto exit;
+ }
+		/* point to the current target_info in the status struct */
+ rtt_result->target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+ /* find next target to trigger RTT */
+ for (idx = (rtt_status->cur_idx + 1);
+ idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+ /* convert the event results to host format */
+ dhd_rtt_convert_to_host(rtt_result, evp);
+ list_add_tail(&rtt_result->list, &rtt_status->rtt_results_cache);
+ if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+ /* restart to measure RTT from next device */
+ schedule_work(&rtt_status->work);
+ } else {
+ DHD_RTT(("RTT_STOPPED\n"));
+ rtt_status->status = RTT_STOPPED;
+ /* to turn on mpc mode */
+ schedule_work(&rtt_status->work);
+ /* notify the completed information to others */
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* remove the rtt results in cache */
+ list_for_each_entry_safe(rtt_result, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ /* reinit the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ bzero(&rtt_status->rtt_config, sizeof(rtt_status->rtt_config));
+ rtt_status->cur_idx = 0;
+ }
+ if (!in_atomic()) {
+ mutex_unlock(&rtt_status->rtt_mutex);
+ }
+
+ break;
+ case WLC_E_PROXD_GONE:
+ DHD_RTT(("WLC_E_PROXD_GONE\n"));
+ break;
+ case WLC_E_PROXD_START:
+ /* event for targets / accesspoints */
+ DHD_RTT(("WLC_E_PROXD_START\n"));
+ break;
+ case WLC_E_PROXD_COLLECT_START:
+ DHD_RTT(("WLC_E_PROXD_COLLECT_START\n"));
+ break;
+ case WLC_E_PROXD_COLLECT_STOP:
+ DHD_RTT(("WLC_E_PROXD_COLLECT_STOP\n"));
+ break;
+ case WLC_E_PROXD_COLLECT_COMPLETED:
+ DHD_RTT(("WLC_E_PROXD_COLLECT_COMPLETED\n"));
+ break;
+ case WLC_E_PROXD_COLLECT_ERROR:
+ DHD_RTT(("WLC_E_PROXD_COLLECT_ERROR; "));
+ break;
+ default:
+ DHD_ERROR(("WLC_E_PROXD: supported EVENT reason code:%d\n", reason));
+ break;
+ }
+
+exit:
+ return err;
+}
+
+static void
+dhd_rtt_work(struct work_struct *work)
+{
+ rtt_status_info_t *rtt_status;
+ dhd_pub_t *dhd;
+ rtt_status = container_of(work, rtt_status_info_t, work);
+ if (rtt_status == NULL) {
+ DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
+ return;
+ }
+ dhd = rtt_status->dhd;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+ (void) dhd_rtt_start(dhd);
+}
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa)
+{
+ rtt_status_info_t *rtt_status;
+ int err = BCME_OK;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ NULL_CHECK(capa, "capa is NULL", err);
+ bzero(capa, sizeof(rtt_capabilities_t));
+
+ if (rtt_status->capability & RTT_CAP_ONE_WAY) {
+ capa->rtt_one_sided_supported = 1;
+ }
+ if (rtt_status->capability & RTT_CAP_11V_WAY) {
+ capa->rtt_11v_supported = 1;
+ }
+ if (rtt_status->capability & RTT_CAP_11MC_WAY) {
+ capa->rtt_ftm_supported = 1;
+ }
+ if (rtt_status->capability & RTT_CAP_VS_WAY) {
+ capa->rtt_vs_supported = 1;
+ }
+
+ return err;
+}
+
+int
+dhd_rtt_init(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ if (dhd->rtt_state) {
+ goto exit;
+ }
+ dhd->rtt_state = MALLOC(dhd->osh, sizeof(rtt_status_info_t));
+ if (dhd->rtt_state == NULL) {
+ DHD_ERROR(("failed to create rtt_state\n"));
+ goto exit;
+ }
+ bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
+ rtt_status = GET_RTTSTATE(dhd);
+ rtt_status->dhd = dhd;
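+	/* probe for proximity-detection (proxd) support: any result other than
+	 * BCME_UNSUPPORTED for the "proxd_params" iovar means the firmware has the RTT engine
+	 */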
+ err = dhd_iovar(dhd, 0, "proxd_params", NULL, 0, 1);
+ if (err != BCME_UNSUPPORTED) {
+ rtt_status->capability |= RTT_CAP_ONE_WAY;
+ rtt_status->capability |= RTT_CAP_VS_WAY;
+ DHD_ERROR(("%s: Support RTT Service\n", __FUNCTION__));
+ }
+ mutex_init(&rtt_status->rtt_mutex);
+ INIT_LIST_HEAD(&rtt_status->noti_fn_list);
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ INIT_WORK(&rtt_status->work, dhd_rtt_work);
+exit:
+ return err;
+}
+
+int
+dhd_rtt_deinit(dhd_pub_t *dhd)
+{
+ int err = BCME_OK;
+ rtt_status_info_t *rtt_status;
+ rtt_result_t *rtt_result, *next;
+ struct rtt_noti_callback *iter, *iter2;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ rtt_status->status = RTT_STOPPED;
+ /* clear evt callback list */
+ if (!list_empty(&rtt_status->noti_fn_list)) {
+ list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+ /* remove the rtt results */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ list_for_each_entry_safe(rtt_result, next, &rtt_status->rtt_results_cache, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ }
+ MFREE(dhd->osh, dhd->rtt_state, sizeof(rtt_status_info_t));
+ dhd->rtt_state = NULL;
+ return err;
+}
+#endif /* RTT_SUPPORT */
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), RTT
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * $Id: dhd_rtt.h 558438 2015-05-22 06:05:11Z $
+ */
+#ifndef __DHD_RTT_H__
+#define __DHD_RTT_H__
+
+#include "dngl_stats.h"
+
+#define RTT_MAX_TARGET_CNT 10
+#define RTT_MAX_FRAME_CNT 25
+#define RTT_MAX_RETRY_CNT 10
+#define DEFAULT_FTM_CNT 6
+#define DEFAULT_RETRY_CNT 6
+
+
+/* DSSS, CCK and 802.11n rates in [500kbps] units */
+#define WL_MAXRATE 108 /* in 500kbps units */
+#define WL_RATE_1M 2 /* in 500kbps units */
+#define WL_RATE_2M 4 /* in 500kbps units */
+#define WL_RATE_5M5 11 /* in 500kbps units */
+#define WL_RATE_11M 22 /* in 500kbps units */
+#define WL_RATE_6M 12 /* in 500kbps units */
+#define WL_RATE_9M 18 /* in 500kbps units */
+#define WL_RATE_12M 24 /* in 500kbps units */
+#define WL_RATE_18M 36 /* in 500kbps units */
+#define WL_RATE_24M 48 /* in 500kbps units */
+#define WL_RATE_36M 72 /* in 500kbps units */
+#define WL_RATE_48M 96 /* in 500kbps units */
+#define WL_RATE_54M 108 /* in 500kbps units */
+
+
+enum rtt_role {
+ RTT_INITIATOR = 0,
+ RTT_TARGET = 1
+};
+enum rtt_status {
+ RTT_STOPPED = 0,
+ RTT_STARTED = 1
+};
+typedef int64_t wifi_timestamp; /* In microseconds (us) */
+typedef int64_t wifi_timespan;
+typedef int wifi_rssi;
+
+typedef enum {
+ RTT_INVALID,
+ RTT_ONE_WAY,
+ RTT_TWO_WAY,
+ RTT_AUTO
+} rtt_type_t;
+
+typedef enum {
+ RTT_PEER_STA,
+ RTT_PEER_AP,
+ RTT_PEER_P2P,
+ RTT_PEER_NAN,
+ RTT_PEER_INVALID
+} rtt_peer_type_t;
+
+typedef enum rtt_reason {
+ RTT_REASON_SUCCESS,
+ RTT_REASON_FAILURE,
+ RTT_REASON_NO_RSP,
+ RTT_REASON_REJECTED,
+ RTT_REASON_NOT_SCHEDULED_YET,
+ RTT_REASON_TIMEOUT,
+ RTT_REASON_AP_ON_DIFF_CH,
+ RTT_REASON_AP_NO_CAP,
+ RTT_REASON_ABORT
+} rtt_reason_t;
+
+typedef enum rtt_capability {
+ RTT_CAP_NONE = 0,
+ RTT_CAP_ONE_WAY = (1 << (0)),
+ RTT_CAP_11V_WAY = (1 << (1)), /* IEEE802.11v */
+ RTT_CAP_11MC_WAY = (1 << (2)), /* IEEE802.11mc */
+ RTT_CAP_VS_WAY = (1 << (3)) /* BRCM vendor specific */
+} rtt_capability_t;
+
+typedef struct wifi_channel_info {
+ wifi_channel_width_t width;
+ wifi_channel center_freq; /* primary 20 MHz channel */
+ wifi_channel center_freq0; /* center freq (MHz) first segment */
+ wifi_channel center_freq1; /* center freq (MHz) second segment valid for 80 + 80 */
+} wifi_channel_info_t;
+
+typedef struct wifi_rate {
+ uint32 preamble :3; /* 0: OFDM, 1: CCK, 2 : HT, 3: VHT, 4..7 reserved */
+ uint32 nss :2; /* 0 : 1x1, 1: 2x2, 3: 3x3, 4: 4x4 */
+ uint32 bw :3; /* 0: 20Mhz, 1: 40Mhz, 2: 80Mhz, 3: 160Mhz */
+ /* OFDM/CCK rate code would be as per IEEE std in the unit of 0.5 mb
+ * HT/VHT it would be mcs index
+ */
+ uint32 rateMcsIdx :8;
+ uint32 reserved :16; /* reserved */
+ uint32 bitrate; /* unit of 100 Kbps */
+} wifi_rate_t;
+
+typedef struct rtt_target_info {
+ struct ether_addr addr;
+ rtt_type_t type; /* rtt_type */
+ rtt_peer_type_t peer; /* peer type */
+ wifi_channel_info_t channel; /* channel information */
+ chanspec_t chanspec; /* chanspec for channel */
+	int8 continuous; /* 0 = single shot, 1 = continuous ranging */
+ bool disable; /* disable for RTT measurement */
+ uint32 interval; /* interval of RTT measurement (unit ms) when continuous = true */
+	uint32 measure_cnt; /* total number of RTT measurements when continuous */
+ uint32 ftm_cnt; /* num of packets in each RTT measurement */
+ uint32 retry_cnt; /* num of retries if sampling fails */
+} rtt_target_info_t;
+
+typedef struct rtt_result {
+ struct list_head list;
+ uint16 ver; /* version */
+ rtt_target_info_t *target_info; /* target info */
+ uint16 mode; /* mode: target/initiator */
+ uint16 method; /* method: rssi/TOF/AOA */
+ uint8 err_code; /* error classification */
+ uint8 TOF_type; /* one way or two way TOF */
+ wifi_rate_t tx_rate; /* tx rate */
+	struct ether_addr peer_mac; /* peer MAC address (e.g. for a target: the initiator's address) */
+ int32 distance; /* dst to tgt, units (meter * 16) */
+ uint32 meanrtt; /* mean delta */
+ uint32 modertt; /* Mode delta */
+ uint32 medianrtt; /* median RTT */
+ uint32 sdrtt; /* Standard deviation of RTT */
+ int16 avg_rssi; /* avg rssi across the ftm frames */
+ int16 validfrmcnt; /* Firmware's valid frame counts */
+ wifi_timestamp ts; /* the time elapsed from boot time when driver get this result */
+	uint16 ftm_cnt; /* num of rtt measurements / length of the ftm buffer */
+ ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */
+} rtt_result_t;
+
+typedef struct rtt_report {
+ struct ether_addr addr;
+	uint num_measurement; /* measurement number in case of continuous ranging */
+	rtt_reason_t status; /* ranging status */
+ rtt_type_t type; /* rtt type */
+ rtt_peer_type_t peer; /* peer type */
+ wifi_channel_info_t channel; /* channel information */
+	wifi_rssi rssi; /* avg rssi across the ftm frames */
+	wifi_rssi rssi_spread; /* rssi spread in 0.5 dB steps, e.g. 5 implies 2.5 dB spread */
+ wifi_rate_t tx_rate; /* tx rate */
+ wifi_timespan rtt; /* round trip time in nanoseconds */
+ wifi_timespan rtt_sd; /* rtt standard deviation in nanoseconds */
+ wifi_timespan rtt_spread; /* difference between max and min rtt times recorded */
+ int32 distance; /* distance in cm (optional) */
+ int32 distance_sd; /* standard deviation in cm (optional) */
+ int32 distance_spread; /* difference between max and min distance recorded (optional) */
+ wifi_timestamp ts; /* time of the measurement (in microseconds since boot) */
+} rtt_report_t;
+
+/* RTT Capabilities */
+typedef struct rtt_capabilities {
+ uint8 rtt_one_sided_supported; /* if 1-sided rtt data collection is supported */
+ uint8 rtt_11v_supported; /* if 11v rtt data collection is supported */
+ uint8 rtt_ftm_supported; /* if ftm rtt data collection is supported */
+ uint8 rtt_vs_supported; /* if vendor specific data collection supported */
+} rtt_capabilities_t;
+
+typedef struct rtt_config_params {
+ int8 rtt_target_cnt;
+ rtt_target_info_t target_info[RTT_MAX_TARGET_CNT];
+} rtt_config_params_t;
+
+typedef void (*dhd_rtt_compl_noti_fn)(void *ctx, void *rtt_data);
+/* Linux wrapper to call common dhd_rtt_set_cfg */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf);
+
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt);
+
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx,
+ dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+
+/* export to upper layer */
+chanspec_t
+dhd_rtt_convert_to_chspec(wifi_channel_info_t channel);
+
+int
+dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
+
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
+
+int
+dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_unregister_noti_callback(dhd_pub_t *dhd, dhd_rtt_compl_noti_fn noti_fn);
+
+int
+dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
+
+int
+dhd_rtt_capability(dhd_pub_t *dhd, rtt_capabilities_t *capa);
+
+int
+dhd_rtt_init(dhd_pub_t *dhd);
+
+int
+dhd_rtt_deinit(dhd_pub_t *dhd);
+#endif /* __DHD_RTT_H__ */
/*
* DHD Bus Module for SDIO
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_sdio.c 506046 2014-10-02 12:40:12Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_sdio.c 593728 2015-10-19 09:20:32Z $
*/
#include <typedefs.h>
#include <hndpmu.h>
#include <hndsoc.h>
#include <bcmsdpcm.h>
-#if defined(DHD_DEBUG)
#include <hnd_armtrap.h>
#include <hnd_cons.h>
-#endif /* defined(DHD_DEBUG) */
#include <sbchipc.h>
#include <sbhnddma.h>
#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
-#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */
#ifndef DHD_FIRSTREAD
*/
#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
PKTFREE(bus->dhd->osh, pkt, FALSE);
+
+#ifdef PKT_STATICS
+pkt_statics_t tx_statics = {0};
+#endif
+
DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
#if defined(MULTIPLE_SUPPLICANT)
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif
-#ifdef DHD_DEBUG
/* Device console log buffer state */
#define CONSOLE_LINE_MAX 192
#define CONSOLE_BUFFER_MAX 2024
uint8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
} dhd_console_t;
-#endif /* DHD_DEBUG */
#define REMAP_ENAB(bus) ((bus)->remap)
#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
char *fw_path; /* module_param: path to firmware image */
char *nv_path; /* module_param: path to nvram vars file */
- const char *nvram_params; /* user specified nvram params. */
uint blocksize; /* Block size of SDIO transfers */
uint roundup; /* Max roundup limit */
int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
bool use_rxchain; /* If dhd should use PKT chains */
bool sleeping; /* Is SDIO bus sleeping? */
+#if defined(SUPPORT_P2P_GO_PS)
wait_queue_head_t bus_sleep;
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
uint rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
#ifdef DHDENABLE_TAILPAD
void *pad_pkt;
#endif /* DHDENABLE_TAILPAD */
- uint txglomframes; /* Number of tx glom frames (superframes) */
- uint txglompkts; /* Number of packets from tx glom frames */
+ uint txglomframes; /* Number of tx glom frames (superframes) */
+ uint txglompkts; /* Number of packets from tx glom frames */
} dhd_bus_t;
/* clkstate */
#define DHD_NOPMU(dhd) (FALSE)
+#if defined(BCMSDIOH_STD)
+#define BLK_64_MAXTXGLOM 20
+#endif /* BCMSDIOH_STD */
+
#ifdef DHD_DEBUG
static int qcount[NUMPRIO];
static int tx_packets[NUMPRIO];
extern uint dhd_watchdog_ms;
extern void dhd_os_wd_timer(void *bus, uint wdtick);
+int dhd_enableOOB(dhd_pub_t *dhd, bool sleep);
/* Tx/Rx bounds */
uint dhd_txbound;
#define ALIGNMENT 4
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
+#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
#endif
/* Try doing readahead */
static bool dhd_readahead;
+#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT)
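+/*
+ * Tx flow-control window helpers: (tx_max - tx_seq) is the credit window granted
+ * by the dongle, tx_max_offset reserves extra credits, and the 0x80 test rejects
+ * a wrapped (negative) window.
+ */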
+bool
+dhdsdio_is_dataok(dhd_bus_t *bus)
+{
+	return (((uint8)(bus->tx_max - bus->tx_seq) - bus->dhd->conf->tx_max_offset > 1) &&
+		(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0));
+}
+
+uint8
+dhdsdio_get_databufcnt(dhd_bus_t *bus)
+{
+	return ((uint8)(bus->tx_max - bus->tx_seq) - 1 - bus->dhd->conf->tx_max_offset);
+}
+#endif
+
/* To check if there's window offered */
+#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT)
+#define DATAOK(bus) dhdsdio_is_dataok(bus)
+#else
#define DATAOK(bus) \
(((uint8)(bus->tx_max - bus->tx_seq) > 1) && \
(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
+#endif
/* To check if there's window offered for ctrl frame */
#define TXCTLOK(bus) \
(((uint8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
/* Number of pkts available in dongle for data RX */
+#if defined(SWTXGLOM) || defined(BCMSDIOH_TXGLOM_EXT)
+#define DATABUFCNT(bus) dhdsdio_get_databufcnt(bus)
+#else
#define DATABUFCNT(bus) \
((uint8)(bus->tx_max - bus->tx_seq) - 1)
+#endif
/* Macros to get register read/write status */
/* NOTE: these assume a local dhdsdio_bus_t *bus! */
static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
#endif
-#ifdef DHD_DEBUG
static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
+#ifdef DHD_DEBUG
static int dhd_serialconsole(dhd_bus_t *bus, bool get, bool enable, int *bcmerror);
#endif /* DHD_DEBUG */
+#if defined(DHD_FW_COREDUMP)
+static int dhdsdio_mem_dump(dhd_bus_t *bus);
+#endif /* DHD_FW_COREDUMP */
static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
int prev_chain_total_len, bool last_chained_pkt,
- int *pad_pkt_len, void **new_pkt);
+ int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+	, int first_frame
+#endif
+);
static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
(bus->sih->chip == BCM4339_CHIP_ID) ||
(bus->sih->chip == BCM43349_CHIP_ID) ||
(bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM43454_CHIP_ID) ||
(bus->sih->chip == BCM4354_CHIP_ID) ||
(bus->sih->chip == BCM4356_CHIP_ID) ||
(bus->sih->chip == BCM4358_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID) ||
(bus->sih->chip == BCM43349_CHIP_ID) ||
(bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM43454_CHIP_ID) ||
(bus->sih->chip == BCM4354_CHIP_ID) ||
(bus->sih->chip == BCM4356_CHIP_ID) ||
(bus->sih->chip == BCM4358_CHIP_ID) ||
if ((bus->sih->chip == BCM4350_CHIP_ID) ||
(bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM43454_CHIP_ID) ||
(bus->sih->chip == BCM4354_CHIP_ID) ||
(bus->sih->chip == BCM4356_CHIP_ID) ||
(bus->sih->chip == BCM4358_CHIP_ID) ||
static int
dhdsdio_srwar_init(dhd_bus_t *bus)
{
-#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_gpio_init(bus->sdh);
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
#ifdef USE_OOB_GPIO1
dhdsdio_oobwakeup_init(bus);
#define KSO_WAIT_US 50
#define KSO_WAIT_MS 1
#define KSO_SLEEP_RETRY_COUNT 20
+#define KSO_WAKE_RETRY_COUNT 100
#define ERROR_BCME_NODEVICE_MAX 1
-#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#ifndef CUSTOM_MAX_KSO_ATTEMPTS
+#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS
+#endif
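+/* The KSO (keep-SDIO-on) write to SLEEPCSR below is retried up to
+ * CUSTOM_MAX_KSO_ATTEMPTS times, with a KSO_WAIT_US delay between attempts.
+ */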
+
static int
dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
{
int err = 0;
int try_cnt = 0;
- if (!bus->dhd->conf->kso_enable)
- return 0;
-
KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
bmask = cmp_val;
OSL_SLEEP(3);
+
} else {
/* Put device to sleep, turn off KSO */
cmp_val = 0;
OSL_DELAY(KSO_WAIT_US);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
- } while (try_cnt++ < MAX_KSO_ATTEMPTS);
+ } while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS);
if (try_cnt > 2)
KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
- if (try_cnt > MAX_KSO_ATTEMPTS) {
+ if (try_cnt > CUSTOM_MAX_KSO_ATTEMPTS) {
DHD_ERROR(("%s> op:%s, ERROR: try_cnt:%d, rd_val:%x, ERR:%x \n",
__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
}
+
return err;
}
#ifdef USE_CMD14
err = bcmsdh_sleep(bus->sdh, TRUE);
#else
+
+
err = dhdsdio_clk_kso_enab(bus, FALSE);
if (OOB_WAKEUP_ENAB(bus))
{
-#if !defined(NDISVER) || (NDISVER < 0x0630)
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
#endif /* USE_CMD14 */
} else {
DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
-#if !defined(NDISVER) || (NDISVER < 0x0630)
if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
}
}
-#endif
#ifdef USE_CMD14
err = bcmsdh_sleep(bus->sdh, FALSE);
if (SLPAUTO_ENAB(bus) && (err != 0)) {
#else
if (OOB_WAKEUP_ENAB(bus))
{
-#if !defined(NDISVER) || (NDISVER < 0x0630)
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
do {
err = dhdsdio_clk_kso_enab(bus, TRUE);
DHD_ERROR(("ERROR: kso set failed retry: %d\n", retry));
err = 0; /* continue anyway */
}
+
+
#endif /* !USE_CMD14 */
if (err == 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
+ bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
dhd_os_send_hang_message(bus->dhd);
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
/* Going to sleep: set the alarm and turn off the lights... */
if (sleep) {
/* Don't sleep if something is pending */
+#ifdef DHD_USE_IDLECOUNT
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes ||
+ bus->ctrl_frame_stat)
+#else
if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+#endif /* DHD_USE_IDLECOUNT */
return BCME_BUSY;
/* Change state */
bus->sleeping = TRUE;
+#if defined(SUPPORT_P2P_GO_PS)
wake_up(&bus->bus_sleep);
+#endif /* LINUX && SUPPORT_P2P_GO_PS */
} else {
/* Waking up: bus power up is ok, set local state */
return err;
}
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
+{
+ int func_blk_size = function_num;
+ int bcmerr = 0;
+ int result;
+
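+	/* sd_blocksize iovar: a GET takes the function number and returns its current
+	 * block size; a SET encodes both as (function_num << 16 | block_size)
+	 */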
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", &func_blk_size,
+ sizeof(int), &result, sizeof(int), IOV_GET);
+	if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Get F%d Block size error\n", __FUNCTION__, function_num));
+ return BCME_ERROR;
+ }
+
+ if (result != block_size) {
+ DHD_TRACE_HW4(("%s: F%d Block size set from %d to %d\n",
+ __FUNCTION__, function_num, result, block_size));
+ func_blk_size = function_num << 16 | block_size;
+ bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
+ 0, &func_blk_size, sizeof(int32), IOV_SET);
+ if (bcmerr != BCME_OK) {
+ DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+
-#if defined(OOB_INTR_ONLY)
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
void
dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
{
-#if defined(HW_OOB)
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
#else
sdpcmd_regs_t *regs = bus->regs;
/* Schedule DPC if needed to send queued packet(s) */
if (dhd_deferred_tx && !bus->dpc_sched) {
- bus->dpc_sched = TRUE;
- dhd_sched_dpc(bus->dhd);
+ if (bus->dhd->conf->deferred_tx_len) {
+ if(dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ if(pktq_len(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
+ dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
}
} else {
int chan = SDPCM_DATA_CHANNEL;
*/
static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
int prev_chain_total_len, bool last_chained_pkt,
- int *pad_pkt_len, void **new_pkt)
+ int *pad_pkt_len, void **new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ , int first_frame
+#endif
+)
{
osl_t *osh;
uint8 *frame;
uint32 swhdr_offset;
bool alloc_new_pkt = FALSE;
uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+#ifdef PKT_STATICS
+ uint16 len;
+#endif
*new_pkt = NULL;
osh = bus->dhd->osh;
}
}
#endif /* WLMEDIA_HTSF */
+#ifdef PKT_STATICS
+ len = (uint16)PKTLEN(osh, pkt);
+ switch(chan) {
+ case SDPCM_CONTROL_CHANNEL:
+ tx_statics.ctrl_count++;
+ tx_statics.ctrl_size += len;
+ break;
+ case SDPCM_DATA_CHANNEL:
+ tx_statics.data_count++;
+ tx_statics.data_size += len;
+ break;
+ case SDPCM_GLOM_CHANNEL:
+ tx_statics.glom_count++;
+ tx_statics.glom_size += len;
+ break;
+ case SDPCM_EVENT_CHANNEL:
+ tx_statics.event_count++;
+ tx_statics.event_size += len;
+ break;
+ case SDPCM_TEST_CHANNEL:
+ tx_statics.test_count++;
+ tx_statics.test_size += len;
+ break;
+
+ default:
+ break;
+ }
+#endif /* PKT_STATICS */
#ifdef DHD_DEBUG
if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
tx_packets[PKTPRIO(pkt)]++;
* referred to in sdioh_request_buffer(). The tail length will be excluded in
* dhdsdio_txpkt_postprocess().
*/
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (bus->dhd->conf->txglom_bucket_size)
+ tail_padding = 0;
+#endif
*(uint16*)frame = (uint16)htol16(pkt_len);
*(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
pkt_len += tail_padding;
if (bus->txglom_enable) {
uint32 hwheader1;
uint32 hwheader2;
-
- swhdr_offset += SDPCM_HWEXT_LEN;
- hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
- (last_chained_pkt << 24);
- hwheader2 = (tail_padding) << 16;
- htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
- htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+#ifdef BCMSDIOH_TXGLOM_EXT
+ uint32 act_len = pkt_len - tail_padding;
+ uint32 real_pad = 0;
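+	/* txglom_ext mode: every non-last frame is padded out to the configured bucket
+	 * size (the first one minus TXGLOM_RECV_OFFSET); the pad length (pkt_len - act_len)
+	 * is carried in the upper 16 bits of the second HW extension word.
+	 */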
+	if (bus->dhd->conf->txglom_ext && !last_chained_pkt) {
+		tail_padding = 0;
+		if (first_frame == 0) {
+			/* first pkt: pad up to bucket size minus the receive offset */
+			pkt_len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET;
+		} else {
+			/* pad up to the bucket size */
+			pkt_len = bus->dhd->conf->txglom_bucket_size;
+		}
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (last_chained_pkt << 24);
+ hwheader2 = (pkt_len - act_len) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ real_pad = pkt_len - act_len;
+
+ if (PKTTAILROOM(osh, pkt) < real_pad) {
+ DHD_INFO(("%s : insufficient tailroom %d for %d real_pad\n",
+ __func__, (int)PKTTAILROOM(osh, pkt), real_pad));
+ if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+ DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
+ } else
+ frame = (uint8 *)PKTDATA(osh, pkt);
+ }
+ } else
+#endif
+ {
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+ (last_chained_pkt << 24);
+ hwheader2 = (tail_padding) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ }
}
PKTSETLEN((osh), (pkt), (pkt_len));
(void)osh;
osh = bus->dhd->osh;
- /* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */
- frame = (uint8*)PKTDATA(osh, pkt);
+ /* restore pkt buffer pointer, but keeps the header pushed by dhd_prot_hdrpush */
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ DHD_INFO(("%s PKTLEN before postprocess %d",
+ __FUNCTION__, PKTLEN(osh, pkt)));
+
+ /* PKTLEN still includes tail_padding, so exclude it.
+ * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+ */
+ if (bus->txglom_enable) {
+ /* txglom pkts have tail_padding length in HW ext header */
+ tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+ PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+ DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+ tail_padding, PKTLEN(osh, pkt)));
+ } else {
+ /* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+ * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+ * have the field for the total length of the chain.
+ */
+ PKTSETLEN(osh, pkt, *(uint16*)frame);
+ DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+ *(uint16*)frame, PKTLEN(osh, pkt)));
+ }
+
+ data_offset = ltoh32_ua(frame + swhdr_offset);
+ data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+ /* Get rid of sdpcm header + head_padding */
+ PKTPULL(osh, pkt, data_offset);
+
+ DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+ __FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+ return BCME_OK;
+}
+
+#if defined(SWTXGLOM)
+static int
+dhd_bcmsdh_send_swtxglom_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry)
+{
+ int ret;
+ int i = 0;
+ int retries = 0;
+ bcmsdh_info_t *sdh;
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ sdh = bus->sdh;
+ do {
+ ret = bcmsdh_send_swtxglom_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+ pkt, complete, handle);
+
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
+
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+ bus->f1regdata++;
+ bus->dhd->tx_errors++;
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
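+			/* poll the write-frame byte count registers (WFRAMEBCHI/LO) until they read zero */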
+ for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ if (ret == 0) {
+#ifdef BCMSDIOH_TXGLOM
+ if (bus->txglom_enable) {
+ bus->tx_seq = (bus->tx_seq + bus->txglom_cnt) % SDPCM_SEQUENCE_WRAP;
+ } else
+#endif
+ {
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ }
+ }
+ } while ((ret < 0) && retrydata && ++retries < max_retry);
+
+ return ret;
+}
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int
+dhdsdio_txpkt_swtxglom(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt, bool queue_only)
+{
+ int ret;
+ osl_t *osh;
+ uint8 *frame;
+ uint16 len, pad1 = 0, act_len = 0;
+ uint32 swheader;
+ uint32 real_pad = 0;
+ bcmsdh_info_t *sdh;
+ void *new;
+ int pkt_cnt;
+#ifdef BCMSDIOH_TXGLOM
+ uint8 *frame_tmp;
+#endif
+#ifdef WLMEDIA_HTSF
+ char *p;
+ htsfts_t *htsf_ts;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ sdh = bus->sdh;
+ osh = bus->dhd->osh;
+
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+
+ /* Add space for the header */
+ PKTPUSH(osh, pkt, SDPCM_HDRLEN_TXGLOM);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+
+ if (bus->dhd->dongle_reset) {
+ ret = BCME_NOTREADY;
+ goto done;
+ }
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+#ifdef WLMEDIA_HTSF
+ if (PKTLEN(osh, pkt) >= 100) {
+ p = PKTDATA(osh, pkt);
+ htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->c20 = get_cycles();
+ htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+ }
+ }
+#endif /* WLMEDIA_HTSF */
+
+#ifdef PKT_STATICS
+ len = (uint16)PKTLEN(osh, pkt);
+ switch(chan) {
+ case SDPCM_CONTROL_CHANNEL:
+ tx_statics.ctrl_count++;
+ tx_statics.ctrl_size += len;
+ break;
+ case SDPCM_DATA_CHANNEL:
+ tx_statics.data_count++;
+ tx_statics.data_size += len;
+ break;
+ case SDPCM_GLOM_CHANNEL:
+ tx_statics.glom_count++;
+ tx_statics.glom_size += len;
+ break;
+ case SDPCM_EVENT_CHANNEL:
+ tx_statics.event_count++;
+ tx_statics.event_size += len;
+ break;
+ case SDPCM_TEST_CHANNEL:
+ tx_statics.test_count++;
+ tx_statics.test_size += len;
+ break;
+
+ default:
+ break;
+ }
+#endif /* PKT_STATICS */
+
+ /* Add alignment padding, allocate new packet if needed */
+ if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
+ if (PKTHEADROOM(osh, pkt) < pad1) {
+ DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
+ __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
+ bus->dhd->tx_realloc++;
+ new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
+ if (!new) {
+ DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
+ __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+
+ PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+ /* free the pkt if canned one is not used */
+ free_pkt = TRUE;
+ pkt = new;
+ frame = (uint8*)PKTDATA(osh, pkt);
+ ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
+ pad1 = 0;
+ } else {
+ PKTPUSH(osh, pkt, pad1);
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ ASSERT((pad1 + SDPCM_HDRLEN_TXGLOM) <= (int) PKTLEN(osh, pkt));
+ bzero(frame, pad1 + SDPCM_HDRLEN_TXGLOM);
+ }
+ }
+ ASSERT(pad1 < DHD_SDALIGN);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (uint16)PKTLEN(osh, pkt);
+ *(uint16*)frame = htol16(len);
+ *(((uint16*)frame) + 1) = htol16(~len);
+
+#ifdef BCMSDIOH_TXGLOM
+ if (bus->txglom_enable) {
+ uint32 hwheader1 = 0, hwheader2 = 0;
+ act_len = len;
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) |
+ ((bus->tx_seq + bus->txglom_cnt) % SDPCM_SEQUENCE_WRAP) |
+ (((pad1 + SDPCM_HDRLEN_TXGLOM) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + sizeof(swheader));
+
+ if (queue_only) {
+ if (bus->dhd->conf->txglom_ext) {
+				if (bus->txglom_cnt == 0) {
+					/* first pkt: pad up to bucket size minus the receive offset */
+					len = bus->dhd->conf->txglom_bucket_size - TXGLOM_RECV_OFFSET;
+				} else {
+					/* pad up to the bucket size */
+					len = bus->dhd->conf->txglom_bucket_size;
+				}
+ } else {
+ uint8 alignment = ALIGNMENT;
+ if (forcealign && (len & (alignment - 1)))
+ len = ROUNDUP(len, alignment);
+ }
+			/* Hardware extension tag */
+			/* 2-byte frame length, 1byte-, 1-byte frame flag,
+			 * 2-byte hdr length, 2-byte pad length
+			 */
+ hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (0 << 24);
+ hwheader2 = (len - act_len) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ real_pad = len - act_len;
+ if (PKTTAILROOM(osh, pkt) < real_pad) {
+ DHD_INFO(("%s 1: insufficient tailroom %d for %d real_pad\n",
+ __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
+ if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+ DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+#ifndef BCMLXSDMMC
+ else
+ PKTSETLEN(osh, pkt, act_len);
+#endif
+ }
+#ifdef BCMLXSDMMC
+ PKTSETLEN(osh, pkt, len);
+#endif /* BCMLXSDMMC */
+ /* Post the frame pointer to sdio glom array */
+ bcmsdh_glom_post(bus->sdh, frame, pkt, len);
+ /* Save the pkt pointer in bus glom array */
+ bus->glom_pkt_arr[bus->txglom_cnt] = pkt;
+ bus->txglom_total_len += len;
+ bus->txglom_cnt++;
+ return BCME_OK;
+ } else {
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize &&
+ ((bus->txglom_total_len + len) > bus->blocksize)) {
+ uint16 pad2 = bus->blocksize -
+ ((bus->txglom_total_len + len) % bus->blocksize);
+ if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize)) {
+ len += pad2;
+ } else {
+ }
+ } else if ((bus->txglom_total_len + len) % DHD_SDALIGN) {
+ len += DHD_SDALIGN
+ - ((bus->txglom_total_len + len) % DHD_SDALIGN);
+ }
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+ len = ROUNDUP(len, ALIGNMENT);
+ }
+
+			/* Hardware extension tag */
+			/* 2-byte frame length, 1byte-, 1-byte frame flag,
+			 * 2-byte hdr length, 2-byte pad length
+			 */
+ if (bus->dhd->conf->txglom_ext) {
+				/* copy mode: pad2 for the last packet is set to 0; the padding will be dropped by HW */
+ hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24);
+ hwheader2 = 0;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ } else {
+ hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24);
+ hwheader2 = (len - act_len) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
+ }
+ real_pad = len - act_len;
+ if (PKTTAILROOM(osh, pkt) < real_pad) {
+ DHD_INFO(("%s 2: insufficient tailroom %d"
+ " for %d real_pad\n",
+ __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
+ if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+ DHD_ERROR(("CHK2: padding error size %d."
+ " %d more pkts are discarded together.\n",
+ real_pad, bus->txglom_cnt));
+ /* Save the pkt pointer in bus glom array
+ * Otherwise, this last pkt will not be
+ * cleaned under "goto done"
+ */
+ bus->glom_pkt_arr[bus->txglom_cnt] = pkt;
+ bus->txglom_cnt++;
+ bus->txglom_total_len += len;
+ ret = BCME_NOMEM;
+ goto done;
+ }
+#ifndef BCMLXSDMMC
+ else
+ PKTSETLEN(osh, pkt, act_len);
+#endif
+ }
+#ifdef BCMLXSDMMC
+ PKTSETLEN(osh, pkt, len);
+#endif /* BCMLXSDMMC */
+
+ /* Post the frame pointer to sdio glom array */
+ bcmsdh_glom_post(bus->sdh, frame, pkt, len);
+ /* Save the pkt pointer in bus glom array */
+ bus->glom_pkt_arr[bus->txglom_cnt] = pkt;
+ bus->txglom_cnt++;
+ if (bus->dhd->conf->txglom_ext)
+				/* copy mode: the last buffer's padding does not need to be added to len */
+ bus->txglom_total_len += act_len;
+ else
+ bus->txglom_total_len += len;
+
+ /* Update the total length on the first pkt */
+ frame_tmp = (uint8*)PKTDATA(osh, bus->glom_pkt_arr[0]);
+ *(uint16*)frame_tmp = htol16(bus->txglom_total_len);
+ *(((uint16*)frame_tmp) + 1) = htol16(~bus->txglom_total_len);
+ }
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ act_len = len;
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad1 + SDPCM_HDRLEN_TXGLOM) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
+ tx_packets[PKTPRIO(pkt)]++;
+ }
+ if (DHD_BYTES_ON() &&
+ (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ prhex("Tx Frame", frame, len);
+ } else if (DHD_HDRS_ON()) {
+ prhex("TxHdr", frame, MIN(len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ uint16 pad2 = bus->blocksize - (len % bus->blocksize);
+ if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
+#ifdef NOTUSED
+ if (pad2 <= PKTTAILROOM(osh, pkt))
+#endif /* NOTUSED */
+ len += pad2;
+ } else if (len % DHD_SDALIGN) {
+ len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ }
+
+ /* Some controllers have trouble with odd bytes -- round to even */
+ if (forcealign && (len & (ALIGNMENT - 1))) {
+#ifdef NOTUSED
+ if (PKTTAILROOM(osh, pkt))
+#endif
+ len = ROUNDUP(len, ALIGNMENT);
+#ifdef NOTUSED
+ else
+ DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
+#endif
+ }
+ real_pad = len - act_len;
+ if (PKTTAILROOM(osh, pkt) < real_pad) {
+ DHD_INFO(("%s 3: insufficient tailroom %d for %d real_pad\n",
+ __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
+ if (PKTPADTAILROOM(osh, pkt, real_pad)) {
+ DHD_ERROR(("CHK3: padding error size %d\n", real_pad));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+#ifndef BCMLXSDMMC
+ else
+ PKTSETLEN(osh, pkt, act_len);
+#endif
+ }
+#ifdef BCMLXSDMMC
+ PKTSETLEN(osh, pkt, len);
+#endif /* BCMLXSDMMC */
+ }
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
+ tx_packets[PKTPRIO(pkt)]++;
+ }
+#endif
+ ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, pkt, NULL, NULL, TXRETRIES);
+
+done:
+
+#ifdef BCMSDIOH_TXGLOM
+ if (bus->txglom_enable && !queue_only) {
+ bcmsdh_glom_clear(bus->sdh);
+ pkt_cnt = bus->txglom_cnt;
+ } else
+#endif
+ {
+ pkt_cnt = 1;
+ }
+ /* restore pkt buffer pointer before calling tx complete routine */
+ while (pkt_cnt) {
+#ifdef BCMSDIOH_TXGLOM
+ uint32 doff;
+ if (bus->txglom_enable) {
+#ifdef BCMLXSDMMC
+ uint32 pad2 = 0;
+#endif /* BCMLXSDMMC */
+ if (!queue_only)
+ pkt = bus->glom_pkt_arr[bus->txglom_cnt - pkt_cnt];
+
+ frame = (uint8*)PKTDATA(osh, pkt);
+ doff = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
+ doff = (doff & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+#ifdef BCMLXSDMMC
+ pad2 = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+ PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - pad2);
+#endif /* BCMLXSDMMC */
+ PKTPULL(osh, pkt, doff);
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+#ifdef BCMLXSDMMC
+ if (act_len > 0)
+ PKTSETLEN(osh, pkt, act_len);
+#endif /* BCMLXSDMMC */
+ PKTPULL(osh, pkt, SDPCM_HDRLEN_TXGLOM + pad1);
+ }
+#ifdef PROP_TXSTATUS
+ if (bus->dhd->wlfc_state) {
+ dhd_os_sdunlock(bus->dhd);
+ dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
+ dhd_os_sdlock(bus->dhd);
+ } else {
+#endif /* PROP_TXSTATUS */
+#ifdef SDTEST
+ if (chan != SDPCM_TEST_CHANNEL) {
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ }
+#else /* SDTEST */
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+#endif /* SDTEST */
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
+#ifdef PROP_TXSTATUS
+ }
+#endif
+ pkt_cnt--;
+ }
+
+#ifdef BCMSDIOH_TXGLOM
+ /* Reset the glom array */
+ if (bus->txglom_enable && !queue_only) {
+ bus->txglom_cnt = 0;
+ bus->txglom_total_len = 0;
+ }
+#endif
+ return ret;
+}
+
+static uint
+dhdsdio_sendfromq_swtxglom(dhd_bus_t *bus, uint maxframes)
+{
+ void *pkt;
+ uint32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ uint8 tx_prec_map;
+ uint16 txpktqlen = 0;
+#ifdef BCMSDIOH_TXGLOM
+ uint i;
+ uint8 txglom_cnt;
+#endif
+
+ dhd_pub_t *dhd = bus->dhd;
+ sdpcmd_regs_t *regs = bus->regs;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!KSO_ENAB(bus)) {
+ DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
+ return BCME_NODEVICE;
+ }
+
+ tx_prec_map = ~bus->flowcontrol;
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
+#ifdef BCMSDIOH_TXGLOM
+ if (bus->txglom_enable) {
+ void *pkttable[SDPCM_MAXGLOM_SIZE];
+ dhd_os_sdlock_txq(bus->dhd);
+ txglom_cnt = MIN(DATABUFCNT(bus), bus->txglomsize);
+ txglom_cnt = MIN(txglom_cnt, pktq_mlen(&bus->txq, tx_prec_map));
+ txglom_cnt = MIN(txglom_cnt, maxframes-cnt);
+
+			/* Limit the glom size: up to SDPCM_MAXGLOM_SIZE in copy (txglom_ext) mode, 10 pkts otherwise */
+ if (bus->dhd->conf->txglom_ext)
+ txglom_cnt = MIN(txglom_cnt, SDPCM_MAXGLOM_SIZE);
+ else
+ txglom_cnt = MIN(txglom_cnt, 10);
+
+ for (i = 0; i < txglom_cnt; i++)
+ pkttable[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+
+ txpktqlen = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ if (txglom_cnt == 0)
+ break;
+ datalen = 0;
+
+#ifdef PKT_STATICS
+ if (txglom_cnt < 2)
+ tx_statics.glom_1_count++;
+ else if (txglom_cnt < 3)
+ tx_statics.glom_3_count++;
+ else if (txglom_cnt < 8)
+ tx_statics.glom_3_8_count++;
+ else
+ tx_statics.glom_8_count++;
+ if (txglom_cnt > tx_statics.glom_max)
+ tx_statics.glom_max = txglom_cnt;
+#endif
+ for (i = 0; i < txglom_cnt; i++) {
+ uint datalen_tmp = 0;
+
+ if ((pkt = pkttable[i]) == NULL) {
+ /* This case should not happen */
+ DHD_ERROR(("No pkts in the queue for glomming\n"));
+ break;
+ }
+
+ datalen_tmp = (PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN_TXGLOM);
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt_swtxglom(bus,
+ pkt,
+ SDPCM_DATA_CHANNEL,
+ TRUE,
+ (i == (txglom_cnt-1))? FALSE: TRUE);
+#else
+ ret = dhdsdio_txpkt_swtxglom(bus,
+ pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
+ TRUE,
+ (i == (txglom_cnt-1))? FALSE: TRUE);
+#endif
+ if (ret == BCME_OK)
+ datalen += datalen_tmp;
+ }
+ cnt += i-1;
+ } else
+#endif /* BCMSDIOH_TXGLOM */
+ {
+ dhd_os_sdlock_txq(bus->dhd);
+ if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
+ txpktqlen = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ break;
+ }
+ txpktqlen = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN_TXGLOM;
+
+#ifndef SDTEST
+ ret = dhdsdio_txpkt_swtxglom(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE);
+#else
+ ret = dhdsdio_txpkt_swtxglom(bus,
+ pkt,
+ (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
+ TRUE,
+ FALSE);
+#endif
+ }
- DHD_INFO(("%s PKTLEN before postprocess %d",
- __FUNCTION__, PKTLEN(osh, pkt)));
+ if (ret)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
- /* PKTLEN still includes tail_padding, so exclude it.
- * We shall have head_padding + original pkt_len for PKTLEN afterwards.
- */
- if (bus->txglom_enable) {
- /* txglom pkts have tail_padding length in HW ext header */
- tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
- PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
- DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
- tail_padding, PKTLEN(osh, pkt)));
- } else {
- /* non-txglom pkts have head_padding + original pkt length in HW frame tag.
- * We cannot refer to this field for txglom pkts as the first pkt of the chain will
- * have the field for the total length of the chain.
- */
- PKTSETLEN(osh, pkt, *(uint16*)frame);
- DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
- *(uint16*)frame, PKTLEN(osh, pkt)));
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt)
+ {
+ /* Check device status, signal pending interrupt */
			R_SDREG(intstatus, &regs->intstatus, retries);
+ bus->f2txdata++;
+ if (bcmsdh_regfail(bus->sdh))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = TRUE;
+ }
}
- data_offset = ltoh32_ua(frame + swhdr_offset);
- data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
- /* Get rid of sdpcm header + head_padding */
- PKTPULL(osh, pkt, data_offset);
-
- DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
- __FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+ /* Deflow-control stack if needed */
+ if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
+ dhd->txoff && (txpktqlen < FCLOW))
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
- return BCME_OK;
+ return cnt;
}
+#endif
static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
{
ASSERT(pkt);
last_pkt = (i == num_pkt - 1);
pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
- total_len, last_pkt, &pad_pkt_len, &new_pkt);
+ total_len, last_pkt, &pad_pkt_len, &new_pkt
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ , i
+#endif
+ );
if (pkt_len <= 0)
goto done;
if (new_pkt) {
* so it will take the aligned length and buffer pointer.
*/
pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+#if defined(SWTXGLOM)
+ if (bus->dhd->conf->swtxglom)
+ ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+ else
+#endif
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
if (ret == BCME_OK)
osh = dhd->osh;
tx_prec_map = ~bus->flowcontrol;
+#ifdef DHD_LOSSLESS_ROAMING
+ tx_prec_map &= dhd->dequeue_prec_map;
+#endif
for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
int i;
int num_pkt = 1;
dhd_os_sdlock_txq(bus->dhd);
if (bus->txglom_enable) {
- num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize);
+ uint32 glomlimit = (uint32)bus->txglomsize;
+#if defined(BCMSDIOH_STD)
+ if (bus->blocksize == 64) {
+ glomlimit = MIN((uint32)bus->txglomsize, BLK_64_MAXTXGLOM);
+ }
+#endif /* BCMSDIOH_STD */
+ num_pkt = MIN((uint32)DATABUFCNT(bus), glomlimit);
num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
}
num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
for (i = 0; i < num_pkt; i++) {
- pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out);
+ pkts[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
if (!pkts[i]) {
DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
__FUNCTION__));
bus->txglompkts += num_pkt;
}
cnt += i;
+#ifdef PKT_STATICS
+ if (num_pkt < 2)
+ tx_statics.glom_1_count++;
+ else if (num_pkt < 3)
+ tx_statics.glom_3_count++;
+ else if (num_pkt < 8)
+ tx_statics.glom_3_8_count++;
+ else
+ tx_statics.glom_8_count++;
+ if (num_pkt > tx_statics.glom_max)
+ tx_statics.glom_max = num_pkt;
+#endif
/* In poll mode, need to check for other events */
if (!bus->intr && cnt)
*frame_seq = bus->tx_seq;
}
+#if defined(SWTXGLOM)
+ if (bus->dhd->conf->swtxglom)
+ ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ (uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
+ NULL, NULL, NULL, 1);
+ else
+#endif
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
NULL, NULL, NULL, 1);
} else {
bus->dhd->txcnt_timeout++;
if (!bus->dhd->hang_was_sent) {
+#ifdef CUSTOMER_HW4_DEBUG
+ uint32 status, retry = 0;
+ R_SDREG(status, &bus->regs->intstatus, retry);
+ DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n",
+ __FUNCTION__, status));
+ DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate));
+#endif /* CUSTOMER_HW4_DEBUG */
DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
__FUNCTION__, bus->dhd->txcnt_timeout));
}
} else if (DHD_HDRS_ON()) {
prhex("TxHdr", frame, MIN(len, 16));
}
+#endif
+#ifdef PKT_STATICS
+ tx_statics.ctrl_count++;
+ tx_statics.ctrl_size += len;
+#endif
+#if defined(SWTXGLOM)
+ if (bus->dhd->conf->swtxglom)
+ ret = dhd_bcmsdh_send_swtxglom_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL, TXRETRIES);
+ else
#endif
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
frame, len, NULL, NULL, NULL, TXRETRIES);
{
int timeleft;
uint rxlen = 0;
- bool pending;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
return -EIO;
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
#else
DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
#endif /* DHD_DEBUG */
-#ifdef DHD_DEBUG
- dhd_os_sdlock(bus->dhd);
- dhdsdio_checkdied(bus, NULL, 0);
- dhd_os_sdunlock(bus->dhd);
-#endif /* DHD_DEBUG */
- } else if (pending == TRUE) {
- /* signal pending */
- DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
- return -EINTR;
-
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
} else {
DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
-#ifdef DHD_DEBUG
dhd_os_sdlock(bus->dhd);
dhdsdio_checkdied(bus, NULL, 0);
dhd_os_sdunlock(bus->dhd);
-#endif /* DHD_DEBUG */
}
if (timeleft == 0) {
if (rxlen == 0)
return bcmerror;
}
-#ifdef DHD_DEBUG
static int
dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
{
int rv, i;
uint32 shaddr = 0;
+ if (bus->sih == NULL) {
+ if (bus->dhd && bus->dhd->dongle_reset) {
+ DHD_ERROR(("%s: Dongle is in reset state\n", __FUNCTION__));
+ return BCME_NOTREADY;
+ } else {
+ ASSERT(bus->dhd);
+ ASSERT(bus->sih);
+ DHD_ERROR(("%s: The address of sih is invalid\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus))
bus->srmemsize = 0;
#define CONSOLE_LINE_MAX 192
+#ifdef DHD_DEBUG
static int
dhdsdio_readconsole(dhd_bus_t *bus)
{
return BCME_OK;
}
+#endif /* DHD_DEBUG */
static int
dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size)
DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
}
+#if defined(DHD_FW_COREDUMP)
+ if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
+ /* Mem dump to a file on device */
+ dhdsdio_mem_dump(bus);
+ }
+#endif /* #if defined(DHD_FW_COREDUMP) */
done:
if (mbuffer)
return bcmerror;
}
-#endif /* #ifdef DHD_DEBUG */
+#if defined(DHD_FW_COREDUMP)
+static int
+dhdsdio_mem_dump(dhd_bus_t *bus)
+{
+ int ret = 0;
+ int size; /* Full mem size */
+ int start = bus->dongle_ram_base; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *buf = NULL, *databuf = NULL;
+
+ /* Get full mem size */
+ size = bus->ramsize;
+ buf = MALLOC(bus->dhd->osh, size);
+ if (!buf) {
+ printf("%s: Out of memory (%d bytes)\n", __FUNCTION__, size);
+ return -1;
+ }
+
+ /* Read mem content */
+ printf("Dump dongle memory");
+ databuf = buf;
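+	/* Read the dongle RAM in MEMBLOCK-sized chunks via backplane accesses */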
+ while (size)
+ {
+ read_size = MIN(MEMBLOCK, size);
+ if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
+ {
+ printf("%s: Error membytes %d\n", __FUNCTION__, ret);
+ if (buf) {
+ MFREE(bus->dhd->osh, buf, size);
+ }
+ return -1;
+ }
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
+ }
+ printf("Done\n");
+
+ dhd_save_fwdump(bus->dhd, buf, bus->ramsize);
+ /* free buf before return !!! */
+	/* buf must be freed before return; write_to_file() takes care of that */
+ {
+ printf("%s: Error writing to files\n", __FUNCTION__);
+ return -1;
+ }
+
+ /* buf free handled in write_to_file, not here */
+ return 0;
+}
+#endif /* DHD_FW_COREDUMP */
+
+int
+dhd_socram_dump(dhd_bus_t * bus)
+{
+#if defined(DHD_FW_COREDUMP)
+ return (dhdsdio_mem_dump(bus));
+#else
+ return -1;
+#endif
+}
int
dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
bcmerror = dhdsdio_membytes(bus, TRUE, 0,
(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+ if (bcmerror == BCME_OK) {
+ uint32 tmp;
+
+ /* verify write */
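+		/* read back address 0 and confirm it still holds the reset instruction just written */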
+ bcmerror = dhdsdio_membytes(bus, FALSE, 0,
+ (uint8 *)&tmp, sizeof(tmp));
+
+ if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
+ DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
+ __FUNCTION__, bus->resetinstr));
+ DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
+ __FUNCTION__, tmp));
+ bcmerror = BCME_SDIO_ERROR;
+ goto fail;
+ }
+ }
+
/* now remove reset and halt and continue to run CR4 */
}
BUS_WAKE(bus);
- /* Change our idea of bus state */
- bus->dhd->busstate = DHD_BUS_DOWN;
-
if (KSO_ENAB(bus)) {
/* Enable clock for device interrupts */
/* Turn off the bus (F2), free any pending packets */
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
-#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_intr_disable(bus->sdh);
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
/* Clear any pending interrupts now that F2 is disabled */
/* Turn off the backplane clock (only) */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
+
+ /* Change our idea of bus state */
+ bus->dhd->busstate = DHD_BUS_DOWN;
}
#ifdef PROP_TXSTATUS
#endif /* BCMSDIOH_TXGLOM */
bus->txglom_enable = FALSE;
printf("%s: enable %d\n", __FUNCTION__, bus->txglom_enable);
+ dhd_conf_set_txglom_params(bus->dhd, bus->txglom_enable);
}
int
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
+ /* Need to set fn2 block size to match fn1 block size.
+	 * Requests to fn2 go through fn1.
+	 * faltwig has this code conditioned with #if !BCMSPI_ANDROID.
+ * It would be cleaner to use the ->sdh->block_sz[fno] instead of
+ * 64, but this layer has no access to sdh types.
+ */
+
/* bcmsdh_intr_unmask(bus->sdh); */
bus->intdis = FALSE;
dhdsdio_sendpendctl(bus);
} else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
!bus->fcstate && DATAOK(bus) &&
- (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
+ (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres) &&
+ bus->dhd->conf->tx_in_rx) {
+#if defined(SWTXGLOM)
+ if (bus->dhd->conf->swtxglom)
+ dhdsdio_sendfromq_swtxglom(bus, dhd_txbound);
+ else
+#endif
dhdsdio_sendfromq(bus, dhd_txbound);
#ifdef DHDTCPACK_SUPPRESS
/* In TCPACK_SUP_DELAYTX mode, do txinrx only if
bus->flowcontrol = fcbits;
}
-#ifdef DHD_DEBUG
/* At least print a message if FW halted */
if (hmb_data & HMB_DATA_FWHALT) {
DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
dhdsdio_checkdied(bus, NULL, 0);
bus->dhd->busstate = DHD_BUS_DOWN;
}
-#endif /* DHD_DEBUG */
/* Shouldn't be any others */
if (hmb_data & ~(HMB_DATA_DEVREADY |
uint framecnt = 0; /* Temporary counter of tx/rx frames */
bool rxdone = TRUE; /* Flag for no more read data */
bool resched = FALSE; /* Flag indicating resched wanted */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ bool is_resched_by_readframe = FALSE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhd_os_sdlock(bus->dhd);
#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(bus->sdh, TRUE);
#endif /* defined(OOB_INTR_ONLY) */
-#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_intr_enable(sdh);
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit && DATAOK(bus)) {
framecnt = rxdone ? txlimit : MIN(txlimit, dhd_txminmax);
+#if defined(SWTXGLOM)
+ if (bus->dhd->conf->swtxglom)
+ framecnt = dhdsdio_sendfromq_swtxglom(bus, framecnt);
+ else
+#endif
framecnt = dhdsdio_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
/* Resched the DPC if ctrl cmd is pending on bus credit */
- if (bus->ctrl_frame_stat)
+ if (bus->ctrl_frame_stat) {
+ if (bus->dhd->conf->txctl_tmo_fix) {
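+			/* yield for one jiffy so tx credit can be returned before the DPC runs again */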
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!kthread_should_stop())
+ schedule_timeout(1);
+ set_current_state(TASK_RUNNING);
+ }
resched = TRUE;
+ }
/* Resched if events or tx frames are pending, else await next interrupt */
/* On failed register access, all bets are off: no resched or interrupts */
exit:
if (!resched && dhd_dpcpoll) {
- if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0)
+ if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) {
resched = TRUE;
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ is_resched_by_readframe = TRUE;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ }
}
dhd_os_sdunlock(bus->dhd);
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ if (bus->dhd->dhd_bug_on) {
+ DHD_INFO(("%s: resched = %d ctrl_frame_stat = %d intstatus 0x%08x"
+ " ipend = %d pktq_mlen = %d is_resched_by_readframe = %d \n",
+ __FUNCTION__, resched, bus->ctrl_frame_stat,
+ bus->intstatus, bus->ipend,
+ pktq_mlen(&bus->txq, ~bus->flowcontrol), is_resched_by_readframe));
+
+ bus->dhd->dhd_bug_on = FALSE;
+ }
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
return resched;
}
DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
}
-#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_intr_disable(sdh);
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
bus->intdis = TRUE;
#if defined(SDIO_ISR_THREAD)
}
DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
-
-#if !defined(NDISVER) || (NDISVER < 0x0630)
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
#endif /* defined(SDIO_ISR_THREAD) */
}
+#ifdef PKT_STATICS
+void dhdsdio_txpktstatics(void)
+{
+ uint total, f1, f2, f3, f4;
+	printf("Randy: TYPE EVENT: %d pkts (size=%d) transferred\n", tx_statics.event_count, tx_statics.event_size);
+	printf("Randy: TYPE CTRL: %d pkts (size=%d) transferred\n", tx_statics.ctrl_count, tx_statics.ctrl_size);
+	printf("Randy: TYPE DATA: %d pkts (size=%d) transferred\n", tx_statics.data_count, tx_statics.data_size);
+	if (tx_statics.glom_1_count || tx_statics.glom_3_count || tx_statics.glom_3_8_count || tx_statics.glom_8_count) {
+		total = tx_statics.glom_1_count + tx_statics.glom_3_count + tx_statics.glom_3_8_count + tx_statics.glom_8_count;
+		f1 = (tx_statics.glom_1_count*100) / total;
+		f2 = (tx_statics.glom_3_count*100) / total;
+		f3 = (tx_statics.glom_3_8_count*100) / total;
+		f4 = (tx_statics.glom_8_count*100) / total;
+		printf("Randy: glomsize==1: %d(%d), glomsize==2: %d(%d), pkts 3<=glomsize<8: %d(%d), pkts glomsize>=8: %d(%d)\n",
+			tx_statics.glom_1_count, f1, tx_statics.glom_3_count, f2, tx_statics.glom_3_8_count, f3, tx_statics.glom_8_count, f4);
+		printf("Randy: data/glom=%d, glom_max=%d\n", tx_statics.data_count/total, tx_statics.glom_max);
+	}
+	printf("Randy: TYPE RX GLOM: %d pkts (size=%d) transferred\n", tx_statics.glom_count, tx_statics.glom_size);
+	printf("Randy: TYPE TEST: %d pkts (size=%d) transferred\n\n\n", tx_statics.test_count, tx_statics.test_size);
+}
+#endif
+
#ifdef SDTEST
static void
dhdsdio_pktgen_init(dhd_bus_t *bus)
bus->lastintrs = bus->intrcount;
}
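+	/* Schedule the DPC if tx packets are queued but no DPC run is currently pending */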
+ if ((!bus->dpc_sched) && pktq_len(&bus->txq)) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+
#ifdef DHD_DEBUG
/* Poll for console output periodically */
if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
DHD_TIMER(("%s: DHD Idle state!!\n", __FUNCTION__));
- if (SLPAUTO_ENAB(bus)) {
+ if (!bus->poll && SLPAUTO_ENAB(bus)) {
if (dhdsdio_bussleep(bus, TRUE) != BCME_BUSY)
dhd_os_wd_timer(bus->dhd, 0);
} else
bus->idlecount = 0;
if (bus->activity) {
bus->activity = FALSE;
- if (SLPAUTO_ENAB(bus)) {
+ if (!bus->poll && SLPAUTO_ENAB(bus)) {
if (!bus->readframes)
dhdsdio_bussleep(bus, TRUE);
else
return TRUE;
if (chipid == BCM43349_CHIP_ID)
return TRUE;
- if (chipid == BCM4345_CHIP_ID)
+ if (chipid == BCM4345_CHIP_ID || chipid == BCM43454_CHIP_ID)
return TRUE;
if (chipid == BCM4350_CHIP_ID)
return TRUE;
{
int ret;
dhd_bus_t *bus;
+#ifdef GET_OTP_MAC_ENABLE
struct ether_addr ea_addr;
+#endif
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
sd1idle = TRUE;
dhd_readahead = TRUE;
retrydata = FALSE;
-#if !defined(PLATFORM_MPS)
+
+#ifdef DISABLE_FLOW_CONTROL
dhd_doflow = FALSE;
-#else
- dhd_doflow = TRUE;
-#endif /* OEM_ANDROID */
+#endif /* DISABLE_FLOW_CONTROL */
dhd_dongle_ramsize = 0;
dhd_txminmax = DHD_TXMINMAX;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
+#if defined(SUPPORT_P2P_GO_PS)
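+	/* waiters (e.g. the suspend path) block on bus_sleep until the bus has actually entered sleep */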
+ init_waitqueue_head(&bus->bus_sleep);
+#endif /* SUPPORT_P2P_GO_PS */
+
/* attempt to attach to the dongle */
if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
goto fail;
}
}
+ else {
+ /* Set random MAC address during boot time */
+ get_random_bytes(&bus->dhd->mac.octet[3], 3);
+ /* Adding BRCM OUI */
+ bus->dhd->mac.octet[0] = 0;
+ bus->dhd->mac.octet[1] = 0x90;
+ bus->dhd->mac.octet[2] = 0x4C;
+ }
#endif
#ifdef GET_OTP_MAC_ENABLE
goto fail;
}
+#ifdef BCMHOST_XTAL_PU_TIME_MOD
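+	/* adjust the crystal power-up time via direct backplane register writes (chip-specific values) */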
+ bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11);
+#ifdef BCM4330_CHIP
+ bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801);
+#else
+ bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001);
+#endif /* BCM4330_CHIP */
+#endif /* BCMHOST_XTAL_PU_TIME_MOD */
#if defined(MULTIPLE_SUPPLICANT)
wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#endif
- init_waitqueue_head(&bus->bus_sleep);
-
return bus;
fail:
return NULL;
}
-#ifdef REGON_BP_HANG_FIX
-static int dhd_sdio_backplane_reset(struct dhd_bus *bus)
-{
- uint32 temp = 0;
- DHD_ERROR(("Resetting the backplane to avoid failure in firmware download..\n"));
-
- temp = bcmsdh_reg_read(bus->sdh, 0x180021e0, 4);
- DHD_INFO(("SDIO Clk Control Reg = %x\n", temp));
-
- /* Force HT req from PMU */
- bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x6000005);
-
- /* Increase the clock stretch duration. */
- bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC8FFC8);
-
- /* Setting ALP clock request in SDIOD clock control status register */
- bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x41);
-
- /* Allowing clock from SR engine to SR memory */
- bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1);
- /* Disabling SR Engine before SR binary download. */
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0);
-
- /* Enabling clock from backplane to SR memory */
- bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf9af1);
-
- /* Initializing SR memory address register in SOCRAM */
- bcmsdh_reg_write(bus->sdh, 0x18004408, 4, 0x0);
-
- /* Downloading the SR binary */
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xc0002000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1051f080);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x80008000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x1050f080);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000604);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001604);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00001404);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a08c80);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011404);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00002000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xf8000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00011604);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010604);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000004);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010001);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010004);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00010000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x14a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x30a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x04a00000);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0x00000008);
- bcmsdh_reg_write(bus->sdh, 0x1800440c, 4, 0xfc000000);
- /* SR Binary Download complete */
-
- /* Allowing clock from SR engine to SR memory */
- bcmsdh_reg_write(bus->sdh, 0x18004400, 4, 0xf92f1);
-
- /* Turning ON SR Engine to initiate backplane reset Repeated ?? Maharana */
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x0);
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x2);
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x37);
- bcmsdh_reg_write(bus->sdh, 0x18000650, 4, 0x3);
- temp = bcmsdh_reg_read(bus->sdh, 0x18000654, 4);
- DHD_INFO(("0x18000654 = %x\n", temp));
- bcmsdh_reg_write(bus->sdh, 0x18000654, 4, 0x800037);
- OSL_DELAY(100000);
- /* Rolling back the original values for clock stretch and PMU timers */
- bcmsdh_reg_write(bus->sdh, 0x18000644, 4, 0x0);
- bcmsdh_reg_write(bus->sdh, 0x18000630, 4, 0xC800C8);
- /* Removing ALP clock request in SDIOD clock control status register */
- bcmsdh_reg_write(bus->sdh, 0x180021e0, 4, 0x40);
- OSL_DELAY(10000);
- return TRUE;
-}
-
-static int dhdsdio_sdio_hang_war(struct dhd_bus *bus)
-{
- uint32 temp = 0, temp2 = 0, counter = 0, BT_pwr_up = 0, BT_ready = 0;
- /* Removing reset of D11 Core */
- bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x3);
- bcmsdh_reg_write(bus->sdh, 0x18101800, 4, 0x0);
- bcmsdh_reg_write(bus->sdh, 0x18101408, 4, 0x1);
- /* Reading CLB XTAL BT cntrl register */
- bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0xD1);
- bcmsdh_reg_write(bus->sdh, 0x180013DA, 2, 0x12);
- bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0);
- /* Read if BT is powered up */
- temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2);
- /* Read BT_ready from WLAN wireless register */
- temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4);
- /*
- Check if the BT is powered up and ready. The duration between BT being powered up
- and BT becoming ready is the problematic window for WLAN. If we move ahead at this
- time then we may encounter a corrupted backplane later. So we wait for BT to be ready
- and then proceed after checking the health of the backplane. If the backplane shows
- indications of failure then we have to do a full reset of the backplane using SR engine
- and then proceed.
- */
- (temp & 0xF0) ? (BT_pwr_up = 1):(BT_pwr_up = 0);
- (temp2 & (1<<17)) ? (BT_ready = 1):(BT_ready = 0);
- DHD_ERROR(("WARNING: Checking if BT is ready BT_pwr_up = %x"
- "BT_ready = %x \n", BT_pwr_up, BT_ready));
- while (BT_pwr_up && !BT_ready)
- {
- OSL_DELAY(1000);
- bcmsdh_reg_write(bus->sdh, 0x180013D8, 2, 0x2D0);
- temp = bcmsdh_reg_read(bus->sdh, 0x180013DA, 2);
- temp2 = bcmsdh_reg_read(bus->sdh, 0x1800002C, 4);
- (temp & 0xF0) ? (BT_pwr_up = 1):(BT_pwr_up = 0);
- (temp2 & (1<<17)) ? (BT_ready = 1):(BT_ready = 0);
- counter++;
- if (counter == 5000)
- {
- DHD_ERROR(("WARNING: Going ahead after 5 secs with"
- "risk of failure because BT ready is not yet set\n"));
- break;
- }
- }
- DHD_ERROR(("\nWARNING: WL Proceeding BT_pwr_up = %x BT_ready = %x"
- "\n", BT_pwr_up, BT_ready));
- counter = 0;
- OSL_DELAY(10000);
- /*
- Get the information of who accessed the crucial backplane entities
- by reading read and write access registers
- */
- DHD_TRACE(("%d: Read Value @ 0x18104808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810480C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4)));
- DHD_TRACE(("%d: Read Value @ 0x18106808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810680C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4)));
- DHD_TRACE(("%d: Read Value @ 0x18107808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810780C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4)));
- DHD_TRACE(("%d: Read Value @ 0x18108808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810880C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4)));
- DHD_TRACE(("%d: Read Value @ 0x18109808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810980C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810C808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4)));
- DHD_TRACE(("%d: Read Value @ 0x1810C80C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4)));
- counter = 0;
- while ((bcmsdh_reg_read(bus->sdh, 0x18104808, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810480C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x18106808, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810680C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810780C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810880C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810980C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5) ||
- (bcmsdh_reg_read(bus->sdh, 0x1810C80C, 4) == 5))
- {
- if (++counter > 10)
- {
- DHD_ERROR(("Unable to recover the backkplane corruption"
- "..Tried %d times.. Exiting\n", counter));
- break;
- }
- OSL_DELAY(10000);
- dhd_sdio_backplane_reset(bus);
- /*
- Get the information of who accessed the crucial backplane
- entities by reading read and write access registers
- */
- DHD_ERROR(("%d: Read Value @ 0x18104808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18104808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810480C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810480C, 4)));
- DHD_ERROR(("%d: Read Value @ 0x18106808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18106808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810680C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810680C, 4)));
- DHD_ERROR(("%d: Read Value @ 0x18107808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18107808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810780C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810780C, 4)));
- DHD_ERROR(("%d: Read Value @ 0x18108808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18108808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810880C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810880C, 4)));
- DHD_ERROR(("%d: Read Value @ 0x18109808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x18109808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810980C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810980C, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810C808 = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c808, 4)));
- DHD_ERROR(("%d: Read Value @ 0x1810C80C = %x."
- "\n", __LINE__, bcmsdh_reg_read(bus->sdh, 0x1810c80C, 4)));
- }
- /* Set the WL ready to indicate BT that we are done with backplane reset */
- DHD_ERROR(("Setting up AXI_OK\n"));
- bcmsdh_reg_write(bus->sdh, 0x18000658, 4, 0x3);
- temp = bcmsdh_reg_read(bus->sdh, 0x1800065c, 4);
- temp |= 0x80000000;
- bcmsdh_reg_write(bus->sdh, 0x1800065c, 4, temp);
- return TRUE;
-}
-#endif /* REGON_BP_HANG_FIX */
static bool
dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
uint16 devid)
DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
}
-#if defined(DHD_DEBUG)
+#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG)
DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
-#endif
+#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */
/* Force PLL off until si_attach() programs PLL control regs */
bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
#endif /* DHD_DEBUG */
-#ifdef REGON_BP_HANG_FIX
- /* WAR - for 43241 B0-B1-B2. B3 onwards do not need this */
- if (((uint16)bus->sih->chip == BCM4324_CHIP_ID) && (bus->sih->chiprev < 3))
- dhdsdio_sdio_hang_war(bus);
-#endif /* REGON_BP_HANG_FIX */
bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
bus->dongle_ram_base = CR4_4360_RAM_BASE;
break;
case BCM4345_CHIP_ID:
+ case BCM43454_CHIP_ID:
bus->dongle_ram_base = (bus->sih->chiprev < 6) /* from 4345C0 */
? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
break;
case BCM4349_CHIP_GRPID:
- bus->dongle_ram_base = CR4_4349_RAM_BASE;
+ /* RAM base changed from 4349c0(revid=9) onwards */
+ bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
+ CR4_4349_RAM_BASE: CR4_4349_RAM_BASE_FROM_REV_9);
break;
default:
bus->dongle_ram_base = 0;
__FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
}
bus->use_rxchain = (bool)bus->sd_rxchain;
- if (bus->dhd->conf->use_rxchain >= 0) {
- printf("%s: set use_rxchain %d from config.txt\n", __FUNCTION__, bus->dhd->conf->use_rxchain);
- bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain;
- }
- /* Setting default Glom size */
- if (bus->dhd->conf->txglomsize >= 0) {
- printf("%s: set txglomsize %d from config.txt\n", __FUNCTION__, bus->dhd->conf->txglomsize);
- bus->txglomsize = bus->dhd->conf->txglomsize;
- }
bus->txinrx_thres = CUSTOM_TXINRX_THRES;
/* TX first in dhdsdio_readframes() */
bus->dotxinrx = TRUE;
+#ifdef PKT_STATICS
+ memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
+
return TRUE;
}
{
int ret;
+
DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
__FUNCTION__, bus->fw_path, bus->nv_path));
DHD_OS_WAKE_LOCK(bus->dhd);
/* External conf takes precedence if specified */
dhd_conf_preinit(bus->dhd);
dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
- dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path, bus->nv_path);
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path);
dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path);
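+	/* apply optional overrides from the external config file: polling mode, rx chaining and tx glomming */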
+ if (bus->dhd->conf->dhd_poll >= 0) {
+ printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
+ bus->poll = TRUE;
+ if (!bus->pollrate)
+ bus->pollrate = 1;
+ }
+ if (bus->dhd->conf->use_rxchain >= 0) {
+ printf("%s: set use_rxchain %d\n", __FUNCTION__, bus->dhd->conf->use_rxchain);
+ bus->use_rxchain = (bool)bus->dhd->conf->use_rxchain;
+ }
+ if (bus->dhd->conf->txglomsize >= 0) {
+ printf("%s: set txglomsize %d\n", __FUNCTION__, bus->dhd->conf->txglomsize);
+ bus->txglomsize = bus->dhd->conf->txglomsize;
+ }
+ bcmsdh_set_mode(sdh, bus->dhd->conf->txglom_mode);
printf("Final fw_path=%s\n", bus->fw_path);
printf("Final nv_path=%s\n", bus->nv_path);
int ret = 0;
dhd_bus_t *bus = (dhd_bus_t*)context;
+#ifdef SUPPORT_P2P_GO_PS
int wait_time = 0;
+
if (bus->idletime > 0) {
wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
}
-
+#endif /* SUPPORT_P2P_GO_PS */
ret = dhd_os_check_wakelock(bus->dhd);
+#ifdef SUPPORT_P2P_GO_PS
// terence 20141124: fix for suspend issue
- if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up)) {
+ if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up) && (bus->dhd->op_mode != DHD_FLAG_HOSTAP_MODE)) {
if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
if (!bus->sleeping) {
return 1;
}
}
}
+#endif /* SUPPORT_P2P_GO_PS */
return ret;
}
// terence 20150412: fix for firmware failed to download
if (bus->dhd->conf->chip == BCM43340_CHIP_ID ||
bus->dhd->conf->chip == BCM43341_CHIP_ID) {
- if (len%64 != 0) {
- memset(memptr+len, 0, len%64);
- len += (64 - len%64);
- }
+		if (len%64 != 0) {
+			memset(memptr+len, 0, 64 - len%64);
+			len += (64 - len%64);
+		}
}
if (len < 0) {
DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
return bcmerror;
}
-/*
- EXAMPLE: nvram_array
- nvram_arry format:
- name=value
- Use carriage return at the end of each assignment, and an empty string with
- carriage return at the end of array.
-
- For example:
- unsigned char nvram_array[] = {"name1=value1\n", "name2=value2\n", "\n"};
- Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
-
- Search "EXAMPLE: nvram_array" to see how the array is activated.
-*/
-
-void
-dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params)
-{
- bus->nvram_params = nvram_params;
-}
-
static int
dhdsdio_download_nvram(struct dhd_bus *bus)
{
pnv_path = bus->nv_path;
nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
- if (!nvram_file_exists && (bus->nvram_params == NULL))
- return (0);
+	/* NVRAM may also be fetched from UEFI when no nvram file is present */
if (nvram_file_exists) {
image = dhd_os_open_image(pnv_path);
if (image == NULL) {
goto err;
}
- /* Download variables */
- if (nvram_file_exists) {
- len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
- }
- else {
- len = strlen(bus->nvram_params);
- ASSERT(len <= MAX_NVRAMBUF_SIZE);
- memcpy(memblock, bus->nvram_params, len);
- }
+	/* Get nvram from the image file, or from UEFI when image == NULL */
+ len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+
if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
bufp = (char *)memblock;
bufp[len] = 0;
goto err;
}
- /* EXAMPLE: nvram_array */
- /* If a valid nvram_arry is specified as above, it can be passed down to dongle */
- /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
-
/* External nvram takes precedence if specified */
if (dhdsdio_download_nvram(bus)) {
DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
return ret;
}
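+/* Returns nonzero only when both SDIO functions 1 and 2 report I/O ready in CCCR IORDY */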
+uint8
+dhd_bus_is_ioready(struct dhd_bus *bus)
+{
+ uint8 enable;
+ bcmsdh_info_t *sdh;
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+ sdh = bus->sdh;
+ return (enable == bcmsdh_cfg_read(sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL));
+}
+
uint
dhd_bus_chip(struct dhd_bus *bus)
{
bcmsdh_oob_intr_register(bus->sdh,
dhdsdio_isr, bus);
bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#elif defined(FORCE_WOWLAN)
+ dhd_enable_oob_intr(bus, TRUE);
#endif
bus->dhd->dongle_reset = FALSE;
dhdsdio_release_dongle(bus, bus->dhd->osh,
TRUE, FALSE);
}
- } else
+ } else {
+ DHD_ERROR(("%s Failed to download binary to the dongle\n",
+ __FUNCTION__));
+ if (bus->sih != NULL) {
+ si_detach(bus->sih);
+ bus->sih = NULL;
+ }
bcmerror = BCME_SDIO_ERROR;
+ }
} else
bcmerror = BCME_SDIO_ERROR;
__FUNCTION__);
printf("Will call dhd_bus_start instead\n");
dhd_bus_resume(dhdp, 1);
-#if defined(HW_OOB)
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih->chip); // terence 20120615: fix for OOB initial issue
#endif
if ((bcmerror = dhd_bus_start(dhdp)) != 0)
__FUNCTION__, bcmerror));
}
}
+
+#ifdef PKT_STATICS
+ memset((uint8*) &tx_statics, 0, sizeof(pkt_statics_t));
+#endif
return bcmerror;
}
return dhdsdio_membytes(bus, set, address, data, size);
}
-#if defined(NDISVER) && (NDISVER >= 0x0630)
-void
-dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject)
-{
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject);
-}
-
-void
-dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp)
-{
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- bcmsdh_waitfor_iodrain(dhdp->bus->sdh);
-}
-#endif /* (NDISVER) && (NDISVER >= 0x0630) */
void
dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pconf_path)
dhd_os_sdunlock(bus->dhd);
}
#endif /* DEBUGGER */
-
-#if defined(SOFTAP_TPUT_ENHANCE)
-void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time)
-{
- if (!dhdp || !dhdp->bus) {
- DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
- return;
- }
- dhdp->bus->idletime = idle_time;
-}
-
-void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time)
-{
- if (!dhdp || !dhdp->bus) {
- DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
- return;
- }
-
- if (!idle_time) {
- DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__));
- return;
- }
- *idle_time = dhdp->bus->idletime;
-}
-#endif /* SOFTAP_TPUT_ENHANCE */
\r
EXPORT_SYMBOL(bcmdhd_mem_prealloc);\r
\r
-int bcmdhd_init_wlan_mem(void)\r
+int bcmdhd_init_wlan_mem(void)
{\r
int i;\r
int j;\r
for (i=0; i<8; i++) {\r
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);\r
if (!wlan_static_skb[i])\r
- goto err_skb_alloc;\r
+ goto err_skb_alloc;
printk("1 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
__FUNCTION__, i, wlan_static_skb[i], DHD_SKB_1PAGE_BUFSIZE);\r
}\r
for (; i<16; i++) {\r
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);\r
if (!wlan_static_skb[i])\r
- goto err_skb_alloc;\r
+ goto err_skb_alloc;
printk("2 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
__FUNCTION__, i, wlan_static_skb[i], DHD_SKB_2PAGE_BUFSIZE);\r
}\r
\r
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);\r
if (!wlan_static_skb[i])\r
- goto err_skb_alloc;\r
+ goto err_skb_alloc;
printk("3 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
__FUNCTION__, i, wlan_static_skb[i], DHD_SKB_4PAGE_BUFSIZE);\r
\r
}\r
\r
wlan_static_scan_buf0 = kmalloc (65536, GFP_KERNEL);\r
- if (!wlan_static_scan_buf0)\r
+ if (!wlan_static_scan_buf0)
goto err_mem_alloc;\r
printk("5 %s: wlan_static_scan_buf0=%p, size=%d\n",\r
__FUNCTION__, wlan_static_scan_buf0, 65536);\r
\r
wlan_static_scan_buf1 = kmalloc (65536, GFP_KERNEL);\r
- if (!wlan_static_scan_buf1)\r
+ if (!wlan_static_scan_buf1)
goto err_mem_alloc;\r
printk("6 %s: wlan_static_scan_buf1=%p, size=%d\n",\r
__FUNCTION__, wlan_static_scan_buf1, 65536);\r
/*
* DHD PROP_TXSTATUS Module.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_wlfc.c 501046 2014-09-06 01:25:16Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_wlfc.c 579277 2015-08-14 04:49:50Z $
*
*/
+
#include <typedefs.h>
#include <osl.h>
#include <dhd.h>
#include <dhd_bus.h>
+
#include <dhd_dbg.h>
-#ifdef PROP_TXSTATUS
+#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
+
+#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
/*
*
*/
+#if defined(DHD_WLFC_THREAD)
+#define WLFC_THREAD_QUICK_RETRY_WAIT_MS 10 /* 10 msec */
+#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */
+#endif /* defined (DHD_WLFC_THREAD) */
+
#ifdef PROP_TXSTATUS
-#ifdef QMONITOR
-#define DHD_WLFC_QMON_COMPLETE(entry) dhd_qmon_txcomplete(&entry->qmon)
-#else
#define DHD_WLFC_QMON_COMPLETE(entry)
-#endif /* QMONITOR */
#define LIMIT_BORROW
+/** reordering related */
+
+#if defined(DHD_WLFC_THREAD)
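+/** Wakes the wlfc thread so it can (re)attempt to drain queued packets */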
+static void
+_dhd_wlfc_thread_wakeup(dhd_pub_t *dhdp)
+{
+ dhdp->wlfc_thread_go = TRUE;
+ wake_up_interruptible(&dhdp->wlfc_wqhead);
+}
+#endif /* DHD_WLFC_THREAD */
+
static uint16
_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
{
return seq;
}
+/**
+ * Enqueue a caller supplied packet on a caller supplied precedence queue, optionally reorder
+ * suppressed packets.
+ * @param[in] pq caller supplied packet queue to enqueue the packet on
+ * @param[in] prec precedence of the to-be-queued packet
+ * @param[in] p transmit packet to enqueue
+ * @param[in] qHead if TRUE, enqueue to head instead of tail. Used to maintain d11 seq order.
+ * @param[in] current_seq
+ * @param[in] reOrder reOrder on odd precedence (=suppress queue)
+ */
static void
_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
uint8 current_seq, bool reOrder)
if (!p)
return;
-
ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+ /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */
+ ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p)));
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
-}
+} /* _dhd_wlfc_prec_enque */
-/* Create a place to store all packet pointers submitted to the firmware until
- a status comes back, suppress or otherwise.
-
- hang-er: noun, a contrivance on which things are hung, as a hook.
-*/
+/**
+ * Create a place to store all packet pointers submitted to the firmware until a status comes back,
+ * suppress or otherwise.
+ *
+ * hang-er: noun, a contrivance on which things are hung, as a hook.
+ */
+/** @deprecated soon */
static void*
-_dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+_dhd_wlfc_hanger_create(dhd_pub_t *dhd, int max_items)
{
int i;
wlfc_hanger_t* hanger;
/* allow only up to a specific size for now */
ASSERT(max_items == WLFC_HANGER_MAXITEMS);
- if ((hanger = (wlfc_hanger_t*)MALLOC(osh, WLFC_HANGER_SIZE(max_items))) == NULL)
+ if ((hanger = (wlfc_hanger_t*)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_DHD_WLFC_HANGER,
+ WLFC_HANGER_SIZE(max_items))) == NULL) {
return NULL;
-
+ }
memset(hanger, 0, WLFC_HANGER_SIZE(max_items));
hanger->max_items = max_items;
return hanger;
}
+/** @deprecated soon */
static int
-_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+_dhd_wlfc_hanger_delete(dhd_pub_t *dhd, void* hanger)
{
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
if (h) {
- MFREE(osh, h, WLFC_HANGER_SIZE(h->max_items));
+ DHD_OS_PREFREE(dhd, h, WLFC_HANGER_SIZE(h->max_items));
return BCME_OK;
}
return BCME_BADARG;
}
+/** @deprecated soon */
static uint16
_dhd_wlfc_hanger_get_free_slot(void* hanger)
{
return WLFC_HANGER_MAXITEMS;
}
+/** @deprecated soon */
static int
_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
{
return BCME_NOTFOUND;
if (h) {
- if ((h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
- (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+ if (h->items[slot_id].state != WLFC_HANGER_ITEM_STATE_FREE) {
*gen = h->items[slot_id].gen;
}
else {
+ DHD_ERROR(("Error: %s():%d item not used\n",
+ __FUNCTION__, __LINE__));
rc = BCME_NOTFOUND;
}
- }
- else
+
+ } else {
rc = BCME_BADARG;
+ }
+
return rc;
}
+/** @deprecated soon */
static int
_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
{
h->items[slot_id].pkt_state = 0;
h->items[slot_id].pkt_txstatus = 0;
h->pushed++;
- }
- else {
+ } else {
h->failed_to_push++;
rc = BCME_NOTFOUND;
}
- }
- else
+ } else {
rc = BCME_BADARG;
+ }
+
return rc;
}
+/** @deprecated soon */
static int
_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
{
int rc = BCME_OK;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
+ *pktout = NULL;
+
/* this packet was not pushed at the time it went to the firmware */
if (slot_id == WLFC_HANGER_MAXITEMS)
return BCME_NOTFOUND;
h->items[slot_id].identifier = 0;
h->popped++;
}
- }
- else {
+ } else {
h->failed_to_pop++;
rc = BCME_NOTFOUND;
}
- }
- else
+ } else {
rc = BCME_BADARG;
+ }
+
return rc;
}
+/** @deprecated soon */
static int
_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
{
h->items[slot_id].gen = gen;
if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_INUSE) {
h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
- }
- else
+ } else {
rc = BCME_BADARG;
- }
- else
+ }
+ } else {
rc = BCME_BADARG;
+ }
return rc;
}
-/* remove reference of specific packet in hanger */
+/** remove reference of specific packet in hanger */
+/** @deprecated soon */
static bool
_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
{
return FALSE;
}
- for (i = 0; i < h->max_items; i++) {
- if (pkt == h->items[i].pkt) {
- if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
- (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
- h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
- h->items[i].pkt = NULL;
- h->items[i].gen = 0xff;
- h->items[i].identifier = 0;
- }
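+	/* derive the hanger slot index directly from the packet's h2d tag instead of scanning all items */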
+ i = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(pkt)));
+
+ if ((i < h->max_items) && (pkt == h->items[i].pkt)) {
+ if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+ h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[i].pkt = NULL;
+ h->items[i].gen = 0xff;
+ h->items[i].identifier = 0;
return TRUE;
+ } else {
+ DHD_ERROR(("Error: %s():%d item not suppressed\n",
+ __FUNCTION__, __LINE__));
}
}
return FALSE;
}
-
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
static int
_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
{
return BCME_OK;
}
+/** afq = At Firmware Queue, queue containing packets pending in the dongle */
static int
_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
void **pktout)
return BCME_ERROR;
}
+ bcm_pkt_validate_chk(p);
+
if (!b) {
/* head packet is matched */
if ((q->head = PKTLINK(p)) == NULL) {
}
return BCME_OK;
-}
+} /* _dhd_wlfc_deque_afq */
+/**
+ * Flow control information piggybacks on packets, in the form of one or more TLVs. This function
+ * pushes one or more TLVs onto a packet that is going to be sent towards the dongle.
+ *
+ * @param[in] ctx
+ * @param[in/out] packet
+ * @param[in] tim_signal TRUE if parameter 'tim_bmp' is valid
+ * @param[in] tim_bmp
+ * @param[in] mac_handle
+ * @param[in] htodtag
+ * @param[in] htodseq d11 seqno for seqno reuse, only used if 'seq reuse' was agreed upon
+ * earlier between host and firmware.
+ * @param[in] skip_wlfc_hdr
+ */
static int
_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void** packet, bool tim_signal,
uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
- memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));
+ memcpy(&wlh[TLV_HDR_LEN] /* dst */, &wl_pktinfo, sizeof(uint32));
if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
uint16 wl_seqinfo = htol16(htodseq);
memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
push_bdc_hdr:
-
PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
h = (struct bdc_header *)PKTDATA(ctx->osh, p);
h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
BDC_SET_IF_IDX(h, DHD_PKTTAG_IF(PKTTAG(p)));
*packet = p;
return BCME_OK;
-}
+} /* _dhd_wlfc_pushheader */
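/*
 * Illustrative sketch (not part of the driver; 'example_write_pkttag_tlv' is a
 * hypothetical helper): the PKTTAG TLV written by _dhd_wlfc_pushheader() is just a
 * one-byte type, a one-byte length and the 32-bit packed host tag, followed later by
 * filler bytes and the BDC header pushed above.
 */
#if 0	/* example only */
static void
example_write_pkttag_tlv(uint8 *buf, uint32 wl_pktinfo)
{
	buf[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;	/* TLV type */
	buf[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;	/* TLV value length */
	memcpy(&buf[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));	/* packed host tag */
}
#endif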
+/**
+ * Removes (PULLs) flow control related headers from the caller supplied packet, is invoked eg
+ * when a packet is about to be freed.
+ */
static int
_dhd_wlfc_pullheader(athost_wl_status_info_t* ctx, void* pktbuf)
{
return BCME_OK;
}
+/**
+ * @param[in/out] p packet
+ */
static wlfc_mac_descriptor_t*
_dhd_wlfc_find_table_entry(athost_wl_status_info_t* ctx, void* p)
{
* STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
* have their own entry.
*/
- if ((DHD_IF_ROLE_STA(iftype) || ETHER_ISMULTI(dstn)) &&
+ if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
+ iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
(ctx->destination_entries.interfaces[ifid].occupied)) {
entry = &ctx->destination_entries.interfaces[ifid];
}
DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
return entry;
-}
-
+} /* _dhd_wlfc_find_table_entry */
+
+/**
+ * In case a packet must be dropped (because eg the queues are full), various tallies have to
+ * be updated. Called from several other functions.
+ * @param[in] dhdp pointer to public DHD structure
+ * @param[in] prec precedence of the packet
+ * @param[in] p the packet to be dropped
+ * @param[in] bPktInQ TRUE if packet is part of a queue
+ */
static int
_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
{
if (bPktInQ) {
ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
ctx->pkt_cnt_per_ac[prec>>1]--;
+ ctx->pkt_cnt_in_psq--;
}
ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
PKTFREE(ctx->osh, p, TRUE);
return 0;
-}
-
+} /* _dhd_wlfc_prec_drop */
+
+/**
+ * Called when eg the host handed a new packet over to the driver, or when the dongle reported
+ * that a packet could currently not be transmitted (=suppressed). This function enqueues a transmit
+ * packet in the host driver to be (re)transmitted at a later opportunity.
+ * @param[in] dhdp pointer to public DHD structure
+ * @param[in] qHead When TRUE, queue packet at head instead of tail, to preserve d11 sequence
+ */
static bool
_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
uint8 current_seq)
}
/* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(pq, prec))
+ if (pktq_pfull(pq, prec)) {
eprec = prec;
- else if (pktq_full(pq)) {
+ } else if (pktq_full(pq)) {
p = pktq_peek_tail(pq, &eprec);
if (!p) {
DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
ctx->pkt_cnt_per_ac[prec>>1]++;
+ ctx->pkt_cnt_in_psq++;
return TRUE;
-}
-
+} /* _dhd_wlfc_prec_enq_with_drop */
+/**
+ * Called during eg the 'committing' of a transmit packet from the OS layer to a lower layer, in
+ * the event that this 'commit' failed.
+ */
static int
_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
{
/*
- put the packet back to the head of queue
-
- - suppressed packet goes back to suppress sub-queue
- - pull out the header, if new or delayed packet
-
- Note: hslot is used only when header removal is done.
- */
+ * put the packet back to the head of queue
+ * - suppressed packet goes back to suppress sub-queue
+ * - pull out the header, if new or delayed packet
+ *
+ * Note: hslot is used only when header removal is done.
+ */
wlfc_mac_descriptor_t* entry;
int rc = BCME_OK;
int prec, fifo_id;
DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
rc = BCME_ERROR;
}
+
exit:
if (rc != BCME_OK) {
ctx->stats.rollback_failed++;
_dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
- }
- else
+ } else {
ctx->stats.rollback++;
+ }
return rc;
-}
+} /* _dhd_wlfc_rollback_packet_toq */
+/** Returns TRUE if host OS -> DHD flow control is allowed on the caller supplied interface */
static bool
_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
{
uint32 curr_t = OSL_SYSUPTIME();
if (ctx->fc_defer_timestamp == 0) {
- /* first signle ac scenario */
+ /* first single ac scenario */
ctx->fc_defer_timestamp = curr_t;
return FALSE;
}
}
return ctx->allow_fc;
-}
+} /* _dhd_wlfc_allow_fc */
+/**
+ * Starts or stops the flow of transmit packets from the host OS towards the DHD, depending on
+ * low/high watermarks.
+ */
static void
_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
{
}
return;
-}
+} /* _dhd_wlfc_flow_control_check */
static int
_dhd_wlfc_send_signalonly_packet(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
_dhd_wlfc_pullheader(ctx, p);
PKTFREE(ctx->osh, p, TRUE);
}
- }
- else {
+ } else {
DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
__FUNCTION__, dummylen));
rc = BCME_NOMEM;
+ dhdp->tx_pktgetfail++;
}
+
return rc;
-}
+} /* _dhd_wlfc_send_signalonly_packet */
-/* Return TRUE if traffic availability changed */
+/**
+ * Called on eg receiving 'mac close' indication from dongle. Updates the per-MAC administration
+ * maintained in caller supplied parameter 'entry'.
+ *
+ * @param[in/out] entry administration about a remote MAC entity
+ * @param[in] prec precedence queue for this remote MAC entity
+ *
+ * Return value: TRUE if traffic availability changed
+ */
static bool
_dhd_wlfc_traffic_pending_check(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
int prec)
if (entry->state == WLFC_STATE_CLOSE) {
if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
(pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
-
+ /* no packets in either the 'normal' or the 'suspended' queue */
if (entry->traffic_pending_bmp & NBITVAL(prec)) {
rc = TRUE;
entry->traffic_pending_bmp =
entry->traffic_pending_bmp & ~ NBITVAL(prec);
}
- }
- else {
+ } else {
+ /* packets are queued in host for transmission to dongle */
if (!(entry->traffic_pending_bmp & NBITVAL(prec))) {
rc = TRUE;
entry->traffic_pending_bmp =
}
}
}
+
if (rc) {
/* request a TIM update to firmware at the next piggyback opportunity */
if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp) {
_dhd_wlfc_send_signalonly_packet(ctx, entry, entry->traffic_pending_bmp);
entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
entry->send_tim_signal = 0;
- }
- else {
+ } else {
rc = FALSE;
}
}
+
return rc;
-}
+} /* _dhd_wlfc_traffic_pending_check */
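/*
 * Illustrative sketch (not part of the driver; 'example_traffic_bmp_ops' is a
 * hypothetical helper and assumes NBITVAL(prec) expands to a single-bit mask, as its
 * use above suggests): traffic availability is tracked as one bit per precedence in
 * traffic_pending_bmp, so updating it reduces to plain bit operations.
 */
#if 0	/* example only */
static void
example_traffic_bmp_ops(wlfc_mac_descriptor_t *entry, int prec, bool has_traffic)
{
	if (has_traffic)
		entry->traffic_pending_bmp |= NBITVAL(prec);	/* packets queued for this AC */
	else
		entry->traffic_pending_bmp &= ~NBITVAL(prec);	/* AC fully drained */
}
#endif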
+/**
+ * Called on receiving a 'd11 suppressed' or 'wl suppressed' tx status from the firmware. Enqueues
+ * the packet to transmit to firmware again at a later opportunity.
+ */
static int
_dhd_wlfc_enque_suppressed(athost_wl_status_info_t* ctx, int prec, void* p)
{
return BCME_OK;
}
+/**
+ * Called when a transmit packet is about to be 'committed' from the OS layer to a lower layer
+ * towards the dongle (eg the DBUS layer). Updates wlfc administration. May modify packet.
+ *
+ * @param[in/out] ctx driver specific flow control administration
+ * @param[in/out] entry The remote MAC entity for which the packet is destined.
+ * @param[in/out] packet Packet to send. This function optionally adds TLVs to the packet.
+ * @param[in] header_needed True if packet is 'new' to flow control
+ * @param[out] slot Handle to container in which the packet was 'parked'
+ */
static int
_dhd_wlfc_pretx_pktprocess(athost_wl_status_info_t* ctx,
wlfc_mac_descriptor_t* entry, void** packet, int header_needed, uint32* slot)
bool send_tim_update = FALSE;
uint32 htod = 0;
uint16 htodseq = 0;
- uint8 free_ctr, flags = 0;
+ uint8 free_ctr;
int gen = 0xff;
dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
void * p = *packet;
}
if (entry->send_tim_signal) {
+ /* sends a traffic indication bitmap to the dongle */
send_tim_update = TRUE;
entry->send_tim_signal = 0;
entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
return BCME_ERROR;
}
- flags = WLFC_PKTFLAG_PKTFROMHOST;
+ WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+ WL_TXSTATUS_SET_HSLOT(htod, hslot);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ WL_TXSTATUS_SET_GENERATION(htod, gen);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
+
if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
/*
Indicate that this packet is being sent in response to an
explicit request from the firmware side.
*/
- flags |= WLFC_PKTFLAG_PKT_REQUESTED;
- }
- if (pkt_is_dhcp(ctx->osh, p)) {
- flags |= WLFC_PKTFLAG_PKT_FORCELOWRATE;
+ WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
+ } else {
+ WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
}
- WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
- WL_TXSTATUS_SET_HSLOT(htod, hslot);
- WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
- WL_TXSTATUS_SET_FLAGS(htod, flags);
- WL_TXSTATUS_SET_GENERATION(htod, gen);
- DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
-
rc = _dhd_wlfc_pushheader(ctx, &p, send_tim_update,
entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
if (rc == BCME_OK) {
DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
- if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) {
- /*
- a new header was created for this packet.
- push to hanger slot and scrub q. Since bus
- send succeeded, increment seq number as well.
- */
- rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
- if (rc == BCME_OK) {
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ wlfc_hanger_t *h = (wlfc_hanger_t*)(ctx->hanger);
+ if (header_needed) {
+ /*
+ a new header was created for this packet.
+ push to hanger slot and scrub q. Since bus
+ send succeeded, increment seq number as well.
+ */
+ rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ if (rc == BCME_OK) {
#ifdef PROP_TXSTATUS_DEBUG
- ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
- OSL_SYSUPTIME();
+ h->items[hslot].push_time =
+ OSL_SYSUPTIME();
#endif
+ } else {
+ DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
+ __FUNCTION__, rc));
+ }
} else {
- DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
- __FUNCTION__, rc));
+ /* clear hanger state */
+ if (h->items[hslot].pkt != p)
+ DHD_ERROR(("%s() pkt does not match: cur %p, hanger pkt %p\n",
+ __FUNCTION__, p, h->items[hslot].pkt));
+ ASSERT(h->items[hslot].pkt == p);
+ bcm_object_feature_set(h->items[hslot].pkt,
+ BCM_OBJECT_FEATURE_PKT_STATE, 0);
+ h->items[hslot].pkt_state = 0;
+ h->items[hslot].pkt_txstatus = 0;
+ h->items[hslot].state = WLFC_HANGER_ITEM_STATE_INUSE;
}
+ } else if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ /* clear hanger state */
+ ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_state = 0;
+ ((wlfc_hanger_t*)(ctx->hanger))->items[hslot].pkt_txstatus = 0;
}
if ((rc == BCME_OK) && header_needed) {
*slot = hslot;
*packet = p;
return rc;
-}
+} /* _dhd_wlfc_pretx_pktprocess */
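/*
 * Illustrative sketch (not part of the driver; 'example_build_htod_tag' is a
 * hypothetical helper): the 32-bit host-to-dongle tag assembled above is a plain
 * bitfield built with the WL_TXSTATUS_SET_* accessors; the txstatus path later
 * unpacks it with the matching WL_TXSTATUS_GET_* accessors.
 */
#if 0	/* example only */
static uint32
example_build_htod_tag(uint8 free_ctr, uint16 hslot, uint8 fifo, uint8 gen)
{
	uint32 htod = 0;

	WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);	/* free running sequence counter */
	WL_TXSTATUS_SET_HSLOT(htod, hslot);		/* hanger slot / descriptor index */
	WL_TXSTATUS_SET_FIFO(htod, fifo);		/* AC fifo of the packet */
	WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
	WL_TXSTATUS_SET_GENERATION(htod, gen);
	return htod;
}
#endif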
+/**
+ * A remote wireless mac may be temporarily 'closed' due to power management. Returns '1' if remote
+ * mac is in the 'open' state, otherwise '0'.
+ */
static int
_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
wlfc_mac_descriptor_t* entry, int prec)
ASSERT(&ctx->destination_entries.other == entry);
return 1;
}
+
if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
WLC_E_IF_ROLE_P2P_GO) {
/* - destination interface is of type p2p GO.
return 0;
}
}
+
/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
(entry->requested_packet == 0)) ||
}
return 1;
-}
-
+} /* _dhd_wlfc_is_destination_open */
+
+/**
+ * Dequeues a suppressed or delayed packet from a queue
+ * @param[in/out] ctx Driver specific flow control administration
+ * @param[in] prec Precedence of queue to dequeue from
+ * @param[out] ac_credit_spent Boolean, returns 0 or 1
+ * @param[out] needs_hdr Boolean, returns 0 or 1
+ * @param[out] entry_out The remote MAC for which the packet is destined
+ * @param[in] only_no_credit If TRUE, searches all entries instead of just the active ones
+ *
+ * Return value: the dequeued packet
+ */
static void*
_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
bool only_no_credit)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
wlfc_mac_descriptor_t* entry;
int total_entries;
void* p = NULL;
int i;
+ uint8 credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
*entry_out = NULL;
/* in most cases a packet will count against FIFO credit */
- *ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
+ *ac_credit_spent = credit_spent;
/* search all entries, include nodes as well as interfaces */
if (only_no_credit) {
}
ASSERT(entry);
- if (entry->transit_count < 0) {
- DHD_ERROR(("Error: %s():%d transit_count %d < 0\n",
- __FUNCTION__, __LINE__, entry->transit_count));
- continue;
- }
if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
- !(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
+ (!entry->suppressed)) {
+ *ac_credit_spent = credit_spent;
if (entry->state == WLFC_STATE_CLOSE) {
*ac_credit_spent = 0;
}
p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
*needs_hdr = 0;
if (p == NULL) {
- if (entry->suppressed == TRUE) {
- /* skip this entry */
- continue;
- }
/* De-Q from delay Q */
p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
*needs_hdr = 1;
}
if (p != NULL) {
+ bcm_pkt_validate_chk(p);
/* did the packet come from suppress sub-queue? */
if (entry->requested_credit > 0) {
entry->requested_credit--;
*entry_out = entry;
ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
ctx->pkt_cnt_per_ac[prec]--;
+ ctx->pkt_cnt_in_psq--;
_dhd_wlfc_flow_control_check(ctx, &entry->psq,
DHD_PKTTAG_IF(PKTTAG(p)));
/*
- A packet has been picked up, update traffic
- availability bitmap, if applicable
- */
+ * A packet has been picked up, update traffic availability bitmap,
+ * if applicable.
+ */
_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
return p;
}
}
}
return NULL;
-}
+} /* _dhd_wlfc_deque_delayedq */
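/*
 * Illustrative sketch (not part of the driver; 'example_deque_one' is a hypothetical
 * helper and assumes PSQ_SUP_IDX()/PSQ_DLY_IDX() select the 'suppressed' and 'delayed'
 * sub-precedences of entry->psq, as their use above implies): suppressed packets are
 * drained first and keep their existing header, fresh packets need a new header.
 */
#if 0	/* example only */
static void *
example_deque_one(wlfc_mac_descriptor_t *entry, int prec, uint8 *needs_hdr)
{
	void *p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));	/* retry suppressed first */

	*needs_hdr = 0;
	if (p == NULL) {
		p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));	/* then new (delayed) packets */
		*needs_hdr = 1;
	}
	return p;
}
#endif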
+/** Enqueues caller supplied packet on either a 'suppressed' or 'delayed' queue */
static int
_dhd_wlfc_enque_delayq(athost_wl_status_info_t* ctx, void* pktbuf, int prec)
{
return BCME_ERROR;
}
-#ifdef QMONITOR
- dhd_qmon_tx(&entry->qmon);
-#endif
- /*
- A packet has been pushed, update traffic availability bitmap,
- if applicable
- */
+ /* A packet has been pushed, update traffic availability bitmap, if applicable */
_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
}
return BCME_OK;
-}
+} /* _dhd_wlfc_enque_delayq */
+/** Returns TRUE if caller supplied packet is destined for caller supplied interface */
static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
{
if (!p || !p_ifid)
return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
}
+/** Returns TRUE if caller supplied packet is destined for caller supplied remote MAC */
static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
{
if (!p || !entry)
_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
{
dhd_pub_t *dhdp;
+ bool credit_return = FALSE;
if (!wlfc || !pkt) {
return;
int lender, credit_returned = 0;
uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+ credit_return = TRUE;
+
/* Note that borrower is fifo_id */
/* Return credits to highest priority lender first */
for (lender = AC_COUNT; lender >= 0; lender--) {
wlfc->FIFO_credit[fifo_id]++;
}
}
+
+ BCM_REFERENCE(credit_return);
+#if defined(DHD_WLFC_THREAD)
+ if (credit_return) {
+ _dhd_wlfc_thread_wakeup(dhdp);
+ }
+#endif /* defined(DHD_WLFC_THREAD) */
}
+/** Removes and frees a packet from the hanger. Called during eg tx complete. */
static void
_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
int pkt_txstatus)
return;
item = &hanger->items[slot_id];
- item->pkt_state |= pkt_state;
- if (pkt_txstatus != -1) {
- item->pkt_txstatus = pkt_txstatus;
- }
if (item->pkt) {
- if ((item->pkt_state & WLFC_HANGER_PKT_STATE_TXCOMPLETE) &&
- (item->pkt_state & (WLFC_HANGER_PKT_STATE_TXSTATUS |
- WLFC_HANGER_PKT_STATE_CLEANUP))) {
+ item->pkt_state |= pkt_state;
+ if (pkt_txstatus != -1)
+ item->pkt_txstatus = (uint8)pkt_txstatus;
+ bcm_object_feature_set(item->pkt, BCM_OBJECT_FEATURE_PKT_STATE, item->pkt_state);
+ if (item->pkt_state == WLFC_HANGER_PKT_STATE_COMPLETE) {
void *p = NULL;
void *pkt = item->pkt;
uint8 old_state = item->state;
BCM_REFERENCE(ret);
BCM_REFERENCE(pkt);
ASSERT((ret == BCME_OK) && p && (pkt == p));
-
- /* free packet */
- if (!(item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS)) {
- /* cleanup case */
- wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, p);
-
- ASSERT(entry);
- entry->transit_count--;
- if (entry->suppressed &&
- (--entry->suppr_transit_count == 0)) {
- entry->suppressed = FALSE;
- }
- _dhd_wlfc_return_implied_credit(wlfc, p);
- wlfc->stats.cleanup_fw_cnt++;
- /* slot not freeable yet */
- item->state = old_state;
+ if (old_state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
+ printf("ERROR: free a suppressed pkt %p state %d pkt_state %d\n",
+ pkt, old_state, item->pkt_state);
}
+ ASSERT(old_state != WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED);
+ /* free packet */
wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
[DHD_PKTTAG_FIFO(PKTTAG(p))]--;
wlfc->stats.pktout++;
PKTFREE(wlfc->osh, p, TRUE);
}
} else {
- if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) {
- /* free slot */
- if (item->state == WLFC_HANGER_ITEM_STATE_FREE)
- DHD_ERROR(("Error: %s():%d get multi TXSTATUS for one packet???\n",
- __FUNCTION__, __LINE__));
- item->state = WLFC_HANGER_ITEM_STATE_FREE;
- }
+ /* free slot */
+ if (item->state == WLFC_HANGER_ITEM_STATE_FREE)
+ DHD_ERROR(("Error: %s():%d Multiple TXSTATUS or BUSRETURNED: %d (%d)\n",
+ __FUNCTION__, __LINE__, item->pkt_state, pkt_state));
+ item->state = WLFC_HANGER_ITEM_STATE_FREE;
}
-}
+} /* _dhd_wlfc_hanger_free_pkt */
+/** Called during eg detach() */
static void
_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
return;
}
-
for (prec = 0; prec < pq->num_prec; prec++) {
struct pktq_prec *q;
void *p, *prev = NULL;
q = &pq->q[prec];
p = q->head;
while (p) {
+ bcm_pkt_validate_chk(p);
if (fn == NULL || (*fn)(p, arg)) {
bool head = (p == q->head);
if (head)
}
ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
ctx->pkt_cnt_per_ac[prec>>1]--;
+ ctx->pkt_cnt_in_psq--;
ctx->stats.cleanup_psq_cnt++;
if (!(prec & 1)) {
/* pkt in delayed q, so fake push BDC header for
} else if (q_type == Q_TYPE_AFQ) {
wlfc_mac_descriptor_t* entry =
_dhd_wlfc_find_table_entry(ctx, p);
- entry->transit_count--;
- if (entry->suppressed &&
- (--entry->suppr_transit_count == 0)) {
- entry->suppressed = FALSE;
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
}
_dhd_wlfc_return_implied_credit(ctx, p);
ctx->stats.cleanup_fw_cnt++;
if (fn == NULL)
ASSERT(pq->len == 0);
-}
+} /* _dhd_wlfc_pktq_flush */
+
+/** !BCMDBUS specific function. Dequeues a packet from the caller supplied queue. */
static void*
_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
{
if (p == NULL)
return NULL;
+ bcm_pkt_validate_chk(p);
+
if (prev == NULL) {
if ((q->head = PKTLINK(p)) == NULL) {
q->tail = NULL;
return p;
}
+/** !BCMDBUS specific function */
static void
_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
{
DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
__FUNCTION__, pkt));
}
- entry->transit_count--;
- if (entry->suppressed &&
- (--entry->suppr_transit_count == 0)) {
- entry->suppressed = FALSE;
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
}
_dhd_wlfc_return_implied_credit(wlfc, pkt);
wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
dhd_txcomplete(dhd, pkt, FALSE);
PKTFREE(wlfc->osh, pkt, TRUE);
}
-}
+} /* _dhd_wlfc_cleanup_txq */
+/** called during eg detach */
void
_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
{
wlfc->stats.cleanup_txq_cnt = 0;
wlfc->stats.cleanup_psq_cnt = 0;
wlfc->stats.cleanup_fw_cnt = 0;
+
/*
- * flush sequence shoulde be txq -> psq -> hanger/afq, hanger has to be last one
+ * flush sequence should be txq -> psq -> hanger/afq, hanger has to be last one
*/
/* flush bus->txq */
_dhd_wlfc_cleanup_txq(dhd, fn, arg);
-
/* flush psq, search all entries, include nodes as well as interfaces */
total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
(h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
- _dhd_wlfc_hanger_free_pkt(wlfc, i,
- WLFC_HANGER_PKT_STATE_CLEANUP, FALSE);
+ h->items[i].state = WLFC_HANGER_ITEM_STATE_FLUSHED;
}
}
}
}
return;
-}
+} /* _dhd_wlfc_cleanup */
+/** Called after eg the dongle signalled to the DHD a new remote MAC that it connected with */
static int
_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
{
int rc = BCME_OK;
-#ifdef QMONITOR
- dhd_qmon_reset(&entry->qmon);
-#endif
if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
entry->occupied = 1;
entry->interface_id = ifid;
entry->iftype = iftype;
entry->ac_bitmap = 0xff; /* update this when handling APSD */
+
/* for an interface entry we may not care about the MAC address */
if (ea != NULL)
memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
entry->suppressed = FALSE;
entry->transit_count = 0;
entry->suppr_transit_count = 0;
+ entry->onbus_pkts_count = 0;
}
-#ifdef P2PONEINT
- if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) ||
- ((action == eWLFC_MAC_ENTRY_ACTION_UPDATE) && (entry->psq.num_prec == 0))) {
-#else
if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
-#endif
dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+
pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+
if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
}
ctx->active_entry_head->prev->next = entry;
ctx->active_entry_head->prev = entry;
entry->next = ctx->active_entry_head;
-
} else {
ASSERT(ctx->active_entry_count == 0);
entry->prev = entry->next = entry;
}
}
return rc;
-}
+} /* _dhd_wlfc_mac_entry_update */
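/*
 * Illustrative sketch (not part of the driver; 'example_link_active_entry' is a
 * hypothetical helper): active MAC entries are kept on a circular doubly linked list
 * anchored at ctx->active_entry_head; a new entry is linked in just before the head
 * (i.e. at the tail), or made self-referencing when it is the first one, as above.
 */
#if 0	/* example only */
static void
example_link_active_entry(athost_wl_status_info_t *ctx, wlfc_mac_descriptor_t *entry)
{
	if (ctx->active_entry_head) {
		entry->prev = ctx->active_entry_head->prev;	/* old tail */
		ctx->active_entry_head->prev->next = entry;
		ctx->active_entry_head->prev = entry;
		entry->next = ctx->active_entry_head;
	} else {
		entry->prev = entry->next = entry;	/* first and only element */
	}
}
#endif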
+
#ifdef LIMIT_BORROW
+
+/** LIMIT_BORROW specific function */
static int
_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
bool bBorrowAll)
return rc;
}
+/** LIMIT_BORROW specific function */
static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
{
if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
return BCME_OK;
}
+
#endif /* LIMIT_BORROW */
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware.
+ * @param action : eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
static int
_dhd_wlfc_interface_entry_update(void* state,
uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
_dhd_wlfc_ifpkt_fn, &ifid);
}
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
static int
_dhd_wlfc_BCMCCredit_support_update(void* state)
{
return BCME_OK;
}
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
static int
_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
{
return BCME_OK;
}
+/**
+ * Called during committing of a transmit packet from the OS DHD layer to the next layer towards
+ * the dongle (eg the DBUS layer). All transmit packets flow via this function to the next layer.
+ *
+ * @param[in/out] ctx Driver specific flow control administration
+ * @param[in] ac Access Category (QoS) of caller supplied packet
+ * @param[in] commit_info Contains eg the packet to send
+ * @param[in] fcommit Function pointer to transmit function of next software layer
+ * @param[in] commit_ctx Opaque context used when calling next layer
+ */
static int
_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
commit_info->mac_entry->suppr_transit_count++;
}
commit_info->mac_entry->transit_count++;
+ commit_info->mac_entry->onbus_pkts_count++;
} else if (commit_info->needs_hdr) {
if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
void *pout = NULL;
}
return rc;
-}
+} /* _dhd_wlfc_handle_packet_commit */
+/** Returns remote MAC descriptor for caller supplied MAC address */
static uint8
-_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8 *ea)
{
wlfc_mac_descriptor_t* table =
((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
return WLFC_MAC_DESC_ID_INVALID;
}
+/**
+ * Called when the host receives a WLFC_CTL_TYPE_TXSTATUS event from the dongle, indicating the
+ * status of a frame that the dongle attempted to transmit over the wireless medium.
+ */
+static int
+dhd_wlfc_suppressed_acked_update(dhd_pub_t *dhd, uint16 hslot, uint8 prec, uint8 hcnt)
+{
+ athost_wl_status_info_t* ctx;
+ wlfc_mac_descriptor_t* entry = NULL;
+ struct pktq *pq;
+ struct pktq_prec *q;
+ void *p, *b;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd(%p)\n", __FUNCTION__, dhd));
+ return BCME_BADARG;
+ }
+ ctx = (athost_wl_status_info_t*)dhd->wlfc_state;
+ if (!ctx) {
+ DHD_ERROR(("%s: ctx(%p)\n", __FUNCTION__, ctx));
+ return BCME_ERROR;
+ }
+
+ ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+ if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[hslot];
+ else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
+
+ pq = &entry->psq;
+
+ ASSERT(((prec << 1) + 1) < pq->num_prec);
+
+ q = &pq->q[((prec << 1) + 1)];
+
+ b = NULL;
+ p = q->head;
+
+ while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p))))) {
+ b = p;
+ p = PKTLINK(p);
+ }
+
+ if (p == NULL) {
+ /* none is matched */
+ if (b) {
+ DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+ } else {
+ DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+ }
+
+ return BCME_ERROR;
+ }
+
+ if (!b) {
+ /* head packet is matched */
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ /* middle packet is matched */
+ PKTSETLINK(b, PKTLINK(p));
+ if (PKTLINK(p) == NULL) {
+ q->tail = b;
+ }
+ }
+
+ q->len--;
+ pq->len--;
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+ ctx->pkt_cnt_per_ac[prec]--;
+
+ PKTSETLINK(p, NULL);
+
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(ctx, p);
+ } else {
+ _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ }
+
+ entry->transit_count++;
+
+ return BCME_OK;
+}
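/*
 * Illustrative sketch (not part of the driver; 'example_slot_to_entry' is a
 * hypothetical helper): the 'hslot' carried in a txstatus selects a destination entry
 * in three ranges, which is the lookup performed at the top of the function above.
 */
#if 0	/* example only */
static wlfc_mac_descriptor_t *
example_slot_to_entry(athost_wl_status_info_t *ctx, uint16 hslot)
{
	if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
		return &ctx->destination_entries.nodes[hslot];		/* per-node entries */
	if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
		return &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
	return &ctx->destination_entries.other;				/* catch-all entry */
}
#endif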
+
static int
_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
{
- uint8 status_flag;
+ uint8 status_flag_ori, status_flag;
uint32 status;
int ret = BCME_OK;
- int remove_from_hanger = 1;
+ int remove_from_hanger_ori, remove_from_hanger = 1;
void* pktbuf = NULL;
uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
uint16 hslot;
uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
memcpy(&status, pkt_info, sizeof(uint32));
+ status = ltoh32(status);
status_flag = WL_TXSTATUS_GET_FLAGS(status);
hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
hslot = WL_TXSTATUS_GET_HSLOT(status);
if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+ seq = ltoh16(seq);
seq_fromfw = WL_SEQ_GET_FROMFW(seq);
seq_num = WL_SEQ_GET_NUM(seq);
}
if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
wlfc->stats.pkt_freed += len;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+ } else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
wlfc->stats.pkt_freed += len;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
+ } else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
wlfc->stats.d11_suppress += len;
remove_from_hanger = 0;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
+ } else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
wlfc->stats.wl_suppress += len;
remove_from_hanger = 0;
+ } else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
+ wlfc->stats.wlc_tossed_pkts += len;
}
- else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
- wlfc->stats.wlc_tossed_pkts += len;
+ else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+ wlfc->stats.pkt_freed += len;
}
if (dhd->proptxstatus_txstatus_ignore) {
return BCME_OK;
}
+ status_flag_ori = status_flag;
+ remove_from_hanger_ori = remove_from_hanger;
+
while (count < len) {
+ if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+ dhd_wlfc_suppressed_acked_update(dhd, hslot, fifo_id, hcnt);
+ }
if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
} else {
+ status_flag = status_flag_ori;
+ remove_from_hanger = remove_from_hanger_ori;
ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
if (!pktbuf) {
_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
goto cont;
+ } else {
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h->items[hslot].state == WLFC_HANGER_ITEM_STATE_FLUSHED) {
+ status_flag = WLFC_CTL_PKTFLAG_DISCARD;
+ remove_from_hanger = 1;
+ }
}
}
goto cont;
}
+ bcm_pkt_validate_chk(pktbuf);
+
/* set fifo_id to correct value because not all FW does that */
fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
if this packet did not count against FIFO credit, it must have
taken a requested_credit from the destination entry (for pspoll etc.)
*/
- if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
+ if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf))) {
entry->requested_credit++;
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* DHD_WLFC_THREAD */
+ }
#ifdef PROP_TXSTATUS_DEBUG
entry->dstncredit_acks++;
#endif
}
}
/* pkt back from firmware side */
- entry->transit_count--;
- if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
- entry->suppressed = FALSE;
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count) {
+ entry->suppr_transit_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
}
cont:
count++;
}
+
return BCME_OK;
-}
+} /* _dhd_wlfc_compressed_txstatus_update */
+/**
+ * Called when eg host receives a 'WLFC_CTL_TYPE_FIFO_CREDITBACK' event from the dongle.
+ * @param[in] credits caller supplied credit that will be added to the host credit.
+ */
static int
_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
{
wlfc->FIFO_credit[lender] +=
wlfc->credits_borrowed[i][lender];
wlfc->credits_borrowed[i][lender] = 0;
- }
- else {
+ } else {
wlfc->credits_borrowed[i][lender] -= credits[i];
wlfc->FIFO_credit[lender] += credits[i];
credits[i] = 0;
}
}
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
return BCME_OK;
-}
+} /* _dhd_wlfc_fifocreditback_indicate */
+
+/** !BCMDBUS specific function */
static void
_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
{
PKTSETLINK(pkt, NULL);
entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+ if (entry) {
+ if (entry->onbus_pkts_count > 0)
+ entry->onbus_pkts_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
+ }
/* fake a suppression txstatus */
htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+ htod = htol32(htod);
memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
WL_SEQ_SET_FROMFW(htodseq, 1);
WL_SEQ_SET_FROMDRV(htodseq, 0);
}
+ htodseq = htol16(htodseq);
memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
WLFC_CTL_VALUE_LEN_SEQ);
}
if (bCreditUpdate) {
_dhd_wlfc_fifocreditback_indicate(dhd, credits);
}
-}
-
+} /* _dhd_wlfc_suppress_txq */
static int
_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
(void)dhd;
bcopy(&value[2], ×tamp, sizeof(uint32));
+ timestamp = ltoh32(timestamp);
DHD_INFO(("RXPKT: SEQ: %d, timestamp %d\n", value[1], timestamp));
return BCME_OK;
}
}
}
+/** called on eg receiving 'mac open' event from the dongle. */
static void
_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
{
}
}
+/** called on eg receiving a WLFC_CTL_TYPE_MACDESC_ADD TLV from the dongle */
static int
_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
}
BCM_REFERENCE(rc);
return BCME_OK;
-}
+} /* _dhd_wlfc_mac_table_update */
+/** Called on a 'mac open' or 'mac close' event indicated by the dongle */
static int
_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
/* Handle PS on/off indication */
athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
- wlfc_mac_descriptor_t* desc;
+ wlfc_mac_descriptor_t* desc; /* looked up in a table that maps mac handle to mac descriptor */
uint8 mac_handle = value[0];
int i;
desc->requested_credit = 0;
desc->requested_packet = 0;
_dhd_wlfc_remove_requested_entry(wlfc, desc);
- }
- else {
+ } else {
desc->state = WLFC_STATE_CLOSE;
DHD_WLFC_CTRINC_MAC_CLOSE(desc);
- /*
- Indicate to firmware if there is any traffic pending.
- */
+ /* Indicate to firmware if there is any traffic pending. */
for (i = 0; i < AC_COUNT; i++) {
_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
}
}
- }
- else {
+ } else {
wlfc->stats.psmode_update_failed++;
}
+
return BCME_OK;
-}
+} /* _dhd_wlfc_psmode_update */
+/** called upon receiving 'interface open' or 'interface close' event from the dongle */
static int
_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
if (type == WLFC_CTL_TYPE_INTERFACE_OPEN) {
table[if_id].state = WLFC_STATE_OPEN;
/* WLFC_DBGMESG(("INTERFACE[%d] OPEN\n", if_id)); */
- }
- else {
+ } else {
table[if_id].state = WLFC_STATE_CLOSE;
/* WLFC_DBGMESG(("INTERFACE[%d] CLOSE\n", if_id)); */
}
return BCME_OK;
}
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_CREDIT TLV from the dongle */
static int
_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
{
desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
_dhd_wlfc_add_requested_entry(wlfc, desc);
- }
- else {
+#if defined(DHD_WLFC_THREAD)
+ if (credit) {
+ _dhd_wlfc_thread_wakeup(dhd);
+ }
+#endif /* DHD_WLFC_THREAD */
+ } else {
wlfc->stats.credit_request_failed++;
}
+
return BCME_OK;
}
+/** Called on receiving a WLFC_CTL_TYPE_MAC_REQUEST_PACKET TLV from the dongle */
static int
_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
{
desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
_dhd_wlfc_add_requested_entry(wlfc, desc);
- }
- else {
+#if defined(DHD_WLFC_THREAD)
+ if (packet_count) {
+ _dhd_wlfc_thread_wakeup(dhd);
+ }
+#endif /* DHD_WLFC_THREAD */
+ } else {
wlfc->stats.packet_request_failed++;
}
+
return BCME_OK;
}
+/** Called when host receives a WLFC_CTL_TYPE_HOST_REORDER_RXPKTS TLV from the dongle */
static void
_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
{
if (info_buf) {
bcopy(val, info_buf, len);
*info_len = len;
- }
- else
+ } else {
*info_len = 0;
+ }
}
}
wlfc->dhdp = dhd;
if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
- wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+ wlfc->hanger = _dhd_wlfc_hanger_create(dhd, WLFC_HANGER_MAXITEMS);
if (wlfc->hanger == NULL) {
DHD_OS_PREFREE(dhd, dhd->wlfc_state,
sizeof(athost_wl_status_info_t));
dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
/* default to check rx pkt */
+ dhd->wlfc_rxpkt_chk = TRUE;
if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
dhd->wlfc_rxpkt_chk = FALSE;
- } else {
- dhd->wlfc_rxpkt_chk = TRUE;
}
-
/* initialize all interfaces to accept traffic */
for (i = 0; i < WLFC_MAX_IFNUM; i++) {
wlfc->hostif_flow_state[i] = OFF;
dhd_os_wlfc_unblock(dhd);
return rc;
-}
+} /* dhd_wlfc_enable */
+
#ifdef SUPPORT_P2P_GO_PS
+
+/**
+ * Called when the host platform enters a lower power mode, eg right before a system hibernate.
+ * SUPPORT_P2P_GO_PS specific function.
+ */
int
dhd_wlfc_suspend(dhd_pub_t *dhd)
{
-
- uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
uint32 tlv = 0;
DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
if (!dhd->wlfc_enabled)
return -1;
- bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
- DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+ if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
return -1;
- }
- tlv = iovbuf[0];
if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
return 0;
tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
- bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
- DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
- __FUNCTION__, tlv));
+ if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
return -1;
- }
return 0;
}
- int
+/**
+ * Called when the host platform resumes from a power management operation, eg resume after a
+ * system hibernate. SUPPORT_P2P_GO_PS specific function.
+ */
+int
dhd_wlfc_resume(dhd_pub_t *dhd)
{
- uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
uint32 tlv = 0;
DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
if (!dhd->wlfc_enabled)
return -1;
- bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
- DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+ if (!dhd_wl_ioctl_get_intiovar(dhd, "tlv", &tlv, WLC_GET_VAR, FALSE, 0))
return -1;
- }
- tlv = iovbuf[0];
if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
return 0;
tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
- bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
- DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
- __FUNCTION__, tlv));
+ if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0))
return -1;
- }
return 0;
}
+
#endif /* SUPPORT_P2P_GO_PS */
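/*
 * Illustrative sketch (not part of the driver; 'example_tlv_mask' is a hypothetical
 * helper): dhd_wlfc_suspend()/dhd_wlfc_resume() above only toggle two bits of the
 * firmware 'tlv' signal mask and leave the remaining flags untouched.
 */
#if 0	/* example only */
static uint32
example_tlv_mask(uint32 tlv, bool suspend)
{
	const uint32 bits = WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS;

	return suspend ? (tlv & ~bits) : (tlv | bits);	/* mask on suspend, restore on resume */
}
#endif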
+/** A flow control header was received from firmware, containing one or more TLVs */
int
dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
uint *reorder_info_len)
if (type == WLFC_CTL_TYPE_TXSTATUS) {
_dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
- }
- else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+ } else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
}
_dhd_wlfc_compressed_txstatus_update(dhd, value,
value[compcnt_offset], &entry);
- }
- else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
+ } else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK) {
_dhd_wlfc_fifocreditback_indicate(dhd, value);
-
- else if (type == WLFC_CTL_TYPE_RSSI)
+ } else if (type == WLFC_CTL_TYPE_RSSI) {
_dhd_wlfc_rssi_indicate(dhd, value);
-
- else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
+ } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT) {
_dhd_wlfc_credit_request(dhd, value);
-
- else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
+ } else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET) {
_dhd_wlfc_packet_request(dhd, value);
-
- else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
- (type == WLFC_CTL_TYPE_MAC_CLOSE))
+ } else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
+ (type == WLFC_CTL_TYPE_MAC_CLOSE)) {
_dhd_wlfc_psmode_update(dhd, value, type);
-
- else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
- (type == WLFC_CTL_TYPE_MACDESC_DEL))
+ } else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
+ (type == WLFC_CTL_TYPE_MACDESC_DEL)) {
_dhd_wlfc_mac_table_update(dhd, value, type);
-
- else if (type == WLFC_CTL_TYPE_TRANS_ID)
+ } else if (type == WLFC_CTL_TYPE_TRANS_ID) {
_dhd_wlfc_dbg_senum_check(dhd, value);
-
- else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
+ } else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
_dhd_wlfc_interface_update(dhd, value, type);
}
/* suppress all packets for this mac entry from bus->txq */
_dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
}
- }
+ } /* while */
+
if (remainder != 0 && wlfc) {
/* trouble..., something is not right */
wlfc->stats.tlv_parse_failed++;
}
- }
+ } /* if */
if (wlfc)
wlfc->stats.dhd_hdrpulls++;
dhd_os_wlfc_unblock(dhd);
return BCME_OK;
-}
+} /* dhd_wlfc_parse_header_info */
-int
-dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
- bool need_toggle_host_if)
+KERNEL_THREAD_RETURN_TYPE
+dhd_wlfc_transfer_packets(void *data)
{
+ dhd_pub_t *dhdp = (dhd_pub_t *)data;
int ac, single_ac = 0, rc = BCME_OK;
dhd_wlfc_commit_info_t commit_info;
athost_wl_status_info_t* ctx;
int bus_retry_count = 0;
+ int pkt_send = 0;
uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */
uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */
int lender;
- if ((dhdp == NULL) || (fcommit == NULL)) {
- DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- return BCME_BADARG;
- }
-
- dhd_os_wlfc_block(dhdp);
-
- if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
- if (pktbuf) {
- DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+#if defined(DHD_WLFC_THREAD)
+ /* wait till someone wakes me up; the timeout is adjusted at runtime */
+ int wait_msec = msecs_to_jiffies(0xFFFFFFFF);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+#if defined(DHD_WLFC_THREAD)
+ while (1) {
+ bus_retry_count = 0;
+ pkt_send = 0;
+ tx_map = 0;
+ rx_map = 0;
+ packets_map = 0;
+ wait_msec = wait_event_interruptible_timeout(dhdp->wlfc_wqhead,
+ dhdp->wlfc_thread_go, wait_msec);
+ if (kthread_should_stop()) {
+ break;
}
- rc = WLFC_UNSUPPORTED;
- goto exit2;
- }
-
- ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
-
+ dhdp->wlfc_thread_go = FALSE;
- if (dhdp->proptxstatus_module_ignore) {
- if (pktbuf) {
- uint32 htod = 0;
- WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
- _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE);
- if (fcommit(commit_ctx, pktbuf))
- PKTFREE(ctx->osh, pktbuf, TRUE);
- rc = BCME_OK;
- }
- goto exit;
- }
+ dhd_os_wlfc_block(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+#if defined(DHD_WLFC_THREAD)
+ if (!ctx)
+ goto exit;
+#endif /* defined(DHD_WLFC_THREAD) */
memset(&commit_info, 0, sizeof(commit_info));
low priority packet starvation.
*/
- if (pktbuf) {
- DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
- ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
- /* en-queue the packets to respective queue. */
- rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
- if (rc) {
- _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
- } else {
- ctx->stats.pktin++;
- ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
- }
- }
-
for (ac = AC_COUNT; ac >= 0; ac--) {
if (dhdp->wlfc_rxpkt_chk) {
/* check rx packet */
if (ctx->pkt_cnt_per_ac[ac] == 0) {
continue;
}
+
tx_map |= (1 << ac);
single_ac = ac + 1;
while (FALSE == dhdp->proptxstatus_txoff) {
ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
}
/* here we can ensure have credit or no credit needed */
- rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit,
- commit_ctx);
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ ctx->fcommit, ctx->commit_ctx);
/* Bus commits may fail (e.g. flow control); abort after retries */
if (rc == BCME_OK) {
+ pkt_send++;
if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
ctx->FIFO_credit[ac]--;
}
eWLFC_PKTTYPE_SUPPRESSED;
rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
- fcommit, commit_ctx);
+ ctx->fcommit, ctx->commit_ctx);
/* Bus commits may fail (e.g. flow control); abort after retries */
if (rc == BCME_OK) {
-
+ pkt_send++;
if (commit_info.ac_fifo_credit_spent) {
#ifndef LIMIT_BORROW
ctx->FIFO_credit[ac]--;
}
}
+ BCM_REFERENCE(pkt_send);
+
exit:
- if (need_toggle_host_if && ctx->toggle_host_if) {
- ctx->toggle_host_if = 0;
+#if defined(DHD_WLFC_THREAD)
+ dhd_os_wlfc_unblock(dhdp);
+ if (ctx && ctx->pkt_cnt_in_psq && pkt_send) {
+ wait_msec = msecs_to_jiffies(WLFC_THREAD_QUICK_RETRY_WAIT_MS);
+ } else {
+ wait_msec = msecs_to_jiffies(WLFC_THREAD_RETRY_WAIT_MS);
+ }
+ }
+ return 0;
+#else
+ return rc;
+#endif /* defined(DHD_WLFC_THREAD) */
+}
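/*
 * Illustrative sketch (assumption only, based on the wait_event_interruptible_timeout()
 * condition used above; 'example_wlfc_thread_wakeup' is a hypothetical stand-in for
 * _dhd_wlfc_thread_wakeup()): producers set the 'go' flag and wake the wait queue so
 * the DHD_WLFC_THREAD loop re-runs immediately instead of waiting for its timeout.
 */
#if 0	/* example only */
static void
example_wlfc_thread_wakeup(dhd_pub_t *dhdp)
{
	dhdp->wlfc_thread_go = TRUE;
	wake_up(&dhdp->wlfc_wqhead);
}
#endif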
+
+/**
+ * Enqueues a transmit packet in the next layer towards the dongle, eg the DBUS layer. Called by
+ * eg dhd_sendpkt().
+ * @param[in] dhdp Pointer to public DHD structure
+ * @param[in] fcommit Pointer to transmit function of next layer
+ * @param[in] commit_ctx Opaque context used when calling next layer
+ * @param[in] pktbuf Packet to send
+ * @param[in] need_toggle_host_if If TRUE, resets flag ctx->toggle_host_if
+ */
+int
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+ bool need_toggle_host_if)
+{
+ int rc = BCME_OK;
+ athost_wl_status_info_t* ctx;
+
+#if defined(DHD_WLFC_THREAD)
+ if (!pktbuf)
+ return BCME_OK;
+#endif /* defined(DHD_WLFC_THREAD) */
+
+ if ((dhdp == NULL) || (fcommit == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ if (pktbuf) {
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+ }
+ rc = WLFC_UNSUPPORTED;
+ goto exit;
+ }
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+
+ if (dhdp->proptxstatus_module_ignore) {
+ if (pktbuf) {
+ uint32 htod = 0;
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ _dhd_wlfc_pushheader(ctx, &pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+ if (fcommit(commit_ctx, pktbuf)) {
+ /* free it here if the commit failed; otherwise it is freed in the tx complete cb */
+ PKTFREE(ctx->osh, pktbuf, TRUE);
+ }
+ rc = BCME_OK;
+ }
+ goto exit;
+ }
+
+ if (pktbuf) {
+ int ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+ ASSERT(ac <= AC_COUNT);
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+ /* en-queue the packets to respective queue. */
+ rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+ if (rc) {
+ _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+ } else {
+ ctx->stats.pktin++;
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+ }
}
-exit2:
+ if (!ctx->fcommit) {
+ ctx->fcommit = fcommit;
+ } else {
+ ASSERT(ctx->fcommit == fcommit);
+ }
+ if (!ctx->commit_ctx) {
+ ctx->commit_ctx = commit_ctx;
+ } else {
+ ASSERT(ctx->commit_ctx == commit_ctx);
+ }
+
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhdp);
+#else
+ dhd_wlfc_transfer_packets(dhdp);
+#endif /* defined(DHD_WLFC_THREAD) */
+
+exit:
dhd_os_wlfc_unblock(dhdp);
return rc;
-}
+} /* dhd_wlfc_commit_packets */
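/*
 * Illustrative sketch (not part of the driver; 'example_tx_via_wlfc' and
 * 'example_bus_commit' are hypothetical stand-ins for a tx path such as dhd_sendpkt()
 * and the bus transmit callback): the caller hands each packet to wlfc together with
 * the commit function; wlfc queues it per AC and drains the queues inline, or wakes
 * the wlfc thread when DHD_WLFC_THREAD is defined.
 */
#if 0	/* example only */
static int
example_tx_via_wlfc(dhd_pub_t *dhdp, void *bus_ctx, void *pkt)
{
	int rc = dhd_wlfc_commit_packets(dhdp, example_bus_commit, bus_ctx, pkt, FALSE);

	if (rc == WLFC_UNSUPPORTED) {
		/* proptxstatus is not active: hand the packet to the bus directly */
		rc = example_bus_commit(bus_ctx, pkt);
	}
	return rc;
}
#endif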
+/**
+ * Called when the (lower) DBUS layer indicates completion (successful or not) of a transmit packet
+ */
int
dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
{
athost_wl_status_info_t* wlfc;
+ wlfc_mac_descriptor_t *entry;
void* pout = NULL;
int rtn = BCME_OK;
if ((dhd == NULL) || (txp == NULL)) {
return BCME_BADARG;
}
+ bcm_pkt_validate_chk(txp);
+
dhd_os_wlfc_block(dhd);
if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
goto EXIT;
}
- if (!success || dhd->proptxstatus_txstatus_ignore) {
- wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+ entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+ ASSERT(entry);
+ if (!success || dhd->proptxstatus_txstatus_ignore) {
WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
__FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
/* return the credit, if necessary */
_dhd_wlfc_return_implied_credit(wlfc, txp);
- entry->transit_count--;
- if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
- entry->suppressed = FALSE;
- }
+ if (entry->transit_count)
+ entry->transit_count--;
+ if (entry->suppr_transit_count)
+ entry->suppr_transit_count--;
wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
wlfc->stats.pktout++;
PKTFREE(wlfc->osh, txp, TRUE);
} else {
int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
_dhd_wlfc_hanger_free_pkt(wlfc, hslot,
- WLFC_HANGER_PKT_STATE_TXCOMPLETE, -1);
+ WLFC_HANGER_PKT_STATE_BUSRETURNED, -1);
}
}
+ ASSERT(entry->onbus_pkts_count > 0);
+ if (entry->onbus_pkts_count > 0)
+ entry->onbus_pkts_count--;
+ if (entry->suppressed &&
+ (!entry->onbus_pkts_count) &&
+ (!entry->suppr_transit_count))
+ entry->suppressed = FALSE;
EXIT:
dhd_os_wlfc_unblock(dhd);
return rtn;
-}
+} /* dhd_wlfc_txcomplete */
int
dhd_wlfc_init(dhd_pub_t *dhd)
{
- char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
/* enable all signals & indicate host proptxstatus logic is active */
uint32 tlv, mode, fw_caps;
int ret = 0;
dhd_os_wlfc_block(dhd);
if (dhd->wlfc_enabled) {
- DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+ DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
dhd_os_wlfc_unblock(dhd);
return BCME_OK;
}
*/
/* enable proptxtstatus signaling by default */
- bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
- DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
- }
- else {
+ if (!dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
/*
Leaving the message for now, it should be removed after a while; once
the tlv situation is stable.
*/
- DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
dhd->wlfc_enabled?"enabled":"disabled", tlv));
}
+ mode = 0;
+
/* query caps */
- ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
- if (ret > 0) {
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
- }
+ ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0);
- if (ret >= 0) {
- fw_caps = *((uint32 *)iovbuf);
- mode = 0;
- DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+ if (!ret) {
+ DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
if (WLFC_IS_OLD_DEF(fw_caps)) {
/* enable proptxtstatus v2 by default */
WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
}
- ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
- if (ret > 0) {
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- }
+ ret = dhd_wl_ioctl_set_intiovar(dhd, "wlfc_mode", mode, WLC_SET_VAR, TRUE, 0);
}
dhd_os_wlfc_block(dhd);
dhd->wlfc_mode = mode;
}
}
- DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+
+ DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
dhd_os_wlfc_unblock(dhd);
dhd->plat_init((void *)dhd);
return BCME_OK;
-}
+} /* dhd_wlfc_init */
+/** AMPDU host reorder specific function */
int
dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
{
- char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
/* enable only ampdu hostreorder here */
uint32 tlv;
tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
/* enable proptxtstatus signaling by default */
- bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
__FUNCTION__));
- }
- else {
+ } else {
/*
Leaving the message for now, it should be removed after a while; once
the tlv situation is stable.
return BCME_OK;
}
-/* release all packet resources */
+/** release all packet resources */
int
dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
{
int
dhd_wlfc_deinit(dhd_pub_t *dhd)
{
- char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */
/* cleanup all psq related resources */
athost_wl_status_info_t* wlfc;
uint32 tlv = 0;
uint32 hostreorder = 0;
- int ret = BCME_OK;
if (dhd == NULL) {
DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
dhd_os_wlfc_unblock(dhd);
return BCME_OK;
}
+
dhd->wlfc_enabled = FALSE;
dhd_os_wlfc_unblock(dhd);
/* query ampdu hostreorder */
- bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
- if (ret == BCME_OK)
- hostreorder = *((uint32 *)iovbuf);
- else {
- hostreorder = 0;
- DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n",
- __FUNCTION__, __LINE__, ret));
- }
+ (void) dhd_wl_ioctl_get_intiovar(dhd, "ampdu_hostreorder",
+ &hostreorder, WLC_GET_VAR, FALSE, 0);
if (hostreorder) {
tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
}
/* Disable proptxtstatus signaling for deinit */
- bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
-
- if (ret == BCME_OK) {
- /*
- Leaving the message for now, it should be removed after a while; once
- the tlv situation is stable.
- */
- DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n",
- __FUNCTION__, __LINE__,
- dhd->wlfc_enabled?"enabled":"disabled", tlv));
- } else
- DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n",
- __FUNCTION__, __LINE__, ret));
+ (void) dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0);
dhd_os_wlfc_block(dhd);
wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
-#ifdef PROP_TXSTATUS_DEBUG
- if (!WLFC_GET_AFQ(dhd->wlfc_mode))
- {
+ _dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
int i;
wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
for (i = 0; i < h->max_items; i++) {
if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
- WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
- __FUNCTION__, i, h->items[i].pkt,
- DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+ _dhd_wlfc_hanger_free_pkt(wlfc, i,
+ WLFC_HANGER_PKT_STATE_COMPLETE, TRUE);
}
}
- }
-#endif
-
- _dhd_wlfc_cleanup(dhd, NULL, NULL);
- if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
/* delete hanger */
- _dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+ _dhd_wlfc_hanger_delete(dhd, h);
}
if (dhd->plat_deinit)
dhd->plat_deinit((void *)dhd);
return BCME_OK;
-}
+} /* dhd_wlfc_deinit */
+/**
+ * Called on an interface event (WLC_E_IF) indicated by firmware
+ * @param[in] dhdp Pointer to public DHD structure
+ * @param[in] action eg eWLFC_MAC_ENTRY_ACTION_UPDATE or eWLFC_MAC_ENTRY_ACTION_ADD
+ */
int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
{
int rc;
return rc;
}
+/** Called eg on receiving a WLC_E_FIFO_CREDIT_MAP event from the dongle */
int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
{
int rc;
return rc;
}
+/**
+ * Called eg on receiving a WLC_E_BCMC_CREDIT_SUPPORT event from the dongle (broadcast/multicast
+ * specific)
+ */
int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
{
int rc;
return rc;
}
+/** debug specific function */
int
dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
? " OFF":" ON"));
- bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)"
- "= (%d,%s,%d),(%d,%d)\n",
+ bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),"
+ "(trans,supp_trans,onbus)"
+ "= (%d,%s,%d),(%d,%d,%d)\n",
i,
interfaces[i].psq.len,
((interfaces[i].state ==
WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
interfaces[i].requested_credit,
- interfaces[i].transit_count, interfaces[i].suppr_transit_count);
+ interfaces[i].transit_count,
+ interfaces[i].suppr_transit_count,
+ interfaces[i].onbus_pkts_count);
bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
mac_table[i].interface_id);
- bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)"
- "= (%d,%s,%d),(%d,%d)\n",
+ bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),"
+ "(trans,supp_trans,onbus)"
+ "= (%d,%s,%d),(%d,%d,%d)\n",
i,
mac_table[i].psq.len,
((mac_table[i].state ==
WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
mac_table[i].requested_credit,
- mac_table[i].transit_count, mac_table[i].suppr_transit_count);
+ mac_table[i].transit_count,
+ mac_table[i].suppr_transit_count,
+ mac_table[i].onbus_pkts_count);
#ifdef PROP_TXSTATUS_DEBUG
bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
i, mac_table[i].opened_ct, mac_table[i].closed_ct);
dhd_os_wlfc_unblock(dhdp);
return BCME_OK;
-}
+} /* dhd_wlfc_dump */
int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
{
return BCME_OK;
}
+/** returns TRUE if flow control is enabled */
int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** Called via an IOVAR */
int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** Called via an IOVAR */
int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
{
if (!dhd) {
return BCME_OK;
}
+/** Called when rx frame is received from the dongle */
bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
{
athost_wl_status_info_t* wlfc;
dhd_os_wlfc_unblock(dhdp);
}
+#if defined(DHD_WLFC_THREAD)
+ _dhd_wlfc_thread_wakeup(dhd);
+#endif /* defined(DHD_WLFC_THREAD) */
+
return BCME_OK;
}
+/** Called when eg an rx frame is received from the dongle */
int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
{
athost_wl_status_info_t* wlfc;
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
{
- char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
uint32 tlv = 0;
bool bChanged = FALSE;
if (bChanged) {
/* select enable proptxtstatus signaling */
- bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ if (dhd_wl_ioctl_set_intiovar(dhd, "tlv", tlv, WLC_SET_VAR, TRUE, 0)) {
DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
__FUNCTION__, tlv));
- }
- else {
+ } else {
DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
__FUNCTION__, tlv));
}
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
{
if (!dhd) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
{
if (!dhd) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
{
if (!dhd || !val) {
return BCME_OK;
}
+/** called via an IOVAR */
int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
{
if (!dhd) {
return BCME_OK;
}
+
#endif /* PROP_TXSTATUS */
/*
-* $Copyright Open 2009 Broadcom Corporation$
-* $Id: dhd_wlfc.h 501046 2014-09-06 01:25:16Z $
-*
-*/
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_wlfc.h 557035 2015-05-15 18:48:57Z $
+ *
+ */
#ifndef __wlfc_host_driver_definitions_h__
#define __wlfc_host_driver_definitions_h__
-#ifdef QMONITOR
-#include <dhd_qmon.h>
-#endif
/* #define OOO_DEBUG */
+#define KERNEL_THREAD_RETURN_TYPE int
+
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
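/*
 * A minimal illustrative sketch (names prefixed "example_" are hypothetical,
 * not part of this driver): the bus layer supplies a commit callback matching
 * f_commitpkt_t, which wlfc invokes to hand a packet down to the bus, while
 * f_processpkt_t callbacks are used as filters when cleaning up queued packets:
 *
 *	static int example_bus_commit(void *ctx, void *pkt)
 *	{
 *		struct example_bus *bus = (struct example_bus *)ctx;
 *		return example_bus_tx_enqueue(bus, pkt) ? BCME_ERROR : BCME_OK;
 *	}
 *
 *	dhd_wlfc_commit_packets(dhdp, example_bus_commit, bus, pkt, FALSE);
 */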
#define WLFC_UNSUPPORTED -9999
#define WLFC_NO_TRAFFIC -1
#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
-/* 16 bits will provide an absolute max of 65536 slots */
+/** 16 bits will provide an absolute max of 65536 slots */
#define WLFC_HANGER_MAXITEMS 3072
#define WLFC_HANGER_ITEM_STATE_FREE 1
#define WLFC_HANGER_ITEM_STATE_INUSE 2
#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
+#define WLFC_HANGER_ITEM_STATE_FLUSHED 4
#define WLFC_HANGER_PKT_STATE_TXSTATUS 1
-#define WLFC_HANGER_PKT_STATE_TXCOMPLETE 2
-#define WLFC_HANGER_PKT_STATE_CLEANUP 4
+#define WLFC_HANGER_PKT_STATE_BUSRETURNED 2
+#define WLFC_HANGER_PKT_STATE_COMPLETE \
+ (WLFC_HANGER_PKT_STATE_TXSTATUS | WLFC_HANGER_PKT_STATE_BUSRETURNED)
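/*
 * Illustrative sketch only: a hanger item is considered complete once both the
 * dongle txstatus and the bus tx completion have been recorded in pkt_state,
 * e.g.
 *
 *	item->pkt_state |= WLFC_HANGER_PKT_STATE_TXSTATUS;	// txstatus seen
 *	item->pkt_state |= WLFC_HANGER_PKT_STATE_BUSRETURNED;	// bus returned pkt
 *	if ((item->pkt_state & WLFC_HANGER_PKT_STATE_COMPLETE) ==
 *	    WLFC_HANGER_PKT_STATE_COMPLETE)
 *		;	// packet is typically safe to complete/free at this point
 */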
typedef enum {
- Q_TYPE_PSQ,
- Q_TYPE_AFQ
+ Q_TYPE_PSQ, /**< Power Save Queue, contains both delayed and suppressed packets */
+ Q_TYPE_AFQ /**< At Firmware Queue */
} q_type_t;
typedef enum ewlfc_packet_state {
- eWLFC_PKTTYPE_NEW,
- eWLFC_PKTTYPE_DELAYED,
- eWLFC_PKTTYPE_SUPPRESSED,
+ eWLFC_PKTTYPE_NEW, /**< unused in the code (Jan 2015) */
+ eWLFC_PKTTYPE_DELAYED, /**< packet did not enter wlfc yet */
+ eWLFC_PKTTYPE_SUPPRESSED, /**< packet entered wlfc and was suppressed by the dongle */
eWLFC_PKTTYPE_MAX
} ewlfc_packet_state_t;
typedef struct wlfc_hanger_item {
uint8 state;
uint8 gen;
- uint8 pkt_state;
+	uint8 pkt_state;		/**< bitmask containing eg WLFC_HANGER_PKT_STATE_TXSTATUS */
uint8 pkt_txstatus;
uint32 identifier;
void* pkt;
struct wlfc_hanger_item *next;
} wlfc_hanger_item_t;
+/** hanger contains packets that have been posted by the dhd to the dongle and are expected back */
typedef struct wlfc_hanger {
int max_items;
uint32 pushed;
#define WLFC_HANGER_SIZE(n) ((sizeof(wlfc_hanger_t) - \
sizeof(wlfc_hanger_item_t)) + ((n)*sizeof(wlfc_hanger_item_t)))
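/*
 * Illustrative sketch only (the allocation call is an assumption, not the
 * driver's exact code): a hanger sized for n in-flight packets is one
 * contiguous allocation, e.g.
 *
 *	wlfc_hanger_t *h = MALLOCZ(osh, WLFC_HANGER_SIZE(WLFC_HANGER_MAXITEMS));
 *	if (h != NULL)
 *		h->max_items = WLFC_HANGER_MAXITEMS;
 */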
-#define WLFC_STATE_OPEN 1
-#define WLFC_STATE_CLOSE 2
+#define WLFC_STATE_OPEN 1 /**< remote MAC is able to receive packets */
+#define WLFC_STATE_CLOSE 2 /**< remote MAC is in power save mode */
-#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /**< 2 for each AC traffic and bc/mc */
#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1)
#define WLFC_PSQ_LEN 2048
#define WLFC_FLOWCONTROL_HIWATER (2048 - 256)
#define WLFC_FLOWCONTROL_LOWATER 256
+#if (WLFC_FLOWCONTROL_HIWATER >= (WLFC_PSQ_LEN - 256))
+#undef WLFC_FLOWCONTROL_HIWATER
+#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256)
+#undef WLFC_FLOWCONTROL_LOWATER
+#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4)
+#endif
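/*
 * Worked example with the defaults above: WLFC_PSQ_LEN is 2048, so the guard
 * redefines WLFC_FLOWCONTROL_HIWATER to 2048 - 256 = 1792 and
 * WLFC_FLOWCONTROL_LOWATER to 1792 / 4 = 448; these marks are used to throttle
 * the host network interface when a PSQ grows past the high water mark and to
 * release it once the queue drains below the low water mark.
 */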
+
#define WLFC_LOG_BUF_SIZE (1024*1024)
+/** Properties related to a remote MAC entity */
typedef struct wlfc_mac_descriptor {
- uint8 occupied;
+ uint8 occupied; /**< if 0, this descriptor is unused and thus can be (re)used */
uint8 interface_id;
- uint8 iftype;
- uint8 state;
- uint8 ac_bitmap; /* for APSD */
+ uint8 iftype; /**< eg WLC_E_IF_ROLE_STA */
+ uint8 state; /**< eg WLFC_STATE_OPEN */
+ uint8 ac_bitmap; /**< automatic power save delivery (APSD) */
uint8 requested_credit;
- uint8 requested_packet;
+ uint8 requested_packet; /**< unit: [number of packets] */
uint8 ea[ETHER_ADDR_LEN];
- /*
- maintain (MAC,AC) based seq count for
- packets going to the device. As well as bc/mc.
- */
+
+ /** maintain (MAC,AC) based seq count for packets going to the device. As well as bc/mc. */
uint8 seq[AC_COUNT + 1];
- uint8 generation;
- struct pktq psq;
- /* packets at firmware */
+ uint8 generation; /**< toggles between 0 and 1 */
+ struct pktq psq; /**< contains both 'delayed' and 'suppressed' packets */
+ /** packets at firmware queue */
struct pktq afq;
- /* The AC pending bitmap that was reported to the fw at last change */
+ /** The AC pending bitmap that was reported to the fw at last change */
uint8 traffic_lastreported_bmp;
- /* The new AC pending bitmap */
+ /** The new AC pending bitmap */
uint8 traffic_pending_bmp;
- /* 1= send on next opportunity */
+ /** 1= send on next opportunity */
uint8 send_tim_signal;
- uint8 mac_handle;
- /* Number of packets at dongle for this entry. */
+ uint8 mac_handle; /**< mac handles are assigned by the dongle */
+ /** Number of packets at dongle for this entry. */
int transit_count;
- /* Numbe of suppression to wait before evict from delayQ */
+	/** Number of suppressions to wait for before evicting from the delayQ */
int suppr_transit_count;
- /* flag. TRUE when in suppress state */
+ /** pkt sent to bus but no bus TX complete yet */
+ int onbus_pkts_count;
+ /** flag. TRUE when remote MAC is in suppressed state */
uint8 suppressed;
-#ifdef QMONITOR
- dhd_qmon_t qmon;
-#endif /* QMONITOR */
#ifdef PROP_TXSTATUS_DEBUG
uint32 dstncredit_sent_packets;
struct wlfc_mac_descriptor* next;
} wlfc_mac_descriptor_t;
+/** A 'commit' is the hand over of a packet from the host OS layer to the layer below (eg DBUS) */
typedef struct dhd_wlfc_commit_info {
uint8 needs_hdr;
uint8 ac_fifo_credit_spent;
uint32 drop_pkts[WLFC_PSQ_PREC_COUNT];
uint32 ooo_pkts[AC_COUNT + 1];
#ifdef PROP_TXSTATUS_DEBUG
- /* all pkt2bus -> txstatus latency accumulated */
+ /** all pkt2bus -> txstatus latency accumulated */
uint32 latency_sample_count;
uint32 total_status_latency;
uint32 latency_most_recent;
#define WLFC_FCMODE_EXPLICIT_CREDIT 2
#define WLFC_ONLY_AMPDU_HOSTREORDER 3
-/* Reserved credits ratio when borrowed by hihger priority */
+/** Reserved credits ratio when borrowed by higher priority */
#define WLFC_BORROW_LIMIT_RATIO 4
-/* How long to defer borrowing in milliseconds */
+/** How long to defer borrowing in milliseconds */
#define WLFC_BORROW_DEFER_PERIOD_MS 100
-/* How long to defer flow control in milliseconds */
+/** How long to defer flow control in milliseconds */
#define WLFC_FC_DEFER_PERIOD_MS 200
-/* How long to detect occurance per AC in miliseconds */
+/** How long to detect occurrence per AC in milliseconds */
#define WLFC_RX_DETECTION_THRESHOLD_MS 100
-/* Mask to represent available ACs (note: BC/MC is ignored */
+/** Mask to represent available ACs (note: BC/MC is ignored) */
#define WLFC_AC_MASK 0xF
+/** flow control specific information, only 1 instance during driver lifetime */
typedef struct athost_wl_status_info {
uint8 last_seqid_to_wlc;
- /* OSL handle */
- osl_t* osh;
- /* dhd pub */
- void* dhdp;
+ /** OSL handle */
+ osl_t *osh;
+ /** dhd public struct pointer */
+ void *dhdp;
+
+ f_commitpkt_t fcommit;
+ void* commit_ctx;
- /* stats */
+ /** statistics */
athost_wl_stat_counters_t stats;
+ /** incremented on eg receiving a credit map event from the dongle */
int Init_FIFO_credit[AC_COUNT + 2];
-
- /* the additional ones are for bc/mc and ATIM FIFO */
+ /** the additional ones are for bc/mc and ATIM FIFO */
int FIFO_credit[AC_COUNT + 2];
-
- /* Credit borrow counts for each FIFO from each of the other FIFOs */
+ /** Credit borrow counts for each FIFO from each of the other FIFOs */
int credits_borrowed[AC_COUNT + 2][AC_COUNT + 2];
- /* packet hanger and MAC->handle lookup table */
- void* hanger;
+ /** packet hanger and MAC->handle lookup table */
+ void *hanger;
+
struct {
- /* table for individual nodes */
+ /** table for individual nodes */
wlfc_mac_descriptor_t nodes[WLFC_MAC_DESC_TABLE_SIZE];
- /* table for interfaces */
+ /** table for interfaces */
wlfc_mac_descriptor_t interfaces[WLFC_MAX_IFNUM];
/* OS may send packets to unknown (unassociated) destinations */
- /* A place holder for bc/mc and packets to unknown destinations */
+ /** A place holder for bc/mc and packets to unknown destinations */
wlfc_mac_descriptor_t other;
} destination_entries;
- wlfc_mac_descriptor_t *active_entry_head;
+ wlfc_mac_descriptor_t *active_entry_head; /**< a chain of MAC descriptors */
int active_entry_count;
- wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+ wlfc_mac_descriptor_t *requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
int requested_entry_count;
/* pkt counts for each interface and ac */
int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
int pkt_cnt_per_ac[AC_COUNT+1];
int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
- uint8 allow_fc;
+ int pkt_cnt_in_psq;
+ uint8 allow_fc; /**< Boolean */
uint32 fc_defer_timestamp;
uint32 rx_timestamp[AC_COUNT+1];
- /* ON/OFF state for flow control to the host network interface */
+
+ /** ON/OFF state for flow control to the host network interface */
uint8 hostif_flow_state[WLFC_MAX_IFNUM];
uint8 host_ifidx;
- /* to flow control an OS interface */
+
+ /** to flow control an OS interface */
uint8 toggle_host_if;
- /* To borrow credits */
+ /** To borrow credits */
uint8 allow_credit_borrow;
- /* ac number for the first single ac traffic */
+ /** ac number for the first single ac traffic */
uint8 single_ac;
- /* Timestamp for the first single ac traffic */
+ /** Timestamp for the first single ac traffic */
uint32 single_ac_timestamp;
bool bcmc_credit_supported;
} athost_wl_status_info_t;
-/* Please be mindful that total pkttag space is 32 octets only */
+/** Please be mindful that total pkttag space is 32 octets only */
typedef struct dhd_pkttag {
- /*
+
+#ifdef BCM_OBJECT_TRACE
+ /* if use this field, keep it at the first 4 bytes */
+ uint32 sn;
+#endif /* BCM_OBJECT_TRACE */
+
+ /**
b[15] - 1 = wlfc packet
b[14:13] - encryption exemption
b[12 ] - 1 = event channel
b[3:0] - interface index
*/
uint16 if_flags;
- /* destination MAC address for this packet so that not every
- module needs to open the packet to find this
- */
+
+ /**
+ * destination MAC address for this packet so that not every module needs to open the packet
+ * to find this
+ */
uint8 dstn_ether[ETHER_ADDR_LEN];
- /*
- This 32-bit goes from host to device for every packet.
- */
+
+ /** This 32-bit goes from host to device for every packet. */
uint32 htod_tag;
- /*
- This 16-bit is original seq number for every suppress packet.
- */
+	/** This 16-bit field is the original seq number for every suppressed packet. */
uint16 htod_seq;
- /*
- This address is mac entry for every packet.
- */
- void* entry;
- /* bus specific stuff */
+	/** This address is the MAC descriptor entry for each packet. */
+ void *entry;
+
+ /** bus specific stuff */
union {
struct {
- void* stuff;
+ void *stuff;
uint32 thing1;
uint32 thing2;
} sd;
+
struct {
- void* bus;
- void* urb;
+ void *bus;
+ void *urb;
} usb;
} bus_specific;
} dhd_pkttag_t;
#define PSQ_SUP_IDX(x) (x * 2 + 1)
#define PSQ_DLY_IDX(x) (x * 2)
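/*
 * Illustrative mapping: each access category x owns an adjacent (delay,
 * suppress) pair of precedences inside the PSQ, e.g. for AC index 2:
 *
 *	PSQ_DLY_IDX(2) == 4	// delayed packets for AC index 2
 *	PSQ_SUP_IDX(2) == 5	// suppressed packets for AC index 2
 *
 * which is why WLFC_PSQ_PREC_COUNT is (AC_COUNT + 1) * 2.
 */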
-typedef int (*f_commitpkt_t)(void* ctx, void* p);
-typedef bool (*f_processpkt_t)(void* p, void* arg);
-
#ifdef PROP_TXSTATUS_DEBUG
#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
#endif
+#ifdef BCM_OBJECT_TRACE
+#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val)
+#define DHD_PKTTAG_SN(tag) (((dhd_pkttag_t*)(tag))->sn)
+#endif /* BCM_OBJECT_TRACE */
+
/* public functions */
int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
uchar *reorder_info_buf, uint *reorder_info_len);
+KERNEL_THREAD_RETURN_TYPE dhd_wlfc_transfer_packets(void *data);
int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
+
#endif /* __wlfc_host_driver_definitions_h__ */
* Common stats definitions for clients of dongle
* ports
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dngl_stats.h 464743 2014-03-25 21:04:32Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dngl_stats.h 523030 2014-12-25 17:28:07Z $
*/
#ifndef _dngl_stats_h_
#define _dngl_stats_h_
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+
typedef struct {
unsigned long rx_packets; /* total packets received */
unsigned long tx_packets; /* total packets transmitted */
unsigned long multicast; /* multicast packets received */
} dngl_stats_t;
+typedef int32 wifi_radio;
+typedef int32 wifi_channel;
+typedef int32 wifi_rssi;
+typedef struct { uint16 version; uint16 length; } ver_len;
+
+typedef enum wifi_channel_width {
+ WIFI_CHAN_WIDTH_20 = 0,
+ WIFI_CHAN_WIDTH_40 = 1,
+ WIFI_CHAN_WIDTH_80 = 2,
+ WIFI_CHAN_WIDTH_160 = 3,
+ WIFI_CHAN_WIDTH_80P80 = 4,
+ WIFI_CHAN_WIDTH_5 = 5,
+ WIFI_CHAN_WIDTH_10 = 6,
+ WIFI_CHAN_WIDTH_INVALID = -1
+} wifi_channel_width_t;
+
+typedef enum {
+ WIFI_DISCONNECTED = 0,
+ WIFI_AUTHENTICATING = 1,
+ WIFI_ASSOCIATING = 2,
+ WIFI_ASSOCIATED = 3,
+ WIFI_EAPOL_STARTED = 4, /* if done by firmware/driver */
+ WIFI_EAPOL_COMPLETED = 5, /* if done by firmware/driver */
+} wifi_connection_state;
+
+typedef enum {
+ WIFI_ROAMING_IDLE = 0,
+ WIFI_ROAMING_ACTIVE = 1
+} wifi_roam_state;
+
+typedef enum {
+ WIFI_INTERFACE_STA = 0,
+ WIFI_INTERFACE_SOFTAP = 1,
+ WIFI_INTERFACE_IBSS = 2,
+ WIFI_INTERFACE_P2P_CLIENT = 3,
+ WIFI_INTERFACE_P2P_GO = 4,
+ WIFI_INTERFACE_NAN = 5,
+ WIFI_INTERFACE_MESH = 6
+} wifi_interface_mode;
+
+#define WIFI_CAPABILITY_QOS 0x00000001 /* set for QOS association */
+#define WIFI_CAPABILITY_PROTECTED 0x00000002 /* set for protected association (802.11
+ * beacon frame control protected bit set)
+ */
+#define WIFI_CAPABILITY_INTERWORKING 0x00000004 /* set if 802.11 Extended Capabilities
+ * element interworking bit is set
+ */
+#define WIFI_CAPABILITY_HS20 0x00000008 /* set for HS20 association */
+#define WIFI_CAPABILITY_SSID_UTF8	0x00000010	/* set if 802.11 Extended Capabilities
+ * element UTF-8 SSID bit is set
+ */
+#define WIFI_CAPABILITY_COUNTRY	0x00000020	/* set if 802.11 Country Element is present */
+
+typedef struct {
+ wifi_interface_mode mode; /* interface mode */
+ uint8 mac_addr[6]; /* interface mac address (self) */
+ wifi_connection_state state; /* connection state (valid for STA, CLI only) */
+ wifi_roam_state roaming; /* roaming state */
+ uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */
+ uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
+ uint8 bssid[ETHER_ADDR_LEN]; /* bssid */
+ uint8 ap_country_str[3]; /* country string advertised by AP */
+ uint8 country_str[3]; /* country string for this association */
+} wifi_interface_info;
+
+typedef wifi_interface_info *wifi_interface_handle;
+
+/* channel information */
+typedef struct {
+ wifi_channel_width_t width; /* channel width (20, 40, 80, 80+80, 160) */
+ wifi_channel center_freq; /* primary 20 MHz channel */
+ wifi_channel center_freq0; /* center frequency (MHz) first segment */
+ wifi_channel center_freq1; /* center frequency (MHz) second segment */
+} wifi_channel_info;
+
+/* wifi rate */
+typedef struct {
+ uint32 preamble; /* 0: OFDM, 1:CCK, 2:HT 3:VHT 4..7 reserved */
+ uint32 nss; /* 0:1x1, 1:2x2, 3:3x3, 4:4x4 */
+ uint32 bw; /* 0:20MHz, 1:40Mhz, 2:80Mhz, 3:160Mhz */
+ uint32 rateMcsIdx; /* OFDM/CCK rate code would be as per ieee std
+ * in the units of 0.5mbps
+ */
+ /* HT/VHT it would be mcs index */
+ uint32 reserved; /* reserved */
+ uint32 bitrate; /* units of 100 Kbps */
+} wifi_rate;
+
+/* channel statistics */
+typedef struct {
+ wifi_channel_info channel; /* channel */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number
+ * accruing over time)
+ */
+} wifi_channel_stat;
+
+/* radio statistics */
+typedef struct {
+ struct {
+ uint16 version;
+ uint16 length;
+ };
+ wifi_radio radio; /* wifi radio (if multiple radio supported) */
+ uint32 on_time; /* msecs the radio is awake (32 bits number
+ * accruing over time)
+ */
+ uint32 tx_time; /* msecs the radio is transmitting (32 bits
+ * number accruing over time)
+ */
+ uint32 rx_time; /* msecs the radio is in active receive (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_scan; /* msecs the radio is awake due to all scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_nbd; /* msecs the radio is awake due to NAN (32 bits
+ * number accruing over time)
+ */
+	uint32 on_time_gscan;	/* msecs the radio is awake due to GSCAN (32 bits
+ * number accruing over time)
+ */
+	uint32 on_time_roam_scan;	/* msecs the radio is awake due to roam scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_pno_scan; /* msecs the radio is awake due to PNO scan (32 bits
+ * number accruing over time)
+ */
+ uint32 on_time_hs20; /* msecs the radio is awake due to HS2.0 scans and
+ * GAS exchange (32 bits number accruing over time)
+ */
+ uint32 num_channels; /* number of channels */
+ wifi_channel_stat channels[1]; /* channel statistics */
+} wifi_radio_stat;
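/*
 * Note on the variable-length tails: channels[1] here (and rate_stats[1] /
 * peer_info[1] further below) follow the old one-element trailing array idiom,
 * so a report covering n channels is sized roughly as, for example,
 *
 *	size_t sz = sizeof(wifi_radio_stat) +
 *	            (n - 1) * sizeof(wifi_channel_stat);
 */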
+
+/* per rate statistics */
+typedef struct {
+ struct {
+ uint16 version;
+ uint16 length;
+ };
+ uint32 tx_mpdu; /* number of successfully transmitted data pkts (ACK rcvd) */
+ uint32 rx_mpdu; /* number of received data pkts */
+ uint32 mpdu_lost; /* number of data packet losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ wifi_rate rate; /* rate information */
+} wifi_rate_stat;
+
+/* access categories */
+typedef enum {
+ WIFI_AC_VO = 0,
+ WIFI_AC_VI = 1,
+ WIFI_AC_BE = 2,
+ WIFI_AC_BK = 3,
+ WIFI_AC_MAX = 4
+} wifi_traffic_ac;
+
+/* wifi peer type */
+typedef enum
+{
+ WIFI_PEER_STA,
+ WIFI_PEER_AP,
+ WIFI_PEER_P2P_GO,
+ WIFI_PEER_P2P_CLIENT,
+ WIFI_PEER_NAN,
+ WIFI_PEER_TDLS,
+ WIFI_PEER_INVALID
+} wifi_peer_type;
+
+/* per peer statistics */
+typedef struct {
+ wifi_peer_type type; /* peer type (AP, TDLS, GO etc.) */
+ uint8 peer_mac_address[6]; /* mac address */
+ uint32 capabilities; /* peer WIFI_CAPABILITY_XXX */
+ uint32 num_rate; /* number of rates */
+ wifi_rate_stat rate_stats[1]; /* per rate statistics, number of entries = num_rate */
+} wifi_peer_info;
+
+/* per access category statistics */
+typedef struct {
+ wifi_traffic_ac ac; /* access category (VI, VO, BE, BK) */
+ uint32 tx_mpdu; /* number of successfully transmitted unicast data pkts
+ * (ACK rcvd)
+ */
+ uint32 rx_mpdu; /* number of received unicast mpdus */
+	uint32 tx_mcast;	/* number of successfully transmitted multicast
+ * data packets
+ */
+ /* STA case: implies ACK received from AP for the
+ * unicast packet in which mcast pkt was sent
+ */
+ uint32 rx_mcast; /* number of received multicast data packets */
+ uint32 rx_ampdu; /* number of received unicast a-mpdus */
+ uint32 tx_ampdu; /* number of transmitted unicast a-mpdus */
+ uint32 mpdu_lost; /* number of data pkt losses (no ACK) */
+ uint32 retries; /* total number of data pkt retries */
+ uint32 retries_short; /* number of short data pkt retries */
+ uint32 retries_long; /* number of long data pkt retries */
+ uint32 contention_time_min; /* data pkt min contention time (usecs) */
+ uint32 contention_time_max; /* data pkt max contention time (usecs) */
+ uint32 contention_time_avg; /* data pkt avg contention time (usecs) */
+ uint32 contention_num_samples; /* num of data pkts used for contention statistics */
+} wifi_wmm_ac_stat;
+
+/* interface statistics */
+typedef struct {
+ wifi_interface_handle iface; /* wifi interface */
+ wifi_interface_info info; /* current state of the interface */
+ uint32 beacon_rx; /* access point beacon received count from
+ * connected AP
+ */
+ uint32 mgmt_rx; /* access point mgmt frames received count from
+ * connected AP (including Beacon)
+ */
+ uint32 mgmt_action_rx; /* action frames received count */
+ uint32 mgmt_action_tx; /* action frames transmit count */
+ wifi_rssi rssi_mgmt; /* access Point Beacon and Management frames RSSI
+ * (averaged)
+ */
+ wifi_rssi rssi_data; /* access Point Data Frames RSSI (averaged) from
+ * connected AP
+ */
+ wifi_rssi rssi_ack; /* access Point ACK RSSI (averaged) from
+ * connected AP
+ */
+ wifi_wmm_ac_stat ac[WIFI_AC_MAX]; /* per ac data packet statistics */
+ uint32 num_peers; /* number of peers */
+ wifi_peer_info peer_info[1]; /* per peer statistics */
+} wifi_iface_stat;
+
#endif /* _dngl_stats_h_ */
/*
* Dongle WL Header definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dngl_wlhdr.h 464743 2014-03-25 21:04:32Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dngl_wlhdr.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _dngl_wlhdr_h_
/*
* HND generic packet pool operation primitives
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktpool.c 591285 2015-10-07 11:56:29Z $
*/
#include <typedefs.h>
#include <osl.h>
+#include <osl_ext.h>
#include <bcmutils.h>
#include <hnd_pktpool.h>
+/* mutex macros for thread safe */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
+#define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
+#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
+#endif
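/*
 * A minimal sketch of the locking pattern used throughout this file: every
 * externally visible pool operation brackets its critical section with these
 * macros, which collapse to OSL_EXT_SUCCESS no-ops when
 * HND_PKTPOOL_THREAD_SAFE is not defined:
 *
 *	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
 *		return BCME_ERROR;
 *	// ... touch shared pktp state ...
 *	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
 *		return BCME_ERROR;
 */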
+
/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID (0U)
#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
#define PKTPOOL_REGISTRY_FOREACH(id) \
for ((id) = 1U; (id) <= pktpools_max; (id)++)
+enum pktpool_empty_cb_state {
+ EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */
+ EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */
+ EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */
+};
+
uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);
+/** forward declaration */
+static int pktpool_avail_notify(pktpool_t *pktp);
+
/** accessor functions required when ROMming this file, forced into RAM */
+
+
+pktpool_t *
+BCMRAMFN(get_pktpools_registry)(int id)
+{
+ return pktpools_registry[id];
+}
+
static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
pktp->plen = (uint16)plen;
pktp->type = type;
+ if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
+ return BCME_ERROR;
+ }
+
pktp->maxlen = PKTPOOL_LEN_MAX;
pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
pktpool_deregister(pktp); /* release previously acquired unique pool id */
POOLSETID(pktp, PKTPOOL_INVALID_ID);
+ if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
pktp->inited = FALSE;
/* Are there still pending pkts? */
int err = 0;
int len, psize, maxlen;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(pktp->plen != 0);
maxlen = pktp->maxlen;
}
}
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ if (pktp->cbcnt) {
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ }
+
return err;
}
static void *
pktpool_deq(pktpool_t *pktp)
{
- void *p;
+ void *p = NULL;
if (pktp->avail == 0)
return NULL;
ASSERT(cb != NULL);
+ if (pktp == NULL)
+ return BCME_ERROR;
ASSERT(pktp->rxcplidfn.cb == NULL);
pktp->rxcplidfn.cb = cb;
pktp->rxcplidfn.arg = arg;
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
+ int err = 0;
int i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(cb != NULL);
i = pktp->cbcnt;
- if (i == PKTPOOL_CB_MAX)
- return BCME_ERROR;
+ if (i == PKTPOOL_CB_MAX_AVL) {
+ err = BCME_ERROR;
+ goto done;
+ }
ASSERT(pktp->cbs[i].cb == NULL);
pktp->cbs[i].cb = cb;
pktp->cbs[i].arg = arg;
pktp->cbcnt++;
- return 0;
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
}
int
pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
+ int err = 0;
int i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(cb != NULL);
i = pktp->ecbcnt;
- if (i == PKTPOOL_CB_MAX)
- return BCME_ERROR;
+ if (i == PKTPOOL_CB_MAX) {
+ err = BCME_ERROR;
+ goto done;
+ }
ASSERT(pktp->ecbs[i].cb == NULL);
pktp->ecbs[i].cb = cb;
pktp->ecbs[i].arg = arg;
pktp->ecbcnt++;
- return 0;
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
}
static int
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
+ int err = 0;
int i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(cb);
i = pktp->dbg_cbcnt;
- if (i == PKTPOOL_CB_MAX)
- return BCME_ERROR;
+ if (i == PKTPOOL_CB_MAX) {
+ err = BCME_ERROR;
+ goto done;
+ }
ASSERT(pktp->dbg_cbs[i].cb == NULL);
pktp->dbg_cbs[i].cb = cb;
pktp->dbg_cbs[i].arg = arg;
pktp->dbg_cbcnt++;
- return 0;
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
}
int pktpool_dbg_notify(pktpool_t *pktp)
{
int i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
for (i = 0; i < pktp->dbg_cbcnt; i++) {
ASSERT(pktp->dbg_cbs[i].cb);
pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
}
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
{
int i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p);
pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
}
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
int i;
int state;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
bzero(stats, sizeof(pktpool_stats_t));
for (i = 0; i < pktp->dbg_qlen; i++) {
ASSERT(pktp->dbg_q[i].p != NULL);
}
}
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
{
uint32 cycles, i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
if (!PKTPOOL(OSH_NULL, p))
- return 0;
+ goto done;
OSL_GETCYCLES(cycles);
}
}
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
{
uint32 cycles, i;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
if (!PKTPOOL(OSH_NULL, p))
- return 0;
+ goto done;
OSL_GETCYCLES(cycles);
}
}
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
#endif /* BCMDBG_POOL */
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
ASSERT(pktp);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
pktp->availcb_excl = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return 0;
}
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
int i;
+ int err;
ASSERT(pktp);
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(pktp->availcb_excl == NULL);
for (i = 0; i < pktp->cbcnt; i++) {
if (cb == pktp->cbs[i].cb) {
}
if (pktp->availcb_excl == NULL)
- return BCME_ERROR;
+ err = BCME_ERROR;
else
- return 0;
+ err = 0;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
}
static int
{
void *p;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+
p = pktpool_deq(pktp);
if (p == NULL) {
p = pktpool_deq(pktp);
if (p == NULL)
- return NULL;
+ goto done;
}
+
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
void
pktpool_free(pktpool_t *pktp, void *p)
{
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
ASSERT(p != NULL);
#ifdef BCMDBG_POOL
/* pktpool_stop_trigger(pktp, p); */
pktpool_enq(pktp, p);
- if (pktp->emptycb_disable)
- return;
-
+ /**
+ * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
+ * If any avail callback functions are registered, send a notification
+ * that a new packet is available in the pool.
+ */
if (pktp->cbcnt) {
- if (pktp->empty == FALSE)
- pktpool_avail_notify(pktp);
+ /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
+	 * This allows feeding on a burst basis as opposed to an inefficient per-packet basis.
+ */
+ if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
+ /**
+ * If the call originated from pktpool_empty_notify, the just freed packet
+ * is needed in pktpool_get.
+ * Therefore don't call pktpool_avail_notify.
+ */
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ } else {
+ /**
+ * The callback is temporarily disabled, log that a packet has been freed.
+ */
+ pktp->emptycb_disable = EMPTYCB_SKIPPED;
+ }
}
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return;
}
int
pktpool_add(pktpool_t *pktp, void *p)
{
+ int err = 0;
+
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
ASSERT(p != NULL);
- if (pktp->len == pktp->maxlen)
- return BCME_RANGE;
+ if (pktp->len == pktp->maxlen) {
+ err = BCME_RANGE;
+ goto done;
+ }
/* pkts in pool have same length */
ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif
- return 0;
+done:
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
+ return err;
}
/* Force pktpool_setmaxlen () into RAM as it uses a constant
int
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
{
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
if (maxlen > PKTPOOL_LEN_MAX)
maxlen = PKTPOOL_LEN_MAX;
*/
pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
+ /* protect shared resource */
+ if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
+ return BCME_ERROR;
+
return pktp->maxlen;
}
{
ASSERT(pktp);
- pktp->emptycb_disable = disable;
+ /**
+ * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
+ * If callback is going to be re-enabled, check if any packet got
+ * freed and added back to the pool while callback was disabled.
+ * When this is the case do the callback now, provided that callback functions
+ * are registered and this call did not originate from pktpool_empty_notify.
+ */
+ if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
+ (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
+ pktpool_avail_notify(pktp);
+ }
+
+ /* Enable or temporarily disable callback when packet becomes available. */
+ pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
}
bool
pktpool_emptycb_disabled(pktpool_t *pktp)
{
ASSERT(pktp);
- return pktp->emptycb_disable;
+ return pktp->emptycb_disable != EMPTYCB_ENABLED;
+}
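/*
 * Illustrative usage sketch (example_take_completed_pkt is hypothetical, and
 * the setter wrapping the disable logic above is assumed to be
 * pktpool_emptycb_disable(pktp, disable)): a caller about to free a burst of
 * packets can suppress the per-packet avail callbacks and let one deferred
 * notification fire on re-enable:
 *
 *	pktpool_emptycb_disable(pktp, TRUE);	// switch to EMPTYCB_DISABLED
 *	while ((p = example_take_completed_pkt()) != NULL)
 *		pktpool_free(pktp, p);		// frees recorded as EMPTYCB_SKIPPED
 *	pktpool_emptycb_disable(pktp, FALSE);	// re-enable; notify if anything was skipped
 */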
+
+#ifdef BCMPKTPOOL
+#include <hnd_lbuf.h>
+
+pktpool_t *pktpool_shared = NULL;
+
+#ifdef BCMFRAGPOOL
+pktpool_t *pktpool_shared_lfrag = NULL;
+#endif /* BCMFRAGPOOL */
+
+pktpool_t *pktpool_shared_rxlfrag = NULL;
+
+static osl_t *pktpool_osh = NULL;
+
+void
+hnd_pktpool_init(osl_t *osh)
+{
+ int n;
+
+ /* Construct a packet pool registry before initializing packet pools */
+ n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
+ if (n != PKTPOOL_MAXIMUM_ID) {
+ ASSERT(0);
+ return;
+ }
+
+ pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared == NULL) {
+ ASSERT(0);
+ goto error1;
+ }
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_lfrag == NULL) {
+ ASSERT(0);
+ goto error2;
+ }
+#endif
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
+ if (pktpool_shared_rxlfrag == NULL) {
+ ASSERT(0);
+ goto error3;
+ }
+#endif
+
+
+ /*
+ * At this early stage, there's not enough memory to allocate all
+ * requested pkts in the shared pool. Need to add to the pool
+ * after reclaim
+ *
+ * n = NRXBUFPOST + SDPCMD_RXBUFS;
+ *
+ * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
+ * registry is not initialized or the registry is depleted.
+ *
+ * A BCME_NOMEM error only indicates that the requested number of packets
+ * were not filled into the pool.
+ */
+ n = 1;
+ if (pktpool_init(osh, pktpool_shared,
+ &n, PKTBUFSZ, FALSE, lbuf_basic) == BCME_ERROR) {
+ ASSERT(0);
+ goto error4;
+ }
+ pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ n = 1;
+ if (pktpool_init(osh, pktpool_shared_lfrag,
+ &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
+ ASSERT(0);
+ goto error5;
+ }
+ pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
+#endif
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ n = 1;
+ if (pktpool_init(osh, pktpool_shared_rxlfrag,
+ &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag) == BCME_ERROR) {
+ ASSERT(0);
+ goto error6;
+ }
+ pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
+#endif
+
+ pktpool_osh = osh;
+
+ return;
+
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+error6:
+#endif
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ pktpool_deinit(osh, pktpool_shared_lfrag);
+error5:
+#endif
+
+#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
+ (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
+ pktpool_deinit(osh, pktpool_shared);
+#endif
+
+error4:
+#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
+ hnd_free(pktpool_shared_rxlfrag);
+ pktpool_shared_rxlfrag = (pktpool_t *)NULL;
+error3:
+#endif /* BCMRXFRAGPOOL */
+
+#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
+ hnd_free(pktpool_shared_lfrag);
+ pktpool_shared_lfrag = (pktpool_t *)NULL;
+error2:
+#endif /* BCMFRAGPOOL */
+
+ hnd_free(pktpool_shared);
+ pktpool_shared = (pktpool_t *)NULL;
+
+error1:
+ pktpool_dettach(osh);
+}
+
+void
+hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
+{
+ pktpool_fill(pktpool_osh, pktpool, minimal);
+}
+
+/* refill pktpools after reclaim */
+void
+hnd_pktpool_refill(bool minimal)
+{
+ if (POOL_ENAB(pktpool_shared)) {
+ pktpool_fill(pktpool_osh, pktpool_shared, minimal);
+ }
+/* fragpool reclaim */
+#ifdef BCMFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_lfrag)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
+ }
+#endif /* BCMFRAGPOOL */
+/* rx fragpool reclaim */
+#ifdef BCMRXFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_rxlfrag)) {
+ pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
+ }
+#endif
}
+#endif /* BCMPKTPOOL */
/*
* HND generic pktq operation primitives
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktq.c 605726 2015-12-11 07:08:16Z $
*/
#include <typedefs.h>
#include <osl.h>
+#include <osl_ext.h>
#include <bcmutils.h>
#include <hnd_pktq.h>
+/* mutex macros for thread safe */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
+#define HND_PKTQ_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
+#define HND_PKTQ_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
+#else
+#define HND_PKTQ_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
+#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
+#endif
+
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
{
struct pktq_prec *q;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+ /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */
+ ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p)));
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
{
struct pktq_prec *q;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+ /* queueing chains not allowed and no segmented SKB (Kernel-3.18.y) */
+ ASSERT(!((PKTLINK(p) != NULL) && (PKTLINK(p) != p)));
ASSERT(!pktq_full(pq));
ASSERT(!pktq_pfull(pq, prec));
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
struct pktq_prec *q;
struct pktq_prec *list_q;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
- return;
+ goto done;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
list_q->tail = NULL;
list_q->len = 0;
list->len = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
}
/*
struct pktq_prec *q;
struct pktq_prec *list_q;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
- return;
+ goto done;
ASSERT(prec >= 0 && prec < pq->num_prec);
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
list_q->tail = NULL;
list_q->len = 0;
list->len = 0;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
}
void * BCMFASTPATH
struct pktq_prec *q;
void *p;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
{
struct pktq_prec *q;
- void *p;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if (prev_p == NULL)
- return NULL;
+ goto done;
if ((p = PKTLINK(prev_p)) == NULL)
- return NULL;
+ goto done;
q->len--;
PKTSETLINK(prev_p, PKTLINK(p));
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
struct pktq_prec *q;
void *p, *prev = NULL;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
}
}
if (p == NULL)
- return NULL;
+ goto done;
if (prev == NULL) {
if ((q->head = PKTLINK(p)) == NULL) {
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
struct pktq_prec *q;
void *p, *prev;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
for (prev = NULL; p != q->tail; p = PKTLINK(p))
prev = p;
pq->len--;
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
{
struct pktq_prec *q;
- void *p, *prev = NULL;
+ void *p, *next, *prev = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
q = &pq->q[prec];
p = q->head;
while (p) {
+ next = PKTLINK(p);
if (fn == NULL || (*fn)(p, arg)) {
bool head = (p == q->head);
if (head)
- q->head = PKTLINK(p);
+ q->head = next;
else
- PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(prev, next);
PKTSETLINK(p, NULL);
PKTFREE(osh, p, dir);
q->len--;
pq->len--;
- p = (head ? q->head : PKTLINK(prev));
} else {
prev = p;
- p = PKTLINK(p);
}
+ p = next;
}
+ q->tail = prev;
+
if (q->head == NULL) {
ASSERT(q->len == 0);
- q->tail = NULL;
+ ASSERT(q->tail == NULL);
}
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
}
bool BCMFASTPATH
pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
{
+ bool ret = FALSE;
struct pktq_prec *q;
- void *p;
+ void *p = NULL;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
ASSERT(prec >= 0 && prec < pq->num_prec);
/* Should this just assert pktbuf? */
if (!pktbuf)
- return FALSE;
+ goto done;
q = &pq->q[prec];
for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
;
if (p == NULL)
- return FALSE;
+ goto done;
PKTSETLINK(p, PKTLINK(pktbuf));
if (q->tail == pktbuf)
q->len--;
pq->len--;
PKTSETLINK(pktbuf, NULL);
- return TRUE;
+ ret = TRUE;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
}
-void
+bool
pktq_init(struct pktq *pq, int num_prec, int max_len)
{
int prec;
+ if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
/* pq is variable size; only zero out what's requested */
for (prec = 0; prec < num_prec; prec++)
pq->q[prec].max = pq->max;
+
+ return TRUE;
+}
+
+bool
+pktq_deinit(struct pktq *pq)
+{
+ if (HND_PKTQ_MUTEX_DELETE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return TRUE;
}
void
{
ASSERT(prec >= 0 && prec < pq->num_prec);
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
if (prec < pq->num_prec)
pq->q[prec].max = (uint16)max_len;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
}
void * BCMFASTPATH
pktq_deq(struct pktq *pq, int *prec_out)
{
struct pktq_prec *q;
- void *p;
+ void *p = NULL;
int prec;
- if (pq->len == 0)
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+ if (pq->len == 0)
+ goto done;
+
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
pktq_deq_tail(struct pktq *pq, int *prec_out)
{
struct pktq_prec *q;
- void *p, *prev;
+ void *p = NULL, *prev;
int prec;
- if (pq->len == 0)
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+ if (pq->len == 0)
+ goto done;
+
for (prec = 0; prec < pq->hi_prec; prec++)
if (pq->q[prec].head)
break;
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
for (prev = NULL; p != q->tail; p = PKTLINK(p))
prev = p;
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
pktq_peek(struct pktq *pq, int *prec_out)
{
int prec;
+ void *p = NULL;
- if (pq->len == 0)
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+ if (pq->len == 0)
+ goto done;
+
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
if (prec_out)
*prec_out = prec;
- return (pq->q[prec].head);
+ p = pq->q[prec].head;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
}
void *
pktq_peek_tail(struct pktq *pq, int *prec_out)
{
int prec;
+ void *p = NULL;
- if (pq->len == 0)
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+ if (pq->len == 0)
+ goto done;
+
for (prec = 0; prec < pq->hi_prec; prec++)
if (pq->q[prec].head)
break;
if (prec_out)
*prec_out = prec;
- return (pq->q[prec].tail);
+ p = pq->q[prec].tail;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
}
void
{
int prec;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return;
+
/* Optimize flush, if pktq len = 0, just return.
* pktq len of 0 means pktq's prec q's are all empty.
*/
- if (pq->len == 0) {
- return;
- }
+ if (pq->len == 0)
+ goto done;
for (prec = 0; prec < pq->num_prec; prec++)
pktq_pflush(osh, pq, prec, dir, fn, arg);
if (fn == NULL)
ASSERT(pq->len == 0);
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return;
}
/* Return sum of lengths of a specific set of precedences */
{
int prec, len;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
len = 0;
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
len += pq->q[prec].len;
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
return len;
}
pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
{
struct pktq_prec *q;
- void *p;
+ void *p = NULL;
int prec;
- if (pq->len == 0)
- {
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- }
+
+ if (pq->len == 0)
+ goto done;
+
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
if (prec-- == 0)
- return NULL;
+ goto done;
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
if (prec_out)
*prec_out = prec;
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
/* Priority dequeue from a specific set of precedences */
pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
{
struct pktq_prec *q;
- void *p;
+ void *p = NULL;
int prec;
- if (pq->len == 0)
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+ if (pq->len == 0)
+ goto done;
+
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
pq->hi_prec--;
while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
if (prec-- == 0)
- return NULL;
+ goto done;
q = &pq->q[prec];
if ((p = q->head) == NULL)
- return NULL;
+ goto done;
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
PKTSETLINK(p, NULL);
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
return p;
}
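A minimal caller sketch (editor's illustration, not part of the patch): draining only a chosen set of precedences with pktq_mdeq(). The prec_bmp argument is a bitmap with one bit per precedence; PKTFREE() stands in for whatever the caller actually does with each dequeued packet.

static void drain_precs(osl_t *osh, struct pktq *pq, uint prec_bmp)
{
	void *p;
	int prec;

	/* e.g. prec_bmp = (1 << 2) | (1 << 3) drains only precedences 2 and 3 */
	while ((p = pktq_mdeq(pq, prec_bmp, &prec)) != NULL)
		PKTFREE(osh, p, FALSE);	/* consume/free the dequeued packet */
}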
+
+#ifdef HND_PKTQ_THREAD_SAFE
+int
+pktq_pavail(struct pktq *pq, int prec)
+{
+ int ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ ret = pq->q[prec].max - pq->q[prec].len;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return ret;
+}
+
+bool
+pktq_pfull(struct pktq *pq, int prec)
+{
+ bool ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ ret = pq->q[prec].len >= pq->q[prec].max;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+
+int
+pktq_avail(struct pktq *pq)
+{
+ int ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return 0;
+
+ ret = pq->max - pq->len;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return 0;
+
+ return ret;
+}
+
+bool
+pktq_full(struct pktq *pq)
+{
+ bool ret;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ ret = pq->len >= pq->max;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
+ return ret;
+}
+#endif /* HND_PKTQ_THREAD_SAFE */
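A hedged enqueue-side sketch of the same mutex-protected predicates (editor's illustration): pktq_pfull() acts as an admission check before the packet is handed to the driver's precedence-enqueue routine, assumed here to be the existing pktq_penq() from hnd_pktq.c.

static bool try_penq(osl_t *osh, struct pktq *pq, int prec, void *p)
{
	if (pktq_pfull(pq, prec)) {	/* per-precedence queue already at its max */
		PKTFREE(osh, p, TRUE);	/* drop the packet */
		return FALSE;
	}
	pktq_penq(pq, prec, p);		/* assumed existing enqueue helper */
	return TRUE;
}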
* Misc utility routines for accessing PMU corerev specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: hndpmu.c 475037 2014-05-02 23:55:49Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndpmu.c 530092 2015-01-29 04:44:58Z $
*/
void
si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
{
- chipcregs_t *cc = NULL;
- uint origidx, intr_val = 0;
sdiod_drive_str_t *str_tab = NULL;
uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */
uint32 str_shift = 0;
uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
uint32 str_ovr_pmuval = 0; /* position of bit within this register */
+ pmuregs_t *pmu;
+ uint origidx;
if (!(sih->cccaps & CC_CAP_PMU)) {
return;
}
- /* Remember original core before switch to chipc */
- if (CHIPID(sih->chip) == BCM43362_CHIP_ID)
- cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ /* Remember original core before switch to chipc/pmu */
+ origidx = si_coreidx(sih);
+ if (AOB_ENAB(sih)) {
+ pmu = si_setcore(sih, PMU_CORE_ID, 0);
+ } else {
+ pmu = si_setcoreidx(sih, SI_CC_IDX);
+ }
+ ASSERT(pmu != NULL);
- switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
+ switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), sih->pmurev)) {
case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab1;
str_mask = 0x30000000;
break;
default:
		PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
-			bcm_chipname(sih->chip, chn, 8), sih->chiprev, sih->pmurev));
+			bcm_chipname(CHIPID(sih->chip), chn, 8),
+			CHIPREV(sih->chiprev), sih->pmurev));
break;
}
- if (CHIPID(sih->chip) == BCM43362_CHIP_ID) {
- if (str_tab != NULL && cc != NULL) {
- uint32 cc_data_temp;
- int i;
-
- /* Pick the lowest available drive strength equal or greater than the
- * requested strength. Drive strength of 0 requests tri-state.
- */
- for (i = 0; drivestrength < str_tab[i].strength; i++)
- ;
-
- if (i > 0 && drivestrength > str_tab[i].strength)
- i--;
-
- W_REG(osh, &cc->chipcontrol_addr, PMU_CHIPCTL1);
- cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
- cc_data_temp &= ~str_mask;
- cc_data_temp |= str_tab[i].sel << str_shift;
- W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
- if (str_ovr_pmuval) { /* enables the selected drive strength */
- W_REG(osh, &cc->chipcontrol_addr, str_ovr_pmuctl);
- OR_REG(osh, &cc->chipcontrol_data, str_ovr_pmuval);
- }
- PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
- drivestrength, str_tab[i].strength));
- }
- /* Return to original core */
- si_restore_core(sih, origidx, intr_val);
- }
- else if (str_tab != NULL) {
+ if (str_tab != NULL) {
uint32 cc_data_temp;
int i;
if (i > 0 && drivestrength > str_tab[i].strength)
i--;
- W_REG(osh, PMUREG(sih, chipcontrol_addr), PMU_CHIPCTL1);
- cc_data_temp = R_REG(osh, PMUREG(sih, chipcontrol_data));
+ W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1);
+ cc_data_temp = R_REG(osh, &pmu->chipcontrol_data);
cc_data_temp &= ~str_mask;
cc_data_temp |= str_tab[i].sel << str_shift;
- W_REG(osh, PMUREG(sih, chipcontrol_data), cc_data_temp);
+ W_REG(osh, &pmu->chipcontrol_data, cc_data_temp);
if (str_ovr_pmuval) { /* enables the selected drive strength */
- W_REG(osh, PMUREG(sih, chipcontrol_addr), str_ovr_pmuctl);
- OR_REG(osh, PMUREG(sih, chipcontrol_data), str_ovr_pmuval);
+ W_REG(osh, &pmu->chipcontrol_addr, str_ovr_pmuctl);
+ OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval);
}
PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
drivestrength, str_tab[i].strength));
}
+
+ /* Return to original core */
+ si_setcoreidx(sih, origidx);
} /* si_sdiod_drive_strength_init */
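The selection logic above relies on the drive-strength table being sorted in descending order of strength and terminated with a 0-strength entry, which is what the loop implies. The stand-alone sketch below (editor's illustration, same assumptions; the helper name is hypothetical) shows the pick of the lowest table entry that is greater than or equal to the requested strength.

static int sdiod_drvstr_pick(const sdiod_drive_str_t *tab, uint32 drivestrength)
{
	int i;

	for (i = 0; drivestrength < tab[i].strength; i++)
		;			/* skip entries stronger than requested */
	if (i > 0 && drivestrength > tab[i].strength)
		i--;			/* round up to the next stronger entry */
	return i;			/* index whose .sel value gets programmed */
}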
/*
* Broadcom AMBA Interconnect definitions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: aidmp.h 456346 2014-02-18 16:48:52Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: aidmp.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _AIDMP_H
#define ER_ADD 4
#define ER_END 0xe
#define ER_BAD 0xffffffff
+#define ER_SZ_MAX 4096 /* 4KB */
/* EROM CompIdentA */
#define CIA_MFG_MASK 0xfff00000
#define AI_RESETREADID 0x808
#define AI_RESETWRITEID 0x80c
-#define AI_ERRLOGCTRL 0xa00
-#define AI_ERRLOGDONE 0xa04
-#define AI_ERRLOGSTATUS 0xa08
-#define AI_ERRLOGADDRLO 0xa0c
-#define AI_ERRLOGADDRHI 0xa10
-#define AI_ERRLOGID 0xa14
-#define AI_ERRLOGUSER 0xa18
-#define AI_ERRLOGFLAGS 0xa1c
+#define AI_ERRLOGCTRL 0x900
+#define AI_ERRLOGDONE 0x904
+#define AI_ERRLOGSTATUS 0x908
+#define AI_ERRLOGADDRLO 0x90c
+#define AI_ERRLOGADDRHI 0x910
+#define AI_ERRLOGID 0x914
+#define AI_ERRLOGUSER 0x918
+#define AI_ERRLOGFLAGS 0x91c
#define AI_INTSTATUS 0xa00
#define AI_CONFIG 0xe00
#define AI_ITCR 0xf00
/* resetctrl */
#define AIRC_RESET 1
+/* errlogctrl */
+#define AIELC_TO_EXP_MASK 0x0000001f0 /* backplane timeout exponent */
+#define AIELC_TO_EXP_SHIFT 4
+#define AIELC_TO_ENAB_SHIFT 9 /* backplane timeout enable */
+
+/* errlogdone */
+#define AIELD_ERRDONE_MASK 0x3
+
+/* errlogstatus */
+#define AIELS_TIMEOUT_MASK 0x3
+
/* config */
#define AICFG_OOB 0x00000020
#define AICFG_IOS 0x00000010
#define AI_OOBSEL_5_SHIFT 8
#define AI_OOBSEL_6_SHIFT 16
#define AI_OOBSEL_7_SHIFT 24
+#define AI_IOCTRL_ENABLE_D11_PME (1 << 14)
#endif /* _AIDMP_H */
/*
* BCM common config options
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcm_cfg.h 351867 2012-08-21 18:46:16Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _bcm_cfg_h_
* and instrumentation on top of the heap, without modifying the heap
* allocation implementation.
*
- * $Copyright Open Broadcom Corporation$
- *
- * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_mpool_pub.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_MPOOL_PUB_H
--- /dev/null
+#ifndef __bcm_ring_included__
+#define __bcm_ring_included__
+
+/*
+ * +----------------------------------------------------------------------------
+ *
+ * bcm_ring.h : Ring context abstraction
+ *
+ * The ring context tracks the WRITE and READ indices where elements may be
+ * produced and consumed respectively. All elements in the ring need to be
+ * fixed size.
+ *
+ * NOTE: A ring of size N may only hold N-1 elements.
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * API Notes:
+ *
+ * Ring manipulation API allows for:
+ * Pending operations: Often before some work can be completed, it may be
+ * desired that several resources are available, e.g. space for production in
+ * a ring. Approaches such as, #1) reserve resources one by one and return them
+ * if another required resource is not available, or #2) employ a two pass
+ * algorithm of first testing whether all resources are available, have a
+ * an impact on performance critical code. The approach taken here is more akin
+ * to approach #2, where a test for resource availability essentially also
+ * provides the index for production in an un-committed state.
+ * The same approach is taken for the consumer side.
+ *
+ * - Pending production: Fetch the next index where a ring element may be
+ * produced. The caller may not commit the WRITE of the element.
+ * - Pending consumption: Fetch the next index where a ring element may be
+ *   consumed. The caller may not commit the READ of the element.
+ *
+ * Producer side API:
+ * - bcm_ring_is_full : Test whether ring is full
+ * - bcm_ring_prod : Fetch index where an element may be produced (commit)
+ * - bcm_ring_prod_pend: Fetch index where an element may be produced (pending)
+ * - bcm_ring_prod_done: Commit a previous pending produce fetch
+ * - bcm_ring_prod_avail: Fetch total number free slots eligible for production
+ *
+ * Consumer side API:
+ * - bcm_ring_is_empty : Test whether ring is empty
+ * - bcm_ring_cons : Fetch index where an element may be consumed (commit)
+ * - bcm_ring_cons_pend: Fetch index where an element may be consumed (pending)
+ * - bcm_ring_cons_done: Commit a previous pending consume fetch
+ * - bcm_ring_cons_avail: Fetch total number elements eligible for consumption
+ *
+ * - bcm_ring_sync_read: Sync read offset in peer ring, from local ring
+ * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Design Notes:
+ * Following items are not tracked in a ring context (design decision)
+ * - width of a ring element.
+ * - depth of the ring.
+ * - base of the buffer, where the elements are stored.
+ * - count of number of free slots in the ring
+ *
+ * Implementation Notes:
+ * - When BCM_RING_DEBUG is enabled, an explicit bcm_ring_init() is required.
+ * - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
+ *
+ * +----------------------------------------------------------------------------
+ *
+ * Usage Notes:
+ * An application may instantiate a ring of fixed-size elements by defining
+ * - a ring data buffer to store the ring elements.
+ * - depth of the ring (max number of elements managed by ring context).
+ *   Preferably, depth may be represented as a constant.
+ * - width of a ring element: to be used in pointer arithmetic with the ring's
+ * data buffer base and an index to fetch the ring element.
+ *
+ * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
+ * producer and the other for the consumer, both pointing to the same circular
+ * buffer. The producer may operate on its own local workq and flush the write
+ * index to the consumer. Likewise the consumer may use its local workq and
+ * flush the read index to the producer. This way we do not repeatedly access
+ * the peer's context. The two peers may reside on different CPU cores with a
+ * private L1 data cache.
+ * +----------------------------------------------------------------------------
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcm_ring.h 591283 2015-10-07 11:52:00Z $
+ *
+ * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
+ * vim: set ts=4 noet sw=4 tw=80:
+ *
+ * +----------------------------------------------------------------------------
+ */
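As a quick illustration of the two-phase (pend/commit) production described in the API notes above, a hypothetical producer could look like the sketch below (editor's illustration). my_elem_t and the caller-owned buffer are assumptions; the bcm_ring_* calls and BCM_RING_FULL are the ones declared later in this header.

typedef struct my_elem { void *payload; } my_elem_t;	/* hypothetical element */

static int my_elem_produce(bcm_ring_t *ring, my_elem_t *buf, const int depth,
	void *payload)
{
	int idx, pend_write;

	idx = bcm_ring_prod_pend(ring, &pend_write, depth); /* reserve, uncommitted */
	if (idx == BCM_RING_FULL)
		return BCM_RING_FULL;

	buf[idx].payload = payload;		/* fill the reserved slot */
	bcm_ring_prod_done(ring, pend_write);	/* commit the WRITE index */
	return idx;
}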
+
+#ifdef ____cacheline_aligned
+#define __ring_aligned ____cacheline_aligned
+#else
+#define __ring_aligned
+#endif
+
+/* Conditional compile for debug */
+/* #define BCM_RING_DEBUG */
+
+#define BCM_RING_EMPTY (-1)
+#define BCM_RING_FULL (-1)
+#define BCM_RING_NULL ((bcm_ring_t *)NULL)
+
+#if defined(BCM_RING_DEBUG)
+#define RING_ASSERT(exp) ASSERT(exp)
+#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \
+ ((ring)->self == (ring)))
+#else /* ! BCM_RING_DEBUG */
+#define RING_ASSERT(exp) do {} while (0)
+#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL)
+#endif /* ! BCM_RING_DEBUG */
+
+#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0)
+
+/*
+ * +----------------------------------------------------------------------------
+ * Ring Context
+ * +----------------------------------------------------------------------------
+ */
+typedef struct bcm_ring { /* Ring context */
+#if defined(BCM_RING_DEBUG)
+ struct bcm_ring *self; /* ptr to self for IS VALID test */
+#endif /* BCM_RING_DEBUG */
+ int write __ring_aligned; /* WRITE index in a circular ring */
+ int read __ring_aligned; /* READ index in a circular ring */
+} bcm_ring_t;
+
+
+static INLINE void bcm_ring_init(bcm_ring_t *ring);
+static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from);
+static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring);
+
+static INLINE int __bcm_ring_next_write(bcm_ring_t *ring, const int ring_size);
+
+static INLINE bool __bcm_ring_full(bcm_ring_t *ring, int next_write);
+static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write);
+static INLINE int bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write,
+ const int ring_size);
+static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read);
+static INLINE int bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read,
+ const int ring_size);
+static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size);
+
+static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self);
+static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self);
+
+static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring,
+ const int ring_size);
+static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring,
+ const int ring_size);
+static INLINE void bcm_ring_cons_all(bcm_ring_t *ring);
+
+
+/**
+ * bcm_ring_init - initialize a ring context.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_init(bcm_ring_t *ring)
+{
+ ASSERT(ring != (bcm_ring_t *)NULL);
+#if defined(BCM_RING_DEBUG)
+ ring->self = ring;
+#endif /* BCM_RING_DEBUG */
+ ring->write = 0;
+ ring->read = 0;
+}
+
+/**
+ * bcm_ring_copy - copy construct a ring
+ * @to: pointer to the new ring context
+ * @from: pointer to orig ring context
+ */
+static INLINE void
+bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from)
+{
+ bcm_ring_init(to);
+
+ to->write = from->write;
+ to->read = from->read;
+}
+
+/**
+ * bcm_ring_is_empty - "Boolean" test whether ring is empty.
+ * @ring: pointer to a ring context
+ *
+ * PS. does not return BCM_RING_EMPTY value.
+ */
+static INLINE bool
+bcm_ring_is_empty(bcm_ring_t *ring)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ return (ring->read == ring->write);
+}
+
+
+/**
+ * __bcm_ring_next_write - determine the index where the next write may occur
+ * (with wrap-around).
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE int
+__bcm_ring_next_write(bcm_ring_t *ring, const int ring_size)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ return ((ring->write + 1) % ring_size);
+}
+
+
+/**
+ * __bcm_ring_full - support function for ring full test.
+ * @ring: pointer to a ring context
+ * @next_write: next location in ring where an element is to be produced
+ *
+ * PRIVATE INTERNAL USE ONLY.
+ */
+static INLINE bool
+__bcm_ring_full(bcm_ring_t *ring, int next_write)
+{
+ return (next_write == ring->read);
+}
+
+
+/**
+ * bcm_ring_is_full - "Boolean" test whether a ring is full.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ *
+ * PS. does not return BCM_RING_FULL value.
+ */
+static INLINE bool
+bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
+{
+ int next_write;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ next_write = __bcm_ring_next_write(ring, ring_size);
+ return __bcm_ring_full(ring, next_write);
+}
+
+
+/**
+ * bcm_ring_prod_done - commit a previously pending index where production
+ * was requested.
+ * @ring: pointer to a ring context
+ * @write: index into ring up to where production was done.
+ * +----------------------------------------------------------------------------
+ */
+static INLINE void
+bcm_ring_prod_done(bcm_ring_t *ring, int write)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ ring->write = write;
+}
+
+
+/**
+ * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
+ * produced.
+ * @ring: pointer to a ring context
+ * @pend_write: next index, after the returned index
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_pend(bcm_ring_t *ring, int *pend_write, const int ring_size)
+{
+ int rtn;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ *pend_write = __bcm_ring_next_write(ring, ring_size);
+ if (__bcm_ring_full(ring, *pend_write)) {
+ *pend_write = BCM_RING_FULL;
+ rtn = BCM_RING_FULL;
+ } else {
+ /* production is not committed, caller needs to explicitly commit */
+ rtn = ring->write;
+ }
+ return rtn;
+}
+
+
+/**
+ * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
+ * be produced.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
+{
+ int next_write, prod_write;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+
+ next_write = __bcm_ring_next_write(ring, ring_size);
+ if (__bcm_ring_full(ring, next_write)) {
+ prod_write = BCM_RING_FULL;
+ } else {
+ prod_write = ring->write;
+ bcm_ring_prod_done(ring, next_write); /* "commit" production */
+ }
+ return prod_write;
+}
+
+
+/**
+ * bcm_ring_cons_done - commit a previously pending read
+ * @ring: pointer to a ring context
+ * @read: index up to which elements have been consumed.
+ */
+static INLINE void
+bcm_ring_cons_done(bcm_ring_t *ring, int read)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(ring));
+ ring->read = read;
+}
+
+
+/**
+ * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
+ * element may be consumed.
+ * @ring: pointer to a ring context
+ * @pend_read: index into ring up to which elements may be consumed.
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_pend(bcm_ring_t *ring, int *pend_read, const int ring_size)
+{
+ int rtn;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (bcm_ring_is_empty(ring)) {
+ *pend_read = BCM_RING_EMPTY;
+ rtn = BCM_RING_EMPTY;
+ } else {
+ *pend_read = (ring->read + 1) % ring_size;
+		/* consumption is not committed, caller needs to explicitly commit */
+ rtn = ring->read;
+ }
+ return rtn;
+}
+
+
+/**
+ * bcm_ring_cons - fetch and "commit" the next index where a ring element may
+ * be consumed.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons(bcm_ring_t *ring, const int ring_size)
+{
+ int cons_read;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (bcm_ring_is_empty(ring)) {
+ cons_read = BCM_RING_EMPTY;
+ } else {
+ cons_read = ring->read;
+ ring->read = (ring->read + 1) % ring_size; /* read is committed */
+ }
+ return cons_read;
+}
+
+
+/**
+ * bcm_ring_sync_read - on consumption, update peer's read index.
+ * @peer: pointer to peer's producer ring context
+ * @self: pointer to consumer's ring context
+ */
+static INLINE void
+bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(peer));
+ RING_ASSERT(BCM_RING_IS_VALID(self));
+ peer->read = self->read; /* flush read update to peer producer */
+}
+
+
+/**
+ * bcm_ring_sync_write - on production, update peer's write index.
+ * @peer: pointer to peer's consumer ring context
+ * @self: pointer to producer's ring context
+ */
+static INLINE void
+bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self)
+{
+ RING_ASSERT(BCM_RING_IS_VALID(peer));
+ RING_ASSERT(BCM_RING_IS_VALID(self));
+ peer->write = self->write; /* flush write update to peer consumer */
+}
+
+
+/**
+ * bcm_ring_prod_avail - fetch total number of available empty slots in the
+ * ring for production.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size)
+{
+ int prod_avail;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (ring->write >= ring->read) {
+ prod_avail = (ring_size - (ring->write - ring->read) - 1);
+ } else {
+ prod_avail = (ring->read - (ring->write + 1));
+ }
+ ASSERT(prod_avail < ring_size);
+ return prod_avail;
+}
+
+
+/**
+ * bcm_ring_cons_avail - fetch total number of available elements for consumption.
+ * @ring: pointer to a ring context
+ * @ring_size: size of the ring
+ */
+static INLINE int
+bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size)
+{
+ int cons_avail;
+ RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
+ if (ring->read == ring->write) {
+ cons_avail = 0;
+ } else if (ring->read > ring->write) {
+ cons_avail = ((ring_size - ring->read) + ring->write);
+ } else {
+ cons_avail = ring->write - ring->read;
+ }
+ ASSERT(cons_avail < ring_size);
+ return cons_avail;
+}
+
+
+/**
+ * bcm_ring_cons_all - set ring in state where all elements are consumed.
+ * @ring: pointer to a ring context
+ */
+static INLINE void
+bcm_ring_cons_all(bcm_ring_t *ring)
+{
+ ring->read = ring->write;
+}
+
+
+/**
+ * Work Queue
+ * A work queue is composed of a ring of work items of a specified depth.
+ * It HAS-A bcm_ring object, comprising a RD and WR offset, to implement a
+ * producer/consumer circular ring.
+ */
+
+struct bcm_workq {
+ bcm_ring_t ring; /* Ring context abstraction */
+ struct bcm_workq *peer; /* Peer workq context */
+ void *buffer; /* Buffer storage for work items in workQ */
+ int ring_size; /* Depth of workQ */
+} __ring_aligned;
+
+typedef struct bcm_workq bcm_workq_t;
+
+
+/* #define BCM_WORKQ_DEBUG */
+#if defined(BCM_WORKQ_DEBUG)
+#define WORKQ_ASSERT(exp) ASSERT(exp)
+#else /* ! BCM_WORKQ_DEBUG */
+#define WORKQ_ASSERT(exp) do {} while (0)
+#endif /* ! BCM_WORKQ_DEBUG */
+
+#define WORKQ_AUDIT(workq) \
+ WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
+ WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);
+
+#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL)
+
+#define WORKQ_PEER(workq) ((workq)->peer)
+#define WORKQ_RING(workq) (&((workq)->ring))
+#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring))
+
+#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
+ WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
+ WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
+ ((__elem_type *)((__workq)->buffer)) + (__index); \
+})
+
+
+static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+ void *buffer, int ring_size);
+
+static INLINE bool bcm_workq_is_empty(bcm_workq_t *workq_prod);
+
+static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);
+
+static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
+static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);
+
+/**
+ * bcm_workq_init - initialize a workq
+ * @workq: pointer to a workq context
+ * @workq_peer: pointer to the peer workq context
+ * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
+ * @ring_size: size of the ring in terms of max number of elements.
+ */
+static INLINE void
+bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
+ void *buffer, int ring_size)
+{
+ ASSERT(workq != BCM_WORKQ_NULL);
+ ASSERT(workq_peer != BCM_WORKQ_NULL);
+ ASSERT(buffer != NULL);
+ ASSERT(ring_size > 0);
+
+ WORKQ_PEER(workq) = workq_peer;
+ WORKQ_PEER(workq_peer) = workq;
+
+ bcm_ring_init(WORKQ_RING(workq));
+ bcm_ring_init(WORKQ_RING(workq_peer));
+
+ workq->buffer = workq_peer->buffer = buffer;
+ workq->ring_size = workq_peer->ring_size = ring_size;
+}
+
+/**
+ * bcm_workq_is_empty - test whether there is work
+ * @workq_prod: producer's workq
+ */
+static INLINE bool
+bcm_workq_is_empty(bcm_workq_t *workq_prod)
+{
+ return bcm_ring_is_empty(WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
+ * @workq_prod: producer's workq whose write index must be synced to peer
+ */
+static INLINE void
+bcm_workq_prod_sync(bcm_workq_t *workq_prod)
+{
+ WORKQ_AUDIT(workq_prod);
+
+ /* cons::write <--- prod::write */
+ bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
+ * @workq_cons: consumer's workq whose read index must be synced to peer
+ */
+static INLINE void
+bcm_workq_cons_sync(bcm_workq_t *workq_cons)
+{
+ WORKQ_AUDIT(workq_cons);
+
+ /* prod::read <--- cons::read */
+ bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
+}
+
+
+/**
+ * bcm_workq_prod_refresh - Fetch the updated consumer's read index
+ * @workq_prod: producer's workq whose read index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
+{
+ WORKQ_AUDIT(workq_prod);
+
+ /* prod::read <--- cons::read */
+ bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
+}
+
+/**
+ * bcm_workq_cons_refresh - Fetch the updated producer's write index
+ * @workq_cons: consumer's workq whose write index must be refreshed from peer
+ */
+static INLINE void
+bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
+{
+ WORKQ_AUDIT(workq_cons);
+
+ /* cons::write <--- prod::write */
+ bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
+}
+
+
+#endif /* __bcm_ring_included__ */
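Putting the workq pieces together, a hedged end-to-end sketch (editor's illustration; the element type, depth, and names are assumptions) of a single-producer/single-consumer pairing over one shared buffer:

typedef struct work_item { void *pkt; } work_item_t;	/* hypothetical work item */
#define MY_WORKQ_DEPTH 64

static work_item_t my_workq_buf[MY_WORKQ_DEPTH];
static bcm_workq_t my_prod, my_cons;

static void my_workq_setup(void)
{
	bcm_workq_init(&my_prod, &my_cons, my_workq_buf, MY_WORKQ_DEPTH);
}

static int my_workq_post(void *pkt)	/* producer side */
{
	int idx = bcm_ring_prod(WORKQ_RING(&my_prod), MY_WORKQ_DEPTH);

	if (idx == BCM_RING_FULL)
		return -1;
	WORKQ_ELEMENT(work_item_t, &my_prod, idx)->pkt = pkt;
	bcm_workq_prod_sync(&my_prod);		/* publish WRITE index to consumer */
	return 0;
}

static void *my_workq_fetch(void)	/* consumer side */
{
	void *pkt;
	int idx;

	bcm_workq_cons_refresh(&my_cons);	/* pick up producer's WRITE index */
	idx = bcm_ring_cons(WORKQ_RING(&my_cons), MY_WORKQ_DEPTH);
	if (idx == BCM_RING_EMPTY)
		return NULL;
	pkt = WORKQ_ELEMENT(work_item_t, &my_cons, idx)->pkt;
	bcm_workq_cons_sync(&my_cons);		/* publish READ index to producer */
	return pkt;
}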
*
* Definitions subject to change without notice.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmcdc.h 318308 2012-03-02 02:23:42Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmcdc.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _bcmcdc_h_
#define _bcmcdc_h_
/*
* Misc system wide definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmdefs.h 474209 2014-04-30 12:16:47Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmdefs.h 601026 2015-11-20 06:53:19Z $
*/
#ifndef _bcmdefs_h_
#undef BCM47XX_CA9
#ifndef BCMFASTPATH
-#if defined(BCM47XX_CA9)
-#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath")))
-#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host")))
-#else
#define BCMFASTPATH
#define BCMFASTPATH_HOST
-#endif
#endif /* BCMFASTPATH */
#define CHIPREV(rev) (rev)
#endif
+#ifdef BCMPCIEREV
+#define PCIECOREREV(rev) (BCMPCIEREV)
+#else
+#define PCIECOREREV(rev) (rev)
+#endif
+
/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */
#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa)
#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+#define PHYSADDRTOULONG(_pa, _ulong) \
+ do { \
+ _ulong = ((unsigned long)(_pa).hiaddr << 32) | ((_pa).loaddr); \
+ } while (0)
#else
typedef unsigned long dmaaddr_t;
/* add 40 bytes to allow for extra RPC header and info */
#define BCMEXTRAHDROOM 260
#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
-#if defined(BCM47XX_CA9)
-#define BCMEXTRAHDROOM 224
-#else
#define BCMEXTRAHDROOM 204
-#endif /* linux && BCM47XX_CA9 */
#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
/* Packet alignment for most efficient SDIO (can change based on platform) */
/* Max. nvram variable table size */
#ifndef MAXSZ_NVRAM_VARS
-#define MAXSZ_NVRAM_VARS 4096
-#endif
+#ifdef LARGE_NVRAM_MAXSZ
+#define MAXSZ_NVRAM_VARS LARGE_NVRAM_MAXSZ
+#else
+/* SROM12 changes */
+#define MAXSZ_NVRAM_VARS 6144
+#endif /* LARGE_NVRAM_MAXSZ */
+#endif /* !MAXSZ_NVRAM_VARS */
#else
#define BCMLFRAG_ENAB() (0)
#endif /* BCMLFRAG_ENAB */
+#define RXMODE1 1 /* descriptor split */
+#define RXMODE2 2 /* descriptor split + classification */
+#define RXMODE3 3 /* fifo split + classification */
+#define RXMODE4 4 /* fifo split + classification + hdr conversion */
+
#ifdef BCMSPLITRX /* BCMLFRAG support enab macros */
extern bool _bcmsplitrx;
+ extern uint8 _bcmsplitrx_mode;
#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
#define BCMSPLITRX_ENAB() (_bcmsplitrx)
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
#elif defined(BCMSPLITRX_DISABLED)
#define BCMSPLITRX_ENAB() (0)
+ #define BCMSPLITRX_MODE() (0)
#else
#define BCMSPLITRX_ENAB() (1)
+ #define BCMSPLITRX_MODE() (_bcmsplitrx_mode)
#endif
#else
#define BCMSPLITRX_ENAB() (0)
+ #define BCMSPLITRX_MODE() (0)
#endif /* BCMSPLITRX */
+
+#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */
+extern bool _pciedevenab;
+ #if defined(WL_ENAB_RUNTIME_CHECK)
+ #define BCMPCIEDEV_ENAB() (_pciedevenab)
+ #elif defined(BCMPCIEDEV_ENABLED)
+ #define BCMPCIEDEV_ENAB() 1
+ #else
+ #define BCMPCIEDEV_ENAB() 0
+ #endif
+#else
+ #define BCMPCIEDEV_ENAB() 0
+#endif /* BCMPCIEDEV */
+
+#define SPLIT_RXMODE1() ((BCMSPLITRX_MODE() == RXMODE1))
+#define SPLIT_RXMODE2() ((BCMSPLITRX_MODE() == RXMODE2))
+#define SPLIT_RXMODE3() ((BCMSPLITRX_MODE() == RXMODE3))
+#define SPLIT_RXMODE4() ((BCMSPLITRX_MODE() == RXMODE4))
+
+#define PKT_CLASSIFY() (SPLIT_RXMODE2() || SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define RXFIFO_SPLIT() (SPLIT_RXMODE3() || SPLIT_RXMODE4())
+#define HDR_CONV() (SPLIT_RXMODE4())
+
+#define PKT_CLASSIFY_EN(x) ((PKT_CLASSIFY()) && (PKT_CLASSIFY_FIFO == (x)))
#ifdef BCM_SPLITBUF
extern bool _bcmsplitbuf;
#if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
#else
#define BCM_SPLITBUF_ENAB() (0)
#endif /* BCM_SPLITBUF */
+
/* Max size for reclaimable NVRAM array */
#ifdef DL_NVRAM
#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
/*
* Broadcom device-specific manifest constants.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmdevs.h 484136 2014-06-12 04:36:10Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmdevs.h 582052 2015-08-26 09:30:53Z $
*/
#ifndef _BCMDEVS_H
#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */
#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */
#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */
-#define BCM4355_D11AC_ID 0x43d3 /* 4355 802.11ac dualband device */
-#define BCM4355_D11AC2G_ID 0x43d4 /* 4355 802.11ac 2.4G device */
-#define BCM4355_D11AC5G_ID 0x43d5 /* 4355 802.11ac 5G device */
-#define BCM4359_D11AC_ID 0x43d6 /* 4359 802.11ac dualband device */
-#define BCM4359_D11AC2G_ID 0x43d7 /* 4359 802.11ac 2.4G device */
-#define BCM4359_D11AC5G_ID 0x43d8 /* 4359 802.11ac 5G device */
+#define BCM53573_D11AC_ID 0x43b4 /* 53573 802.11ac dualband device */
+#define BCM53573_D11AC2G_ID 0x43b5 /* 53573 802.11ac 2.4G device */
+#define BCM53573_D11AC5G_ID 0x43b6 /* 53573 802.11ac 5G device */
+#define BCM47189_D11AC_ID 0x43c6 /* 47189 802.11ac dualband device */
+#define BCM47189_D11AC2G_ID 0x43c7 /* 47189 802.11ac 2.4G device */
+#define BCM47189_D11AC5G_ID 0x43c8 /* 47189 802.11ac 5G device */
+#define BCM4355_D11AC_ID 0x43dc /* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID 0x43fc /* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID 0x43fd /* 4355 802.11ac 5G device */
+#define BCM4359_D11AC_ID 0x43ef /* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID 0x43fe /* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID 0x43ff /* 4359 802.11ac 5G device */
+#define BCM43596_D11AC_ID 0x4415 /* 43596 802.11ac dualband device */
+#define BCM43596_D11AC2G_ID 0x4416 /* 43596 802.11ac 2.4G device */
+#define BCM43596_D11AC5G_ID 0x4417 /* 43596 802.11ac 5G device */
+#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */
+#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */
+#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */
/* PCI Subsystem ID */
#define BCM943228HMB_SSID_VEN1 0x0607
#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */
+#define BCM4365_D11AC_ID 0x43ca
+#define BCM4365_D11AC2G_ID 0x43cb
+#define BCM4365_D11AC5G_ID 0x43cc
+
+#define BCM4366_D11AC_ID 0x43c3
+#define BCM4366_D11AC2G_ID 0x43c4
+#define BCM4366_D11AC5G_ID 0x43c5
+
#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */
#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */
#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */
#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */
#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */
#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */
+#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */
#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */
#define BCM43526_CHIP_ID 0xAA06
#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */
#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */
#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */
#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */
+#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */
#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \
(CHIPID(chipid) == BCM4354_CHIP_ID) || \
(CHIPID(chipid) == BCM4356_CHIP_ID) || \
(CHIPID(chipid) == BCM43570_CHIP_ID) || \
(CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */
+#define BCM43454_CHIP_ID 43454 /* 43454 chipcommon chipid */
+#define BCM43455_CHIP_ID 43455 /* 43455 chipcommon chipid */
+#define BCM43457_CHIP_ID 43457 /* 43457 chipcommon chipid */
+#define BCM43458_CHIP_ID 43458 /* 43458 chipcommon chipid */
#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */
#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */
#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */
#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \
(CHIPID(chipid) == BCM4355_CHIP_ID) || \
(CHIPID(chipid) == BCM4359_CHIP_ID))
+
+#define BCM4345_CHIP(chipid) (CHIPID(chipid) == BCM4345_CHIP_ID || \
+ CHIPID(chipid) == BCM43454_CHIP_ID || \
+ CHIPID(chipid) == BCM43455_CHIP_ID || \
+ CHIPID(chipid) == BCM43457_CHIP_ID || \
+ CHIPID(chipid) == BCM43458_CHIP_ID)
+
+#define CASE_BCM4345_CHIP case BCM4345_CHIP_ID: /* fallthrough */ \
+ case BCM43454_CHIP_ID: /* fallthrough */ \
+ case BCM43455_CHIP_ID: /* fallthrough */ \
+ case BCM43457_CHIP_ID: /* fallthrough */ \
+ case BCM43458_CHIP_ID
+
#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \
case BCM4355_CHIP_ID: \
case BCM4359_CHIP_ID
+#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */
+#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */
+
+#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */
+
#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */
#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */
+#define BCM43522_CHIP_ID 0xaa02 /* 43522 chipcommon chipid */
+#define BCM43602_CHIP(chipid) ((CHIPID(chipid) == BCM43602_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43462_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43522_CHIP_ID)) /* 43602 variations */
+#define CASE_BCM43602_CHIP case BCM43602_CHIP_ID: /* fallthrough */ \
+ case BCM43462_CHIP_ID: /* fallthrough */ \
+ case BCM43522_CHIP_ID
#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */
#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */
#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */
#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */
#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */
+#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */
#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */
-#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID))
+#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || \
+ ((chipid) == BCM53018_CHIP_ID) || \
+ ((chipid) == BCM47094_CHIP_ID))
#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */
#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */
#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */
+#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */
+#define BCM53573_CHIP(chipid) (CHIPID(chipid) == BCM53573_CHIP_ID)
+#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID
/* Package IDs */
#define BCM4303_PKG_ID 2 /* 4303 package id */
#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */
#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */
#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */
+#define BCM47189_PKG_ID 1 /* 47189 package id */
+#define BCM53573_PKG_ID 0 /* 53573 package id */
#define BCM4706L_PKG_ID 1 /* 4706L package id */
#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */
#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */
#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug Debug/Dev All if's. */
#define BCM4335_PKG_MASK (0x3)
+#define BCM43602_12x12_PKG_ID (0x1) /* 12x12 pins package, used for e.g. router designs */
/* boardflags */
#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */
#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */
#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15
+#define BFL_SROM11_PRECAL_TX_IDX 0x00040000 /* Dedicated TX IQLOCAL IDX values */
+ /* per subband, as derived from 43602A1 MCH5 */
#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */
#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
+#define BFL2_SROM11_EPA_ON_DURING_TXIQLOCAL 0x00020000 /* Keep ext. PA's on in TX IQLO CAL */
/* boardflags3 */
#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */
#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */
#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */
+/* boardflags4 for SROM12 */
+#define BFL4_SROM12_4dBPAD	(1 << 0)   /* To distinguish between normal and 4dB pad board */
+
/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */
#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */
#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */
+#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */
#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */
#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */
#define MIN_SLOW_CLK 32 /* us Slow clock period */
#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* Reference Board Types */
-#define BU4710_BOARD 0x0400
-#define VSIM4710_BOARD 0x0401
-#define QT4710_BOARD 0x0402
-
-#define BU4309_BOARD 0x040a
-#define BCM94309CB_BOARD 0x040b
-#define BCM94309MP_BOARD 0x040c
-#define BCM4309AP_BOARD 0x040d
-
-#define BCM94302MP_BOARD 0x040e
-
-#define BU4306_BOARD 0x0416
-#define BCM94306CB_BOARD 0x0417
-#define BCM94306MP_BOARD 0x0418
-
-#define BCM94710D_BOARD 0x041a
-#define BCM94710R1_BOARD 0x041b
-#define BCM94710R4_BOARD 0x041c
-#define BCM94710AP_BOARD 0x041d
-
-#define BU2050_BOARD 0x041f
-
-#define BCM94306P50_BOARD 0x0420
-
-#define BCM94309G_BOARD 0x0421
-
-#define BU4704_BOARD 0x0423
-#define BU4702_BOARD 0x0424
-
-#define BCM94306PC_BOARD 0x0425 /* pcmcia 3.3v 4306 card */
-
-#define MPSG4306_BOARD 0x0427
-
-#define BCM94702MN_BOARD 0x0428
-
-/* BCM4702 1U CompactPCI Board */
-#define BCM94702CPCI_BOARD 0x0429
-
-/* BCM4702 with BCM95380 VLAN Router */
-#define BCM95380RR_BOARD 0x042a
-
-/* cb4306 with SiGe PA */
-#define BCM94306CBSG_BOARD 0x042b
-
-/* cb4306 with SiGe PA */
-#define PCSG94306_BOARD 0x042d
-
-/* bu4704 with sdram */
-#define BU4704SD_BOARD 0x042e
-
-/* Dual 11a/11g Router */
-#define BCM94704AGR_BOARD 0x042f
-
-/* 11a-only minipci */
-#define BCM94308MP_BOARD 0x0430
-
-/* 4306/gprs combo */
-#define BCM94306GPRS_BOARD 0x0432
-
-/* BCM5365/BCM4704 FPGA Bringup Board */
-#define BU5365_FPGA_BOARD 0x0433
-
-#define BU4712_BOARD 0x0444
-#define BU4712SD_BOARD 0x045d
-#define BU4712L_BOARD 0x045f
-
-/* BCM4712 boards */
-#define BCM94712AP_BOARD 0x0445
-#define BCM94712P_BOARD 0x0446
-
-/* BCM4318 boards */
-#define BU4318_BOARD 0x0447
-#define CB4318_BOARD 0x0448
-#define MPG4318_BOARD 0x0449
-#define MP4318_BOARD 0x044a
-#define SD4318_BOARD 0x044b
-
-/* BCM4313 boards */
-#define BCM94313BU_BOARD 0x050f
-#define BCM94313HM_BOARD 0x0510
-#define BCM94313EPA_BOARD 0x0511
-#define BCM94313HMG_BOARD 0x051C
-
-/* BCM63XX boards */
-#define BCM96338_BOARD 0x6338
-#define BCM96348_BOARD 0x6348
-#define BCM96358_BOARD 0x6358
-#define BCM96368_BOARD 0x6368
-
-/* Another mp4306 with SiGe */
-#define BCM94306P_BOARD 0x044c
-
-/* mp4303 */
-#define BCM94303MP_BOARD 0x044e
-
-/* mpsgh4306 */
-#define BCM94306MPSGH_BOARD 0x044f
-
-/* BRCM 4306 w/ Front End Modules */
-#define BCM94306MPM 0x0450
-#define BCM94306MPL 0x0453
-
-/* 4712agr */
-#define BCM94712AGR_BOARD 0x0451
-
-/* pcmcia 4303 */
-#define PC4303_BOARD 0x0454
-
-/* 5350K */
-#define BCM95350K_BOARD 0x0455
-
-/* 5350R */
-#define BCM95350R_BOARD 0x0456
-
-/* 4306mplna */
-#define BCM94306MPLNA_BOARD 0x0457
-
-/* 4320 boards */
-#define BU4320_BOARD 0x0458
-#define BU4320S_BOARD 0x0459
-#define BCM94320PH_BOARD 0x045a
-
-/* 4306mph */
-#define BCM94306MPH_BOARD 0x045b
-
-/* 4306pciv */
-#define BCM94306PCIV_BOARD 0x045c
-
-#define BU4712SD_BOARD 0x045d
-
-#define BCM94320PFLSH_BOARD 0x045e
-
-#define BU4712L_BOARD 0x045f
-#define BCM94712LGR_BOARD 0x0460
-#define BCM94320R_BOARD 0x0461
-
-#define BU5352_BOARD 0x0462
-
-#define BCM94318MPGH_BOARD 0x0463
-
-#define BU4311_BOARD 0x0464
-#define BCM94311MC_BOARD 0x0465
-#define BCM94311MCAG_BOARD 0x0466
-
-#define BCM95352GR_BOARD 0x0467
-
-/* bcm95351agr */
-#define BCM95351AGR_BOARD 0x0470
-
-/* bcm94704mpcb */
-#define BCM94704MPCB_BOARD 0x0472
-
-/* 4785 boards */
-#define BU4785_BOARD 0x0478
-
-/* 4321 boards */
-#define BU4321_BOARD 0x046b
-#define BU4321E_BOARD 0x047c
-#define MP4321_BOARD 0x046c
-#define CB2_4321_BOARD 0x046d
-#define CB2_4321_AG_BOARD 0x0066
-#define MC4321_BOARD 0x046e
-
-/* 4328 boards */
-#define BU4328_BOARD 0x0481
-#define BCM4328SDG_BOARD 0x0482
-#define BCM4328SDAG_BOARD 0x0483
-#define BCM4328UG_BOARD 0x0484
-#define BCM4328UAG_BOARD 0x0485
-#define BCM4328PC_BOARD 0x0486
-#define BCM4328CF_BOARD 0x0487
-
-/* 4325 boards */
-#define BCM94325DEVBU_BOARD 0x0490
-#define BCM94325BGABU_BOARD 0x0491
-
-#define BCM94325SDGWB_BOARD 0x0492
-
-#define BCM94325SDGMDL_BOARD 0x04aa
-#define BCM94325SDGMDL2_BOARD 0x04c6
-#define BCM94325SDGMDL3_BOARD 0x04c9
-
-#define BCM94325SDABGWBA_BOARD 0x04e1
-
-/* 4322 boards */
-#define BCM94322MC_SSID 0x04a4
-#define BCM94322USB_SSID 0x04a8 /* dualband */
-#define BCM94322HM_SSID 0x04b0
-#define BCM94322USB2D_SSID 0x04bf /* single band discrete front end */
-
-/* 4312 boards */
-#define BCM4312MCGSG_BOARD 0x04b5
-
-/* 4315 boards */
-#define BCM94315DEVBU_SSID 0x04c2
-#define BCM94315USBGP_SSID 0x04c7
-#define BCM94315BGABU_SSID 0x04ca
-#define BCM94315USBGP41_SSID 0x04cb
-
-/* 4319 boards */
-#define BCM94319DEVBU_SSID 0X04e5
-#define BCM94319USB_SSID 0X04e6
-#define BCM94319SD_SSID 0X04e7
-
-/* 4716 boards */
-#define BCM94716NR2_SSID 0x04cd
-
-/* 4319 boards */
-#define BCM94319DEVBU_SSID 0X04e5
-#define BCM94319USBNP4L_SSID 0X04e6
-#define BCM94319WLUSBN4L_SSID 0X04e7
-#define BCM94319SDG_SSID 0X04ea
-#define BCM94319LCUSBSDN4L_SSID 0X04eb
-#define BCM94319USBB_SSID 0x04ee
-#define BCM94319LCSDN4L_SSID 0X0507
-#define BCM94319LSUSBN4L_SSID 0X0508
-#define BCM94319SDNA4L_SSID 0X0517
-#define BCM94319SDELNA4L_SSID 0X0518
-#define BCM94319SDELNA6L_SSID 0X0539
-#define BCM94319ARCADYAN_SSID 0X0546
-#define BCM94319WINDSOR_SSID 0x0561
-#define BCM94319MLAP_SSID 0x0562
-#define BCM94319SDNA_SSID 0x058b
-#define BCM94319BHEMU3_SSID 0x0563
-#define BCM94319SDHMB_SSID 0x058c
-#define BCM94319SDBREF_SSID 0x05a1
-#define BCM94319USBSDB_SSID 0x05a2
-
-
-/* 4329 boards */
-#define BCM94329AGB_SSID 0X04b9
-#define BCM94329TDKMDL1_SSID 0X04ba
-#define BCM94329TDKMDL11_SSID 0X04fc
-#define BCM94329OLYMPICN18_SSID 0X04fd
-#define BCM94329OLYMPICN90_SSID 0X04fe
-#define BCM94329OLYMPICN90U_SSID 0X050c
-#define BCM94329OLYMPICN90M_SSID 0X050b
-#define BCM94329AGBF_SSID 0X04ff
-#define BCM94329OLYMPICX17_SSID 0X0504
-#define BCM94329OLYMPICX17M_SSID 0X050a
-#define BCM94329OLYMPICX17U_SSID 0X0509
-#define BCM94329OLYMPICUNO_SSID 0X0564
-#define BCM94329MOTOROLA_SSID 0X0565
-#define BCM94329OLYMPICLOCO_SSID 0X0568
-/* 4336 SDIO board types */
-#define BCM94336SD_WLBGABU_SSID 0x0511
-#define BCM94336SD_WLBGAREF_SSID 0x0519
-#define BCM94336SDGP_SSID 0x0538
-#define BCM94336SDG_SSID 0x0519
-#define BCM94336SDGN_SSID 0x0538
-#define BCM94336SDGFC_SSID 0x056B
-
-/* 4330 SDIO board types */
-#define BCM94330SDG_SSID 0x0528
-#define BCM94330SD_FCBGABU_SSID 0x052e
-#define BCM94330SD_WLBGABU_SSID 0x052f
-#define BCM94330SD_FCBGA_SSID 0x0530
-#define BCM94330FCSDAGB_SSID 0x0532
-#define BCM94330OLYMPICAMG_SSID 0x0549
-#define BCM94330OLYMPICAMGEPA_SSID 0x054F
-#define BCM94330OLYMPICUNO3_SSID 0x0551
-#define BCM94330WLSDAGB_SSID 0x0547
-#define BCM94330CSPSDAGBB_SSID 0x054A
-
-/* 43224 boards */
-#define BCM943224X21 0x056e
-#define BCM943224X21_FCC 0x00d1
-#define BCM943224X21B 0x00e9
-#define BCM943224M93 0x008b
-#define BCM943224M93A 0x0090
-#define BCM943224X16 0x0093
-#define BCM94322X9 0x008d
-#define BCM94322M35e 0x008e
-
-/* 43228 Boards */
-#define BCM943228BU8_SSID 0x0540
-#define BCM943228BU9_SSID 0x0541
-#define BCM943228BU_SSID 0x0542
-#define BCM943227HM4L_SSID 0x0543
-#define BCM943227HMB_SSID 0x0544
-#define BCM943228HM4L_SSID 0x0545
-#define BCM943228SD_SSID 0x0573
-
-/* 43239 Boards */
-#define BCM943239MOD_SSID 0x05ac
-#define BCM943239REF_SSID 0x05aa
-
-/* 4331 boards */
-#define BCM94331X19 0x00D6 /* X19B */
-#define BCM94331X28 0x00E4 /* X28 */
-#define BCM94331X28B 0x010E /* X28B */
-#define BCM94331PCIEBT3Ax_SSID BCM94331X28
-#define BCM94331X12_2G_SSID 0x00EC /* X12 2G */
-#define BCM94331X12_5G_SSID 0x00ED /* X12 5G */
-#define BCM94331X29B 0x00EF /* X29B */
-#define BCM94331X29D 0x010F /* X29D */
-#define BCM94331CSAX_SSID BCM94331X29B
-#define BCM94331X19C 0x00F5 /* X19C */
-#define BCM94331X33 0x00F4 /* X33 */
-#define BCM94331BU_SSID 0x0523
-#define BCM94331S9BU_SSID 0x0524
-#define BCM94331MC_SSID 0x0525
-#define BCM94331MCI_SSID 0x0526
-#define BCM94331PCIEBT4_SSID 0x0527
-#define BCM94331HM_SSID 0x0574
-#define BCM94331PCIEDUAL_SSID 0x059B
-#define BCM94331MCH5_SSID 0x05A9
-#define BCM94331CS_SSID 0x05C6
-#define BCM94331CD_SSID 0x05DA
-
-/* 4314 Boards */
-#define BCM94314BU_SSID 0x05b1
-
-/* 53572 Boards */
-#define BCM953572BU_SSID 0x058D
-#define BCM953572NR2_SSID 0x058E
-#define BCM947188NR2_SSID 0x058F
-#define BCM953572SDRNR2_SSID 0x0590
-
-/* 43236 boards */
-#define BCM943236OLYMPICSULLEY_SSID 0x594
-#define BCM943236PREPROTOBLU2O3_SSID 0x5b9
-#define BCM943236USBELNA_SSID 0x5f8
-
-/* 4314 Boards */
-#define BCM94314BUSDIO_SSID 0x05c8
-#define BCM94314BGABU_SSID 0x05c9
-#define BCM94314HMEPA_SSID 0x05ca
-#define BCM94314HMEPABK_SSID 0x05cb
-#define BCM94314SUHMEPA_SSID 0x05cc
-#define BCM94314SUHM_SSID 0x05cd
-#define BCM94314HM_SSID 0x05d1
-
-/* 4334 Boards */
-#define BCM94334FCAGBI_SSID 0x05df
-#define BCM94334WLAGBI_SSID 0x05dd
-
-/* 4335 Boards */
-#define BCM94335X52 0x0114
-
-/* 4345 Boards */
-#define BCM94345_SSID 0x0687
-
-/* 4360 Boards */
-#define BCM94360X52C 0X0117
-#define BCM94360X52D 0X0137
-#define BCM94360X29C 0X0112
-#define BCM94360X29CP2 0X0134
-#define BCM94360X29CP3 0X013B
-#define BCM94360X51 0x0111
-#define BCM94360X51P2 0x0129
-#define BCM94360X51P3 0x0142
-#define BCM94360X51A 0x0135
-#define BCM94360X51B 0x0136
-#define BCM94360CS 0x061B
-#define BCM94360J28_D11AC2G 0x0c00
-#define BCM94360J28_D11AC5G 0x0c01
-#define BCM94360USBH5_D11AC5G 0x06aa
-#define BCM94360MCM5 0x06d8
-
-/* 4350 Boards */
-#define BCM94350X52B 0X0116
-#define BCM94350X14 0X0131
-
-/* 43217 Boards */
-#define BCM943217BU_SSID 0x05d5
-#define BCM943217HM2L_SSID 0x05d6
-#define BCM943217HMITR2L_SSID 0x05d7
-
-/* 43142 Boards */
-#define BCM943142HM_SSID 0x05e0
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
/* 43341 Boards */
#define BCM943341WLABGS_SSID 0x062d
#define BCM943602RSVD1_SSID 0x06a5
#define BCM943602RSVD2_SSID 0x06a6
#define BCM943602X87 0X0133
+#define BCM943602X87P2 0X0143
#define BCM943602X238 0X0132
+#define BCM943602X238D 0X014A
/* # of GPIO pins */
#define GPIO_NUMPINS 32
/*
* Byte order utilities
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $
*
* This file by default provides proper behavior on little-endian architectures.
* On big-endian architectures, IL_BIGENDIAN should be defined.
*
* Definitions subject to change without notice.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmmsgbuf.h 499474 2014-08-28 21:30:10Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmmsgbuf.h 541060 2015-03-13 23:28:01Z $
*/
#ifndef _bcmmsgbuf_h_
#define _bcmmsgbuf_h_
+
#include <proto/ethernet.h>
#include <wlioctl.h>
#include <bcmpcie.h>
#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN
-#define D2H_EPOCH_MODULO 253 /* sequence number wrap */
-#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1)
+#define D2H_EPOCH_MODULO 253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1)
+
+#define H2D_EPOCH_MODULO 253 /* sequence number wrap */
+#define H2D_EPOCH_INIT_VAL (H2D_EPOCH_MODULO + 1)
#define H2DRING_TXPOST_ITEMSIZE 48
#define H2DRING_RXPOST_ITEMSIZE 32
#define D2HRING_CTRL_CMPLT_ITEMSIZE 24
#define H2DRING_TXPOST_MAX_ITEM 512
-#define H2DRING_RXPOST_MAX_ITEM 256
-#define H2DRING_CTRL_SUB_MAX_ITEM 20
+#define H2DRING_RXPOST_MAX_ITEM 512
+#define H2DRING_CTRL_SUB_MAX_ITEM 64
#define D2HRING_TXCMPLT_MAX_ITEM 1024
-#define D2HRING_RXCMPLT_MAX_ITEM 256
-#define D2HRING_CTRL_CMPLT_MAX_ITEM 20
+#define D2HRING_RXCMPLT_MAX_ITEM 512
+
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 64
+
enum {
DNGL_TO_HOST_MSGBUF,
HOST_TO_DNGL_MSGBUF
#endif /* PCIE_API_REV1 */
/* utility data structures */
+
union addr64 {
struct {
uint32 low;
uint64 u64;
} DECLSPEC_ALIGN(8);
-typedef union addr64 addr64_t;
+typedef union addr64 bcm_addr64_t;
/* IOCTL req Hdr */
/* cmn Msg Hdr */
typedef struct cmn_msg_hdr {
- /* message type */
+ /** message type */
uint8 msg_type;
- /* interface index this is valid for */
+ /** interface index this is valid for */
uint8 if_id;
/* flags */
uint8 flags;
- /* sequence number */
+ /** sequence number */
uint8 epoch;
- /* packet Identifier for the associated host buffer */
+ /** packet Identifier for the associated host buffer */
uint32 request_id;
} cmn_msg_hdr_t;
-/* message type */
+/** message type */
typedef enum bcmpcie_msgtype {
MSG_TYPE_GEN_STATUS = 0x1,
MSG_TYPE_RING_STATUS = 0x2,
MSG_TYPE_RX_CMPLT = 0x12,
MSG_TYPE_LPBK_DMAXFER = 0x13,
MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14,
+ MSG_TYPE_FLOW_RING_RESUME = 0x15,
+ MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16,
+ MSG_TYPE_FLOW_RING_SUSPEND = 0x17,
+ MSG_TYPE_FLOW_RING_SUSPEND_CMPLT = 0x18,
+ MSG_TYPE_INFO_BUF_POST = 0x19,
+ MSG_TYPE_INFO_BUF_CMPLT = 0x1A,
+ MSG_TYPE_H2D_RING_CREATE = 0x1B,
+ MSG_TYPE_D2H_RING_CREATE = 0x1C,
+ MSG_TYPE_H2D_RING_CREATE_CMPLT = 0x1D,
+ MSG_TYPE_D2H_RING_CREATE_CMPLT = 0x1E,
+ MSG_TYPE_H2D_RING_CONFIG = 0x1F,
+ MSG_TYPE_D2H_RING_CONFIG = 0x20,
+ MSG_TYPE_H2D_RING_CONFIG_CMPLT = 0x21,
+ MSG_TYPE_D2H_RING_CONFIG_CMPLT = 0x22,
+ MSG_TYPE_H2D_MAILBOX_DATA = 0x23,
+ MSG_TYPE_D2H_MAILBOX_DATA = 0x24,
+
MSG_TYPE_API_MAX_RSVD = 0x3F
} bcmpcie_msg_type_t;
MSG_TYPE_HOST_FETCH = 0x44,
MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45,
MSG_TYPE_TXMETADATA_PYLD = 0x46,
- MSG_TYPE_HOSTDMA_PTRS = 0x47
+ MSG_TYPE_INDX_UPDATE = 0x47
} bcmpcie_msgtype_int_t;
typedef enum bcmpcie_msgtype_u {
MSG_TYPE_TX_BATCH_POST = 0x80,
MSG_TYPE_IOCTL_REQ = 0x81,
- MSG_TYPE_HOST_EVNT = 0x82,
+ MSG_TYPE_HOST_EVNT = 0x82, /* console related */
MSG_TYPE_LOOPBACK = 0x83
} bcmpcie_msgtype_u_t;
+/**
+ * D2H ring host wakeup soft doorbell, override the PCIE doorbell.
+ * Host configures an <32bit address,value> tuple, and dongle uses SBTOPCIE
+ * Transl0 to write specified value to host address.
+ *
+ * Use case: 32bit Address mapped to HW Accelerator Core/Thread Wakeup Register
+ * and value is Core/Thread context. Host will ensure routing the 32bit address
+ * offered to PCIE to the mapped register.
+ *
+ * D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL
+ */
+typedef struct bcmpcie_soft_doorbell {
+ uint32 value; /* host defined value to be written, eg HW threadid */
+ bcm_addr64_t haddr; /* host address, eg thread wakeup register address */
+ uint16 items; /* interrupt coalescing: item count before wakeup */
+ uint16 msecs; /* interrupt coalescing: timeout in millisecs */
+} bcmpcie_soft_doorbell_t;
+
/* if_id */
#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5
/* IOCTL request message */
typedef struct ioctl_req_msg {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
-
- /* ioctl command type */
+ /** ioctl command type */
uint32 cmd;
- /* ioctl transaction ID, to pair with a ioctl response */
+ /** ioctl transaction ID, to pair with a ioctl response */
uint16 trans_id;
- /* input arguments buffer len */
+ /** input arguments buffer len */
uint16 input_buf_len;
- /* expected output len */
+ /** expected output len */
uint16 output_buf_len;
- /* to aling the host address on 8 byte boundary */
+ /** to align the host address on 8 byte boundary */
uint16 rsvd[3];
- /* always aling on 8 byte boundary */
- addr64_t host_input_buf_addr;
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_input_buf_addr;
/* rsvd */
uint32 rsvd1[2];
} ioctl_req_msg_t;
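As a rough illustration of the request layout above, the sketch below shows how a host driver might populate an ioctl_req_msg_t before copying it into the H2D control submit ring. This is a hedged sketch, not part of the patch or of the Broadcom headers: the helper name and all values are hypothetical, memset() is assumed available, and the epoch field is left for the ring-write path to stamp.

/* Hedged sketch only: fill an IOCTL request work item. */
static void example_fill_ioctl_req(ioctl_req_msg_t *req, uint32 cmd, uint16 trans_id,
	uint16 in_len, uint16 out_len, uint64 input_buf_pa, uint32 pktid)
{
	memset(req, 0, sizeof(*req));
	req->cmn_hdr.msg_type = MSG_TYPE_IOCTL_REQ;   /* control-path request type */
	req->cmn_hdr.if_id = 0;                       /* primary interface */
	req->cmn_hdr.request_id = pktid;              /* identifies the host input buffer */
	req->cmd = cmd;                               /* ioctl command id */
	req->trans_id = trans_id;                     /* echoed back in ioctl_comp_resp_msg_t */
	req->input_buf_len = in_len;
	req->output_buf_len = out_len;
	req->host_input_buf_addr.u64 = input_buf_pa;  /* 64-bit host address of input buffer */
}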
-/* buffer post messages for device to use to return IOCTL responses, Events */
+/** buffer post messages for device to use to return IOCTL responses, Events */
typedef struct ioctl_resp_evt_buf_post_msg {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* length of the host buffer supplied */
+ /** length of the host buffer supplied */
uint16 host_buf_len;
- /* to aling the host address on 8 byte boundary */
+ /** to align the host address on 8 byte boundary */
uint16 reserved[3];
- /* always aling on 8 byte boundary */
- addr64_t host_buf_addr;
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_buf_addr;
uint32 rsvd[4];
} ioctl_resp_evt_buf_post_msg_t;
typedef struct pcie_dma_xfer_params {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* always aling on 8 byte boundary */
- addr64_t host_input_buf_addr;
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_input_buf_addr;
- /* always aling on 8 byte boundary */
- addr64_t host_ouput_buf_addr;
+ /** always align on 8 byte boundary */
+ bcm_addr64_t host_ouput_buf_addr;
- /* length of transfer */
+ /** length of transfer */
uint32 xfer_len;
- /* delay before doing the src txfer */
+ /** delay before doing the src txfer */
uint32 srcdelay;
- /* delay before doing the dest txfer */
+ /** delay before doing the dest txfer */
uint32 destdelay;
uint32 rsvd;
} pcie_dma_xfer_params_t;
-/* Complete msgbuf hdr for flow ring update from host to dongle */
+/** Complete msgbuf hdr for flow ring update from host to dongle */
typedef struct tx_flowring_create_request {
cmn_msg_hdr_t msg;
uint8 da[ETHER_ADDR_LEN];
uint16 int_vector;
uint16 max_items;
uint16 len_item;
- addr64_t flow_ring_ptr;
+ bcm_addr64_t flow_ring_ptr;
} tx_flowring_create_request_t;
typedef struct tx_flowring_delete_request {
uint32 rsvd[7];
} tx_flowring_flush_request_t;
+/** Subtypes for ring_config_req control message */
+typedef enum ring_config_subtype {
+ /** Default D2H PCIE doorbell override using ring_config_req msg */
+ D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL = 1, /* Software doorbell */
+ D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL = 2 /* MSI configuration */
+} ring_config_subtype_t;
+
+typedef struct ring_config_req {
+ cmn_msg_hdr_t msg;
+ uint16 subtype;
+ uint16 ring_id;
+ uint32 rsvd;
+ union {
+ uint32 data[6];
+ /** D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL */
+ bcmpcie_soft_doorbell_t soft_doorbell;
+ };
+} ring_config_req_t;
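To make the soft-doorbell override described above concrete, here is a hedged sketch (not part of the patch) of how a host could build this work item for one D2H completion ring. The helper name, the wakeup register address, the wakeup value and the coalescing settings are all invented for illustration.

/* Hedged sketch only: configure a D2H soft doorbell override. */
static void example_fill_soft_doorbell(ring_config_req_t *cfg, uint16 d2h_ring_id,
	uint64 wakeup_reg_pa, uint32 wakeup_value)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;           /* H2D control message */
	cfg->subtype = D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL;
	cfg->ring_id = d2h_ring_id;              /* e.g. the D2H rx-complete ring id */
	cfg->soft_doorbell.haddr.u64 = wakeup_reg_pa;   /* host wakeup register address */
	cfg->soft_doorbell.value = wakeup_value;        /* value dongle writes on wakeup */
	cfg->soft_doorbell.items = 8;                   /* coalescing: items before wakeup */
	cfg->soft_doorbell.msecs = 2;                   /* coalescing: timeout in ms */
}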
+
typedef union ctrl_submit_item {
ioctl_req_msg_t ioctl_req;
ioctl_resp_evt_buf_post_msg_t resp_buf_post;
tx_flowring_create_request_t flow_create;
tx_flowring_delete_request_t flow_delete;
tx_flowring_flush_request_t flow_flush;
+ ring_config_req_t ring_config_req;
unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE];
} ctrl_submit_item_t;
-/* Control Completion messages (20 bytes) */
+/** Control Completion messages (20 bytes) */
typedef struct compl_msg_hdr {
- /* status for the completion */
+ /** status for the completion */
int16 status;
- /* submisison flow ring id which generated this status */
+	/** submission flow ring id which generated this status */
uint16 flow_ring_id;
} compl_msg_hdr_t;
-/* XOR checksum or a magic number to audit DMA done */
+/** XOR checksum or a magic number to audit DMA done */
typedef uint32 dma_done_t;
/* completion header status codes */
#define BCMPCIE_MAX_IOCTLRESP_BUF 10
#define BCMPCIE_MAX_EVENT_BUF 11
-/* IOCTL completion response */
+/** IOCTL completion response */
typedef struct ioctl_compl_resp_msg {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
- /* response buffer len where a host buffer is involved */
+ /** response buffer len where a host buffer is involved */
uint16 resp_len;
- /* transaction id to pair with a request */
+ /** transaction id to pair with a request */
uint16 trans_id;
- /* cmd id */
+ /** cmd id */
uint32 cmd;
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} ioctl_comp_resp_msg_t;
-/* IOCTL request acknowledgement */
+/** IOCTL request acknowledgement */
typedef struct ioctl_req_ack_msg {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
- /* cmd id */
+ /** cmd id */
uint32 cmd;
- uint32 rsvd[1];
- /* XOR checksum or a magic number to audit DMA done */
+ uint32 rsvd;
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} ioctl_req_ack_msg_t;
-/* WL event message: send from device to host */
+/** WL event message: send from device to host */
typedef struct wlevent_req_msg {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
- /* event data len valid with the event buffer */
+ /** event data len valid with the event buffer */
uint16 event_data_len;
- /* sequence number */
+ /** sequence number */
uint16 seqnum;
- /* rsvd */
+ /** rsvd */
uint32 rsvd;
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} wlevent_req_msg_t;
-/* dma xfer complete message */
+/** dma xfer complete message */
typedef struct pcie_dmaxfer_cmplt {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} pcie_dmaxfer_cmplt_t;
-/* general status message */
+/** general status message */
typedef struct pcie_gen_status {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} pcie_gen_status_t;
-/* ring status message */
+/** ring status message */
typedef struct pcie_ring_status {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
- /* message which firmware couldn't decode */
+ /** message which firmware couldn't decode */
uint16 write_idx;
uint16 rsvd[3];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} pcie_ring_status_t;
cmn_msg_hdr_t msg;
compl_msg_hdr_t cmplt;
uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} tx_flowring_create_response_t;
+
typedef struct tx_flowring_delete_response {
cmn_msg_hdr_t msg;
compl_msg_hdr_t cmplt;
uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} tx_flowring_delete_response_t;
cmn_msg_hdr_t msg;
compl_msg_hdr_t cmplt;
uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
+ /** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} tx_flowring_flush_response_t;
-/* Common layout of all d2h control messages */
+/** Common layout of all d2h control messages */
typedef struct ctrl_compl_msg {
- /* common message header */
- cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
- compl_msg_hdr_t compl_hdr;
- uint32 rsvd[2];
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker;
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
} ctrl_compl_msg_t;
+typedef struct ring_config_resp {
+ /** common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /** completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ring_config_resp_t;
+
typedef union ctrl_completion_item {
ioctl_comp_resp_msg_t ioctl_resp;
wlevent_req_msg_t event;
tx_flowring_delete_response_t txfl_delete_resp;
tx_flowring_flush_response_t txfl_flush_resp;
ctrl_compl_msg_t ctrl_compl;
+ ring_config_resp_t ring_config_resp;
unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE];
} ctrl_completion_item_t;
-/* H2D Rxpost ring work items */
+/** H2D Rxpost ring work items */
typedef struct host_rxbuf_post {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* provided meta data buffer len */
+ /** provided meta data buffer len */
uint16 metadata_buf_len;
- /* provided data buffer len to receive data */
+ /** provided data buffer len to receive data */
uint16 data_buf_len;
- /* alignment to make the host buffers start on 8 byte boundary */
+ /** alignment to make the host buffers start on 8 byte boundary */
uint32 rsvd;
- /* provided meta data buffer */
- addr64_t metadata_buf_addr;
- /* provided data buffer to receive data */
- addr64_t data_buf_addr;
+ /** provided meta data buffer */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer to receive data */
+ bcm_addr64_t data_buf_addr;
} host_rxbuf_post_t;
typedef union rxbuf_submit_item {
} rxbuf_submit_item_t;
-/* D2H Rxcompletion ring work items */
+/** D2H Rxcompletion ring work items */
typedef struct host_rxbuf_cmpl {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
- /* filled up meta data len */
+ /** filled up meta data len */
uint16 metadata_len;
- /* filled up buffer len to receive data */
+ /** filled up buffer len to receive data */
uint16 data_len;
- /* offset in the host rx buffer where the data starts */
+ /** offset in the host rx buffer where the data starts */
uint16 data_offset;
- /* offset in the host rx buffer where the data starts */
+ /** offset in the host rx buffer where the data starts */
uint16 flags;
- /* rx status */
+ /** rx status */
uint32 rx_status_0;
uint32 rx_status_1;
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
} host_rxbuf_cmpl_t;
typedef union rxbuf_complete_item {
typedef struct host_txbuf_post {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* eth header */
+ /** eth header */
uint8 txhdr[ETHER_HDR_LEN];
- /* flags */
+ /** flags */
uint8 flags;
- /* number of segments */
+ /** number of segments */
uint8 seg_cnt;
- /* provided meta data buffer for txstatus */
- addr64_t metadata_buf_addr;
- /* provided data buffer to receive data */
- addr64_t data_buf_addr;
- /* provided meta data buffer len */
+ /** provided meta data buffer for txstatus */
+ bcm_addr64_t metadata_buf_addr;
+ /** provided data buffer to receive data */
+ bcm_addr64_t data_buf_addr;
+ /** provided meta data buffer len */
uint16 metadata_buf_len;
- /* provided data buffer len to receive data */
+ /** provided data buffer len to receive data */
uint16 data_len;
- uint32 flag2;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
} host_txbuf_post_t;
#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5
#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
-/* These are added to fix up teh compile issues */
+/* These are added to fix up compile issues */
#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3
#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11
#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT
#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK
-#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_MASK 0x01
-#define BCMPCIE_PKT_FLAGS2_FORCELOWRATE_SHIFT 0
-
-/* H2D Txpost ring work items */
+/** H2D Txpost ring work items */
typedef union txbuf_submit_item {
host_txbuf_post_t txpost;
unsigned char check[H2DRING_TXPOST_ITEMSIZE];
} txbuf_submit_item_t;
-/* D2H Txcompletion ring work items */
+/** D2H Txcompletion ring work items */
typedef struct host_txbuf_cmpl {
- /* common message header */
+ /** common message header */
cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
+ /** completion message header */
compl_msg_hdr_t compl_hdr;
union {
struct {
- /* provided meta data len */
+ /** provided meta data len */
uint16 metadata_len;
- /* WLAN side txstatus */
+ /** WLAN side txstatus */
uint16 tx_status;
};
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker;
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
};
} host_txbuf_cmpl_t;
#define BCMPCIE_D2H_METADATA_HDRLEN 4
#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4)
-/* ret buf struct */
+/** ret buf struct */
typedef struct ret_buf_ptr {
uint32 low_addr;
uint32 high_addr;
} ret_buf_t;
+
#ifdef PCIE_API_REV1
+
/* ioctl specific hdr */
typedef struct ioctl_hdr {
uint16 cmd;
uint16 retbuf_len;
uint32 cmd_id;
} ioctl_hdr_t;
+
typedef struct ioctlptr_hdr {
uint16 cmd;
uint16 retbuf_len;
uint16 rsvd;
uint32 cmd_id;
} ioctlptr_hdr_t;
+
#else /* PCIE_API_REV1 */
+
typedef struct ioctl_req_hdr {
- uint32 pkt_id; /* Packet ID */
- uint32 cmd; /* IOCTL ID */
+ uint32 pkt_id; /**< Packet ID */
+ uint32 cmd; /**< IOCTL ID */
uint16 retbuf_len;
uint16 buflen;
- uint16 xt_id; /* transaction ID */
+ uint16 xt_id; /**< transaction ID */
uint16 rsvd[1];
} ioctl_req_hdr_t;
+
#endif /* PCIE_API_REV1 */
-/* Complete msgbuf hdr for ioctl from host to dongle */
+/** Complete msgbuf hdr for ioctl from host to dongle */
typedef struct ioct_reqst_hdr {
cmn_msg_hdr_t msg;
#ifdef PCIE_API_REV1
#endif
ret_buf_t ret_buf;
} ioct_reqst_hdr_t;
+
typedef struct ioctptr_reqst_hdr {
cmn_msg_hdr_t msg;
#ifdef PCIE_API_REV1
ret_buf_t ioct_buf;
} ioctptr_reqst_hdr_t;
-/* ioctl response header */
+/** ioctl response header */
typedef struct ioct_resp_hdr {
cmn_msg_hdr_t msg;
#ifdef PCIE_API_REV1
uint32 inline_data;
#ifdef PCIE_API_REV1
#else
- uint16 xt_id; /* transaction ID */
+ uint16 xt_id; /**< transaction ID */
uint16 rsvd[1];
#endif
} ioct_resp_hdr_t;
/* ret buf hdr will be stripped off inside dongle itself */
typedef struct msgbuf_ioctl_resp {
ioct_resp_hdr_t ioct_hdr;
- ret_buf_t ret_buf; /* ret buf pointers */
+ ret_buf_t ret_buf; /**< ret buf pointers */
} msgbuf_ioct_resp_t;
-/* WL evet hdr info */
+/** WL event hdr info */
typedef struct wl_event_hdr {
cmn_msg_hdr_t msg;
uint16 event;
uint32 pktid;
uint16 pktlen;
uint16 rsvd;
- ret_buf_t ret_buf; /* ret buf pointers */
+ ret_buf_t ret_buf; /**< ret buf pointers */
} txbatch_lenptr_tup_t;
typedef struct txbatch_cmn_msghdr {
typedef struct txbatch_msghdr {
txbatch_cmn_msghdr_t txcmn;
- txbatch_lenptr_tup_t tx_tup[0]; /* Based on packet count */
+ txbatch_lenptr_tup_t tx_tup[0]; /**< Based on packet count */
} txbatch_msghdr_t;
/* TX desc posting header */
typedef struct tx_lenptr_tup {
uint16 pktlen;
uint16 rsvd;
- ret_buf_t ret_buf; /* ret buf pointers */
+ ret_buf_t ret_buf; /**< ret buf pointers */
} tx_lenptr_tup_t;
typedef struct txdescr_cmn_msghdr {
txdescr_cmn_msghdr_t txcmn;
uint8 txhdr[ETHER_HDR_LEN];
uint16 rsvd;
- tx_lenptr_tup_t tx_tup[0]; /* Based on descriptor count */
+ tx_lenptr_tup_t tx_tup[0]; /**< Based on descriptor count */
} txdescr_msghdr_t;
-/* Tx status header info */
+/** Tx status header info */
typedef struct txstatus_hdr {
cmn_msg_hdr_t msg;
uint32 pktid;
} txstatus_hdr_t;
-/* RX bufid-len-ptr tuple */
+
+/** RX bufid-len-ptr tuple */
typedef struct rx_lenptr_tup {
uint32 rxbufid;
uint16 len;
uint16 rsvd2;
- ret_buf_t ret_buf; /* ret buf pointers */
+ ret_buf_t ret_buf; /**< ret buf pointers */
} rx_lenptr_tup_t;
-/* Rx descr Post hdr info */
+
+/** Rx descr Post hdr info */
typedef struct rxdesc_msghdr {
cmn_msg_hdr_t msg;
uint16 rsvd0;
rx_lenptr_tup_t rx_tup[0];
} rxdesc_msghdr_t;
-/* RX complete tuples */
+/** RX complete tuples */
typedef struct rxcmplt_tup {
uint16 retbuf_len;
uint16 data_offset;
uint32 rxstatus1;
uint32 rxbufid;
} rxcmplt_tup_t;
-/* RX complete messge hdr */
+
+/** RX complete message hdr */
typedef struct rxcmplt_hdr {
cmn_msg_hdr_t msg;
uint16 rsvd0;
uint16 rxcmpltcnt;
rxcmplt_tup_t rx_tup[0];
} rxcmplt_hdr_t;
+
typedef struct hostevent_hdr {
cmn_msg_hdr_t msg;
uint32 evnt_pyld;
/* defines for flags */
#define MSGBUF_IOC_ACTION_MASK 0x1
+#define MAX_SUSPEND_REQ 15
+
+typedef struct tx_idle_flowring_suspend_request {
+ cmn_msg_hdr_t msg;
+	uint16	ring_id[MAX_SUSPEND_REQ];	/**< ring IDs */
+	uint16	num;	/**< number of flow ring IDs to suspend */
+} tx_idle_flowring_suspend_request_t;
+
+typedef struct tx_idle_flowring_suspend_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_suspend_response_t;
+
+typedef struct tx_idle_flowring_resume_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_idle_flowring_resume_request_t;
+
+typedef struct tx_idle_flowring_resume_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ dma_done_t marker;
+} tx_idle_flowring_resume_response_t;
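A hedged sketch (not part of the patch) of how the host side might fill the suspend request defined above for a set of flow rings it considers idle; the helper name is hypothetical and memset() is assumed available.

/* Hedged sketch only: request suspension of idle flow rings. */
static void example_fill_flowring_suspend(tx_idle_flowring_suspend_request_t *sus,
	const uint16 *idle_ring_ids, uint16 count)
{
	uint16 i;

	memset(sus, 0, sizeof(*sus));
	sus->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
	if (count > MAX_SUSPEND_REQ)
		count = MAX_SUSPEND_REQ;            /* at most 15 ring ids per request */
	for (i = 0; i < count; i++)
		sus->ring_id[i] = idle_ring_ids[i];
	sus->num = count;
}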
+
#endif /* _bcmmsgbuf_h_ */
--- /dev/null
+/*
+ * NVRAM variable manipulation
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmnvram.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _bcmnvram_h_
+#define _bcmnvram_h_
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+
+struct nvram_header {
+ uint32 magic;
+ uint32 len;
+ uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
+ uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
+ uint32 config_ncdl; /* ncdl values for memc */
+};
+
+struct nvram_tuple {
+ char *name;
+ char *value;
+ struct nvram_tuple *next;
+};
+
+/*
+ * Get default value for an NVRAM variable
+ */
+extern char *nvram_default_get(const char *name);
+/*
+ * validate/restore all per-interface related variables
+ */
+extern void nvram_validate_all(char *prefix, bool restore);
+
+/*
+ * restore specific per-interface variable
+ */
+extern void nvram_restore_var(char *prefix, char *name);
+
+/*
+ * Initialize NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern int nvram_init(void *sih);
+extern int nvram_deinit(void *sih);
+
+
+/*
+ * Append a chunk of nvram variables to the global list
+ */
+extern int nvram_append(void *si, char *vars, uint varsz);
+
+extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
+
+/*
+ * Check for reset button press for restoring factory defaults.
+ */
+extern int nvram_reset(void *sih);
+
+/*
+ * Disable NVRAM access. May be unnecessary or undefined on certain
+ * platforms.
+ */
+extern void nvram_exit(void *sih);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param name name of variable to get
+ * @return value of variable or NULL if undefined
+ */
+extern char * nvram_get(const char *name);
+
+/*
+ * Get the value of an NVRAM variable. The pointer returned may be
+ * invalid after a set.
+ * @param name name of variable to get
+ * @param bit bit value to get
+ * @return value of variable or NULL if undefined
+ */
+extern char * nvram_get_bitflag(const char *name, const int bit);
+
+/*
+ * Read the reset GPIO value from the nvram and set the GPIO
+ * as input
+ */
+extern int nvram_resetgpio_init(void *sih);
+
+/*
+ * Get the value of an NVRAM variable.
+ * @param name name of variable to get
+ * @return value of variable, or an empty string ("") if undefined
+ */
+static INLINE char *
+nvram_safe_get(const char *name)
+{
+ char *p = nvram_get(name);
+ return p ? p : "";
+}
+
+/*
+ * Match an NVRAM variable.
+ * @param name name of variable to match
+ * @param match value to compare against value of variable
+ * @return TRUE if variable is defined and its value is string equal
+ * to match or FALSE otherwise
+ */
+static INLINE int
+nvram_match(const char *name, const char *match)
+{
+ const char *value = nvram_get(name);
+ return (value && !strcmp(value, match));
+}
+
+/*
+ * Match an NVRAM variable.
+ * @param name name of variable to match
+ * @param bit bit value to get
+ * @param match value to compare against value of variable
+ * @return TRUE if variable is defined and its value is string equal
+ * to match or FALSE otherwise
+ */
+static INLINE int
+nvram_match_bitflag(const char *name, const int bit, const char *match)
+{
+ const char *value = nvram_get_bitflag(name, bit);
+ return (value && !strcmp(value, match));
+}
+
+/*
+ * Inversely match an NVRAM variable.
+ * @param name name of variable to match
+ * @param match value to compare against value of variable
+ * @return TRUE if variable is defined and its value is not string
+ * equal to invmatch or FALSE otherwise
+ */
+static INLINE int
+nvram_invmatch(const char *name, const char *invmatch)
+{
+ const char *value = nvram_get(name);
+ return (value && strcmp(value, invmatch));
+}
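A brief usage sketch for the accessors above; it is illustrative only, and the variable names ("wl0_ssid", "wl0_mode", "wl0_radio") are examples, not names this header defines.

/* Hedged sketch only: reading NVRAM variables with the helpers above. */
static void example_read_wireless_vars(void)
{
	char *ssid = nvram_safe_get("wl0_ssid");   /* "" rather than NULL when unset */

	if (nvram_match("wl0_mode", "ap")) {
		/* interface is configured as an access point */
	}
	if (nvram_invmatch("wl0_radio", "0")) {
		/* radio is not explicitly disabled */
	}
	(void)ssid;
}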
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param name name of variable to set
+ * @param value value of variable
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_set(const char *name, const char *value);
+
+/*
+ * Set the value of an NVRAM variable. The name and value strings are
+ * copied into private storage. Pointers to previously set values
+ * may become invalid. The new value may be immediately
+ * retrieved but will not be permanently stored until a commit.
+ * @param name name of variable to set
+ * @param bit bit value to set
+ * @param value value of variable
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_set_bitflag(const char *name, const int bit, const int value);
+/*
+ * Unset an NVRAM variable. Pointers to previously set values
+ * remain valid until a set.
+ * @param name name of variable to unset
+ * @return 0 on success and errno on failure
+ * NOTE: use nvram_commit to commit this change to flash.
+ */
+extern int nvram_unset(const char *name);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @param nvram_corrupt true to corrupt nvram, false otherwise.
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_commit_internal(bool nvram_corrupt);
+
+/*
+ * Commit NVRAM variables to permanent storage. All pointers to values
+ * may be invalid after a commit.
+ * NVRAM values are undefined after a commit.
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_commit(void);
+
+/*
+ * Get all NVRAM variables (format name=value\0 ... \0\0).
+ * @param buf buffer to store variables
+ * @param count size of buffer in bytes
+ * @return 0 on success and errno on failure
+ */
+extern int nvram_getall(char *nvram_buf, int count);
+
+/*
+ * returns the crc value of the nvram
+ * @param nvh nvram header pointer
+ */
+uint8 nvram_calc_crc(struct nvram_header * nvh);
+
+extern int nvram_space;
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* The NVRAM version number stored as an NVRAM variable */
+#define NVRAM_SOFTWARE_VERSION "1"
+
+#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
+#define NVRAM_CLEAR_MAGIC 0x0
+#define NVRAM_INVALID_MAGIC 0xFFFFFFFF
+#define NVRAM_VERSION 1
+#define NVRAM_HEADER_SIZE 20
+/* This definition is for precommit staging, and will be removed */
+#define NVRAM_SPACE 0x8000
+/* For CFE builds this gets passed in thru the makefile */
+#ifndef MAX_NVRAM_SPACE
+#define MAX_NVRAM_SPACE 0x10000
+#endif
+#define DEF_NVRAM_SPACE 0x8000
+#define ROM_ENVRAM_SPACE 0x1000
+#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */
+
+#define NVRAM_MAX_VALUE_LEN 255
+#define NVRAM_MAX_PARAM_LEN 64
+
+#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */
+#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */
+
+/* Offsets to embedded nvram area */
+#define NVRAM_START_COMPRESSED 0x400
+#define NVRAM_START 0x1000
+
+#define BCM_JUMBO_NVRAM_DELIMIT '\n'
+#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
+
+#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
+ defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
+#define IMAGE_SIZE "image_size"
+#define BOOTPARTITION "bootpartition"
+#define IMAGE_BOOT BOOTPARTITION
+#define PARTIALBOOTS "partialboots"
+#define MAXPARTIALBOOTS "maxpartialboots"
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#endif
+
+#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
+ defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
+/* Shared by all: CFE, Linux Kernel, and AP */
+#define IMAGE_BOOT "image_boot"
+#define BOOTPARTITION IMAGE_BOOT
+/* CFE variables */
+#define IMAGE_1ST_FLASH_TRX "flash0.trx"
+#define IMAGE_1ST_FLASH_OS "flash0.os"
+#define IMAGE_2ND_FLASH_TRX "flash0.trx2"
+#define IMAGE_2ND_FLASH_OS "flash0.os2"
+#define IMAGE_SIZE "image_size"
+
+/* CFE and Linux Kernel shared variables */
+#define IMAGE_FIRST_OFFSET "image_first_offset"
+#define IMAGE_SECOND_OFFSET "image_second_offset"
+
+/* Linux application variables */
+#define LINUX_FIRST "linux"
+#define LINUX_SECOND "linux2"
+#define POLICY_TOGGLE "toggle"
+#define LINUX_PART_TO_FLASH "linux_to_flash"
+#define LINUX_FLASH_POLICY "linux_flash_policy"
+
+#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */
+
+#endif /* _bcmnvram_h_ */
* Broadcom PCIE
* Software-specific definitions shared between device and host side
* Explains the shared area between host and dongle
- * $Copyright Open 2005 Broadcom Corporation$
*
- * $Id: bcmpcie.h 497456 2014-08-19 15:06:33Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmpcie.h 604490 2015-12-07 15:48:45Z $
*/
+
#ifndef _bcmpcie_h_
#define _bcmpcie_h_
} sh_addr_t;
-
-#ifdef BCMPCIE_SUPPORT_TX_PUSH_RING
-#define BCMPCIE_PUSH_TX_RING 1
-#else
-#define BCMPCIE_PUSH_TX_RING 0
-#endif /* BCMPCIE_SUPPORT_TX_PUSH_RING */
-
/* May be overridden by 43xxxxx-roml.mk */
#if !defined(BCMPCIE_MAX_TX_FLOWS)
#define BCMPCIE_MAX_TX_FLOWS 40
#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+/**
+ * Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that
+ * is located in device memory.
+ */
#define PCIE_SHARED_VERSION 0x00005
#define PCIE_SHARED_VERSION_MASK 0x000FF
#define PCIE_SHARED_ASSERT_BUILT 0x00100
#define PCIE_SHARED_EVT_SEQNUM 0x08000
#define PCIE_SHARED_DMA_INDEX 0x10000
-/* D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM */
-#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000
-#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000
+/**
+ * There are host types where a device interrupt can 'race ahead' of data written by the device into
+ * host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
+ * using PCIe Message Signalled Interrupts, or by using the PCIE_DMA_INDEX feature). Unfortunately
+ * these techniques have drawbacks on router platforms. For these platforms, it was decided to not
+ * avoid the condition, but to detect the condition instead and act on it.
+ * D2H M2M DMA Complete Sync mechanism: Modulo-253-SeqNum or XORCSUM
+ */
+#define PCIE_SHARED_D2H_SYNC_SEQNUM 0x20000
+#define PCIE_SHARED_D2H_SYNC_XORCSUM 0x40000
#define PCIE_SHARED_D2H_SYNC_MODE_MASK \
(PCIE_SHARED_D2H_SYNC_SEQNUM | PCIE_SHARED_D2H_SYNC_XORCSUM)
+#define PCIE_SHARED_IDLE_FLOW_RING 0x80000
+#define PCIE_SHARED_2BYTE_INDICES 0x100000
+
+
+#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09
+#define PCIE_SHARED_H2D_MAGIC 0x12345678
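As a hedged sketch of how these feature flags might be consumed, the snippet below inspects the D2H sync mode advertised in the shared flags word; the helper name is hypothetical and not part of the patch.

/* Hedged sketch only: pick the D2H completion sync method from advertised flags. */
static void example_check_d2h_sync_mode(uint32 shared_flags)
{
	uint32 sync = shared_flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;

	if (sync == PCIE_SHARED_D2H_SYNC_SEQNUM) {
		/* validate each D2H completion with the modulo-253 epoch sequence number */
	} else if (sync == PCIE_SHARED_D2H_SYNC_XORCSUM) {
		/* validate each D2H completion with the XOR checksum in the marker field */
	} else {
		/* no explicit sync mode advertised by the dongle */
	}
}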
+/**
+ * Message rings convey messages between host and device. They are unidirectional, and are located
+ * in host memory.
+ *
+ * This is the minimal set of message rings, known as 'common message rings':
+ */
#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0
#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1
#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2
#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4
#define BCMPCIE_COMMON_MSGRING_MAX_ID 4
-/* Added only for single tx ring */
-#define BCMPCIE_H2D_TXFLOWRINGID 5
-
#define BCMPCIE_H2D_COMMON_MSGRINGS 2
#define BCMPCIE_D2H_COMMON_MSGRINGS 3
#define BCMPCIE_COMMON_MSGRINGS 5
+#define BCMPCIE_H2D_MSGRINGS(max_tx_flows) \
+ (BCMPCIE_H2D_COMMON_MSGRINGS + (max_tx_flows))
+
+/**
+ * H2D and D2H, WR and RD index, are maintained in the following arrays:
+ * - Array of all H2D WR Indices
+ * - Array of all H2D RD Indices
+ * - Array of all D2H WR Indices
+ * - Array of all D2H RD Indices
+ *
+ * The offset of the WR or RD indexes (for common rings) in these arrays are
+ * listed below. Arrays ARE NOT indexed by a ring's id.
+ *
+ * D2H common rings WR and RD index start from 0, even though their ringids
+ * start from BCMPCIE_H2D_COMMON_MSGRINGS
+ */
+
+#define BCMPCIE_H2D_RING_IDX(h2d_ring_id) (h2d_ring_id)
+
enum h2dring_idx {
- BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = 0,
- BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = 1,
- BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = 2
+ /* H2D common rings */
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT),
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT),
+
+ /* First TxPost's WR or RD index starts after all H2D common rings */
+ BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START =
+ BCMPCIE_H2D_RING_IDX(BCMPCIE_H2D_COMMON_MSGRINGS)
};
+#define BCMPCIE_D2H_RING_IDX(d2h_ring_id) \
+ ((d2h_ring_id) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
enum d2hring_idx {
- BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = 0,
- BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = 1,
- BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = 2
+ /* D2H Common Rings */
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE),
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_TX_COMPLETE),
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX =
+ BCMPCIE_D2H_RING_IDX(BCMPCIE_D2H_MSGRING_RX_COMPLETE)
};
+/**
+ * Macros for managing arrays of RD WR indices:
+ * rw_index_sz:
+ * - in dongle, rw_index_sz is known at compile time
+ * - in host/DHD, rw_index_sz is derived from advertised pci_shared flags
+ *
+ * ring_idx: See h2dring_idx and d2hring_idx
+ */
+
+/** Offset of a RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_OFFSET(rw_index_sz, ring_idx) \
+ ((rw_index_sz) * (ring_idx))
+
+/** Fetch the address of RD or WR index in H2D or D2H indices array */
+#define BCMPCIE_RW_INDEX_ADDR(indices_array_base, rw_index_sz, ring_idx) \
+ (void *)((uint32)(indices_array_base) + \
+ BCMPCIE_RW_INDEX_OFFSET((rw_index_sz), (ring_idx)))
+
+/** H2D DMA Indices array size: given max flow rings */
+#define BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows) \
+ ((rw_index_sz) * BCMPCIE_H2D_MSGRINGS(max_tx_flows))
+
+/** D2H DMA Indices array size */
+#define BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz) \
+ ((rw_index_sz) * BCMPCIE_D2H_COMMON_MSGRINGS)
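A hedged sketch (not part of the patch) of the index-array arithmetic these macros express, assuming 2-byte indices as advertised via PCIE_SHARED_2BYTE_INDICES; the helper name is hypothetical.

/* Hedged sketch only: DMA index array sizing and per-ring offsets. */
static void example_dma_index_layout(uint16 max_tx_flows)
{
	uint32 rw_index_sz = 2;   /* e.g. dongle advertised PCIE_SHARED_2BYTE_INDICES */
	uint32 rxcpl_rd_offset = BCMPCIE_RW_INDEX_OFFSET(rw_index_sz,
		BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX);     /* byte offset in D2H RD array */
	uint32 h2d_array_sz = BCMPCIE_H2D_RW_INDEX_ARRAY_SZ(rw_index_sz, max_tx_flows);
	uint32 d2h_array_sz = BCMPCIE_D2H_RW_INDEX_ARRAY_SZ(rw_index_sz);

	(void)rxcpl_rd_offset; (void)h2d_array_sz; (void)d2h_array_sz;
}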
+
+/**
+ * This type is used by a 'message buffer' (which is a FIFO for messages). Message buffers are used
+ * for host<->device communication and are instantiated on both sides. ring_mem_t is instantiated
+ * both in host as well as device memory.
+ */
typedef struct ring_mem {
- uint16 idx;
+ uint16 idx; /* ring id */
uint8 type;
uint8 rsvd;
- uint16 max_item;
- uint16 len_items;
- sh_addr_t base_addr;
+ uint16 max_item; /* Max number of items in flow ring */
+ uint16 len_items; /* Items are fixed size. Length in bytes of one item */
+ sh_addr_t base_addr; /* 64 bits address, either in host or device memory */
} ring_mem_t;
-#define RINGSTATE_INITED 1
-
-typedef struct ring_state {
- uint8 idx;
- uint8 state;
- uint16 r_offset;
- uint16 w_offset;
- uint16 e_offset;
-} ring_state_t;
-
-
+/**
+ * Per flow ring, information is maintained in device memory, e.g. at what address the ringmem and
+ * ringstate are located. The flow ring itself can be instantiated in either host or device memory.
+ *
+ * Perhaps this type should be renamed to make clear that it resides in device memory only.
+ */
typedef struct ring_info {
- /* locations in the TCM where the ringmem is and ringstate are defined */
- uint32 ringmem_ptr; /* ring mem location in TCM */
- uint32 h2d_w_idx_ptr;
-
- uint32 h2d_r_idx_ptr;
- uint32 d2h_w_idx_ptr;
-
- uint32 d2h_r_idx_ptr;
- /* host locations where the DMA of read/write indices are */
- sh_addr_t h2d_w_idx_hostaddr;
- sh_addr_t h2d_r_idx_hostaddr;
- sh_addr_t d2h_w_idx_hostaddr;
- sh_addr_t d2h_r_idx_hostaddr;
- uint16 max_sub_queues;
+ uint32 ringmem_ptr; /* ring mem location in dongle memory */
+
+ /* Following arrays are indexed using h2dring_idx and d2hring_idx, and not
+ * by a ringid.
+ */
+
+ /* 32bit ptr to arrays of WR or RD indices for all rings in dongle memory */
+ uint32 h2d_w_idx_ptr; /* Array of all H2D ring's WR indices */
+ uint32 h2d_r_idx_ptr; /* Array of all H2D ring's RD indices */
+ uint32 d2h_w_idx_ptr; /* Array of all D2H ring's WR indices */
+ uint32 d2h_r_idx_ptr; /* Array of all D2H ring's RD indices */
+
+ /* PCIE_DMA_INDEX feature: Dongle uses mem2mem DMA to sync arrays in host.
+ * Host may directly fetch WR and RD indices from these host-side arrays.
+ *
+ * 64bit ptr to arrays of WR or RD indices for all rings in host memory.
+ */
+ sh_addr_t h2d_w_idx_hostaddr; /* Array of all H2D ring's WR indices */
+ sh_addr_t h2d_r_idx_hostaddr; /* Array of all H2D ring's RD indices */
+ sh_addr_t d2h_w_idx_hostaddr; /* Array of all D2H ring's WR indices */
+ sh_addr_t d2h_r_idx_hostaddr; /* Array of all D2H ring's RD indices */
+
+ uint16 max_sub_queues; /* maximum number of H2D rings: common + flow */
uint16 rsvd;
} ring_info_t;
+/**
+ * A structure located in TCM that is shared between host and device, primarily used during
+ * initialization.
+ */
typedef struct {
- /* shared area version captured at flags 7:0 */
+ /** shared area version captured at flags 7:0 */
uint32 flags;
uint32 trap_addr;
uint32 assert_exp_addr;
uint32 assert_file_addr;
uint32 assert_line;
- uint32 console_addr; /* Address of hnd_cons_t */
+ uint32 console_addr; /**< Address of hnd_cons_t */
uint32 msgtrace_addr;
uint32 dma_rxoffset; /* rsvd in spec */
- /* these will be used for sleep request/ack, d3 req/ack */
+ /** these will be used for sleep request/ack, d3 req/ack */
uint32 h2d_mb_data_ptr;
uint32 d2h_mb_data_ptr;
/* information pertinent to host IPC/msgbuf channels */
- /* location in the TCM memory which has the ring_info */
+ /** location in the TCM memory which has the ring_info */
uint32 rings_info_ptr;
- /* block of host memory for the scratch buffer */
+ /** block of host memory for the scratch buffer */
uint32 host_dma_scratch_buffer_len;
sh_addr_t host_dma_scratch_buffer;
- /* block of host memory for the dongle to push the status into */
+ /** block of host memory for the dongle to push the status into */
uint32 device_rings_stsblk_len;
sh_addr_t device_rings_stsblk;
-#ifdef BCM_BUZZZ
+
uint32 buzzz; /* BUZZZ state format strings and trace buffer */
-#endif
+
} pciedev_shared_t;
+extern pciedev_shared_t pciedev_shared;
+
+/**
+ * Mailboxes notify a remote party that an event took place, using interrupts. They use hardware
+ * support.
+ */
/* H2D mail box Data */
#define H2D_HOST_D3_INFORM 0x00000001
#define H2D_HOST_DS_ACK 0x00000002
-#define H2D_HOST_CONS_INT 0x80000000 /* h2d int for console cmds */
+#define H2D_HOST_DS_NAK 0x00000004
+#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */
+#define H2D_FW_TRAP 0x20000000 /**< dump HW reg info for Livelock issue */
+#define H2D_HOST_D0_INFORM_IN_USE 0x00000008
+#define H2D_HOST_D0_INFORM 0x00000010
/* D2H mail box Data */
#define D2H_DEV_D3_ACK 0x00000001
#define D2H_DEV_DS_ENTER_REQ 0x00000002
#define D2H_DEV_DS_EXIT_NOTE 0x00000004
#define D2H_DEV_FWHALT 0x10000000
+#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
+ D2H_DEV_DS_EXIT_NOTE | D2H_DEV_FWHALT)
+#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK))
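A hedged sketch of how a host might act on the D2H mailbox word using the mask above; the helper name is hypothetical and the snippet is not part of the patch.

/* Hedged sketch only: handle a D2H mailbox data word read from dongle memory. */
static int example_handle_d2h_mb(uint32 mb_data)
{
	if (D2H_DEV_MB_INVALIDATED(mb_data))
		return -1;                  /* zero, or bits outside D2H_DEV_MB_MASK */
	if (mb_data & D2H_DEV_D3_ACK) {
		/* dongle acknowledged the D3 (suspend) request */
	}
	if (mb_data & D2H_DEV_FWHALT) {
		/* firmware halted; host should start recovery */
	}
	return 0;
}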
-
-extern pciedev_shared_t pciedev_shared;
+/** These macros operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */
#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1))
#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1)
#define READ_AVAIL_SPACE(w, r, d) \
((w >= r) ? (w - r) : (d - r))
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d) \
+ MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d))
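A hedged sketch (not part of the patch) of the circular-ring occupancy math these macros implement, for a ring of depth d with read/write offsets r and w; MIN() is assumed to be provided elsewhere and the helper name is hypothetical.

/* Hedged sketch only: ring occupancy and free-space checks. */
static void example_ring_space(uint16 r, uint16 w, uint16 d)
{
	uint16 in_use   = NTXPACTIVE(r, w, d);         /* items not yet consumed */
	uint16 readable = READ_AVAIL_SPACE(w, r, d);   /* contiguous items readable at r */
	uint16 writable = CHECK_WRITE_SPACE(r, w, d);  /* contiguous free slots at w */

	(void)in_use; (void)readable; (void)writable;
}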
+
+
#define WRT_PEND(x) ((x)->wr_pending)
#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr))
#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a))
#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr))
#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a))
-#define RING_READ_PTR(x) ((x)->ringstate->r_offset)
-#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset)
-#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr)
-#define RING_MAX_ITEM(x) ((x)->ringmem->max_item)
-#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items)
-#define HOST_RING_BASE(x) ((x)->ring_base.va)
-#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \
- ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
-
-#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
-#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
-#define CHECK_WRITE_SPACE(r, w, d) \
- MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d))
+#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr)
+#define RING_MAX_ITEM(x) ((x)->ringmem->max_item)
+#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items)
#endif /* _bcmpcie_h_ */
/*
* Broadcom PCI-SPI Host Controller Register Definitions
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmpcispi.h 241182 2011-02-17 21:50:03Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmpcispi.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_PCI_SPI_H
#define _BCM_PCI_SPI_H
/*
* Performance counters software interface.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmperf.h 514727 2014-11-12 03:02:48Z $
*/
/* essai */
#ifndef _BCMPERF_H_
* Definitions for API from sdio common code (bcmsdh) to individual
* host controller drivers.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdbus.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _sdio_api_h_
extern SDIOH_API_RC sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio);
extern SDIOH_API_RC sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab);
+extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode);
+#if defined(SWTXGLOM)
+/* read or write any buffer using cmd53 */
+extern SDIOH_API_RC sdioh_request_swtxglom_buffer(sdioh_info_t *si, uint pio_dma, uint fix_inc,
+ uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
+ void *pkt);
+extern void sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len);
+extern void sdioh_glom_clear(sdioh_info_t *sd);
+#endif
+
#endif /* _sdio_api_h_ */
* export functions to client drivers
* abstract OS and BUS specific details of SDIO
*
- * $ Copyright Open License Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh.h 450676 2014-01-22 22:45:13Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdh.h 514727 2014-11-12 03:02:48Z $
*/
/**
typedef struct bcmsdh_info bcmsdh_info_t;
typedef void (*bcmsdh_cb_fn_t)(void *);
-#if 0 && (NDISVER >= 0x0630) && 1
-extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
- void **regsva, uint irq, shared_info_t *sh);
-#else
extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
/**
* BCMSDH API context
uint32 sbwad; /* Save backplane window address */
void *os_cxt; /* Pointer to per-OS private data */
};
-#endif
/* Detach - freeup resources allocated in attach */
extern int bcmsdh_detach(osl_t *osh, void *sdh);
extern int bcmsdh_recv_buf(void *sdh, uint32 addr, uint fn, uint flags,
uint8 *buf, uint nbytes, void *pkt,
bcmsdh_cmplt_fn_t complete_fn, void *handle);
+#if defined(SWTXGLOM)
+extern int bcmsdh_send_swtxglom_buf(void *sdh, uint32 addr, uint fn, uint flags,
+ uint8 *buf, uint nbytes, void *pkt,
+ bcmsdh_cmplt_fn_t complete_fn, void *handle);
+#endif
extern void bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len);
extern void bcmsdh_glom_clear(void *sdh);
void* oob_irq_handler_context);
extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
-#endif
+#endif
extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2014, Broadcom Corporation
+ * Copyright (C) 1999-2016, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc.h 496576 2014-08-13 15:04:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Proprietary,Open:>>
+ *
+ * $Id: bcmsdh_sdmmc.h 591160 2015-10-07 06:01:58Z $
*/
#ifndef __BCMSDH_SDMMC_H__
#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
-
#define sd_sync_dma(sd, read, nbytes)
#define sd_init_dma(sd)
#define sd_ack_intr(sd)
/* private bus modes */
#define SDIOH_MODE_SD4 2
#define CLIENT_INTR 0x100 /* Get rid of this! */
-#define SDIOH_SDMMC_MAX_SG_ENTRIES (SDPCM_MAXGLOM_SIZE+2)
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
+
+#if defined(SWTXGLOM)
+typedef struct glom_buf {
+ void *glom_pkt_head;
+ void *glom_pkt_tail;
+ uint32 count; /* Total number of pkts queued */
+} glom_buf_t;
+#endif /* SWTXGLOM */
struct sdioh_info {
osl_t *osh; /* osh handler */
struct sdio_func fake_func0;
struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+ uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */
+#if defined(SWTXGLOM)
+ glom_buf_t glom_info; /* pkt information used for glomming */
+#endif
};
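The glom_info field above only tracks a head pointer, a tail pointer and a running count; the SWTXGLOM path appends each pending packet with sdioh_glom_post() and drops the whole chain with sdioh_glom_clear(). A minimal standalone sketch of that bookkeeping follows; the pkt_t type and the two helpers are illustrative stand-ins, not the driver's OSL packet chain.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative packet node; the real driver chains native packets via OSL macros. */
typedef struct pkt {
	struct pkt *next;
	unsigned int len;
} pkt_t;

typedef struct {
	pkt_t *glom_pkt_head;
	pkt_t *glom_pkt_tail;
	unsigned int count;		/* total number of pkts queued */
} glom_queue_t;

/* Append one packet to the tail of the glom chain. */
static void glom_post(glom_queue_t *g, pkt_t *p)
{
	p->next = NULL;
	if (g->glom_pkt_tail)
		g->glom_pkt_tail->next = p;
	else
		g->glom_pkt_head = p;
	g->glom_pkt_tail = p;
	g->count++;
}

/* Drop the whole chain and reset the bookkeeping. */
static void glom_clear(glom_queue_t *g)
{
	pkt_t *p = g->glom_pkt_head;
	while (p) {
		pkt_t *next = p->next;
		free(p);
		p = next;
	}
	g->glom_pkt_head = g->glom_pkt_tail = NULL;
	g->count = 0;
}

int main(void)
{
	glom_queue_t g = { NULL, NULL, 0 };
	for (int i = 0; i < 3; i++) {
		pkt_t *p = calloc(1, sizeof(*p));
		p->len = 512;
		glom_post(&g, p);
	}
	printf("queued %u pkts\n", g.count);	/* queued 3 pkts */
	glom_clear(&g);
	return 0;
}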
/************************************************************
* Broadcom SDIO/PCMCIA
* Software-specific definitions shared between device and host side
*
- * $Copyright Open 2005 Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdpcm.h 472405 2014-04-23 23:46:55Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdpcm.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _bcmsdpcm_h_
/*
* SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdspi.h 294363 2011-11-06 23:02:20Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdspi.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_SD_SPI_H
#define _BCM_SD_SPI_H
/*
* 'Standard' SDIO HOST CONTROLLER driver
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdstd.h 455390 2014-02-13 22:14:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_SD_STD_H
#define _BCM_SD_STD_H
/*
* Broadcom SPI Low-Level Hardware Driver API
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmspi.h 241182 2011-02-17 21:50:03Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspi.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_SPI_H
#define _BCM_SPI_H
--- /dev/null
+/*
+ * SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmspibrcm.h 514727 2014-11-12 03:02:48Z $
+ */
+#ifndef _BCM_SPI_BRCM_H
+#define _BCM_SPI_BRCM_H
+
+#ifndef SPI_MAX_IOFUNCS
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+#endif
+/* global msglevel for debug messages - bitvals come from sdiovar.h */
+
+#if defined(DHD_DEBUG)
+#define sd_err(x) do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)
+#define sd_trace(x) do { if (sd_msglevel & SDH_TRACE_VAL) printf x; } while (0)
+#define sd_info(x) do { if (sd_msglevel & SDH_INFO_VAL) printf x; } while (0)
+#define sd_debug(x) do { if (sd_msglevel & SDH_DEBUG_VAL) printf x; } while (0)
+#define sd_data(x) do { if (sd_msglevel & SDH_DATA_VAL) printf x; } while (0)
+#define sd_ctrl(x) do { if (sd_msglevel & SDH_CTRL_VAL) printf x; } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+#define sd_log(x)
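The sd_* macros above take their argument list wrapped in an extra pair of parentheses so that a plain (non-variadic) macro can forward a whole printf call, and they compile to nothing when DHD_DEBUG is unset. A small self-contained sketch of the same pattern, with sd_msglevel and SDH_ERROR_VAL as local stand-ins:

#include <stdio.h>

/* sd_msglevel and SDH_ERROR_VAL are local stand-ins for the driver globals. */
static unsigned int sd_msglevel = 0x0001;
#define SDH_ERROR_VAL	0x0001
#define sd_err(x)	do { if (sd_msglevel & SDH_ERROR_VAL) printf x; } while (0)

int main(void)
{
	int err = -5;
	/* Note the double parentheses: the macro forwards the whole argument list. */
	sd_err(("cmd53 failed, err %d\n", err));
	return 0;
}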
+
+#define SDIOH_ASSERT(exp) \
+ do { if (!(exp)) \
+ printf("!!!ASSERT fail: file %s line %d", __FILE__, __LINE__); \
+ } while (0)
+
+#define BLOCK_SIZE_F1 64
+#define BLOCK_SIZE_F2 2048
+#define BLOCK_SIZE_F3 2048
+
+/* internal return code */
+#define SUCCESS 0
+#undef ERROR
+#define ERROR 1
+#define ERROR_UF 2
+#define ERROR_OF 3
+
+/* private bus modes */
+#define SDIOH_MODE_SPI 0
+
+#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
+#define USE_MULTIBLOCK 0x4
+
+struct sdioh_info {
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ void *bar0; /* BAR0 for PCI Device */
+ osl_t *osh; /* osh handler */
+ void *controller; /* Pointer to SPI Controller's private data struct */
+ uint lockcount; /* nest count of spi_lock() calls */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ bool initialized; /* card initialized */
+ uint32 target_dev; /* Target device ID */
+ uint32 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+ uint32 controller_type; /* Host controller type */
+ uint8 version; /* Host Controller Spec Compliance Version */
+ uint irq; /* Client irq */
+ uint32 intrcount; /* Client interrupts */
+ uint32 local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_use_dma; /* DMA on CMD53 */
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ /* polling hack in wl_linux.c:wl_timer() */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SPI_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 card_dstatus; /* 32bit device status */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SPI_MAX_IOFUNCS];
+ void *dma_buf;
+ ulong dma_phys;
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
+ uint32 wordlen; /* host processor 16/32bits */
+ uint32 prev_fun;
+ uint32 chip;
+ uint32 chiprev;
+ bool resp_delay_all;
+ bool dwordmode;
+ bool resp_delay_new;
+
+ struct spierrstats_t spierrstats;
+};
+
+/************************************************************
+ * Internal interfaces: per-port references into bcmspibrcm.c
+ */
+
+/* Global message bits */
+extern uint sd_msglevel;
+
+/**************************************************************
+ * Internal interfaces: bcmspibrcm.c references to per-port code
+ */
+
+/* Interrupt (de)registration routines */
+extern int spi_register_irq(sdioh_info_t *sd, uint irq);
+extern void spi_free_irq(uint irq, sdioh_info_t *sd);
+
+/* OS-specific interrupt wrappers (atomic interrupt enable/disable) */
+extern void spi_lock(sdioh_info_t *sd);
+extern void spi_unlock(sdioh_info_t *sd);
+
+/* Allocate/init/free per-OS private data */
+extern int spi_osinit(sdioh_info_t *sd);
+extern void spi_osfree(sdioh_info_t *sd);
+
+#define SPI_RW_FLAG_M BITFIELD_MASK(1) /* Bit [31] - R/W Command Bit */
+#define SPI_RW_FLAG_S 31
+#define SPI_ACCESS_M BITFIELD_MASK(1) /* Bit [30] - Fixed/Incr Access */
+#define SPI_ACCESS_S 30
+#define SPI_FUNCTION_M BITFIELD_MASK(2) /* Bit [29:28] - Function Number */
+#define SPI_FUNCTION_S 28
+#define SPI_REG_ADDR_M BITFIELD_MASK(17) /* Bit [27:11] - Address */
+#define SPI_REG_ADDR_S 11
+#define SPI_LEN_M BITFIELD_MASK(11) /* Bit [10:0] - Packet length */
+#define SPI_LEN_S 0
+
+#endif /* _BCM_SPI_BRCM_H */
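The SPI_*_M/_S pairs near the end of this header describe the gSPI command word layout: rw in bit 31, fixed/incremental access in bit 30, function in bits 29:28, register address in bits 27:11 and length in bits 10:0. A minimal sketch of packing such a word, assuming BITFIELD_MASK(w) expands to a w-bit mask as in the driver:

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalent of the driver's BITFIELD_MASK(width): a width-bit mask. */
#define BITFIELD_MASK(width)	((1u << (width)) - 1)

/* Pack a gSPI command word: rw[31], fixed/incr[30], func[29:28], addr[27:11], len[10:0]. */
static uint32_t gspi_cmd(uint32_t rw, uint32_t incr, uint32_t fn,
                         uint32_t addr, uint32_t len)
{
	return ((rw   & BITFIELD_MASK(1))  << 31) |
	       ((incr & BITFIELD_MASK(1))  << 30) |
	       ((fn   & BITFIELD_MASK(2))  << 28) |
	       ((addr & BITFIELD_MASK(17)) << 11) |
	       ((len  & BITFIELD_MASK(11)) << 0);
}

int main(void)
{
	/* write, incremental access, function 2, register 0x1000, 64 bytes */
	printf("cmd word: 0x%08x\n", (unsigned)gspi_cmd(1, 1, 2, 0x1000, 64));
	return 0;
}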
--- /dev/null
+/*
+ * SROM format definition.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsrom_fmt.h 553280 2015-04-29 07:55:29Z $
+ */
+
+#ifndef _bcmsrom_fmt_h_
+#define _bcmsrom_fmt_h_
+
+#define SROM_MAXREV 13 /* max revision supported by driver */
+
+/* Maximum srom: 12 Kilobits == 1536 bytes */
+
+#define SROM_MAX 1536
+#define SROM_MAXW 594
+
+#ifdef LARGE_NVRAM_MAXSZ
+#define VARS_MAX LARGE_NVRAM_MAXSZ
+#else
+#define VARS_MAX 4096
+#endif /* LARGE_NVRAM_MAXSZ */
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+
+#define SROM_WORDS 64
+
+#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
+
+#define SROM_SSID 2
+#define SROM_SVID 3
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET0 39
+#define SROM_MACMID_ET0 40
+#define SROM_MACLO_ET0 41
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM2_ENETPHY 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
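The AABREV word packs four fields selected by the masks above. A self-contained sketch of decoding it (the constants are repeated locally, and the field meanings — board revision, country code, 2 GHz and 5 GHz antennas available — follow the pci_sromvars usage later in this patch):

#include <stdio.h>
#include <stdint.h>

/* Constants repeated locally so the sketch compiles on its own. */
#define SROM_BR_MASK	0x00ff
#define SROM_CC_MASK	0x0f00
#define SROM_CC_SHIFT	8
#define SROM_AA0_MASK	0x3000
#define SROM_AA0_SHIFT	12
#define SROM_AA1_MASK	0xc000
#define SROM_AA1_SHIFT	14

int main(void)
{
	uint16_t aabrev = 0x5123;	/* example SROM word */
	printf("boardrev %d cc %d aa2g %d aa5g %d\n",
	       aabrev & SROM_BR_MASK,
	       (aabrev & SROM_CC_MASK) >> SROM_CC_SHIFT,
	       (aabrev & SROM_AA0_MASK) >> SROM_AA0_SHIFT,
	       (aabrev & SROM_AA1_MASK) >> SROM_AA1_SHIFT);
	return 0;
}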
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIe functions and 440 bytes
+ * of usable srom, i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+#define SROM4_AA2G_MASK 0x00ff
+#define SROM4_AA2G_SHIFT 0
+#define SROM4_AA5G_MASK 0xff00
+#define SROM4_AA5G_SHIFT 8
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
+
+/* Fields in the ITT_MAXP and 5GLH_MAXP words */
+#define B2G_MAXP_MASK 0xff
+#define B2G_ITT_SHIFT 8
+#define B5G_MAXP_MASK 0xff
+#define B5G_ITT_SHIFT 8
+#define B5GH_MAXP_MASK 0xff
+#define B5GL_MAXP_SHIFT 8
+
+/* All the myriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+
+/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4-kilobit OTP with hardware redundancy.
+ */
+
+#define SROM8_SIGN 64
+
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+#define SROM8_EXTLNAGAIN 93
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 94
+
+/* Measured power 1 & 2 in bits 0-13 at offset 95; the 2 MSBs are unused for now. */
+#define SROM8_MPWR_1_AND_2 95
+
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the myriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 spaces */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA parameters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
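The per-path blocks repeat one field layout at SROM8_PATH0..SROM8_PATH3, so an absolute word offset is simply the path base plus the per-path field index; the legacy SISO names above are formed exactly that way from the path-0 base. A small sketch, with the relevant constants repeated locally:

#include <stdio.h>

/* Constants repeated locally; SROM8_W0_PAB0 in the header is exactly
 * (SROM8_SISO + SROM8_2G_PA), i.e. path-0 base plus the field index. */
#define SROM8_PATH0	96
#define SROM8_PATH1	112
#define SROM8_PATH2	128
#define SROM8_PATH3	144
#define SROM8_2G_PA	1

static const int srom8_path[] = { SROM8_PATH0, SROM8_PATH1, SROM8_PATH2, SROM8_PATH3 };

int main(void)
{
	for (int core = 0; core < 4; core++)
		printf("2G PA word b0 for core %d is SROM word %d\n",
		       core, srom8_path[core] + SROM8_2G_PA);
	return 0;
}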
+
+#define SROM8_CRCREV 219
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+#define SROM9_EU_EDCRSTH 204
+#define SROM10_EU_EDCRSTH 204
+#define SROM8_RXGAINERR_2G 205
+#define SROM8_RXGAINERR_5GL 206
+#define SROM8_RXGAINERR_5GM 207
+#define SROM8_RXGAINERR_5GH 208
+#define SROM8_RXGAINERR_5GU 209
+#define SROM8_SUBBAND_PPR 210
+#define SROM8_PCIEINGRESS_WAR 211
+#define SROM8_EU_EDCRSTH 212
+#define SROM9_SAR 212
+
+#define SROM8_NOISELVL_2G 213
+#define SROM8_NOISELVL_5GL 214
+#define SROM8_NOISELVL_5GM 215
+#define SROM8_NOISELVL_5GH 216
+#define SROM8_NOISELVL_5GU 217
+#define SROM8_NOISECALOFFSET 218
+
+#define SROM9_REV_CRC 219
+
+#define SROM10_CCKPWROFFSET 218
+#define SROM10_SIGN 219
+#define SROM10_SWCTRLMAP_2G 220
+#define SROM10_CRCREV 229
+
+#define SROM10_WORDS 230
+#define SROM10_SIGNATURE SROM4_SIGNATURE
+
+
+/* SROM REV 11 */
+#define SROM11_BREV 65
+
+#define SROM11_BFL0 66
+#define SROM11_BFL1 67
+#define SROM11_BFL2 68
+#define SROM11_BFL3 69
+#define SROM11_BFL4 70
+#define SROM11_BFL5 71
+
+#define SROM11_MACHI 72
+#define SROM11_MACMID 73
+#define SROM11_MACLO 74
+
+#define SROM11_CCODE 75
+#define SROM11_REGREV 76
+
+#define SROM11_LEDBH10 77
+#define SROM11_LEDBH32 78
+
+#define SROM11_LEDDC 79
+
+#define SROM11_AA 80
+
+#define SROM11_AGBG10 81
+#define SROM11_AGBG2A0 82
+#define SROM11_AGA21 83
+
+#define SROM11_TXRXC 84
+
+#define SROM11_FEM_CFG1 85
+#define SROM11_FEM_CFG2 86
+
+/* Masks and offsets for FEM_CFG */
+#define SROM11_FEMCTRL_MASK 0xf800
+#define SROM11_FEMCTRL_SHIFT 11
+#define SROM11_PAPDCAP_MASK 0x0400
+#define SROM11_PAPDCAP_SHIFT 10
+#define SROM11_TWORANGETSSI_MASK 0x0200
+#define SROM11_TWORANGETSSI_SHIFT 9
+#define SROM11_PDGAIN_MASK 0x01f0
+#define SROM11_PDGAIN_SHIFT 4
+#define SROM11_EPAGAIN_MASK 0x000e
+#define SROM11_EPAGAIN_SHIFT 1
+#define SROM11_TSSIPOSSLOPE_MASK 0x0001
+#define SROM11_TSSIPOSSLOPE_SHIFT 0
+#define SROM11_GAINCTRLSPH_MASK 0xf800
+#define SROM11_GAINCTRLSPH_SHIFT 11
+
+#define SROM11_THERMAL 87
+#define SROM11_MPWR_RAWTS 88
+#define SROM11_TS_SLP_OPT_CORRX 89
+#define SROM11_XTAL_FREQ 90
+#define SROM11_5GB0_4080_W0_A1 91
+#define SROM11_PHYCAL_TEMPDELTA 92
+#define SROM11_MPWR_1_AND_2 93
+#define SROM11_5GB0_4080_W1_A1 94
+#define SROM11_TSSIFLOOR_2G 95
+#define SROM11_TSSIFLOOR_5GL 96
+#define SROM11_TSSIFLOOR_5GM 97
+#define SROM11_TSSIFLOOR_5GH 98
+#define SROM11_TSSIFLOOR_5GU 99
+
+/* Masks and offsets for Thermal parameters */
+#define SROM11_TEMPS_PERIOD_MASK 0xf0
+#define SROM11_TEMPS_PERIOD_SHIFT 4
+#define SROM11_TEMPS_HYSTERESIS_MASK 0x0f
+#define SROM11_TEMPS_HYSTERESIS_SHIFT 0
+#define SROM11_TEMPCORRX_MASK 0xfc
+#define SROM11_TEMPCORRX_SHIFT 2
+#define SROM11_TEMPSENSE_OPTION_MASK 0x3
+#define SROM11_TEMPSENSE_OPTION_SHIFT 0
+
+#define SROM11_PDOFF_2G_40M_A0_MASK 0x000f
+#define SROM11_PDOFF_2G_40M_A0_SHIFT 0
+#define SROM11_PDOFF_2G_40M_A1_MASK 0x00f0
+#define SROM11_PDOFF_2G_40M_A1_SHIFT 4
+#define SROM11_PDOFF_2G_40M_A2_MASK 0x0f00
+#define SROM11_PDOFF_2G_40M_A2_SHIFT 8
+#define SROM11_PDOFF_2G_40M_VALID_MASK 0x8000
+#define SROM11_PDOFF_2G_40M_VALID_SHIFT 15
+
+#define SROM11_PDOFF_2G_40M 100
+#define SROM11_PDOFF_40M_A0 101
+#define SROM11_PDOFF_40M_A1 102
+#define SROM11_PDOFF_40M_A2 103
+#define SROM11_5GB0_4080_W2_A1 103
+#define SROM11_PDOFF_80M_A0 104
+#define SROM11_PDOFF_80M_A1 105
+#define SROM11_PDOFF_80M_A2 106
+#define SROM11_5GB1_4080_W0_A1 106
+
+#define SROM11_SUBBAND5GVER 107
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_11 3
+#define SROM11_PATH0 108
+#define SROM11_PATH1 128
+#define SROM11_PATH2 148
+
+#define SROM11_2G_MAXP 0
+#define SROM11_5GB1_4080_PA 0
+#define SROM11_2G_PA 1
+#define SROM11_5GB2_4080_PA 2
+#define SROM11_RXGAINS1 4
+#define SROM11_RXGAINS 5
+#define SROM11_5GB3_4080_PA 5
+#define SROM11_5GB1B0_MAXP 6
+#define SROM11_5GB3B2_MAXP 7
+#define SROM11_5GB0_PA 8
+#define SROM11_5GB1_PA 11
+#define SROM11_5GB2_PA 14
+#define SROM11_5GB3_PA 17
+
+/* Masks and offsets for rxgains */
+#define SROM11_RXGAINS5GTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS2GTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS2GTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS2GTRISOA_MASK 0x0078
+#define SROM11_RXGAINS2GTRISOA_SHIFT 3
+#define SROM11_RXGAINS2GELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS2GELNAGAINA_SHIFT 0
+#define SROM11_RXGAINS5GHTRELNABYPA_MASK 0x8000
+#define SROM11_RXGAINS5GHTRELNABYPA_SHIFT 15
+#define SROM11_RXGAINS5GHTRISOA_MASK 0x7800
+#define SROM11_RXGAINS5GHTRISOA_SHIFT 11
+#define SROM11_RXGAINS5GHELNAGAINA_MASK 0x0700
+#define SROM11_RXGAINS5GHELNAGAINA_SHIFT 8
+#define SROM11_RXGAINS5GMTRELNABYPA_MASK 0x0080
+#define SROM11_RXGAINS5GMTRELNABYPA_SHIFT 7
+#define SROM11_RXGAINS5GMTRISOA_MASK 0x0078
+#define SROM11_RXGAINS5GMTRISOA_SHIFT 3
+#define SROM11_RXGAINS5GMELNAGAINA_MASK 0x0007
+#define SROM11_RXGAINS5GMELNAGAINA_SHIFT 0
+
+/* Power per rate */
+#define SROM11_CCKBW202GPO 168
+#define SROM11_CCKBW20UL2GPO 169
+#define SROM11_MCSBW202GPO 170
+#define SROM11_MCSBW202GPO_1 171
+#define SROM11_MCSBW402GPO 172
+#define SROM11_MCSBW402GPO_1 173
+#define SROM11_DOT11AGOFDMHRBW202GPO 174
+#define SROM11_OFDMLRBW202GPO 175
+
+#define SROM11_MCSBW205GLPO 176
+#define SROM11_MCSBW205GLPO_1 177
+#define SROM11_MCSBW405GLPO 178
+#define SROM11_MCSBW405GLPO_1 179
+#define SROM11_MCSBW805GLPO 180
+#define SROM11_MCSBW805GLPO_1 181
+#define SROM11_RPCAL_2G 182
+#define SROM11_RPCAL_5GL 183
+#define SROM11_MCSBW205GMPO 184
+#define SROM11_MCSBW205GMPO_1 185
+#define SROM11_MCSBW405GMPO 186
+#define SROM11_MCSBW405GMPO_1 187
+#define SROM11_MCSBW805GMPO 188
+#define SROM11_MCSBW805GMPO_1 189
+#define SROM11_RPCAL_5GM 190
+#define SROM11_RPCAL_5GH 191
+#define SROM11_MCSBW205GHPO 192
+#define SROM11_MCSBW205GHPO_1 193
+#define SROM11_MCSBW405GHPO 194
+#define SROM11_MCSBW405GHPO_1 195
+#define SROM11_MCSBW805GHPO 196
+#define SROM11_MCSBW805GHPO_1 197
+#define SROM11_RPCAL_5GU 198
+#define SROM11_PDOFF_2G_CCK 199
+#define SROM11_MCSLR5GLPO 200
+#define SROM11_MCSLR5GMPO 201
+#define SROM11_MCSLR5GHPO 202
+
+#define SROM11_SB20IN40HRPO 203
+#define SROM11_SB20IN80AND160HR5GLPO 204
+#define SROM11_SB40AND80HR5GLPO 205
+#define SROM11_SB20IN80AND160HR5GMPO 206
+#define SROM11_SB40AND80HR5GMPO 207
+#define SROM11_SB20IN80AND160HR5GHPO 208
+#define SROM11_SB40AND80HR5GHPO 209
+#define SROM11_SB20IN40LRPO 210
+#define SROM11_SB20IN80AND160LR5GLPO 211
+#define SROM11_SB40AND80LR5GLPO 212
+#define SROM11_TXIDXCAP2G 212
+#define SROM11_SB20IN80AND160LR5GMPO 213
+#define SROM11_SB40AND80LR5GMPO 214
+#define SROM11_TXIDXCAP5G 214
+#define SROM11_SB20IN80AND160LR5GHPO 215
+#define SROM11_SB40AND80LR5GHPO 216
+
+#define SROM11_DOT11AGDUPHRPO 217
+#define SROM11_DOT11AGDUPLRPO 218
+
+/* MISC */
+#define SROM11_PCIEINGRESS_WAR 220
+#define SROM11_SAR 221
+
+#define SROM11_NOISELVL_2G 222
+#define SROM11_NOISELVL_5GL 223
+#define SROM11_NOISELVL_5GM 224
+#define SROM11_NOISELVL_5GH 225
+#define SROM11_NOISELVL_5GU 226
+
+#define SROM11_RXGAINERR_2G 227
+#define SROM11_RXGAINERR_5GL 228
+#define SROM11_RXGAINERR_5GM 229
+#define SROM11_RXGAINERR_5GH 230
+#define SROM11_RXGAINERR_5GU 231
+
+#define SROM11_EU_EDCRSTH 232
+#define SROM12_EU_EDCRSTH 232
+
+#define SROM11_SIGN 64
+#define SROM11_CRCREV 233
+
+#define SROM11_WORDS 234
+#define SROM11_SIGNATURE 0x0634
+
+
+/* SROM REV 12 */
+#define SROM12_SIGN 64
+#define SROM12_WORDS 512
+#define SROM12_SIGNATURE 0x8888
+#define SROM12_CRCREV 511
+
+#define SROM12_BFL6 486
+#define SROM12_BFL7 487
+
+#define SROM12_MCSBW205GX1PO 234
+#define SROM12_MCSBW205GX1PO_1 235
+#define SROM12_MCSBW405GX1PO 236
+#define SROM12_MCSBW405GX1PO_1 237
+#define SROM12_MCSBW805GX1PO 238
+#define SROM12_MCSBW805GX1PO_1 239
+#define SROM12_MCSLR5GX1PO 240
+#define SROM12_SB40AND80LR5GX1PO 241
+#define SROM12_SB20IN80AND160LR5GX1PO 242
+#define SROM12_SB20IN80AND160HR5GX1PO 243
+#define SROM12_SB40AND80HR5GX1PO 244
+
+#define SROM12_MCSBW205GX2PO 245
+#define SROM12_MCSBW205GX2PO_1 246
+#define SROM12_MCSBW405GX2PO 247
+#define SROM12_MCSBW405GX2PO_1 248
+#define SROM12_MCSBW805GX2PO 249
+#define SROM12_MCSBW805GX2PO_1 250
+#define SROM12_MCSLR5GX2PO 251
+#define SROM12_SB40AND80LR5GX2PO 252
+#define SROM12_SB20IN80AND160LR5GX2PO 253
+#define SROM12_SB20IN80AND160HR5GX2PO 254
+#define SROM12_SB40AND80HR5GX2PO 255
+
+/* MISC */
+#define SROM12_RXGAINS10 483
+#define SROM12_RXGAINS11 484
+#define SROM12_RXGAINS12 485
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_12 3
+#define SROM12_PATH0 256
+#define SROM12_PATH1 328
+#define SROM12_PATH2 400
+
+#define SROM12_5GB42G_MAXP 0
+#define SROM12_2GB0_PA 1
+#define SROM12_2GB0_PA_W0 1
+#define SROM12_2GB0_PA_W1 2
+#define SROM12_2GB0_PA_W2 3
+#define SROM12_2GB0_PA_W3 4
+
+#define SROM12_RXGAINS 5
+#define SROM12_5GB1B0_MAXP 6
+#define SROM12_5GB3B2_MAXP 7
+
+#define SROM12_5GB0_PA 8
+#define SROM12_5GB0_PA_W0 8
+#define SROM12_5GB0_PA_W1 9
+#define SROM12_5GB0_PA_W2 10
+#define SROM12_5GB0_PA_W3 11
+
+#define SROM12_5GB1_PA 12
+#define SROM12_5GB1_PA_W0 12
+#define SROM12_5GB1_PA_W1 13
+#define SROM12_5GB1_PA_W2 14
+#define SROM12_5GB1_PA_W3 15
+
+#define SROM12_5GB2_PA 16
+#define SROM12_5GB2_PA_W0 16
+#define SROM12_5GB2_PA_W1 17
+#define SROM12_5GB2_PA_W2 18
+#define SROM12_5GB2_PA_W3 19
+
+#define SROM12_5GB3_PA 20
+#define SROM12_5GB3_PA_W0 20
+#define SROM12_5GB3_PA_W1 21
+#define SROM12_5GB3_PA_W2 22
+#define SROM12_5GB3_PA_W3 23
+
+#define SROM12_5GB4_PA 24
+#define SROM12_5GB4_PA_W0 24
+#define SROM12_5GB4_PA_W1 25
+#define SROM12_5GB4_PA_W2 26
+#define SROM12_5GB4_PA_W3 27
+
+#define SROM12_2G40B0_PA 28
+#define SROM12_2G40B0_PA_W0 28
+#define SROM12_2G40B0_PA_W1 29
+#define SROM12_2G40B0_PA_W2 30
+#define SROM12_2G40B0_PA_W3 31
+
+#define SROM12_5G40B0_PA 32
+#define SROM12_5G40B0_PA_W0 32
+#define SROM12_5G40B0_PA_W1 33
+#define SROM12_5G40B0_PA_W2 34
+#define SROM12_5G40B0_PA_W3 35
+
+#define SROM12_5G40B1_PA 36
+#define SROM12_5G40B1_PA_W0 36
+#define SROM12_5G40B1_PA_W1 37
+#define SROM12_5G40B1_PA_W2 38
+#define SROM12_5G40B1_PA_W3 39
+
+#define SROM12_5G40B2_PA 40
+#define SROM12_5G40B2_PA_W0 40
+#define SROM12_5G40B2_PA_W1 41
+#define SROM12_5G40B2_PA_W2 42
+#define SROM12_5G40B2_PA_W3 43
+
+#define SROM12_5G40B3_PA 44
+#define SROM12_5G40B3_PA_W0 44
+#define SROM12_5G40B3_PA_W1 45
+#define SROM12_5G40B3_PA_W2 46
+#define SROM12_5G40B3_PA_W3 47
+
+#define SROM12_5G40B4_PA 48
+#define SROM12_5G40B4_PA_W0 48
+#define SROM12_5G40B4_PA_W1 49
+#define SROM12_5G40B4_PA_W2 50
+#define SROM12_5G40B4_PA_W3 51
+
+#define SROM12_5G80B0_PA 52
+#define SROM12_5G80B0_PA_W0 52
+#define SROM12_5G80B0_PA_W1 53
+#define SROM12_5G80B0_PA_W2 54
+#define SROM12_5G80B0_PA_W3 55
+
+#define SROM12_5G80B1_PA 56
+#define SROM12_5G80B1_PA_W0 56
+#define SROM12_5G80B1_PA_W1 57
+#define SROM12_5G80B1_PA_W2 58
+#define SROM12_5G80B1_PA_W3 59
+
+#define SROM12_5G80B2_PA 60
+#define SROM12_5G80B2_PA_W0 60
+#define SROM12_5G80B2_PA_W1 61
+#define SROM12_5G80B2_PA_W2 62
+#define SROM12_5G80B2_PA_W3 63
+
+#define SROM12_5G80B3_PA 64
+#define SROM12_5G80B3_PA_W0 64
+#define SROM12_5G80B3_PA_W1 65
+#define SROM12_5G80B3_PA_W2 66
+#define SROM12_5G80B3_PA_W3 67
+
+#define SROM12_5G80B4_PA 68
+#define SROM12_5G80B4_PA_W0 68
+#define SROM12_5G80B4_PA_W1 69
+#define SROM12_5G80B4_PA_W2 70
+#define SROM12_5G80B4_PA_W3 71
+
+/* PD offset */
+#define SROM12_PDOFF_2G_CCK 472
+
+#define SROM12_PDOFF_20in40M_5G_B0 473
+#define SROM12_PDOFF_20in40M_5G_B1 474
+#define SROM12_PDOFF_20in40M_5G_B2 475
+#define SROM12_PDOFF_20in40M_5G_B3 476
+#define SROM12_PDOFF_20in40M_5G_B4 477
+
+#define SROM12_PDOFF_40in80M_5G_B0 478
+#define SROM12_PDOFF_40in80M_5G_B1 479
+#define SROM12_PDOFF_40in80M_5G_B2 480
+#define SROM12_PDOFF_40in80M_5G_B3 481
+#define SROM12_PDOFF_40in80M_5G_B4 482
+
+#define SROM12_PDOFF_20in80M_5G_B0 488
+#define SROM12_PDOFF_20in80M_5G_B1 489
+#define SROM12_PDOFF_20in80M_5G_B2 490
+#define SROM12_PDOFF_20in80M_5G_B3 491
+#define SROM12_PDOFF_20in80M_5G_B4 492
+
+#define SROM13_PDOFFSET20IN40M5GCORE3 98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+#define SROM13_PDOFFSET20IN80M5GCORE3 510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1 511
+#define SROM13_PDOFFSET40IN80M5GCORE3 105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+#define SROM13_PDOFFSET20IN40M2G 94
+#define SROM13_PDOFFSET20IN40M2GCORE3 95
+
+#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */
+#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */
+
+#define SROM13_SIGN 64
+#define SROM13_WORDS 590
+#define SROM13_SIGNATURE 0x4d55
+#define SROM13_CRCREV 589
+
+
+/* Per-path fields and offset */
+#define MAX_PATH_SROM_13 4
+#define SROM13_PATH0 256
+#define SROM13_PATH1 328
+#define SROM13_PATH2 400
+#define SROM13_PATH3 512
+#define SROM13_RXGAINS 5
+
+#define SROM13_XTALFREQ 90
+
+#define SROM13_PDOFFSET20IN40M2G 94
+#define SROM13_PDOFFSET20IN40M2GCORE3 95
+#define SROM13_SB20IN40HRLRPOX 96
+
+#define SROM13_RXGAINS1CORE3 97
+
+#define SROM13_PDOFFSET20IN40M5GCORE3 98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+
+#define SROM13_ANTGAIN_BANDBGA 100
+
+#define SROM13_RXGAINS2CORE0 101
+#define SROM13_RXGAINS2CORE1 102
+#define SROM13_RXGAINS2CORE2 103
+#define SROM13_RXGAINS2CORE3 104
+
+#define SROM13_PDOFFSET40IN80M5GCORE3 105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+/* power per rate */
+#define SROM13_MCS1024QAM2GPO 108
+#define SROM13_MCS1024QAM5GLPO 109
+#define SROM13_MCS1024QAM5GLPO_1 110
+#define SROM13_MCS1024QAM5GMPO 111
+#define SROM13_MCS1024QAM5GMPO_1 112
+#define SROM13_MCS1024QAM5GHPO 113
+#define SROM13_MCS1024QAM5GHPO_1 114
+#define SROM13_MCS1024QAM5GX1PO 115
+#define SROM13_MCS1024QAM5GX1PO_1 116
+#define SROM13_MCS1024QAM5GX2PO 117
+#define SROM13_MCS1024QAM5GX2PO_1 118
+
+#define SROM13_MCSBW1605GLPO 119
+#define SROM13_MCSBW1605GLPO_1 120
+#define SROM13_MCSBW1605GMPO 121
+#define SROM13_MCSBW1605GMPO_1 122
+#define SROM13_MCSBW1605GHPO 123
+#define SROM13_MCSBW1605GHPO_1 124
+
+#define SROM13_MCSBW1605GX1PO 125
+#define SROM13_MCSBW1605GX1PO_1 126
+#define SROM13_MCSBW1605GX2PO 127
+#define SROM13_MCSBW1605GX2PO_1 128
+
+#define SROM13_ULBPPROFFS5GB0 129
+#define SROM13_ULBPPROFFS5GB1 130
+#define SROM13_ULBPPROFFS5GB2 131
+#define SROM13_ULBPPROFFS5GB3 132
+#define SROM13_ULBPPROFFS5GB4 133
+#define SROM13_ULBPPROFFS2G 134
+
+#define SROM13_MCS8POEXP 135
+#define SROM13_MCS8POEXP_1 136
+#define SROM13_MCS9POEXP 137
+#define SROM13_MCS9POEXP_1 138
+#define SROM13_MCS10POEXP 139
+#define SROM13_MCS10POEXP_1 140
+#define SROM13_MCS11POEXP 141
+#define SROM13_MCS11POEXP_1 142
+#define SROM13_ULBPDOFFS5GB0A0 143
+#define SROM13_ULBPDOFFS5GB0A1 144
+#define SROM13_ULBPDOFFS5GB0A2 145
+#define SROM13_ULBPDOFFS5GB0A3 146
+#define SROM13_ULBPDOFFS5GB1A0 147
+#define SROM13_ULBPDOFFS5GB1A1 148
+#define SROM13_ULBPDOFFS5GB1A2 149
+#define SROM13_ULBPDOFFS5GB1A3 150
+#define SROM13_ULBPDOFFS5GB2A0 151
+#define SROM13_ULBPDOFFS5GB2A1 152
+#define SROM13_ULBPDOFFS5GB2A2 153
+#define SROM13_ULBPDOFFS5GB2A3 154
+#define SROM13_ULBPDOFFS5GB3A0 155
+#define SROM13_ULBPDOFFS5GB3A1 156
+#define SROM13_ULBPDOFFS5GB3A2 157
+#define SROM13_ULBPDOFFS5GB3A3 158
+#define SROM13_ULBPDOFFS5GB4A0 159
+#define SROM13_ULBPDOFFS5GB4A1 160
+#define SROM13_ULBPDOFFS5GB4A2 161
+#define SROM13_ULBPDOFFS5GB4A3 162
+#define SROM13_ULBPDOFFS2GA0 163
+#define SROM13_ULBPDOFFS2GA1 164
+#define SROM13_ULBPDOFFS2GA2 165
+#define SROM13_ULBPDOFFS2GA3 166
+
+#define SROM13_RPCAL5GB4 199
+
+#define SROM13_EU_EDCRSTH 232
+
+#define SROM13_SWCTRLMAP4_CFG 493
+#define SROM13_SWCTRLMAP4_TX2G_FEM3TO0 494
+#define SROM13_SWCTRLMAP4_RX2G_FEM3TO0 495
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0 496
+#define SROM13_SWCTRLMAP4_MISC2G_FEM3TO0 497
+#define SROM13_SWCTRLMAP4_TX5G_FEM3TO0 498
+#define SROM13_SWCTRLMAP4_RX5G_FEM3TO0 499
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0 500
+#define SROM13_SWCTRLMAP4_MISC5G_FEM3TO0 501
+#define SROM13_SWCTRLMAP4_TX2G_FEM7TO4 502
+#define SROM13_SWCTRLMAP4_RX2G_FEM7TO4 503
+#define SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4 504
+#define SROM13_SWCTRLMAP4_MISC2G_FEM7TO4 505
+#define SROM13_SWCTRLMAP4_TX5G_FEM7TO4 506
+#define SROM13_SWCTRLMAP4_RX5G_FEM7TO4 507
+#define SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4 508
+#define SROM13_SWCTRLMAP4_MISC5G_FEM7TO4 509
+
+#define SROM13_PDOFFSET20IN80M5GCORE3 510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1 511
+
+#define SROM13_NOISELVLCORE3 584
+#define SROM13_NOISELVLCORE3_1 585
+#define SROM13_RXGAINERRCORE3 586
+#define SROM13_RXGAINERRCORE3_1 587
+
+
+typedef struct {
+ uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
+ uint8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+ uint8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
+ uint8 triso; /* TR switch isolation */
+ uint8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
+} srom_fem_t;
+
+#endif /* _bcmsrom_fmt_h_ */
--- /dev/null
+/*
+ * Table that encodes the srom formats for PCI/PCIe NICs.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmsrom_tbl.h 553564 2015-04-30 06:19:30Z $
+ */
+
+#ifndef _bcmsrom_tbl_h_
+#define _bcmsrom_tbl_h_
+
+#include "sbpcmcia.h"
+#include "wlioctl.h"
+#include <bcmsrom_fmt.h>
+
+typedef struct {
+ const char *name;
+ uint32 revmask;
+ uint32 flags;
+ uint16 off;
+ uint16 mask;
+} sromvar_t;
+
+#define SRFL_MORE 1 /* value continues as described by the next entry */
+#define SRFL_NOFFS 2 /* value bits can't be all ones */
+#define SRFL_PRHEX 4 /* value is in hexadecimal format */
+#define SRFL_PRSIGN 8 /* value is in signed decimal format */
+#define SRFL_CCODE 0x10 /* value is in country code format */
+#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
+#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
+#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */
+#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
+ * ONE in the array should have this flag set.
+ */
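A sketch of how a consumer of a sromvar_t-style table might honour these flags: entries flagged SRFL_MORE continue into the following entry, and a NULL name ends the table. The two-value table and the fake SROM image below are hypothetical, and treating the continuation entry as the high-order word is an assumption made only for illustration:

#include <stdio.h>
#include <stdint.h>

#define SRFL_MORE	1	/* value continues in the next entry */

typedef struct {
	const char *name;	/* NULL name terminates the table */
	uint32_t flags;
	uint16_t off;		/* SROM word offset */
	uint16_t mask;
} var_t;

/* Hypothetical table: one two-word value followed by a one-word value. */
static const var_t vars[] = {
	{ "boardflags",	SRFL_MORE,	0, 0xffff },
	{ "",		0,		1, 0xffff },
	{ "boardrev",	0,		2, 0xffff },
	{ NULL,		0,		0, 0 }
};

int main(void)
{
	const uint16_t srom[] = { 0x1234, 0x5678, 0x0a0b };	/* fake SROM image */

	for (const var_t *v = vars; v->name != NULL; v++) {
		const char *name = v->name;
		uint32_t val = srom[v->off] & v->mask;
		/* Two-word case only; treating the continuation as the high word
		 * is an assumption for this illustration. */
		if (v->flags & SRFL_MORE) {
			v++;
			val |= (uint32_t)(srom[v->off] & v->mask) << 16;
		}
		printf("%s=0x%x\n", name, (unsigned)val);
	}
	return 0;
}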
+
+
+#define SROM_DEVID_PCIE 48
+
+/**
+ * Assumptions:
+ * - Ethernet address spans across 3 consecutive words
+ *
+ * Table rules:
+ * - Add multiple entries next to each other if a value spans across multiple words
+ * (even multiple fields in the same word) with each entry except the last having
+ * its SRFL_MORE bit set.
+ * - Ethernet address entry does not follow above rule and must not have SRFL_MORE
+ * bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
+ * - The last entry's name field must be NULL to indicate the end of the table. Other
+ * entries must have non-NULL name.
+ */
+static const sromvar_t pci_sromvars[] = {
+/* name revmask flags off mask */
+#if defined(CABLECPE)
+ {"devid", 0xffffff00, SRFL_PRHEX, PCI_F0DEVID, 0xffff},
+#elif defined(BCMPCIEDEV) && defined(BCMPCIEDEV_ENABLED)
+ {"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff},
+#else
+ {"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
+#endif
+ {"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
+ {"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
+ {"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
+ {"boardflags", 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
+ {"boardflags", 0x00000004, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM_BFL2, 0xffff},
+ {"boardflags", 0x00000008, SRFL_PRHEX|SRFL_MORE, SROM_BFL, 0xffff},
+ {"", 0, 0, SROM3_BFL2, 0xffff},
+ {"boardflags", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL0, 0xffff},
+ {"", 0, 0, SROM4_BFL1, 0xffff},
+ {"boardflags", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL0, 0xffff},
+ {"", 0, 0, SROM5_BFL1, 0xffff},
+ {"boardflags", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL0, 0xffff},
+ {"", 0, 0, SROM8_BFL1, 0xffff},
+ {"boardflags2", 0x00000010, SRFL_PRHEX|SRFL_MORE, SROM4_BFL2, 0xffff},
+ {"", 0, 0, SROM4_BFL3, 0xffff},
+ {"boardflags2", 0x000000e0, SRFL_PRHEX|SRFL_MORE, SROM5_BFL2, 0xffff},
+ {"", 0, 0, SROM5_BFL3, 0xffff},
+ {"boardflags2", 0xffffff00, SRFL_PRHEX|SRFL_MORE, SROM8_BFL2, 0xffff},
+ {"", 0, 0, SROM8_BFL3, 0xffff},
+ {"boardtype", 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
+ {"subvid", 0xfffffffc, SRFL_PRHEX, SROM_SVID, 0xffff},
+ {"boardnum", 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
+ {"boardnum", 0x00000008, 0, SROM3_MACLO, 0xffff},
+ {"boardnum", 0x00000010, 0, SROM4_MACLO, 0xffff},
+ {"boardnum", 0x000000e0, 0, SROM5_MACLO, 0xffff},
+ {"boardnum", 0x00000700, 0, SROM8_MACLO, 0xffff},
+ {"cc", 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
+ {"regrev", 0x00000008, 0, SROM_OPO, 0xff00},
+ {"regrev", 0x00000010, 0, SROM4_REGREV, 0x00ff},
+ {"regrev", 0x000000e0, 0, SROM5_REGREV, 0x00ff},
+ {"regrev", 0x00000700, 0, SROM8_REGREV, 0x00ff},
+ {"ledbh0", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
+ {"ledbh1", 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
+ {"ledbh2", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
+ {"ledbh3", 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
+ {"ledbh0", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
+ {"ledbh1", 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
+ {"ledbh2", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
+ {"ledbh3", 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
+ {"ledbh0", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
+ {"ledbh1", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
+ {"ledbh2", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
+ {"ledbh3", 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
+ {"ledbh0", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
+ {"ledbh1", 0x00000700, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
+ {"ledbh2", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
+ {"ledbh3", 0x00000700, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
+ {"pa0b0", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
+ {"pa0b1", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
+ {"pa0b2", 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
+ {"pa0itssit", 0x0000000e, 0, SROM_ITT, 0x00ff},
+ {"pa0maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
+ {"pa0b0", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
+ {"pa0b1", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
+ {"pa0b2", 0x00000700, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
+ {"pa0itssit", 0x00000700, 0, SROM8_W0_ITTMAXP, 0xff00},
+ {"pa0maxpwr", 0x00000700, 0, SROM8_W0_ITTMAXP, 0x00ff},
+ {"opo", 0x0000000c, 0, SROM_OPO, 0x00ff},
+ {"opo", 0x00000700, 0, SROM8_2G_OFDMPO, 0x00ff},
+ {"aa2g", 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
+ {"aa2g", 0x000000f0, 0, SROM4_AA, 0x00ff},
+ {"aa2g", 0x00000700, 0, SROM8_AA, 0x00ff},
+ {"aa5g", 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
+ {"aa5g", 0x000000f0, 0, SROM4_AA, 0xff00},
+ {"aa5g", 0x00000700, 0, SROM8_AA, 0xff00},
+ {"ag0", 0x0000000e, 0, SROM_AG10, 0x00ff},
+ {"ag1", 0x0000000e, 0, SROM_AG10, 0xff00},
+ {"ag0", 0x000000f0, 0, SROM4_AG10, 0x00ff},
+ {"ag1", 0x000000f0, 0, SROM4_AG10, 0xff00},
+ {"ag2", 0x000000f0, 0, SROM4_AG32, 0x00ff},
+ {"ag3", 0x000000f0, 0, SROM4_AG32, 0xff00},
+ {"ag0", 0x00000700, 0, SROM8_AG10, 0x00ff},
+ {"ag1", 0x00000700, 0, SROM8_AG10, 0xff00},
+ {"ag2", 0x00000700, 0, SROM8_AG32, 0x00ff},
+ {"ag3", 0x00000700, 0, SROM8_AG32, 0xff00},
+ {"pa1b0", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
+ {"pa1b1", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
+ {"pa1b2", 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
+ {"pa1lob0", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
+ {"pa1lob1", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
+ {"pa1lob2", 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
+ {"pa1hib0", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
+ {"pa1hib1", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
+ {"pa1hib2", 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
+ {"pa1itssit", 0x0000000e, 0, SROM_ITT, 0xff00},
+ {"pa1maxpwr", 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
+ {"pa1lomaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
+ {"pa1himaxpwr", 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
+ {"pa1b0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
+ {"pa1b1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
+ {"pa1b2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
+ {"pa1lob0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
+ {"pa1lob1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
+ {"pa1lob2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
+ {"pa1hib0", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
+ {"pa1hib1", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
+ {"pa1hib2", 0x00000700, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
+ {"pa1itssit", 0x00000700, 0, SROM8_W1_ITTMAXP, 0xff00},
+ {"pa1maxpwr", 0x00000700, 0, SROM8_W1_ITTMAXP, 0x00ff},
+ {"pa1lomaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0xff00},
+ {"pa1himaxpwr", 0x00000700, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
+ {"bxa2g", 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
+ {"bxa2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x1800},
+ {"rssisav2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x0700},
+ {"rssismc2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x00f0},
+ {"rssismf2g", 0x00000700, 0, SROM8_BXARSSI2G, 0x000f},
+ {"bxa5g", 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
+ {"bxa5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x1800},
+ {"rssisav5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x0700},
+ {"rssismc5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x00f0},
+ {"rssismf5g", 0x00000700, 0, SROM8_BXARSSI5G, 0x000f},
+ {"tri2g", 0x00000008, 0, SROM_TRI52G, 0x00ff},
+ {"tri5g", 0x00000008, 0, SROM_TRI52G, 0xff00},
+ {"tri5gl", 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000008, 0, SROM_TRI5GHL, 0xff00},
+ {"tri2g", 0x00000700, 0, SROM8_TRI52G, 0x00ff},
+ {"tri5g", 0x00000700, 0, SROM8_TRI52G, 0xff00},
+ {"tri5gl", 0x00000700, 0, SROM8_TRI5GHL, 0x00ff},
+ {"tri5gh", 0x00000700, 0, SROM8_TRI5GHL, 0xff00},
+ {"rxpo2g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
+ {"rxpo2g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
+ {"rxpo5g", 0x00000700, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
+ {"txchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x000000f0, SRFL_NOFFS, SROM4_TXRXC, SROM4_SWITCH_MASK},
+ {"txchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0x00000700, SRFL_NOFFS, SROM8_TXRXC, SROM4_SWITCH_MASK},
+ {"tssipos2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl2g", 0x00000700, 0, SROM8_FEM2G, SROM8_FEM_ANTSWLUT_MASK},
+ {"tssipos5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TSSIPOS_MASK},
+ {"extpagain5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_EXTPA_GAIN_MASK},
+ {"pdetrange5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_PDET_RANGE_MASK},
+ {"triso5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
+ {"antswctl5g", 0x00000700, 0, SROM8_FEM5G, SROM8_FEM_ANTSWLUT_MASK},
+ {"txpid2ga0", 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
+ {"txpid2ga1", 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
+ {"txpid2ga2", 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
+ {"txpid2ga3", 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
+ {"txpid5ga0", 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
+ {"txpid5ga1", 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
+ {"txpid5ga2", 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
+ {"txpid5ga3", 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
+ {"txpid5gla0", 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
+ {"txpid5gla1", 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
+ {"txpid5gla2", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
+ {"txpid5gla3", 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
+ {"txpid5gha0", 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
+ {"txpid5gha1", 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
+ {"txpid5gha2", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
+ {"txpid5gha3", 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
+
+ {"ccode", 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
+ {"ccode", 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
+ {"ccode", 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+ {"ccode", 0x00000700, SRFL_CCODE, SROM8_CCODE, 0xffff},
+ {"macaddr", 0x00000700, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
+ {"macaddr", 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
+ {"macaddr", 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
+ {"macaddr", 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
+ {"il0macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0, 0xffff},
+ {"et1macaddr", 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1, 0xffff},
+ {"leddc", 0x00000700, SRFL_NOFFS|SRFL_LEDDC, SROM8_LEDDC, 0xffff},
+ {"leddc", 0x000000e0, SRFL_NOFFS|SRFL_LEDDC, SROM5_LEDDC, 0xffff},
+ {"leddc", 0x00000010, SRFL_NOFFS|SRFL_LEDDC, SROM4_LEDDC, 0xffff},
+ {"leddc", 0x00000008, SRFL_NOFFS|SRFL_LEDDC, SROM3_LEDDC, 0xffff},
+
+ {"tempthresh", 0x00000700, 0, SROM8_THERMAL, 0xff00},
+ {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff},
+ {"rawtempsense", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0x00000700, SRFL_PRHEX, SROM8_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0x00000700, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX, 0x0300},
+ {"freqoffset_corr", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x000f},
+ {"iqcal_swp_dis", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0010},
+ {"hw_iqcal_en", 0x00000700, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP, 0x0020},
+ {"elna2g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0x00ff},
+ {"elna5g", 0x00000700, 0, SROM8_EXTLNAGAIN, 0xff00},
+ {"phycal_tempdelta", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0x00000700, 0, SROM8_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0x00000700, SRFL_PRHEX, SROM8_MPWR_1_AND_2, 0x3f80},
+
+ {"cck2gpo", 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
+ {"cck2gpo", 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
+ {"ofdm2gpo", 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
+ {"ofdm2gpo", 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
+ {"ofdm5gpo", 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
+ {"ofdm5glpo", 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
+ {"ofdm5ghpo", 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
+ {"", 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
+ {"mcs2gpo0", 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
+ {"mcs2gpo0", 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
+ {"mcs2gpo1", 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
+ {"mcs2gpo2", 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
+ {"mcs2gpo3", 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
+ {"mcs2gpo4", 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
+ {"mcs2gpo5", 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
+ {"mcs2gpo6", 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
+ {"mcs2gpo7", 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
+ {"mcs5gpo0", 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
+ {"mcs5gpo1", 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
+ {"mcs5gpo2", 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
+ {"mcs5gpo3", 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
+ {"mcs5gpo4", 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
+ {"mcs5gpo5", 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
+ {"mcs5gpo6", 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
+ {"mcs5gpo7", 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
+ {"mcs5glpo0", 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
+ {"mcs5glpo1", 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
+ {"mcs5glpo2", 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
+ {"mcs5glpo3", 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
+ {"mcs5glpo4", 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
+ {"mcs5glpo5", 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
+ {"mcs5glpo6", 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
+ {"mcs5glpo7", 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
+ {"mcs5ghpo0", 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
+ {"mcs5ghpo1", 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
+ {"mcs5ghpo2", 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
+ {"mcs5ghpo3", 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
+ {"mcs5ghpo4", 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
+ {"mcs5ghpo5", 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
+ {"mcs5ghpo6", 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
+ {"mcs5ghpo7", 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
+ {"cddpo", 0x000000f0, 0, SROM4_CDDPO, 0xffff},
+ {"stbcpo", 0x000000f0, 0, SROM4_STBCPO, 0xffff},
+ {"bw40po", 0x000000f0, 0, SROM4_BW40PO, 0xffff},
+ {"bwduppo", 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
+ {"cddpo", 0x00000100, 0, SROM8_CDDPO, 0xffff},
+ {"stbcpo", 0x00000100, 0, SROM8_STBCPO, 0xffff},
+ {"bw40po", 0x00000100, 0, SROM8_BW40PO, 0xffff},
+ {"bwduppo", 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
+
+ /* power per rate from sromrev 9 */
+ {"cckbw202gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20, 0xffff},
+ {"cckbw20ul2gpo", 0x00000600, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
+ {"legofdmbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
+ {"legofdmbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
+ {"legofdmbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_LOFDMBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
+ {"mcsbw202gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul2gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw402gpo", 0x00000600, SRFL_MORE, SROM9_2GPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405glpo", 0x00000600, SRFL_MORE, SROM9_5GLPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405gmpo", 0x00000600, SRFL_MORE, SROM9_5GMPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
+ {"mcsbw205ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
+ {"mcsbw20ul5ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW20UL, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
+ {"mcsbw405ghpo", 0x00000600, SRFL_MORE, SROM9_5GHPO_MCSBW40, 0xffff},
+ {"", 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
+ {"mcs32po", 0x00000600, 0, SROM9_PO_MCS32, 0xffff},
+ {"legofdm40duppo", 0x00000600, 0, SROM9_PO_LOFDM40DUP, 0xffff},
+ {"pcieingress_war", 0x00000700, 0, SROM8_PCIEINGRESS_WAR, 0xf},
+ {"eu_edthresh2g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000100, 0, SROM8_EU_EDCRSTH, 0xff00},
+ {"eu_edthresh2g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000200, 0, SROM9_EU_EDCRSTH, 0xff00},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga0", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0x00000700, 0, SROM8_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0x00000700, 0, SROM8_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5gla0", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x003f},
+ {"rxgainerr5gla1", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0x07c0},
+ {"rxgainerr5gla2", 0x00000700, 0, SROM8_RXGAINERR_5GL, 0xf800},
+ {"rxgainerr5gma0", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x003f},
+ {"rxgainerr5gma1", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0x07c0},
+ {"rxgainerr5gma2", 0x00000700, 0, SROM8_RXGAINERR_5GM, 0xf800},
+ {"rxgainerr5gha0", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x003f},
+ {"rxgainerr5gha1", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0x07c0},
+ {"rxgainerr5gha2", 0x00000700, 0, SROM8_RXGAINERR_5GH, 0xf800},
+ {"rxgainerr5gua0", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5gua1", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5gua2", 0x00000700, 0, SROM8_RXGAINERR_5GU, 0xf800},
+ {"sar2g", 0x00000600, 0, SROM9_SAR, 0x00ff},
+ {"sar5g", 0x00000600, 0, SROM9_SAR, 0xff00},
+ {"noiselvl2ga0", 0x00000700, 0, SROM8_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0x00000700, 0, SROM8_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0x00000700, 0, SROM8_NOISELVL_2G, 0x7c00},
+ {"noiselvl5gla0", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x001f},
+ {"noiselvl5gla1", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x03e0},
+ {"noiselvl5gla2", 0x00000700, 0, SROM8_NOISELVL_5GL, 0x7c00},
+ {"noiselvl5gma0", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x001f},
+ {"noiselvl5gma1", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x03e0},
+ {"noiselvl5gma2", 0x00000700, 0, SROM8_NOISELVL_5GM, 0x7c00},
+ {"noiselvl5gha0", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x001f},
+ {"noiselvl5gha1", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x03e0},
+ {"noiselvl5gha2", 0x00000700, 0, SROM8_NOISELVL_5GH, 0x7c00},
+ {"noiselvl5gua0", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x001f},
+ {"noiselvl5gua1", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5gua2", 0x00000700, 0, SROM8_NOISELVL_5GU, 0x7c00},
+ {"noisecaloffset", 0x00000300, 0, SROM8_NOISECALOFFSET, 0x00ff},
+ {"noisecaloffset5g", 0x00000300, 0, SROM8_NOISECALOFFSET, 0xff00},
+ {"subband5gver", 0x00000700, 0, SROM8_SUBBAND_PPR, 0x7},
+
+ {"cckPwrOffset", 0x00000400, 0, SROM10_CCKPWROFFSET, 0xffff},
+ {"eu_edthresh2g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000400, 0, SROM10_EU_EDCRSTH, 0xff00},
+	/* swctrlmap_2g array; the last element doesn't have the SRFL_ARRAY flag set (see note below) */
+ {"swctrlmap_2g", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 1, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 2, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 3, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 4, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 5, 0xffff},
+ {"", 0x00000400, SRFL_MORE|SRFL_PRHEX|SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 6, 0xffff},
+ {"", 0x00000400, SRFL_ARRAY, SROM10_SWCTRLMAP_2G + 7, 0xffff},
+ {"", 0x00000400, SRFL_PRHEX, SROM10_SWCTRLMAP_2G + 8, 0xffff},
+
+ /* sromrev 11 */
+ {"boardflags3", 0xfffff800, SRFL_PRHEX|SRFL_MORE, SROM11_BFL4, 0xffff},
+ {"", 0, 0, SROM11_BFL5, 0xffff},
+ {"boardnum", 0xfffff800, 0, SROM11_MACLO, 0xffff},
+ {"macaddr", 0xfffff800, SRFL_ETHADDR, SROM11_MACHI, 0xffff},
+ {"ccode", 0xfffff800, SRFL_CCODE, SROM11_CCODE, 0xffff},
+ {"regrev", 0xfffff800, 0, SROM11_REGREV, 0x00ff},
+ {"ledbh0", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0x00ff},
+ {"ledbh1", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH10, 0xff00},
+ {"ledbh2", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0x00ff},
+ {"ledbh3", 0xfffff800, SRFL_NOFFS, SROM11_LEDBH32, 0xff00},
+ {"leddc", 0xfffff800, SRFL_NOFFS|SRFL_LEDDC, SROM11_LEDDC, 0xffff},
+ {"aa2g", 0xfffff800, 0, SROM11_AA, 0x00ff},
+ {"aa5g", 0xfffff800, 0, SROM11_AA, 0xff00},
+ {"agbg0", 0xfffff800, 0, SROM11_AGBG10, 0xff00},
+ {"agbg1", 0xfffff800, 0, SROM11_AGBG10, 0x00ff},
+ {"agbg2", 0xfffff800, 0, SROM11_AGBG2A0, 0xff00},
+ {"aga0", 0xfffff800, 0, SROM11_AGBG2A0, 0x00ff},
+ {"aga1", 0xfffff800, 0, SROM11_AGA21, 0xff00},
+ {"aga2", 0xfffff800, 0, SROM11_AGA21, 0x00ff},
+ {"txchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_TXCHAIN_MASK},
+ {"rxchain", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_RXCHAIN_MASK},
+ {"antswitch", 0xfffff800, SRFL_NOFFS, SROM11_TXRXC, SROM4_SWITCH_MASK},
+
+ {"tssiposslope2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0001},
+ {"epagain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x000e},
+ {"pdgain2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x01f0},
+ {"tworangetssi2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0200},
+ {"papdcap2g", 0xfffff800, 0, SROM11_FEM_CFG1, 0x0400},
+ {"femctrl", 0xfffff800, 0, SROM11_FEM_CFG1, 0xf800},
+
+ {"tssiposslope5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0001},
+ {"epagain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x000e},
+ {"pdgain5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x01f0},
+ {"tworangetssi5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0200},
+ {"papdcap5g", 0xfffff800, 0, SROM11_FEM_CFG2, 0x0400},
+ {"gainctrlsph", 0xfffff800, 0, SROM11_FEM_CFG2, 0xf800},
+
+ {"tempthresh", 0xfffff800, 0, SROM11_THERMAL, 0xff00},
+ {"tempoffset", 0xfffff800, 0, SROM11_THERMAL, 0x00ff},
+ {"rawtempsense", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0x01ff},
+ {"measpower", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_RAWTS, 0xfe00},
+ {"tempsense_slope", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x00ff},
+ {"tempcorrx", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0xfc00},
+ {"tempsense_option", 0xfffff800, SRFL_PRHEX, SROM11_TS_SLP_OPT_CORRX, 0x0300},
+ {"xtalfreq", 0xfffff800, 0, SROM11_XTAL_FREQ, 0xffff},
+ /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #1 */
+ {"pa5gbw4080a1", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W0_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W1_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_4080_W2_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_4080_W0_A1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_4080_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_4080_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_4080_PA + 2, 0xffff},
+ {"phycal_tempdelta", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x00ff},
+ {"temps_period", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0x0f00},
+ {"temps_hysteresis", 0xfffff800, 0, SROM11_PHYCAL_TEMPDELTA, 0xf000},
+ {"measpower1", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x007f},
+ {"measpower2", 0xfffff800, SRFL_PRHEX, SROM11_MPWR_1_AND_2, 0x3f80},
+ {"tssifloor2g", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_2G, 0x03ff},
+ {"tssifloor5g", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GL, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GM, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_TSSIFLOOR_5GH, 0x03ff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_TSSIFLOOR_5GU, 0x03ff},
+ {"pdoffset2g40ma0", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x000f},
+ {"pdoffset2g40ma1", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x00f0},
+ {"pdoffset2g40ma2", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x0f00},
+ {"pdoffset2g40mvalid", 0xfffff800, 0, SROM11_PDOFF_2G_40M, 0x8000},
+ {"pdoffset40ma0", 0xfffff800, 0, SROM11_PDOFF_40M_A0, 0xffff},
+ {"pdoffset40ma1", 0xfffff800, 0, SROM11_PDOFF_40M_A1, 0xffff},
+ {"pdoffset40ma2", 0xfffff800, 0, SROM11_PDOFF_40M_A2, 0xffff},
+ {"pdoffset80ma0", 0xfffff800, 0, SROM11_PDOFF_80M_A0, 0xffff},
+ {"pdoffset80ma1", 0xfffff800, 0, SROM11_PDOFF_80M_A1, 0xffff},
+ {"pdoffset80ma2", 0xfffff800, 0, SROM11_PDOFF_80M_A2, 0xffff},
+
+ {"subband5gver", 0xfffff800, SRFL_PRHEX, SROM11_SUBBAND5GVER, 0xffff},
+ {"paparambwver", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0xf000},
+ {"rx5ggainwar", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0x2000},
+ /* Special PA Params for 4350 5G Band, 40/80 MHz BW Ant #0 */
+ {"pa5gbw4080a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 +SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 40 MHz BW */
+ {"pa5gbw40a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 5G Band, 80 MHz BW */
+ {"pa5gbw80a0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH2 + SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH2 + SROM11_5GB3_PA + 2, 0xffff},
+ /* Special PA Params for 4335 2G Band, CCK */
+ {"pa2gccka0", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX | SRFL_ARRAY, SROM11_PATH1 + SROM11_2G_PA + 1, 0xffff},
+ {"", 0xfffff800, SRFL_PRHEX, SROM11_PATH1 + SROM11_2G_PA + 2, 0xffff},
+
+ /* power per rate */
+ {"cckbw202gpo", 0xfffff800, 0, SROM11_CCKBW202GPO, 0xffff},
+ {"cckbw20ul2gpo", 0xfffff800, 0, SROM11_CCKBW20UL2GPO, 0xffff},
+ {"mcsbw202gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW202GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW202GPO_1, 0xffff},
+ {"mcsbw402gpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW402GPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW402GPO_1, 0xffff},
+ {"dot11agofdmhrbw202gpo", 0xfffff800, 0, SROM11_DOT11AGOFDMHRBW202GPO, 0xffff},
+ {"ofdmlrbw202gpo", 0xfffff800, 0, SROM11_OFDMLRBW202GPO, 0xffff},
+ {"mcsbw205glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GLPO_1, 0xffff},
+ {"mcsbw405glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GLPO_1, 0xffff},
+ {"mcsbw805glpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GLPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GLPO_1, 0xffff},
+ {"mcsbw205gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GMPO_1, 0xffff},
+ {"mcsbw405gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GMPO_1, 0xffff},
+ {"mcsbw805gmpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GMPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GMPO_1, 0xffff},
+ {"mcsbw205ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW205GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW205GHPO_1, 0xffff},
+ {"mcsbw405ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW405GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW405GHPO_1, 0xffff},
+ {"mcsbw805ghpo", 0xfffff800, SRFL_MORE, SROM11_MCSBW805GHPO, 0xffff},
+ {"", 0xfffff800, 0, SROM11_MCSBW805GHPO_1, 0xffff},
+ {"mcslr5glpo", 0xfffff800, 0, SROM11_MCSLR5GLPO, 0x0fff},
+ {"mcslr5gmpo", 0xfffff800, 0, SROM11_MCSLR5GMPO, 0xffff},
+ {"mcslr5ghpo", 0xfffff800, 0, SROM11_MCSLR5GHPO, 0xffff},
+ {"sb20in40hrpo", 0xfffff800, 0, SROM11_SB20IN40HRPO, 0xffff},
+ {"sb20in80and160hr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GLPO, 0xffff},
+ {"sb40and80hr5glpo", 0xfffff800, 0, SROM11_SB40AND80HR5GLPO, 0xffff},
+ {"sb20in80and160hr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GMPO, 0xffff},
+ {"sb40and80hr5gmpo", 0xfffff800, 0, SROM11_SB40AND80HR5GMPO, 0xffff},
+ {"sb20in80and160hr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160HR5GHPO, 0xffff},
+ {"sb40and80hr5ghpo", 0xfffff800, 0, SROM11_SB40AND80HR5GHPO, 0xffff},
+ {"sb20in40lrpo", 0xfffff800, 0, SROM11_SB20IN40LRPO, 0xffff},
+ {"sb20in80and160lr5glpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GLPO, 0xffff},
+ {"sb40and80lr5glpo", 0xfffff800, 0, SROM11_SB40AND80LR5GLPO, 0xffff},
+ {"sb20in80and160lr5gmpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GMPO, 0xffff},
+ {"sb40and80lr5gmpo", 0xfffff800, 0, SROM11_SB40AND80LR5GMPO, 0xffff},
+ {"sb20in80and160lr5ghpo", 0xfffff800, 0, SROM11_SB20IN80AND160LR5GHPO, 0xffff},
+ {"sb40and80lr5ghpo", 0xfffff800, 0, SROM11_SB40AND80LR5GHPO, 0xffff},
+ {"dot11agduphrpo", 0xfffff800, 0, SROM11_DOT11AGDUPHRPO, 0xffff},
+ {"dot11agduplrpo", 0xfffff800, 0, SROM11_DOT11AGDUPLRPO, 0xffff},
+
+ /* Misc */
+ {"sar2g", 0xfffff800, 0, SROM11_SAR, 0x00ff},
+ {"sar5g", 0xfffff800, 0, SROM11_SAR, 0xff00},
+
+ {"noiselvl2ga0", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x001f},
+ {"noiselvl2ga1", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x03e0},
+ {"noiselvl2ga2", 0xfffff800, 0, SROM11_NOISELVL_2G, 0x7c00},
+ {"noiselvl5ga0", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x001f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x001f},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x001f},
+ {"noiselvl5ga1", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x03e0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x03e0},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x03e0},
+ {"noiselvl5ga2", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GL, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GM, 0x7c00},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_NOISELVL_5GH, 0x7c00},
+ {"", 0xfffff800, 0, SROM11_NOISELVL_5GU, 0x7c00},
+ {"eu_edthresh2g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00000800, 0, SROM11_EU_EDCRSTH, 0xff00},
+
+ {"rxgainerr2ga0", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x003f},
+ {"rxgainerr2ga1", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0x07c0},
+ {"rxgainerr2ga2", 0xfffff800, 0, SROM11_RXGAINERR_2G, 0xf800},
+ {"rxgainerr5ga0", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x003f},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x003f},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x003f},
+ {"rxgainerr5ga1", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0x07c0},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0x07c0},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0x07c0},
+ {"rxgainerr5ga2", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GL, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GM, 0xf800},
+ {"", 0xfffff800, SRFL_ARRAY, SROM11_RXGAINERR_5GH, 0xf800},
+ {"", 0xfffff800, 0, SROM11_RXGAINERR_5GU, 0xf800},
+ {"rpcal2g", 0xfffff800, 0, SROM11_RPCAL_2G, 0xffff},
+ {"rpcal5gb0", 0xfffff800, 0, SROM11_RPCAL_5GL, 0xffff},
+ {"rpcal5gb1", 0xfffff800, 0, SROM11_RPCAL_5GM, 0xffff},
+ {"rpcal5gb2", 0xfffff800, 0, SROM11_RPCAL_5GH, 0xffff},
+ {"rpcal5gb3", 0xfffff800, 0, SROM11_RPCAL_5GU, 0xffff},
+ {"txidxcap2g", 0xfffff800, 0, SROM11_TXIDXCAP2G, 0x0ff0},
+ {"txidxcap5g", 0xfffff800, 0, SROM11_TXIDXCAP5G, 0x0ff0},
+ {"pdoffsetcckma0", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x000f},
+ {"pdoffsetcckma1", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x00f0},
+ {"pdoffsetcckma2", 0xfffff800, 0, SROM11_PDOFF_2G_CCK, 0x0f00},
+
+ /* sromrev 12 */
+ {"boardflags4", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_BFL6, 0xffff},
+ {"", 0, 0, SROM12_BFL7, 0xffff},
+ {"pdoffsetcck", 0xfffff000, 0, SROM12_PDOFF_2G_CCK, 0xffff},
+ {"pdoffset20in40m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B0, 0xffff},
+ {"pdoffset20in40m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B1, 0xffff},
+ {"pdoffset20in40m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B2, 0xffff},
+ {"pdoffset20in40m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B3, 0xffff},
+ {"pdoffset20in40m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in40M_5G_B4, 0xffff},
+ {"pdoffset40in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B0, 0xffff},
+ {"pdoffset40in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B1, 0xffff},
+ {"pdoffset40in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B2, 0xffff},
+ {"pdoffset40in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B3, 0xffff},
+ {"pdoffset40in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_40in80M_5G_B4, 0xffff},
+ {"pdoffset20in80m5gb0", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B0, 0xffff},
+ {"pdoffset20in80m5gb1", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B1, 0xffff},
+ {"pdoffset20in80m5gb2", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B2, 0xffff},
+ {"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff},
+ {"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff},
+
+ {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+ {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+ {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+ {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+ {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+ {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+
+ {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
+ {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
+
+ /* power per rate */
+ {"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff},
+ {"mcsbw405gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW405GX1PO_1, 0xffff},
+ {"mcsbw805gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX1PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW805GX1PO_1, 0xffff},
+ {"mcsbw205gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW205GX2PO_1, 0xffff},
+ {"mcsbw405gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW405GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW405GX2PO_1, 0xffff},
+ {"mcsbw805gx2po", 0xfffff000, SRFL_MORE, SROM12_MCSBW805GX2PO, 0xffff},
+ {"", 0xfffff000, 0, SROM12_MCSBW805GX2PO_1, 0xffff},
+
+ {"sb20in80and160hr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX1PO, 0xffff},
+ {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff},
+ {"sb20in80and160lr5gx1po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX1PO, 0xffff},
+ {"sb40and80hr5gx1po", 0xfffff000, 0, SROM12_SB40AND80HR5GX1PO, 0xffff},
+ {"sb20in80and160hr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160HR5GX2PO, 0xffff},
+ {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff},
+ {"sb20in80and160lr5gx2po", 0xfffff000, 0, SROM12_SB20IN80AND160LR5GX2PO, 0xffff},
+ {"sb40and80hr5gx2po", 0xfffff000, 0, SROM12_SB40AND80HR5GX2PO, 0xffff},
+
+ {"rxgains5gmelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0007},
+ {"rxgains5gmelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0007},
+ {"rxgains5gmelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0007},
+ {"rxgains5gmtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0078},
+ {"rxgains5gmtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0078},
+ {"rxgains5gmtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0078},
+ {"rxgains5gmtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0080},
+ {"rxgains5gmtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0080},
+ {"rxgains5gmtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0080},
+ {"rxgains5ghelnagaina0", 0xfffff000, 0, SROM12_RXGAINS10, 0x0700},
+ {"rxgains5ghelnagaina1", 0xfffff000, 0, SROM12_RXGAINS11, 0x0700},
+ {"rxgains5ghelnagaina2", 0xfffff000, 0, SROM12_RXGAINS12, 0x0700},
+ {"rxgains5ghtrisoa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x7800},
+ {"rxgains5ghtrisoa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x7800},
+ {"rxgains5ghtrisoa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x7800},
+ {"rxgains5ghtrelnabypa0", 0xfffff000, 0, SROM12_RXGAINS10, 0x8000},
+ {"rxgains5ghtrelnabypa1", 0xfffff000, 0, SROM12_RXGAINS11, 0x8000},
+ {"rxgains5ghtrelnabypa2", 0xfffff000, 0, SROM12_RXGAINS12, 0x8000},
+ {"eu_edthresh2g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00001000, 0, SROM12_EU_EDCRSTH, 0xff00},
+
+ {"gpdn", 0xfffff000, SRFL_PRHEX|SRFL_MORE, SROM12_GPDN_L, 0xffff},
+ {"", 0, 0, SROM12_GPDN_H, 0xffff},
+
+ {"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff},
+ {"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00},
+
+ {"agbg3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0xff00},
+ {"aga3", 0xffffe000, 0, SROM13_ANTGAIN_BANDBGA, 0x00ff},
+ {"noiselvl2ga3", 0xffffe000, 0, SROM13_NOISELVLCORE3, 0x001f},
+ {"noiselvl5ga3", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x03e0},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3, 0x7c00},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_NOISELVLCORE3_1, 0x001f},
+ {"", 0xffffe000, 0, SROM13_NOISELVLCORE3_1, 0x03e0},
+ {"rxgainerr2ga3", 0xffffe000, 0, SROM13_RXGAINERRCORE3, 0x001f},
+ {"rxgainerr5ga3", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x03e0},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3, 0x7c00},
+ {"", 0xffffe000, SRFL_ARRAY, SROM13_RXGAINERRCORE3_1, 0x001f},
+ {"", 0xffffe000, 0, SROM13_RXGAINERRCORE3_1, 0x03e0},
+ {"rxgains5gmelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0007},
+ {"rxgains5gmtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0078},
+ {"rxgains5gmtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0080},
+ {"rxgains5ghelnagaina3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x0700},
+ {"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800},
+ {"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000},
+
+ /* power per rate */
+ {"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff},
+ {"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GLPO_1, 0xffff},
+ {"mcs1024qam5gmpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GMPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GMPO_1, 0xffff},
+ {"mcs1024qam5ghpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GHPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GHPO_1, 0xffff},
+ {"mcs1024qam5gx1po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX1PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX1PO_1, 0xffff},
+ {"mcs1024qam5gx2po", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GX2PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS1024QAM5GX2PO_1, 0xffff},
+
+ {"mcsbw1605glpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GLPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GLPO_1, 0xffff},
+ {"mcsbw1605gmpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GMPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GMPO_1, 0xffff},
+ {"mcsbw1605ghpo", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GHPO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GHPO_1, 0xffff},
+ {"mcsbw1605gx1po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX1PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GX1PO_1, 0xffff},
+ {"mcsbw1605gx2po", 0xffffe000, SRFL_MORE, SROM13_MCSBW1605GX2PO, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCSBW1605GX2PO_1, 0xffff},
+
+ {"ulbpproffs2g", 0xffffe000, 0, SROM13_ULBPPROFFS2G, 0xffff},
+
+ {"mcs8poexp", 0xffffe000, SRFL_MORE, SROM13_MCS8POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS8POEXP_1, 0xffff},
+ {"mcs9poexp", 0xffffe000, SRFL_MORE, SROM13_MCS9POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS9POEXP_1, 0xffff},
+ {"mcs10poexp", 0xffffe000, SRFL_MORE, SROM13_MCS10POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS10POEXP_1, 0xffff},
+ {"mcs11poexp", 0xffffe000, SRFL_MORE, SROM13_MCS11POEXP, 0xffff},
+ {"", 0xffffe000, 0, SROM13_MCS11POEXP_1, 0xffff},
+
+ {"ulbpdoffs5gb0a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A0, 0xffff},
+ {"ulbpdoffs5gb0a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A1, 0xffff},
+ {"ulbpdoffs5gb0a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A2, 0xffff},
+ {"ulbpdoffs5gb0a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB0A3, 0xffff},
+ {"ulbpdoffs5gb1a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A0, 0xffff},
+ {"ulbpdoffs5gb1a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A1, 0xffff},
+ {"ulbpdoffs5gb1a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A2, 0xffff},
+ {"ulbpdoffs5gb1a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB1A3, 0xffff},
+ {"ulbpdoffs5gb2a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A0, 0xffff},
+ {"ulbpdoffs5gb2a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A1, 0xffff},
+ {"ulbpdoffs5gb2a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A2, 0xffff},
+ {"ulbpdoffs5gb2a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB2A3, 0xffff},
+ {"ulbpdoffs5gb3a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A0, 0xffff},
+ {"ulbpdoffs5gb3a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A1, 0xffff},
+ {"ulbpdoffs5gb3a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A2, 0xffff},
+ {"ulbpdoffs5gb3a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB3A3, 0xffff},
+ {"ulbpdoffs5gb4a0", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A0, 0xffff},
+ {"ulbpdoffs5gb4a1", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A1, 0xffff},
+ {"ulbpdoffs5gb4a2", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A2, 0xffff},
+ {"ulbpdoffs5gb4a3", 0xffffe000, 0, SROM13_ULBPDOFFS5GB4A3, 0xffff},
+ {"ulbpdoffs2ga0", 0xffffe000, 0, SROM13_ULBPDOFFS2GA0, 0xffff},
+ {"ulbpdoffs2ga1", 0xffffe000, 0, SROM13_ULBPDOFFS2GA1, 0xffff},
+ {"ulbpdoffs2ga2", 0xffffe000, 0, SROM13_ULBPDOFFS2GA2, 0xffff},
+ {"ulbpdoffs2ga3", 0xffffe000, 0, SROM13_ULBPDOFFS2GA3, 0xffff},
+
+ {"rpcal5gb4", 0xffffe000, 0, SROM13_RPCAL5GB4, 0xffff},
+
+ {"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff},
+
+ {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
+ {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
+
+ {"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+ {"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+ {"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+
+ {"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff},
+ {"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RXByp2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_misc2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM3TO0, 0xffff},
+ {"swctrlmap4_TX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RX5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_RXByp5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_misc5g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM3TO0, 0xffff},
+ {"swctrlmap4_TX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RX2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RXByp2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_misc2g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC2G_FEM7TO4, 0xffff},
+ {"swctrlmap4_TX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RX5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_RXByp5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_RXBYP5G_FEM7TO4, 0xffff},
+ {"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff},
+ {NULL, 0, 0, 0, 0}
+};
+
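+/*
+ * Editorial reading aid for both sromvar tables in this file: the second
+ * column is a bitmask of the SROM revisions that carry the variable (bit n
+ * set means sromrev n), e.g. 0x00000700 covers revs 8-10 and 0xfffff800
+ * covers rev 11 and above; the last two columns are the 16-bit word offset
+ * and the bit mask of the field within that word.  For instance the
+ * {"tempoffset", 0x00000700, 0, SROM8_THERMAL, 0x00ff} entry above says
+ * that, for sromrev 8-10, tempoffset is the low byte of the SROM8_THERMAL
+ * word.
+ */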
+static const sromvar_t perpath_pci_sromvars[] = {
+ {"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
+ {"pa2gw1a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
+ {"pa2gw3a", 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
+ {"maxp5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
+ {"pa5gw1a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
+ {"pa5gw3a", 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
+ {"pa5glw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2, 0xffff},
+ {"pa5glw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3, 0xffff},
+ {"pa5ghw0a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2, 0xffff},
+ {"pa5ghw3a", 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3, 0xffff},
+ {"maxp2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0x00ff},
+ {"itt2ga", 0x00000700, 0, SROM8_2G_ITT_MAXP, 0xff00},
+ {"itt5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0xff00},
+ {"pa2gw0a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
+ {"pa2gw1a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
+ {"pa2gw2a", 0x00000700, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
+ {"maxp5ga", 0x00000700, 0, SROM8_5G_ITT_MAXP, 0x00ff},
+ {"maxp5gha", 0x00000700, 0, SROM8_5GLH_MAXP, 0x00ff},
+ {"maxp5gla", 0x00000700, 0, SROM8_5GLH_MAXP, 0xff00},
+ {"pa5gw0a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
+ {"pa5gw1a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
+ {"pa5gw2a", 0x00000700, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
+ {"pa5glw0a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
+ {"pa5glw1a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 1, 0xffff},
+ {"pa5glw2a", 0x00000700, SRFL_PRHEX, SROM8_5GL_PA + 2, 0xffff},
+ {"pa5ghw0a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
+ {"pa5ghw1a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 1, 0xffff},
+ {"pa5ghw2a", 0x00000700, SRFL_PRHEX, SROM8_5GH_PA + 2, 0xffff},
+
+ /* sromrev 11 */
+ {"maxp2ga", 0xfffff800, 0, SROM11_2G_MAXP, 0x00ff},
+ {"pa2ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_2G_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX, SROM11_2G_PA + 2, 0xffff},
+ {"rxgains5gmelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0007},
+ {"rxgains5gmtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x0078},
+ {"rxgains5gmtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x0080},
+ {"rxgains5ghelnagaina", 0x00000800, 0, SROM11_RXGAINS1, 0x0700},
+ {"rxgains5ghtrisoa", 0x00000800, 0, SROM11_RXGAINS1, 0x7800},
+ {"rxgains5ghtrelnabypa", 0x00000800, 0, SROM11_RXGAINS1, 0x8000},
+ {"rxgains2gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0x00000800, 0, SROM11_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0x00000800, 0, SROM11_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0x00000800, 0, SROM11_RXGAINS, 0x8000},
+ {"maxp5ga", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0x00ff},
+ {"", 0x00000800, SRFL_ARRAY, SROM11_5GB1B0_MAXP, 0xff00},
+ {"", 0x00000800, SRFL_ARRAY, SROM11_5GB3B2_MAXP, 0x00ff},
+ {"", 0x00000800, 0, SROM11_5GB3B2_MAXP, 0xff00},
+ {"pa5ga", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB0_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB1_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB2_PA + 2, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX | SRFL_ARRAY, SROM11_5GB3_PA + 1, 0xffff},
+ {"", 0x00000800, SRFL_PRHEX, SROM11_5GB3_PA + 2, 0xffff},
+
+ /* sromrev 12 */
+ {"maxp5gb4a", 0xfffff000, 0, SROM12_5GB42G_MAXP, 0x00ff00},
+ {"pa2ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2GB0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_2GB0_PA_W3, 0x00ffff},
+
+ {"pa2g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_2G40B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_2G40B0_PA_W3, 0x00ffff},
+ {"maxp5gb0a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff},
+ {"maxp5gb1a", 0xfffff000, 0, SROM12_5GB1B0_MAXP, 0x00ff00},
+ {"maxp5gb2a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff},
+ {"maxp5gb3a", 0xfffff000, 0, SROM12_5GB3B2_MAXP, 0x00ff00},
+
+ {"pa5ga", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5GB4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5GB4_PA_W3, 0x00ffff},
+
+ {"pa5g40a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G40B4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5G40B4_PA_W3, 0x00ffff},
+
+ {"pa5g80a", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B0_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B1_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B2_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B3_PA_W3, 0x00ffff},
+
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W0, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W1, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX | SRFL_ARRAY, SROM12_5G80B4_PA_W2, 0x00ffff},
+ {"", 0xfffff000, SRFL_PRHEX, SROM12_5G80B4_PA_W3, 0x00ffff},
+ /* sromrev 13 */
+ {"rxgains2gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0007},
+ {"rxgains2gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x0078},
+ {"rxgains2gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x0080},
+ {"rxgains5gelnagaina", 0xffffe000, 0, SROM13_RXGAINS, 0x0700},
+ {"rxgains5gtrisoa", 0xffffe000, 0, SROM13_RXGAINS, 0x7800},
+ {"rxgains5gtrelnabypa", 0xffffe000, 0, SROM13_RXGAINS, 0x8000},
+ {NULL, 0, 0, 0, 0}
+};
+
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N))
+#define PHY_TYPE_HT 7 /* HT-Phy value */
+#define PHY_TYPE_N 4 /* N-Phy value */
+#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */
+#if !defined(PHY_TYPE_AC)
+#define PHY_TYPE_AC 11 /* AC-Phy value */
+#endif /* !defined(PHY_TYPE_AC) */
+#if !defined(PHY_TYPE_LCN20)
+#define PHY_TYPE_LCN20 12 /* LCN20-Phy value */
+#endif /* !defined(PHY_TYPE_LCN20) */
+#if !defined(PHY_TYPE_NULL)
+#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
+#endif /* !defined(PHY_TYPE_NULL) */
+
+typedef struct {
+ uint16 phy_type;
+ uint16 bandrange;
+ uint16 chain;
+ const char *vars;
+} pavars_t;
+
+static const pavars_t pavars[] = {
+ /* HTPHY */
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5ga2"},
+ /* LCN20PHY */
+ {PHY_TYPE_LCN20, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
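+
+/*
+ * Illustrative sketch (editorial; not part of the original driver): each
+ * pavars table above and below is terminated by a PHY_TYPE_NULL row, so a
+ * caller can walk it with a simple loop.  The helper name and the
+ * BCMSROM_PAVARS_EXAMPLE guard are hypothetical and not compiled by default.
+ */
+#ifdef BCMSROM_PAVARS_EXAMPLE
+static const char *
+pavars_find_example(const pavars_t *tbl, uint16 phy_type, uint16 bandrange, uint16 chain)
+{
+	const pavars_t *pv;
+
+	for (pv = tbl; pv->phy_type != PHY_TYPE_NULL; pv++) {
+		if (pv->phy_type == phy_type && pv->bandrange == bandrange &&
+		    pv->chain == chain)
+			return pv->vars;	/* space-separated PA parameter names */
+	}
+	return NULL;	/* no entry for this phy/band/chain combination */
+}
+#endif /* BCMSROM_PAVARS_EXAMPLE */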
+
+static const pavars_t pavars_SROM12[] = {
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+static const pavars_t pavars_SROM13[] = {
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2ga3"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 0, "pa2g40a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 1, "pa2g40a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 2, "pa2g40a2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G_40, 3, "pa2g40a3"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 2, "pa5ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND, 3, "pa5ga3"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 0, "pa5g40a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 1, "pa5g40a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 2, "pa5g40a2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_40, 3, "pa5g40a3"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 0, "pa5g80a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 1, "pa5g80a1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 2, "pa5g80a2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_5BAND_80, 3, "pa5g80a3"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 1 */
+static const pavars_t pavars_bwver_1[] = {
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gccka0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga2"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5gbw40a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw80a0"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 2 */
+static const pavars_t pavars_bwver_2[] = {
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+/* pavars table when paparambwver is 3 */
+static const pavars_t pavars_bwver_3[] = {
+ /* ACPHY */
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 1, "pa2ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gccka0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 3, "pa2gccka1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 0, "pa5ga0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 1, "pa5ga1"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 2, "pa5gbw4080a0"},
+ {PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_5G_4BAND, 3, "pa5gbw4080a1"},
+ {PHY_TYPE_NULL, 0, 0, ""}
+};
+
+typedef struct {
+ uint16 phy_type;
+ uint16 bandrange;
+ const char *vars;
+} povars_t;
+
+static const povars_t povars[] = {
+ /* NPHY */
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
+ "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
+ "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
+ "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
+ {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
+ "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
+ {PHY_TYPE_NULL, 0, ""}
+};
+
+typedef struct {
+ uint8 tag; /* Broadcom subtag name */
+ uint32 revmask; /* Supported cis_sromrev bitmask. Some of the parameters in
+ * different tuples have the same name. Therefore, the MFGc tool
+ * needs to know which tuple to generate when seeing these
+ * parameters (given that we know sromrev from user input, like the
+ * nvram file).
+ */
+ uint8 len; /* Length field of the tuple, note that it includes the
+ * subtag name (1 byte): 1 + tuple content length
+ */
+ const char *params;
+} cis_tuple_t;
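+/* Illustrative reading of a cis_hnbuvars[] entry below (a sketch of the common cases, not a
+ * full spec of the params syntax): each space-separated token in 'params' is a field name
+ * prefixed with its width in bytes, and "N*M" denotes M consecutive N-byte values. For
+ * example, {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"} describes a tuple whose length field
+ * is 7: 1 byte for the subtag plus a 6-byte 'macaddr' value. Entries whose fields carry a
+ * "0" width prefix are the flagged special cases packed sub-byte.
+ */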
+
+#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
+#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
+#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
+#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+
+/** this array is used by CIS creating/writing applications */
+static const cis_tuple_t cis_hnbuvars[] = {
+/* tag revmask len params */
+ {OTP_RAW1, 0xffffffff, 0, ""}, /* special case */
+ {OTP_VERS_1, 0xffffffff, 0, "smanf sproductname"}, /* special case (non BRCM tuple) */
+ {OTP_MANFID, 0xffffffff, 4, "2manfid 2prodid"}, /* special case (non BRCM tuple) */
+ /* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+ {HNBU_UMANFID, 0xffffffff, 8, "8usbmanfid"},
+ {HNBU_SROMREV, 0xffffffff, 2, "1sromrev"},
+ /* NOTE: subdevid is also written to boardtype.
+ * Need to write HNBU_BOARDTYPE to change it if it is different.
+ */
+ {HNBU_CHIPID, 0xffffffff, 11, "2vendid 2devid 2chiprev 2subvendid 2subdevid"},
+ {HNBU_BOARDREV, 0xffffffff, 3, "2boardrev"},
+ {HNBU_PAPARMS, 0xffffffff, 10, "2pa0b0 2pa0b1 2pa0b2 1pa0itssit 1pa0maxpwr 1opo"},
+ {HNBU_AA, 0xffffffff, 3, "1aa2g 1aa5g"},
+ {HNBU_AA, 0xffffffff, 3, "1aa0 1aa1"}, /* backward compatibility */
+ {HNBU_AG, 0xffffffff, 5, "1ag0 1ag1 1ag2 1ag3"},
+ {HNBU_BOARDFLAGS, 0xffffffff, 21, "4boardflags 4boardflags2 4boardflags3 "
+ "4boardflags4 4boardflags5 "},
+ {HNBU_LEDS, 0xffffffff, 17, "1ledbh0 1ledbh1 1ledbh2 1ledbh3 1ledbh4 1ledbh5 "
+ "1ledbh6 1ledbh7 1ledbh8 1ledbh9 1ledbh10 1ledbh11 1ledbh12 1ledbh13 1ledbh14 1ledbh15"},
+ {HNBU_CCODE, 0xffffffff, 4, "2ccode 1cctl"},
+ {HNBU_CCKPO, 0xffffffff, 3, "2cckpo"},
+ {HNBU_OFDMPO, 0xffffffff, 5, "4ofdmpo"},
+ {HNBU_PAPARMS5G, 0xffffffff, 23, "2pa1b0 2pa1b1 2pa1b2 2pa1lob0 2pa1lob1 2pa1lob2 "
+ "2pa1hib0 2pa1hib1 2pa1hib2 1pa1itssit "
+ "1pa1maxpwr 1pa1lomaxpwr 1pa1himaxpwr"},
+ {HNBU_RDLID, 0xffffffff, 3, "2rdlid"},
+ {HNBU_RSSISMBXA2G, 0xffffffff, 3, "0rssismf2g 0rssismc2g "
+ "0rssisav2g 0bxa2g"}, /* special case */
+ {HNBU_RSSISMBXA5G, 0xffffffff, 3, "0rssismf5g 0rssismc5g "
+ "0rssisav5g 0bxa5g"}, /* special case */
+ {HNBU_XTALFREQ, 0xffffffff, 5, "4xtalfreq"},
+ {HNBU_TRI2G, 0xffffffff, 2, "1tri2g"},
+ {HNBU_TRI5G, 0xffffffff, 4, "1tri5gl 1tri5g 1tri5gh"},
+ {HNBU_RXPO2G, 0xffffffff, 2, "1rxpo2g"},
+ {HNBU_RXPO5G, 0xffffffff, 2, "1rxpo5g"},
+ {HNBU_BOARDNUM, 0xffffffff, 3, "2boardnum"},
+ {HNBU_MACADDR, 0xffffffff, 7, "6macaddr"}, /* special case */
+ {HNBU_RDLSN, 0xffffffff, 3, "2rdlsn"},
+ {HNBU_BOARDTYPE, 0xffffffff, 3, "2boardtype"},
+ {HNBU_LEDDC, 0xffffffff, 3, "2leddc"},
+ {HNBU_RDLRNDIS, 0xffffffff, 2, "1rdlndis"},
+ {HNBU_CHAINSWITCH, 0xffffffff, 5, "1txchain 1rxchain 2antswitch"},
+ {HNBU_REGREV, 0xffffffff, 2, "1regrev"},
+ {HNBU_FEM, 0x000007fe, 5, "0antswctl2g 0triso2g 0pdetrange2g 0extpagain2g "
+ "0tssipos2g 0antswctl5g 0triso5g 0pdetrange5g 0extpagain5g 0tssipos5g"}, /* special case */
+ {HNBU_PAPARMS_C0, 0x000007fe, 31, "1maxp2ga0 1itt2ga0 2pa2gw0a0 2pa2gw1a0 "
+ "2pa2gw2a0 1maxp5ga0 1itt5ga0 1maxp5gha0 1maxp5gla0 2pa5gw0a0 2pa5gw1a0 2pa5gw2a0 "
+ "2pa5glw0a0 2pa5glw1a0 2pa5glw2a0 2pa5ghw0a0 2pa5ghw1a0 2pa5ghw2a0"},
+ {HNBU_PAPARMS_C1, 0x000007fe, 31, "1maxp2ga1 1itt2ga1 2pa2gw0a1 2pa2gw1a1 "
+ "2pa2gw2a1 1maxp5ga1 1itt5ga1 1maxp5gha1 1maxp5gla1 2pa5gw0a1 2pa5gw1a1 2pa5gw2a1 "
+ "2pa5glw0a1 2pa5glw1a1 2pa5glw2a1 2pa5ghw0a1 2pa5ghw1a1 2pa5ghw2a1"},
+ {HNBU_PO_CCKOFDM, 0xffffffff, 19, "2cck2gpo 4ofdm2gpo 4ofdm5gpo 4ofdm5glpo "
+ "4ofdm5ghpo"},
+ {HNBU_PO_MCS2G, 0xffffffff, 17, "2mcs2gpo0 2mcs2gpo1 2mcs2gpo2 2mcs2gpo3 "
+ "2mcs2gpo4 2mcs2gpo5 2mcs2gpo6 2mcs2gpo7"},
+ {HNBU_PO_MCS5GM, 0xffffffff, 17, "2mcs5gpo0 2mcs5gpo1 2mcs5gpo2 2mcs5gpo3 "
+ "2mcs5gpo4 2mcs5gpo5 2mcs5gpo6 2mcs5gpo7"},
+ {HNBU_PO_MCS5GLH, 0xffffffff, 33, "2mcs5glpo0 2mcs5glpo1 2mcs5glpo2 2mcs5glpo3 "
+ "2mcs5glpo4 2mcs5glpo5 2mcs5glpo6 2mcs5glpo7 "
+ "2mcs5ghpo0 2mcs5ghpo1 2mcs5ghpo2 2mcs5ghpo3 "
+ "2mcs5ghpo4 2mcs5ghpo5 2mcs5ghpo6 2mcs5ghpo7"},
+ {HNBU_CCKFILTTYPE, 0xffffffff, 2, "1cckdigfilttype"},
+ {HNBU_PO_CDD, 0xffffffff, 3, "2cddpo"},
+ {HNBU_PO_STBC, 0xffffffff, 3, "2stbcpo"},
+ {HNBU_PO_40M, 0xffffffff, 3, "2bw40po"},
+ {HNBU_PO_40MDUP, 0xffffffff, 3, "2bwduppo"},
+ {HNBU_RDLRWU, 0xffffffff, 2, "1rdlrwu"},
+ {HNBU_WPS, 0xffffffff, 3, "1wpsgpio 1wpsled"},
+ {HNBU_USBFS, 0xffffffff, 2, "1usbfs"},
+ {HNBU_ELNA2G, 0xffffffff, 2, "1elna2g"},
+ {HNBU_ELNA5G, 0xffffffff, 2, "1elna5g"},
+ {HNBU_CUSTOM1, 0xffffffff, 5, "4customvar1"},
+ {OTP_RAW, 0xffffffff, 0, ""}, /* special case */
+ {HNBU_OFDMPO5G, 0xffffffff, 13, "4ofdm5gpo 4ofdm5glpo 4ofdm5ghpo"},
+ {HNBU_USBEPNUM, 0xffffffff, 3, "2usbepnum"},
+ {HNBU_CCKBW202GPO, 0xffffffff, 7, "2cckbw202gpo 2cckbw20ul2gpo 2cckbw20in802gpo"},
+ {HNBU_LEGOFDMBW202GPO, 0xffffffff, 9, "4legofdmbw202gpo 4legofdmbw20ul2gpo"},
+ {HNBU_LEGOFDMBW205GPO, 0xffffffff, 25, "4legofdmbw205glpo 4legofdmbw20ul5glpo "
+ "4legofdmbw205gmpo 4legofdmbw20ul5gmpo 4legofdmbw205ghpo 4legofdmbw20ul5ghpo"},
+ {HNBU_MCS2GPO, 0xffffffff, 17, "4mcsbw202gpo 4mcsbw20ul2gpo 4mcsbw402gpo 4mcsbw802gpo"},
+ {HNBU_MCS5GLPO, 0xffffffff, 13, "4mcsbw205glpo 4mcsbw20ul5glpo 4mcsbw405glpo"},
+ {HNBU_MCS5GMPO, 0xffffffff, 13, "4mcsbw205gmpo 4mcsbw20ul5gmpo 4mcsbw405gmpo"},
+ {HNBU_MCS5GHPO, 0xffffffff, 13, "4mcsbw205ghpo 4mcsbw20ul5ghpo 4mcsbw405ghpo"},
+ {HNBU_MCS32PO, 0xffffffff, 3, "2mcs32po"},
+ {HNBU_LEG40DUPPO, 0xffffffff, 3, "2legofdm40duppo"},
+ {HNBU_TEMPTHRESH, 0xffffffff, 7, "1tempthresh 0temps_period 0temps_hysteresis "
+ "1tempoffset 1tempsense_slope 0tempcorrx 0tempsense_option "
+ "1phycal_tempdelta"}, /* special case */
+ {HNBU_MUXENAB, 0xffffffff, 2, "1muxenab"},
+ {HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
+ "0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
+ "0tssiposslope5g"}, /* special case */
+ {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
+ "1*4maxp5ga0 2*12pa5ga0"},
+ {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+ {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+ {HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"},
+ {HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
+ "2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
+ {HNBU_ACPPR_2GPO, 0xfffff800, 13, "2dot11agofdmhrbw202gpo 2ofdmlrbw202gpo "
+ "2sb20in40dot11agofdm2gpo 2sb20in80dot11agofdm2gpo 2sb20in40ofdmlrbw202gpo "
+ "2sb20in80ofdmlrbw202gpo"},
+ {HNBU_ACPPR_5GPO, 0xfffff800, 59, "4mcsbw805glpo 4mcsbw1605glpo 4mcsbw805gmpo "
+ "4mcsbw1605gmpo 4mcsbw805ghpo 4mcsbw1605ghpo 2mcslr5glpo 2mcslr5gmpo 2mcslr5ghpo "
+ "4mcsbw80p805glpo 4mcsbw80p805gmpo 4mcsbw80p805ghpo 4mcsbw80p805gx1po 2mcslr5gx1po "
+ "2mcslr5g80p80po 4mcsbw805gx1po 4mcsbw1605gx1po"},
+ {HNBU_MCS5Gx1PO, 0xfffff800, 9, "4mcsbw205gx1po 4mcsbw405gx1po"},
+ {HNBU_ACPPR_SBPO, 0xfffff800, 49, "2sb20in40hrpo 2sb20in80and160hr5glpo "
+ "2sb40and80hr5glpo 2sb20in80and160hr5gmpo 2sb40and80hr5gmpo 2sb20in80and160hr5ghpo "
+ "2sb40and80hr5ghpo 2sb20in40lrpo 2sb20in80and160lr5glpo 2sb40and80lr5glpo "
+ "2sb20in80and160lr5gmpo 2sb40and80lr5gmpo 2sb20in80and160lr5ghpo 2sb40and80lr5ghpo "
+ "4dot11agduphrpo 4dot11agduplrpo 2sb20in40and80hrpo 2sb20in40and80lrpo "
+ "2sb20in80and160hr5gx1po 2sb20in80and160lr5gx1po 2sb40and80hr5gx1po 2sb40and80lr5gx1po "
+ },
+ {HNBU_ACPPR_SB8080_PO, 0xfffff800, 23, "2sb2040and80in80p80hr5glpo "
+ "2sb2040and80in80p80lr5glpo 2sb2040and80in80p80hr5gmpo "
+ "2sb2040and80in80p80lr5gmpo 2sb2040and80in80p80hr5ghpo 2sb2040and80in80p80lr5ghpo "
+ "2sb2040and80in80p80hr5gx1po 2sb2040and80in80p80lr5gx1po 2sb20in80p80hr5gpo "
+ "2sb20in80p80lr5gpo 2dot11agduppo"},
+ {HNBU_NOISELVL, 0xfffff800, 16, "1noiselvl2ga0 1noiselvl2ga1 1noiselvl2ga2 "
+ "1*4noiselvl5ga0 1*4noiselvl5ga1 1*4noiselvl5ga2"},
+ {HNBU_RXGAIN_ERR, 0xfffff800, 16, "1rxgainerr2ga0 1rxgainerr2ga1 1rxgainerr2ga2 "
+ "1*4rxgainerr5ga0 1*4rxgainerr5ga1 1*4rxgainerr5ga2"},
+ {HNBU_AGBGA, 0xfffff800, 7, "1agbg0 1agbg1 1agbg2 1aga0 1aga1 1aga2"},
+ {HNBU_USBDESC_COMPOSITE, 0xffffffff, 3, "2usbdesc_composite"},
+ {HNBU_UUID, 0xffffffff, 17, "16uuid"},
+ {HNBU_WOWLGPIO, 0xffffffff, 2, "1wowl_gpio"},
+ {HNBU_ACRXGAINS_C0, 0xfffff800, 5, "0rxgains5gtrelnabypa0 0rxgains5gtrisoa0 "
+ "0rxgains5gelnagaina0 0rxgains2gtrelnabypa0 0rxgains2gtrisoa0 0rxgains2gelnagaina0 "
+ "0rxgains5ghtrelnabypa0 0rxgains5ghtrisoa0 0rxgains5ghelnagaina0 0rxgains5gmtrelnabypa0 "
+ "0rxgains5gmtrisoa0 0rxgains5gmelnagaina0"}, /* special case */
+ {HNBU_ACRXGAINS_C1, 0xfffff800, 5, "0rxgains5gtrelnabypa1 0rxgains5gtrisoa1 "
+ "0rxgains5gelnagaina1 0rxgains2gtrelnabypa1 0rxgains2gtrisoa1 0rxgains2gelnagaina1 "
+ "0rxgains5ghtrelnabypa1 0rxgains5ghtrisoa1 0rxgains5ghelnagaina1 0rxgains5gmtrelnabypa1 "
+ "0rxgains5gmtrisoa1 0rxgains5gmelnagaina1"}, /* special case */
+ {HNBU_ACRXGAINS_C2, 0xfffff800, 5, "0rxgains5gtrelnabypa2 0rxgains5gtrisoa2 "
+ "0rxgains5gelnagaina2 0rxgains2gtrelnabypa2 0rxgains2gtrisoa2 0rxgains2gelnagaina2 "
+ "0rxgains5ghtrelnabypa2 0rxgains5ghtrisoa2 0rxgains5ghelnagaina2 0rxgains5gmtrelnabypa2 "
+ "0rxgains5gmtrisoa2 0rxgains5gmelnagaina2"}, /* special case */
+ {HNBU_TXDUTY, 0xfffff800, 9, "2tx_duty_cycle_ofdm_40_5g "
+ "2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"},
+ {HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 "
+ "0pdoffset2g40ma2 0pdoffset2g40mvalid"},
+ {HNBU_ACPA_CCK, 0xfffff800, 7, "2*3pa2gccka0"},
+ {HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"},
+ {HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"},
+ {HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"},
+ {HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"},
+ {HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"},
+ {HNBU_TXBFRPCALS, 0xfffff800, 11,
+ "2rpcal2g 2rpcal5gb0 2rpcal5gb1 2rpcal5gb2 2rpcal5gb3"}, /* txbf rpcalvars */
+ {HNBU_GPIO_PULL_DOWN, 0xffffffff, 5, "4gpdn"},
+ {0xFF, 0xffffffff, 0, ""}
+};
+
+#endif /* _bcmsrom_tbl_h_ */
/*
* Misc useful os-independent macros and functions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmutils.h 504037 2014-09-22 19:03:15Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmutils.h 563776 2015-06-15 15:51:15Z $
*/
#ifndef _bcmutils_h_
#define _bcmutils_h_
-#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src))
-#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
-#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src))
#ifdef __cplusplus
extern "C" {
#endif
-#ifdef PKTQ_LOG
-#include <wlioctl.h>
-#endif
+#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
+#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count))
+#define bcm_snprintf_s snprintf
+#define bcm_sprintf_s snprintf
+
+/*
+ * bcm_strcpy_s (formerly mapped to strncpy((dst), (src), (count))) is no longer provided;
+ * use bcm_strncpy_s instead as it is the safer option.
+ * bcm_strcat_s: use bcm_strncat_s as the safer option.
+ */
/* ctype replacement */
#define _BCM_U 0x01 /* upper */
#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+#define KB(bytes) (((bytes) + 1023) / 1024)
+
/* Buffer structure for collecting string-formatted data
* using bcm_bprintf() API.
* Use bcm_binit() to initialize before use
 unsigned int origsize; /* unmodified original buffer size in bytes */
};
+#define BCMSTRBUF_LEN(b) (b->size)
+#define BCMSTRBUF_BUF(b) (b->buf)
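+/* Minimal usage sketch (assumes only bcm_binit(), declared later in this header, and the
+ * accessor macros above; the 64-byte local buffer is an arbitrary example size):
+ *
+ *	char local[64];
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, local, sizeof(local));
+ *	// format into 'local' via bcm_bprintf(&b, ...); BCMSTRBUF_BUF(&b) then points at the
+ *	// current write position and BCMSTRBUF_LEN(&b) at the size field the struct tracks
+ */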
+
/* ** driver-only section ** */
#ifdef BCMDRIVER
#include <osl.h>
#define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID)
#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE)
+#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */
struct reorder_rxcpl_id_list {
uint16 head;
extern uint pktsegcnt_war(osl_t *osh, void *p);
extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset);
extern void *pktoffset(osl_t *osh, void *p, uint offset);
+/* Added to adjust 802.1x priority */
+extern void pktset8021xprio(void *pkt, int prio);
/* Get priority from a packet and pass it back in scb (or equiv) */
#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */
#define DSCP_EF 0x2E
extern uint pktsetprio(void *pkt, bool update_vtag);
+extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
/* string */
#define bcmtslog(tstamp, fmt, a1, a2)
#define bcmprinttslogs()
#define bcmprinttstamp(us)
-#define bcmdumptslog(buf, size)
+#define bcmdumptslog(b)
extern char *bcm_nvram_vars(uint *length);
extern int bcm_nvram_cache(void *sih);
#define BCME_MICERR -50 /* Integrity/MIC error */
#define BCME_REPLAY -51 /* Replay */
#define BCME_IE_NOTFOUND -52 /* IE not found */
-#define BCME_LAST BCME_IE_NOTFOUND
+#define BCME_DATA_NOTFOUND -53 /* Complete data not found in buffer */
+#define BCME_LAST BCME_DATA_NOTFOUND
#define BCME_NOTENABLED BCME_DISABLED
"MIC error", \
"Replay", \
"IE not found", \
+ "Data not found", \
}
#ifndef ABS
#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */
static INLINE uint32 /* 32bit word aligned xor-32 */
-bcm_compute_xor32(volatile uint32 *u32, int num_u32)
+bcm_compute_xor32(volatile uint32 *u32_val, int num_u32)
{
- int i;
+ int idx;
uint32 xor32 = 0;
- for (i = 0; i < num_u32; i++)
- xor32 ^= *(u32 + i);
+ for (idx = 0; idx < num_u32; idx++)
+ xor32 ^= *(u32_val + idx);
return xor32;
}
/* IE parsing */
+/* packing is required if struct is passed across the bus */
+#include <packed_section_start.h>
/* tag_ID/length/value_buffer tuple */
typedef struct bcm_tlv {
uint8 id;
} bcm_tlv_t;
/* bcm tlv w/ 16 bit id/len */
-typedef struct bcm_xtlv {
+typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv {
uint16 id;
uint16 len;
uint8 data[1];
-} bcm_xtlv_t;
+} BWL_POST_PACKED_STRUCT bcm_xtlv_t;
+#include <packed_section_end.h>
+
/* descriptor of xtlv data src or dst */
typedef struct {
void *ptr; /* ptr to memory location */
} xtlv_desc_t;
-/* set a var from xtlv buffer */
-typedef int
-(bcm_set_var_from_tlv_cbfn_t)(void *ctx, void **tlv_buf, uint16 type, uint16 len);
-
-struct bcm_tlvbuf {
- uint16 size;
- uint8 *head; /* point to head of buffer */
- uint8 *buf; /* current position of buffer */
- /* followed by the allocated buffer */
+/* xtlv options */
+#define BCM_XTLV_OPTION_NONE 0x0000
+#define BCM_XTLV_OPTION_ALIGN32 0x0001
+
+typedef uint16 bcm_xtlv_opts_t;
+struct bcm_xtlvbuf {
+ bcm_xtlv_opts_t opts;
+ uint16 size;
+ uint8 *head; /* point to head of buffer */
+ uint8 *buf; /* current position of buffer */
+ /* allocated buffer may follow, but not necessarily */
};
+typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
#define BCM_TLV_MAX_DATA_SIZE (255)
#define BCM_XTLV_MAX_DATA_SIZE (65535)
#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+/* LEN only stores the value's length without padding */
#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
-#define BCM_XTLV_SIZE(elt) (BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt))
+/* entire size of the XTLV including header, data, and optional padding */
+#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts)
+#define bcm_valid_xtlv(elt, buflen, opts) (elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts)))
/* Check that bcm_tlv_t fits into the given buflen */
#define bcm_valid_tlv(elt, buflen) (\
((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
-#define bcm_valid_xtlv(elt, buflen) (\
- ((int)(buflen) >= (int)BCM_XTLV_HDR_SIZE) && \
- ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt)))
extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
/* xtlv */
-extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen);
-extern struct bcm_tlvbuf *bcm_xtlv_buf_alloc(void *osh, uint16 len);
-extern void bcm_xtlv_buf_free(void *osh, struct bcm_tlvbuf *tbuf);
-extern uint16 bcm_xtlv_buf_len(struct bcm_tlvbuf *tbuf);
-extern uint16 bcm_xtlv_buf_rlen(struct bcm_tlvbuf *tbuf);
-extern uint8 *bcm_xtlv_buf(struct bcm_tlvbuf *tbuf);
-extern uint8 *bcm_xtlv_head(struct bcm_tlvbuf *tbuf);
-extern int bcm_xtlv_put_data(struct bcm_tlvbuf *tbuf, uint16 type, const void *data, uint16 dlen);
-extern int bcm_xtlv_put_8(struct bcm_tlvbuf *tbuf, uint16 type, const int8 data);
-extern int bcm_xtlv_put_16(struct bcm_tlvbuf *tbuf, uint16 type, const int16 data);
-extern int bcm_xtlv_put_32(struct bcm_tlvbuf *tbuf, uint16 type, const int32 data);
-extern int bcm_unpack_xtlv_entry(void **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst);
-extern int bcm_skip_xtlv(void **tlv_buf);
-extern int bcm_pack_xtlv_entry(void **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src);
-extern int bcm_unpack_xtlv_buf(void *ctx,
- void *tlv_buf, uint16 buflen, bcm_set_var_from_tlv_cbfn_t *cbfn);
-extern int
-bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items);
-extern int
-bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items);
-extern int
-bcm_pack_xtlv_entry_from_hex_string(void **tlv_buf, uint16 *buflen, uint16 type, char *hex);
+
+/* return the next xtlv element, and update buffer len (remaining). Buffer length
+ * updated includes padding as specified by options
+ */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts);
+
+/* initialize an xtlv buffer. Use options specified for packing/unpacking using
+ * the buffer. Caller is responsible for allocating both buffers.
+ */
+extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len,
+ bcm_xtlv_opts_t opts);
+
+extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf);
+extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen);
+extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data);
+extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data);
+extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data);
+extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len,
+ void *dst, bcm_xtlv_opts_t opts);
+extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len,
+ void *src, bcm_xtlv_opts_t opts);
+extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+
+/* callback for unpacking xtlv from a buffer into context. */
+typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len);
+
+/* unpack a tlv buffer using buffer, options, and callback */
+extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn);
+
+/* unpack a set of tlvs from the buffer using provided xtlv desc */
+extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts);
+
+/* pack a set of tlvs into buffer using provided xtlv desc */
+extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts);
+
+/* return the data pointer of a given ID from an xtlv buffer;
+ * the xtlv data length is returned in *datalen_out if the returned pointer is valid
+ */
+extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+ uint16 *datalen_out, bcm_xtlv_opts_t opts);
+
+/* callback to return the next tlv id and len to pack, whether there are more tlvs to come,
+ * and options, e.g. alignment
+ */
+typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
+
+/* callback to pack the tlv into length validated buffer */
+typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
+ uint16 tlv_id, uint16 tlv_len, uint8* buf);
+
+/* pack a set of tlvs into a buffer, using get_next to iterate */
+int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
+ bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
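+/* Packing sketch using only the declarations above (the tag value 0x10 and the buffer size
+ * are arbitrary placeholders, not real iovar or tag IDs):
+ *
+ *	uint8 raw[64];
+ *	bcm_xtlvbuf_t tb;
+ *
+ *	if (bcm_xtlv_buf_init(&tb, raw, sizeof(raw), BCM_XTLV_OPTION_ALIGN32) == BCME_OK) {
+ *		bcm_xtlv_put_16(&tb, 0x10, 42);      // one 16-bit value under tag 0x10
+ *		// bcm_xtlv_buf_len(&tb) then reports how many bytes of 'raw' were consumed
+ *	}
+ */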
/* bcmerror */
extern const char *bcmerrorstr(int bcmerror);
/* generic datastruct to help dump routines */
struct fielddesc {
const char *nameandfmt;
- uint32 offset;
- uint32 len;
+ uint32 offset;
+ uint32 len;
};
extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
-extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len);
+extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline,
+ const uint8 *buf, int len);
extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount);
extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes);
/* power conversion */
extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
extern uint8 bcm_mw_to_qdbm(uint16 mw);
-extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
+extern uint bcm_mkiovar(const char *name, char *data, uint datalen, char *buf, uint len);
unsigned int process_nvram_vars(char *varbuf, unsigned int len);
+/* trace object allocation / free, with or without features (flags) set on the object */
+
+#define BCM_OBJDBG_ADD 1
+#define BCM_OBJDBG_REMOVE 2
+#define BCM_OBJDBG_ADD_PKT 3
+
+/* object feature: set or clear flags */
+#define BCM_OBJECT_FEATURE_FLAG 1
+#define BCM_OBJECT_FEATURE_PKT_STATE 2
+/* object feature: flag bits */
+#define BCM_OBJECT_FEATURE_0 (1 << 0)
+#define BCM_OBJECT_FEATURE_1 (1 << 1)
+#define BCM_OBJECT_FEATURE_2 (1 << 2)
+/* object feature: clear flag bits field set with this flag */
+#define BCM_OBJECT_FEATURE_CLEAR (1 << 31)
+#ifdef BCM_OBJECT_TRACE
+#define bcm_pkt_validate_chk(obj) do { \
+ void * pkttag; \
+ bcm_object_trace_chk(obj, 0, 0, \
+ __FUNCTION__, __LINE__); \
+ if ((pkttag = PKTTAG(obj))) { \
+ bcm_object_trace_chk(obj, 1, DHD_PKTTAG_SN(pkttag), \
+ __FUNCTION__, __LINE__); \
+ } \
+} while (0)
+extern void bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line);
+extern void bcm_object_trace_upd(void *obj, void *obj_new);
+extern void bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+ const char *caller, int line);
+extern void bcm_object_feature_set(void *obj, uint32 type, uint32 value);
+extern int bcm_object_feature_get(void *obj, uint32 type, uint32 value);
+extern void bcm_object_trace_init(void);
+extern void bcm_object_trace_deinit(void);
+#else
+#define bcm_pkt_validate_chk(obj)
+#define bcm_object_trace_opr(a, b, c, d)
+#define bcm_object_trace_upd(a, b)
+#define bcm_object_trace_chk(a, b, c, d, e)
+#define bcm_object_feature_set(a, b, c)
+#define bcm_object_feature_get(a, b, c)
+#define bcm_object_trace_init()
+#define bcm_object_trace_deinit()
+#endif /* BCM_OBJECT_TRACE */
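+/* Hedged usage sketch (call sites vary per build): with BCM_OBJECT_TRACE defined, a packet
+ * allocator can record the new object and its free path can remove it, e.g.
+ *
+ *	bcm_object_trace_opr(pkt, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);  // on alloc
+ *	bcm_object_trace_opr(pkt, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);   // on free
+ *
+ * With BCM_OBJECT_TRACE undefined the calls compile away via the stub macros above.
+ */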
+
/* calculate a * b + c */
extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
/* calculate a / b */
};
static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
-bcm_cntsetbits(const uint32 u32)
+bcm_cntsetbits(const uint32 u32arg)
{
/* function local scope declaration of const _CSBTBL[] */
- const uint8 * p = (const uint8 *)&u32;
+ const uint8 * p = (const uint8 *)&u32arg;
return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
}
static INLINE int /* C equivalent count of leading 0's in a u32 */
-C_bcm_count_leading_zeros(uint32 u32)
+C_bcm_count_leading_zeros(uint32 u32arg)
{
int shifts = 0;
- while (u32) {
- shifts++; u32 >>= 1;
+ while (u32arg) {
+ shifts++; u32arg >>= 1;
}
return (32U - shifts);
}
*/
#if defined(__arm__)
-
#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
#define __USE_ASM_CLZ__
#endif /* __ARM_ARCH_7M__ */
-
#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
#define __USE_ASM_CLZ__
#endif /* __ARM_ARCH_7R__ */
-
#endif /* __arm__ */
static INLINE int
-bcm_count_leading_zeros(uint32 u32)
+bcm_count_leading_zeros(uint32 u32arg)
{
#if defined(__USE_ASM_CLZ__)
int zeros;
- __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32));
+ __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32arg));
return zeros;
#else /* C equivalent */
- return C_bcm_count_leading_zeros(u32);
+ return C_bcm_count_leading_zeros(u32arg);
#endif /* C equivalent */
}
+/*
+ * Macro to count leading zeroes
+ */
+#if defined(__GNUC__)
+#define CLZ(x) __builtin_clzl(x)
+#elif defined(__arm__)
+#define CLZ(x) __clz(x)
+#else
+#define CLZ(x) bcm_count_leading_zeros(x)
+#endif /* __GNUC__ */
+
/* INTERFACE: Multiword bitmap based small id allocator. */
struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */
/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
#define ID16_INVALID ((uint16)(~0))
+#define ID16_UNDEFINED (ID16_INVALID)
/*
* Construct a 16bit id allocator, managing 16bit ids in the range:
/* Audit the 16bit id allocator state. */
extern bool id16_map_audit(void * id16_map_hndl);
/* End - Simple 16bit Id Allocator. */
-
#endif /* BCMDRIVER */
extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
void counter_printlog(counter_tbl_t *ctr_tbl);
#endif /* DEBUG_COUNTER */
+/* Given a number 'n', returns the smallest power of 2 that is >= 'n' ('n' itself if it is already a power of 2) */
+static INLINE uint32 next_larger_power2(uint32 num)
+{
+ num--;
+ num |= (num >> 1);
+ num |= (num >> 2);
+ num |= (num >> 4);
+ num |= (num >> 8);
+ num |= (num >> 16);
+ return (num + 1);
+}
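+/* Worked example of the helper above: next_larger_power2(600) decrements to 599 (0x257),
+ * bit-fills it to 0x3ff, and returns 0x400 (1024); an exact power of two such as 1024 maps
+ * to itself.
+ */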
+
#endif /* _bcmutils_h_ */
/*
* Definitions for nl80211 vendor command/event access to host driver
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: brcm_nl80211.h 487126 2014-06-24 23:06:12Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: brcm_nl80211.h 556083 2015-05-12 14:03:00Z $
*
*/
enum wl_vendor_subcmd {
BRCM_VENDOR_SCMD_UNSPEC,
- BRCM_VENDOR_SCMD_PRIV_STR
+ BRCM_VENDOR_SCMD_PRIV_STR,
+ BRCM_VENDOR_SCMD_BCM_STR
};
+
struct bcm_nlmsg_hdr {
uint cmd; /* common ioctl definition */
- uint len; /* expected return buffer length */
+ int len; /* expected return buffer length */
uint offset; /* user buffer offset */
uint set; /* get or set request optional */
uint magic; /* magic number for verification */
--- /dev/null
+/*
+ * Dongle BUS interface Abstraction layer
+ * target serial buses like USB, SDIO, SPI, etc.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dbus.h 553311 2015-04-29 10:23:08Z $
+ */
+
+#ifndef __DBUS_H__
+#define __DBUS_H__
+
+#include "typedefs.h"
+
+#define DBUSTRACE(args)
+#define DBUSERR(args)
+#define DBUSINFO(args)
+#define DBUSDBGLOCK(args)
+
+enum {
+ DBUS_OK = 0,
+ DBUS_ERR = -200,
+ DBUS_ERR_TIMEOUT,
+ DBUS_ERR_DISCONNECT,
+ DBUS_ERR_NODEVICE,
+ DBUS_ERR_UNSUPPORTED,
+ DBUS_ERR_PENDING,
+ DBUS_ERR_NOMEM,
+ DBUS_ERR_TXFAIL,
+ DBUS_ERR_TXTIMEOUT,
+ DBUS_ERR_TXDROP,
+ DBUS_ERR_RXFAIL,
+ DBUS_ERR_RXDROP,
+ DBUS_ERR_TXCTLFAIL,
+ DBUS_ERR_RXCTLFAIL,
+ DBUS_ERR_REG_PARAM,
+ DBUS_STATUS_CANCELLED,
+ DBUS_ERR_NVRAM,
+ DBUS_JUMBO_NOMATCH,
+ DBUS_JUMBO_BAD_FORMAT,
+ DBUS_NVRAM_NONTXT,
+ DBUS_ERR_RXZLP
+};
+
+#define BCM_OTP_SIZE_43236 84 /* number of 16 bit values */
+#define BCM_OTP_SW_RGN_43236 24 /* start offset of SW config region */
+#define BCM_OTP_ADDR_43236 0x18000800 /* address of otp base */
+
+#define ERR_CBMASK_TXFAIL 0x00000001
+#define ERR_CBMASK_RXFAIL 0x00000002
+#define ERR_CBMASK_ALL 0xFFFFFFFF
+
+#define DBUS_CBCTL_WRITE 0
+#define DBUS_CBCTL_READ 1
+#if defined(INTR_EP_ENABLE)
+#define DBUS_CBINTR_POLL 2
+#endif /* defined(INTR_EP_ENABLE) */
+
+#define DBUS_TX_RETRY_LIMIT 3 /* retries for failed txirb */
+#define DBUS_TX_TIMEOUT_INTERVAL 250 /* timeout for txirb complete, in ms */
+
+#define DBUS_BUFFER_SIZE_TX 32000
+#define DBUS_BUFFER_SIZE_RX 24000
+
+#define DBUS_BUFFER_SIZE_TX_NOAGG 2048
+#define DBUS_BUFFER_SIZE_RX_NOAGG 2048
+
+/** DBUS types */
+enum {
+ DBUS_USB,
+ DBUS_SDIO,
+ DBUS_SPI,
+ DBUS_UNKNOWN
+};
+
+enum dbus_state {
+ DBUS_STATE_DL_PENDING,
+ DBUS_STATE_DL_DONE,
+ DBUS_STATE_UP,
+ DBUS_STATE_DOWN,
+ DBUS_STATE_PNP_FWDL,
+ DBUS_STATE_DISCONNECT,
+ DBUS_STATE_SLEEP,
+ DBUS_STATE_DL_NEEDED
+};
+
+enum dbus_pnp_state {
+ DBUS_PNP_DISCONNECT,
+ DBUS_PNP_SLEEP,
+ DBUS_PNP_RESUME
+};
+
+enum dbus_file {
+ DBUS_FIRMWARE,
+ DBUS_NVFILE
+};
+
+typedef enum _DEVICE_SPEED {
+ INVALID_SPEED = -1,
+ LOW_SPEED = 1, /**< USB 1.1: 1.5 Mbps */
+ FULL_SPEED, /**< USB 1.1: 12 Mbps */
+ HIGH_SPEED, /**< USB 2.0: 480 Mbps */
+ SUPER_SPEED, /**< USB 3.0: 4.8 Gbps */
+} DEVICE_SPEED;
+
+typedef struct {
+ int bustype;
+ int vid;
+ int pid;
+ int devid;
+ int chiprev; /**< chip revision number */
+ int mtu;
+ int nchan; /**< Data Channels */
+ int has_2nd_bulk_in_ep;
+} dbus_attrib_t;
+
+/* FIX: Account for errors related to DBUS;
+ * Let upper layer account for packets/bytes
+ */
+typedef struct {
+ uint32 rx_errors;
+ uint32 tx_errors;
+ uint32 rx_dropped;
+ uint32 tx_dropped;
+} dbus_stats_t;
+
+/**
+ * Configurable BUS parameters
+ */
+enum {
+ DBUS_CONFIG_ID_RXCTL_DEFERRES = 1,
+ DBUS_CONFIG_ID_AGGR_LIMIT
+};
+
+typedef struct {
+ uint32 config_id;
+ union {
+ bool rxctl_deferrespok;
+ struct {
+ int maxrxsf;
+ int maxrxsize;
+ int maxtxsf;
+ int maxtxsize;
+ } aggr_param;
+ };
+} dbus_config_t;
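+/* Config sketch (assumes only the types above and dbus_set_config(), declared below; the
+ * chosen value is illustrative, not a recommended default):
+ *
+ *	dbus_config_t cfg;
+ *	cfg.config_id = DBUS_CONFIG_ID_RXCTL_DEFERRES;
+ *	cfg.rxctl_deferrespok = TRUE;
+ *	// dbus_set_config(pub, &cfg) pushes the setting down to the bus layer
+ */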
+
+/**
+ * External Download Info
+ */
+typedef struct dbus_extdl {
+ uint8 *fw;
+ int fwlen;
+ uint8 *vars;
+ int varslen;
+} dbus_extdl_t;
+
+struct dbus_callbacks;
+struct exec_parms;
+
+typedef void *(*probe_cb_t)(void *arg, const char *desc, uint32 bustype, uint32 hdrlen);
+typedef void (*disconnect_cb_t)(void *arg);
+typedef void *(*exec_cb_t)(struct exec_parms *args);
+
+/** Client callbacks registered during dbus_attach() */
+typedef struct dbus_callbacks {
+ void (*send_complete)(void *cbarg, void *info, int status);
+ void (*recv_buf)(void *cbarg, uint8 *buf, int len);
+ void (*recv_pkt)(void *cbarg, void *pkt);
+ void (*txflowcontrol)(void *cbarg, bool onoff);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+} dbus_callbacks_t;
+
+struct dbus_pub;
+struct bcmstrbuf;
+struct dbus_irb;
+struct dbus_irb_rx;
+struct dbus_irb_tx;
+struct dbus_intf_callbacks;
+
+typedef struct {
+ void* (*attach)(struct dbus_pub *pub, void *cbarg, struct dbus_intf_callbacks *cbs);
+ void (*detach)(struct dbus_pub *pub, void *bus);
+
+ int (*up)(void *bus);
+ int (*down)(void *bus);
+ int (*send_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*recv_irb)(void *bus, struct dbus_irb_rx *rxirb);
+ int (*cancel_irb)(void *bus, struct dbus_irb_tx *txirb);
+ int (*send_ctl)(void *bus, uint8 *buf, int len);
+ int (*recv_ctl)(void *bus, uint8 *buf, int len);
+ int (*get_stats)(void *bus, dbus_stats_t *stats);
+ int (*get_attrib)(void *bus, dbus_attrib_t *attrib);
+
+ int (*pnp)(void *bus, int evnt);
+ int (*remove)(void *bus);
+ int (*resume)(void *bus);
+ int (*suspend)(void *bus);
+ int (*stop)(void *bus);
+ int (*reset)(void *bus);
+
+ /* Access to bus buffers directly */
+ void *(*pktget)(void *bus, int len);
+ void (*pktfree)(void *bus, void *pkt);
+
+ int (*iovar_op)(void *bus, const char *name, void *params, int plen, void *arg, int len,
+ bool set);
+ void (*dump)(void *bus, struct bcmstrbuf *strbuf);
+ int (*set_config)(void *bus, dbus_config_t *config);
+ int (*get_config)(void *bus, dbus_config_t *config);
+
+ bool (*device_exists)(void *bus);
+ bool (*dlneeded)(void *bus);
+ int (*dlstart)(void *bus, uint8 *fw, int len);
+ int (*dlrun)(void *bus);
+ bool (*recv_needed)(void *bus);
+
+ void *(*exec_rxlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+ void *(*exec_txlock)(void *bus, exec_cb_t func, struct exec_parms *args);
+
+ int (*tx_timer_init)(void *bus);
+ int (*tx_timer_start)(void *bus, uint timeout);
+ int (*tx_timer_stop)(void *bus);
+
+ int (*sched_dpc)(void *bus);
+ int (*lock)(void *bus);
+ int (*unlock)(void *bus);
+ int (*sched_probe_cb)(void *bus);
+
+ int (*shutdown)(void *bus);
+
+ int (*recv_stop)(void *bus);
+ int (*recv_resume)(void *bus);
+
+ int (*recv_irb_from_ep)(void *bus, struct dbus_irb_rx *rxirb, uint ep_idx);
+
+ int (*readreg)(void *bus, uint32 regaddr, int datalen, uint32 *value);
+
+ /* Add from the bottom */
+} dbus_intf_t;
+
+typedef struct dbus_pub {
+ struct osl_info *osh;
+ dbus_stats_t stats;
+ dbus_attrib_t attrib;
+ enum dbus_state busstate;
+ DEVICE_SPEED device_speed;
+ int ntxq, nrxq, rxsize;
+ void *bus;
+ struct shared_info *sh;
+ void *dev_info;
+} dbus_pub_t;
+
+#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
+
+#define ALIGNED_LOCAL_VARIABLE(var, align) \
+ uint8 buffer[SDALIGN+64]; \
+ uint8 *var = (uint8 *)(((uintptr)&buffer[0]) & ~(align-1)) + align;
+
+/*
+ * Public Bus Function Interface
+ */
+
+/*
+ * FIX: Is there a better way to pass OS/Host handles to DBUS while still
+ * maintaining a common interface for all OSes?
+ * Under NDIS, param1 needs to be MiniportHandle
+ * For NDIS60, param2 is WdfDevice
+ * Under Linux, param1 and param2 are NULL;
+ */
+extern int dbus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ void *param1, void *param2);
+extern int dbus_deregister(void);
+
+extern dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq, int ntxq,
+ void *cbarg, dbus_callbacks_t *cbs, dbus_extdl_t *extdl, struct shared_info *sh);
+extern void dbus_detach(dbus_pub_t *pub);
+
+extern int dbus_download_firmware(dbus_pub_t *pub);
+extern int dbus_up(dbus_pub_t *pub);
+extern int dbus_down(dbus_pub_t *pub);
+extern int dbus_stop(dbus_pub_t *pub);
+extern int dbus_shutdown(dbus_pub_t *pub);
+extern void dbus_flowctrl_rx(dbus_pub_t *pub, bool on);
+
+extern int dbus_send_txdata(dbus_pub_t *dbus, void *pktbuf);
+extern int dbus_send_buf(dbus_pub_t *pub, uint8 *buf, int len, void *info);
+extern int dbus_send_pkt(dbus_pub_t *pub, void *pkt, void *info);
+extern int dbus_send_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_ctl(dbus_pub_t *pub, uint8 *buf, int len);
+extern int dbus_recv_bulk(dbus_pub_t *pub, uint32 ep_idx);
+extern int dbus_poll_intr(dbus_pub_t *pub);
+extern int dbus_get_stats(dbus_pub_t *pub, dbus_stats_t *stats);
+extern int dbus_get_attrib(dbus_pub_t *pub, dbus_attrib_t *attrib);
+extern int dbus_get_device_speed(dbus_pub_t *pub);
+extern int dbus_set_config(dbus_pub_t *pub, dbus_config_t *config);
+extern int dbus_get_config(dbus_pub_t *pub, dbus_config_t *config);
+extern void * dbus_get_devinfo(dbus_pub_t *pub);
+
+extern void *dbus_pktget(dbus_pub_t *pub, int len);
+extern void dbus_pktfree(dbus_pub_t *pub, void* pkt);
+
+extern int dbus_set_errmask(dbus_pub_t *pub, uint32 mask);
+extern int dbus_pnp_sleep(dbus_pub_t *pub);
+extern int dbus_pnp_resume(dbus_pub_t *pub, int *fw_reload);
+extern int dbus_pnp_disconnect(dbus_pub_t *pub);
+
+extern int dbus_iovar_op(dbus_pub_t *pub, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+extern void *dhd_dbus_txq(const dbus_pub_t *pub);
+extern uint dhd_dbus_hdrlen(const dbus_pub_t *pub);
+
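+/* Attach/bring-up sketch for the public interface above (hedged: my_probe, my_disconnect,
+ * my_cbs, and the osh/vid/pid/queue-depth values are caller-supplied placeholders, not
+ * defaults of this API):
+ *
+ *	static void *my_probe(void *arg, const char *desc, uint32 bustype, uint32 hdrlen);
+ *	static void my_disconnect(void *arg);
+ *
+ *	dbus_register(vid, pid, my_probe, my_disconnect, prarg, NULL, NULL); // Linux: params NULL
+ *	dbus_pub_t *pub = dbus_attach(osh, rxsize, nrxq, ntxq, cbarg, &my_cbs, NULL, NULL);
+ *	if (pub && dbus_download_firmware(pub) == DBUS_OK && dbus_up(pub) == DBUS_OK) {
+ *		// bus reaches DBUS_STATE_UP; dbus_send_pkt()/dbus_send_ctl() may then be used
+ *	}
+ */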
+/*
+ * Private Common Bus Interface
+ */
+
+/** IO Request Block (IRB) */
+typedef struct dbus_irb {
+ struct dbus_irb *next; /**< it is cast from a dbus_irb_tx or dbus_irb_rx struct */
+} dbus_irb_t;
+
+typedef struct dbus_irb_rx {
+ struct dbus_irb irb; /* Must be first */
+ uint8 *buf;
+ int buf_len;
+ int actual_len;
+ void *pkt;
+ void *info;
+ void *arg;
+} dbus_irb_rx_t;
+
+typedef struct dbus_irb_tx {
+ struct dbus_irb irb; /** Must be first */
+ uint8 *buf; /** mutually exclusive with struct member 'pkt' */
+ int len; /** length of field 'buf' */
+ void *pkt; /** mutually exclusive with struct member 'buf' */
+ int retry_count;
+ void *info;
+ void *arg;
+ void *send_buf; /**< linear buffer for LINUX when aggregation is enabled */
+} dbus_irb_tx_t;
+
+/**
+ * DBUS interface callbacks are different from user callbacks
+ * so, internally, different info can be passed to upper layer
+ */
+typedef struct dbus_intf_callbacks {
+ void (*send_irb_timeout)(void *cbarg, dbus_irb_tx_t *txirb);
+ void (*send_irb_complete)(void *cbarg, dbus_irb_tx_t *txirb, int status);
+ void (*recv_irb_complete)(void *cbarg, dbus_irb_rx_t *rxirb, int status);
+ void (*errhandler)(void *cbarg, int err);
+ void (*ctl_complete)(void *cbarg, int type, int status);
+ void (*state_change)(void *cbarg, int state);
+ bool (*isr)(void *cbarg, bool *wantdpc);
+ bool (*dpc)(void *cbarg, bool bounded);
+ void (*watchdog)(void *cbarg);
+ void *(*pktget)(void *cbarg, uint len, bool send);
+ void (*pktfree)(void *cbarg, void *p, bool send);
+ struct dbus_irb* (*getirb)(void *cbarg, bool send);
+ void (*rxerr_indicate)(void *cbarg, bool on);
+} dbus_intf_callbacks_t;
+
+/*
+ * Porting: To support new bus, port these functions below
+ */
+
+/*
+ * Bus specific Interface
+ * Implemented by dbus_usb.c/dbus_sdio.c
+ */
+extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb, void *prarg,
+ dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_deregister(void);
+extern void dbus_bus_fw_get(void *bus, uint8 **fw, int *fwlen, int *decomp);
+
+/*
+ * Bus-specific and OS-specific Interface
+ * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
+ */
+extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf, void *param1, void *param2);
+extern int dbus_bus_osl_deregister(void);
+
+/*
+ * Bus-specific, OS-specific, HW-specific Interface
+ * Mainly for SDIO Host HW controller
+ */
+extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb, disconnect_cb_t discb,
+ void *prarg, dbus_intf_t **intf);
+extern int dbus_bus_osl_hw_deregister(void);
+
+extern uint usbdev_bulkin_eps(void);
+#if defined(BCM_REQUEST_FW)
+extern void *dbus_get_fw_nvfile(int devid, int chiprev, uint8 **fw, int *fwlen, int type,
+ uint16 boardtype, uint16 boardrev);
+extern void dbus_release_fw_nvfile(void *firmware);
+#endif /* #if defined(BCM_REQUEST_FW) */
+
+
+#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ /* Backward compatibility */
+ typedef unsigned int gfp_t;
+
+ #define dma_pool pci_pool
+ #define dma_pool_create(name, dev, size, align, alloc) \
+ pci_pool_create(name, dev, size, align, alloc, GFP_DMA | GFP_ATOMIC)
+ #define dma_pool_destroy(pool) pci_pool_destroy(pool)
+ #define dma_pool_alloc(pool, flags, handle) pci_pool_alloc(pool, flags, handle)
+ #define dma_pool_free(pool, vaddr, addr) pci_pool_free(pool, vaddr, addr)
+
+ #define dma_map_single(dev, addr, size, dir) pci_map_single(dev, addr, size, dir)
+ #define dma_unmap_single(dev, hnd, size, dir) pci_unmap_single(dev, hnd, size, dir)
+ #define DMA_FROM_DEVICE PCI_DMA_FROMDEVICE
+ #define DMA_TO_DEVICE PCI_DMA_TODEVICE
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+/* Availability of these functions varies (when present, they have two arguments) */
+#ifndef hc32_to_cpu
+ #define hc32_to_cpu(x) le32_to_cpu(x)
+ #define cpu_to_hc32(x) cpu_to_le32(x)
+ typedef unsigned int __hc32;
+#else
+ #error Two-argument functions needed
+#endif
+
+/* Private USB opcode base */
+#define EHCI_FASTPATH 0x31
+#define EHCI_SET_EP_BYPASS EHCI_FASTPATH
+#define EHCI_SET_BYPASS_CB (EHCI_FASTPATH + 1)
+#define EHCI_SET_BYPASS_DEV (EHCI_FASTPATH + 2)
+#define EHCI_DUMP_STATE (EHCI_FASTPATH + 3)
+#define EHCI_SET_BYPASS_POOL (EHCI_FASTPATH + 4)
+#define EHCI_CLR_EP_BYPASS (EHCI_FASTPATH + 5)
+
+/*
+ * EHCI QTD structure (hardware and extension)
+ * NOTE that it does not need to (and does not) match its kernel counterpart
+ */
+#define EHCI_QTD_NBUFFERS 5
+#define EHCI_QTD_ALIGN 32
+#define EHCI_BULK_PACKET_SIZE 512
+#define EHCI_QTD_XACTERR_MAX 32
+
+struct ehci_qtd {
+ /* Hardware map */
+ volatile uint32_t qtd_next;
+ volatile uint32_t qtd_altnext;
+ volatile uint32_t qtd_status;
+#define EHCI_QTD_GET_BYTES(x) (((x)>>16) & 0x7fff)
+#define EHCI_QTD_IOC 0x00008000
+#define EHCI_QTD_GET_CERR(x) (((x)>>10) & 0x3)
+#define EHCI_QTD_SET_CERR(x) ((x) << 10)
+#define EHCI_QTD_GET_PID(x) (((x)>>8) & 0x3)
+#define EHCI_QTD_SET_PID(x) ((x) << 8)
+#define EHCI_QTD_ACTIVE 0x80
+#define EHCI_QTD_HALTED 0x40
+#define EHCI_QTD_BUFERR 0x20
+#define EHCI_QTD_BABBLE 0x10
+#define EHCI_QTD_XACTERR 0x08
+#define EHCI_QTD_MISSEDMICRO 0x04
+ volatile uint32_t qtd_buffer[EHCI_QTD_NBUFFERS];
+ volatile uint32_t qtd_buffer_hi[EHCI_QTD_NBUFFERS];
+
+ /* Implementation extension */
+ dma_addr_t qtd_self; /**< own hardware address */
+ struct ehci_qtd *obj_next; /**< software link to the next QTD */
+ void *rpc; /**< pointer to the rpc buffer */
+ size_t length; /**< length of the data in the buffer */
+ void *buff; /**< pointer to the reassembly buffer */
+ int xacterrs; /**< retry counter for qtd xact error */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#define EHCI_NULL __constant_cpu_to_le32(1) /* HW null pointer shall be odd */
+
+#define SHORT_READ_Q(token) (EHCI_QTD_GET_BYTES(token) != 0 && EHCI_QTD_GET_PID(token) == 1)
+
+/**
+ * Queue Head
+ * NOTE: This structure is slightly different from the one in the kernel, but needs to stay
+ * compatible.
+ */
+struct ehci_qh {
+ /* Hardware map */
+ volatile uint32_t qh_link;
+ volatile uint32_t qh_endp;
+ volatile uint32_t qh_endphub;
+ volatile uint32_t qh_curqtd;
+
+ /* QTD overlay */
+ volatile uint32_t ow_next;
+ volatile uint32_t ow_altnext;
+ volatile uint32_t ow_status;
+ volatile uint32_t ow_buffer [EHCI_QTD_NBUFFERS];
+ volatile uint32_t ow_buffer_hi [EHCI_QTD_NBUFFERS];
+
+ /* Extension (should match the kernel layout) */
+ dma_addr_t unused0;
+ void *unused1;
+ struct list_head unused2;
+ struct ehci_qtd *dummy;
+ struct ehci_qh *unused3;
+
+ struct ehci_hcd *unused4;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct kref unused5;
+ unsigned unused6;
+
+ uint8_t unused7;
+
+ /* periodic schedule info */
+ uint8_t unused8;
+ uint8_t unused9;
+ uint8_t unused10;
+ uint16_t unused11;
+ uint16_t unused12;
+ uint16_t unused13;
+ struct usb_device *unused14;
+#else
+ unsigned unused5;
+
+ u8 unused6;
+
+ /* periodic schedule info */
+ u8 unused7;
+ u8 unused8;
+ u8 unused9;
+ unsigned short unused10;
+ unsigned short unused11;
+#define NO_FRAME ((unsigned short)~0)
+#ifdef EHCI_QUIRK_FIX
+ struct usb_device *unused12;
+#endif /* EHCI_QUIRK_FIX */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+ struct ehci_qtd *first_qtd;
+ /* Link to the first QTD; this is an optimized equivalent of the qtd_list field */
+ /* NOTE that ehci_qh in ehci.h shall reserve this word */
+} __attribute__ ((aligned(EHCI_QTD_ALIGN)));
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+/** The corresponding structure in the kernel is used to get the QH */
+struct hcd_dev { /* usb_device.hcpriv points to this */
+ struct list_head unused0;
+ struct list_head unused1;
+
+ /* array of QH pointers */
+ void *ep[32];
+};
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+int optimize_qtd_fill_with_rpc(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *rpc,
+ int token, int len);
+int optimize_qtd_fill_with_data(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd, void *data,
+ int token, int len);
+int optimize_submit_async(struct ehci_qtd *qtd, int epn);
+void inline optimize_ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma);
+struct ehci_qtd *optimize_ehci_qtd_alloc(gfp_t flags);
+void optimize_ehci_qtd_free(struct ehci_qtd *qtd);
+void optimize_submit_rx_request(const dbus_pub_t *pub, int epn, struct ehci_qtd *qtd_in, void *buf);
+#endif /* EHCI_FASTPATH_TX || EHCI_FASTPATH_RX */
+
+void dbus_flowctrl_tx(void *dbi, bool on);
+#endif /* __DBUS_H__ */
*
* Definitions subject to change without notice.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
*
* $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
*/
#define wlioctl_defs_h
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WL_RSPEC_BW_40MHZ 0x00020000
#define WL_RSPEC_BW_80MHZ 0x00030000
#define WL_RSPEC_BW_160MHZ 0x00040000
+#define WL_RSPEC_BW_10MHZ 0x00050000
+#define WL_RSPEC_BW_5MHZ 0x00060000
+#define WL_RSPEC_BW_2P5MHZ 0x00070000
/* Legacy defines for the nrate iovar */
#define OLD_NRATE_MCS_INUSE 0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
-#define GET_PRO_PRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+#define WLC_11N_N_PROP_MCS 6
+#define WLC_11N_FIRST_PROP_MCS 87
+#define WLC_11N_LAST_PROP_MCS 102
-#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
- : ((mcs) == 32 ? 1 : GET_PRO_PRIETARY_11N_MCS_NSS(mcs)))
#define MAX_CCA_CHANNELS 38 /* Max number of 20 MHz wide channels */
#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
#define WLC_TXFILTER_OVERRIDE_DISABLED 0
#define WLC_TXFILTER_OVERRIDE_ENABLED 1
-#define WL_IOCTL_ACTION_GET 0x0
-#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
-#define WL_IOCTL_ACTION_OVL_RSV 0x20
-#define WL_IOCTL_ACTION_OVL 0x40
-#define WL_IOCTL_ACTION_MASK 0x7e
-#define WL_IOCTL_ACTION_OVL_SHIFT 1
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
-#define WL_BSSTYPE_INFRA 1
+/* For WLC_SET_INFRA ioctl & infra_configuration iovar SET/GET operations */
#define WL_BSSTYPE_INDEP 0
-#define WL_BSSTYPE_ANY 2
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_ANY 2 /* deprecated */
+#define WL_BSSTYPE_MESH 3
/* Bitmask for scan_type */
#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */
#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */
#define WL_SCANFLAGS_SWTCHAN 0x20 /* Force channel switch for different bandwidth */
+#define WL_SCANFLAGS_FORCE_PARALLEL 0x40 /* Force parallel scan even when actcb_fn_t is on.
+ * by default parallel scan will be disabled if actcb_fn_t
+ * is provided.
+ */
/* wl_iscan_results status values */
#define WL_SCAN_RESULTS_SUCCESS 0
/* current gain setting is maintained */
#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
-#define PLC_CMD_FAILOVER 1
-#define PLC_CMD_MAC_COST 2
-#define PLC_CMD_LINK_COST 3
-#define PLC_CMD_NODE_LIST 4
-
-#define NODE_TYPE_UNKNOWN 0 /* Unknown link */
-#define NODE_TYPE_WIFI_ONLY 1 /* Pure Wireless STA node */
-#define NODE_TYPE_PLC_ONLY 2 /* Pure PLC only node */
-#define NODE_TYPE_WIFI_PLC 3 /* WiFi PLC capable node */
-
/* defines used by poweridx iovar - it controls power in a-band */
/* current gain setting is maintained */
#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
/* check this magic number */
#define WLC_IOCTL_MAGIC 0x14e46c77
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
/* bss_info_cap_t flags */
#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
#define CRYPTO_ALGO_AES_CCM 4
#define CRYPTO_ALGO_AES_OCB_MSDU 5
#define CRYPTO_ALGO_AES_OCB_MPDU 6
-#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+#if !defined(BCMEXTCCX)
#define CRYPTO_ALGO_NALG 7
#else
#define CRYPTO_ALGO_CKIP 7
#define CRYPTO_ALGO_CKIP_MMH 8
#define CRYPTO_ALGO_WEP_MMH 9
#define CRYPTO_ALGO_NALG 10
-#endif /* !BCMCCX && !BCMEXTCCX */
+#endif
#define CRYPTO_ALGO_SMS4 11
#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
+#if defined(BCMEXTCCX)
#define WL_CKIP_KP (1 << 4) /* CMIC */
#define WL_CKIP_MMH (1 << 5) /* CKIP */
#else
#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
-#endif /* BCMCCX || BCMEXTCCX */
+#endif
#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
/* wireless security bitvec */
#define TKIP_ENABLED 0x0002
#define AES_ENABLED 0x0004
#define WSEC_SWFLAG 0x0008
-#ifdef BCMCCX
-#define CKIP_KP_ENABLED 0x0010
-#define CKIP_MIC_ENABLED 0x0020
-#endif /* BCMCCX */
#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-#ifdef BCMWAPI_WPI
-#define SMS4_ENABLED 0x0100
-#endif /* BCMWAPI_WPI */
/* wsec macros for operating on the above definitions */
#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
-#ifdef BCMCCX
-#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED)
-#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED)
-#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
-
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) \
- ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \
- CKIP_MIC_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
-#define WSEC_ENABLED(wsec) \
- ((wsec) & \
- (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
-#endif /* BCMWAPI_WPI */
-#else /* defined BCMCCX */
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#endif /* BCMWAPI_WPI */
-#endif /* BCMCCX */
#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
-#ifdef BCMWAPI_WAI
-#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
-#endif /* BCMWAPI_WAI */
+/* The following macros are no longer used; they are kept only to
+ * avoid build issues in the BISON/CARIBOU branch
+ */
#define MFP_CAPABLE 0x0200
#define MFP_REQUIRED 0x0400
#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
+#if defined(BCMEXTCCX)
#define WPA_AUTH_CCKM 0x0008 /* CCKM */
#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
-#endif /* BCMCCX || BCMEXTCCX */
+#endif
/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
-#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
-#define WPA_AUTH_WAPI 0x0400
-#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
-#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
-#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
-#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
-#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */
-#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
-#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
+/* WPA2_AUTH_SHA256 is not used anymore; kept here only to avoid build issues in DINGO */
+#define WPA2_AUTH_SHA256 0x8000
#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
/* pmkid */
#define MAXPMKID 16
-#ifdef SROM12
-#define WLC_IOCTL_MAXLEN 10000 /* max length ioctl buffer required */
-#else
+/* SROM12 builds previously used a larger (10000-byte) buffer; a single size is used now */
#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
-#endif /* SROM12 */
+
#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
-#if defined(LCNCONF) || defined(LCN40CONF)
-#define WLC_SAMPLECOLLECT_MAXLEN 1024 /* Max Sample Collect buffer */
+#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
#else
#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
#endif
#define WLC_SET_LAZYWDS 139
#define WLC_GET_BANDLIST 140
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WLC_GET_BAND 141
#define WLC_SET_BAND 142
#define WLC_SCB_DEAUTHENTICATE 143
/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
#define WLC_GET_PROTECTION_CONTROL 178
#define WLC_SET_PROTECTION_CONTROL 179
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_GET_PHYLIST 180
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
#define WLC_DECRYPT_STATUS 182 /* ndis only */
#define WLC_GET_KEY_SEQ 183
/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
#define WLC_SET_WSEC_TEST 200
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WLC_TKIP_COUNTERMEASURES 202
#define WLC_GET_PIOMODE 203
#define WLC_SET_PIOMODE 204
#define WLC_START_CHANNEL_QA 214
#define WLC_GET_CHANNEL_SEL 215
#define WLC_START_CHANNEL_SEL 216
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_GET_VALID_CHANNELS 217
#define WLC_GET_FAKEFRAG 218
#define WLC_SET_FAKEFRAG 219
#define WLC_GET_KEY_PRIMARY 235
#define WLC_SET_KEY_PRIMARY 236
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
#define WLC_GET_ACI_ARGS 238
#define WLC_LEGACY_LINK_BEHAVIOR 259
#define WLC_GET_CHANNELS_IN_COUNTRY 260
#define WLC_GET_COUNTRY_LIST 261
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_GET_VAR 262 /* get value of named variable */
#define WLC_SET_VAR 263 /* set named variable to value */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WLC_NVRAM_GET 264 /* deprecated */
#define WLC_NVRAM_SET 265
#define WLC_NVRAM_DUMP 266
#define WLC_REBOOT 267
-#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_SET_WSEC_PMK 268
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WLC_GET_AUTH_MODE 269
#define WLC_SET_AUTH_MODE 270
#define WLC_GET_WAKEENTRY 271
#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
-#ifdef SROM12
+
+/* SROM12 */
#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
#define WL_CHAN_FREQ_RANGE_2G_40 6
#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
-#define WL_CHAN_FREQ_RANGE_5G_4BAND 17
#define WL_CHAN_FREQ_RANGE_5G_5BAND 18
#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19
#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20
-#else
-#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
-#endif /* SROM12 */
-/* MAC list modes */
+
#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
#define WLC_BW_40MHZ_BIT (1<<1)
#define WLC_BW_80MHZ_BIT (1<<2)
#define WLC_BW_160MHZ_BIT (1<<3)
+#define WLC_BW_10MHZ_BIT (1<<4)
+#define WLC_BW_5MHZ_BIT (1<<5)
+#define WLC_BW_2P5MHZ_BIT (1<<6)
/* Bandwidth capabilities */
#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_2P5MHZ (WLC_BW_2P5MHZ_BIT)
+#define WLC_BW_CAP_5MHZ (WLC_BW_5MHZ_BIT)
+#define WLC_BW_CAP_10MHZ (WLC_BW_10MHZ_BIT)
#define WLC_BW_CAP_UNRESTRICTED 0xFF
#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_2P5MHZ(bw_cap)(((bw_cap) & WLC_BW_2P5MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_5MHZ(bw_cap) (((bw_cap) & WLC_BW_5MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_10MHZ(bw_cap) (((bw_cap) & WLC_BW_10MHZ_BIT) ? TRUE : FALSE)
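/* Example use of the bw_cap bit tests above (a minimal sketch; bw_cap is an
 * arbitrary local variable, not something defined by this header):
 *
 *	uint8 bw_cap = WLC_BW_CAP_80MHZ;	// 20 + 40 + 80 MHz bits set
 *	if (WL_BW_CAP_80MHZ(bw_cap) && !WL_BW_CAP_160MHZ(bw_cap)) {
 *		// restrict chanspec selection to 80 MHz and below
 *	}
 */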
/* values to force tx/rx chain */
#define WLC_N_TXRX_CHAIN0 0
#define WL_OTA_ARG_PARSE_BLK_SIZE 1200
#define WL_OTA_TEST_MAX_NUM_RATE 30
#define WL_OTA_TEST_MAX_NUM_SEQ 100
+#define WL_OTA_TEST_MAX_NUM_RSSI 85
#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */
#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
* detection once
*/
+#define WL_RADAR_SIMULATED_SC 3 /* force radar detector to declare
+ * detection once on scan core
+ * if available and active
+ */
#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
-#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_HT_RX_MAX 4 /* max 4 receive antennas/cores */
#define WL_ANT_IDX_1 0 /* antenna index 1 */
#define WL_ANT_IDX_2 1 /* antenna index 2 */
#define WL_BW_80MHZ 2
#define WL_BW_160MHZ 3
#define WL_BW_8080MHZ 4
+#define WL_BW_2P5MHZ 5
+#define WL_BW_5MHZ 6
+#define WL_BW_10MHZ 7
/* tx_power_t.flags bits */
#define WL_TX_POWER_F_ENABLED 1
#define WL_TX_POWER_F_HT 0x10
#define WL_TX_POWER_F_VHT 0x20
#define WL_TX_POWER_F_OPENLOOP 0x40
+#define WL_TX_POWER_F_PROP11NRATES 0x80
/* Message levels */
#define WL_ERROR_VAL 0x00000001
#define WL_ASSOC_VAL 0x00000100
#define WL_PRUSR_VAL 0x00000200
#define WL_PS_VAL 0x00000400
-#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */
+#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
#define WL_PORT_VAL 0x00001000
#define WL_DUAL_VAL 0x00002000
#define WL_WSEC_VAL 0x00004000
#define WL_WSEC_DUMP_VAL 0x00008000
#define WL_LOG_VAL 0x00010000
-#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */
-#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */
+#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */
+#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
#define WL_REGULATORY_VAL 0x00080000
#define WL_TAF_VAL 0x00100000
-#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */
+#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
+#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */
#define WL_MPC_VAL 0x00400000
#define WL_APSTA_VAL 0x00800000
#define WL_DFS_VAL 0x01000000
-#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */
+#define WL_BA_VAL 0x00000000 /* retired in TOT on 6/14/2010 */
+#define WL_MUMIMO_VAL 0x02000000 /* Using retired WL_BA_VAL */
#define WL_ACI_VAL 0x04000000
#define WL_PRMAC_VAL 0x04000000
#define WL_MBSS_VAL 0x04000000
* wl_msg_level2 in wl_dbg.h
*/
#define WL_DPT_VAL 0x00000001
+/* re-using WL_DPT_VAL */
+#define WL_MESH_VAL 0x00000001
#define WL_SCAN_VAL 0x00000002
#define WL_WOWL_VAL 0x00000004
#define WL_COEX_VAL 0x00000008
#define WL_TXBF_VAL 0x00100000
#define WL_P2PO_VAL 0x00200000
#define WL_TBTT_VAL 0x00400000
+#define WL_FBT_VAL 0x00800000
#define WL_MQ_VAL 0x01000000
/* This level is currently used in Phoenix2 only */
#define WL_PWRSEL_VAL 0x10000000
#define WL_NET_DETECT_VAL 0x20000000
#define WL_PCIE_VAL 0x40000000
+#define WL_PMDUR_VAL 0x80000000
+
/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
* rather than a message-type of its own
#define WL_LED_NUMBEHAVIOR 25
/* led behavior numeric value format */
-#define WL_LED_BEH_MASK 0x7f /* behavior mask */
+#define WL_LED_BEH_MASK 0x3f /* behavior mask */
+#define WL_LED_PMU_OVERRIDE 0x40 /* need to set PMU Override bit for the GPIO */
#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */
/* number of bytes needed to define a proper bit mask for MAC event reporting */
#define WL_NUMCHANSPECS 110
#endif
-
/* WDS link local endpoint WPA role */
#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
#define WL_PKTENG_PER_MASK 0xff
#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
+#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */
#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */
#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_ULP_BAILOUT (1 << 8) /* wakeind via unknown pkt by basic ULP-offloads;
+ * WL_WOWL_ULP_BAILOUT shares the WL_WOWL_PME_GPIO bit and is used only for
+ * dongle builds, not the WLC_HIGH_ONLY case
+ */
#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
#define WL_WNM_NOTIF 0x00000100
#define WL_WNM_MAX 0x00000200
+#ifdef WLWNM_BRCM
+#define BRCM_WNM_FEATURE_SET\
+ (WL_WNM_PROXYARP | \
+ WL_WNM_SLEEP | \
+ WL_WNM_FMS | \
+ WL_WNM_TFS | \
+ WL_WNM_TIMBC | \
+ WL_WNM_BSSTRANS | \
+ WL_WNM_DMS | \
+ WL_WNM_NOTIF | \
+ 0)
+#endif /* WLWNM_BRCM */
+
#ifndef ETHER_MAX_DATA
#define ETHER_MAX_DATA 1500
#endif /* ETHER_MAX_DATA */
#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
-#ifdef BCMCCX
-/* "wlan_reason" iovar interface */
-#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */
-#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */
-#endif /* BCMCCX */
/* Software feature flag defines used by wlfeatureflag */
#ifdef WLAFTERBURNER
#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2
#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+#define WAKE_EVENT_NET_PACKET_BIT 0x10
#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */
#define NET_DETECT_MAX_CHANNELS 50
#endif /* NET_DETECT */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
#define WL_RADIO_MPC_DISABLE (1<<2)
#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio disable per core for DVT */
#define WL_SPURAVOID_OFF 0
#define WL_SPURAVOID_ON1 1
#define WLC_PHY_TYPE_LCN 8
#define WLC_PHY_TYPE_LCN40 10
#define WLC_PHY_TYPE_AC 11
+#define WLC_PHY_TYPE_LCN20 12
#define WLC_PHY_TYPE_NULL 0xf
/* Values for PM */
*
* Definitions subject to change without notice.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhdioctl.h 438755 2013-11-22 23:20:40Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhdioctl.h 585723 2015-09-11 06:26:37Z $
*/
#ifndef _dhdioctl_h_
#define DHD_GLOM_VAL 0x0400
#define DHD_EVENT_VAL 0x0800
#define DHD_BTA_VAL 0x1000
-#if 0 && (NDISVER >= 0x0630) && 1
-#define DHD_SCAN_VAL 0x2000
-#else
#define DHD_ISCAN_VAL 0x2000
-#endif
#define DHD_ARPOE_VAL 0x4000
#define DHD_REORDER_VAL 0x8000
#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */
#define DHD_PNO_VAL 0x80000
+#define DHD_MSGTRACE_VAL 0x100000
+#define DHD_FWLOG_VAL 0x400000
+#define DHD_RTT_VAL 0x200000
+#define DHD_IOV_INFO_VAL 0x800000
#define DHD_ANDROID_VAL 0x10000
#define DHD_IW_VAL 0x20000
#define DHD_CFG_VAL 0x40000
/*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
*
* $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $
*
#define EPI_MAJOR_VERSION 1
-#define EPI_MINOR_VERSION 201
+#define EPI_MINOR_VERSION 363
#define EPI_RC_NUMBER 59
-#define EPI_INCREMENTAL_NUMBER 0
+#define EPI_INCREMENTAL_NUMBER 144
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 1, 201, 59, 0
+#define EPI_VERSION 1, 363, 59, 144
-#define EPI_VERSION_NUM 0x01c93b00
+#define EPI_VERSION_NUM 0x0116b3b9
-#define EPI_VERSION_DEV 1.201.59
+#define EPI_VERSION_DEV 1.363.59
/* Driver Version String, ASCII, 32 chars max */
-#define EPI_VERSION_STR "1.201.59.5 (r506368)"
+#define EPI_VERSION_STR "1.363.59.144.1 (r)"
#endif /* _epivers_h_ */
/*
* EVENT_LOG system definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: event_log.h 241182 2011-02-17 21:50:03Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log.h 591285 2015-10-07 11:56:29Z $
*/
#ifndef _EVENT_LOG_H_
#define _EVENT_LOG_H_
#include <typedefs.h>
-
-/* Set a maximum number of sets here. It is not dynamic for
- * efficiency of the EVENT_LOG calls.
- */
-#define NUM_EVENT_LOG_SETS 4
-#define EVENT_LOG_SET_BUS 0
-#define EVENT_LOG_SET_WL 1
-#define EVENT_LOG_SET_PSM 2
-#define EVENT_LOG_SET_DBG 3
-
-/* Define new event log tags here */
-#define EVENT_LOG_TAG_NULL 0 /* Special null tag */
-#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */
-#define EVENT_LOG_TAG_BUS_OOB 2
-#define EVENT_LOG_TAG_BUS_STATE 3
-#define EVENT_LOG_TAG_BUS_PROTO 4
-#define EVENT_LOG_TAG_BUS_CTL 5
-#define EVENT_LOG_TAG_BUS_EVENT 6
-#define EVENT_LOG_TAG_BUS_PKT 7
-#define EVENT_LOG_TAG_BUS_FRAME 8
-#define EVENT_LOG_TAG_BUS_DESC 9
-#define EVENT_LOG_TAG_BUS_SETUP 10
-#define EVENT_LOG_TAG_BUS_MISC 11
-#define EVENT_LOG_TAG_SRSCAN 22
-#define EVENT_LOG_TAG_PWRSTATS_INFO 23
-#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
-#define EVENT_LOG_TAG_UCODE_FIFO 27
-#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28
-#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29
-#define EVENT_LOG_TAG_SCAN_ERROR 30
-#define EVENT_LOG_TAG_SCAN_WARN 31
-#define EVENT_LOG_TAG_MPF_ERR 32
-#define EVENT_LOG_TAG_MPF_WARN 33
-#define EVENT_LOG_TAG_MPF_INFO 34
-#define EVENT_LOG_TAG_MPF_DEBUG 35
-#define EVENT_LOG_TAG_EVENT_INFO 36
-#define EVENT_LOG_TAG_EVENT_ERR 37
-#define EVENT_LOG_TAG_PWRSTATS_ERROR 38
-#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39
-#define EVENT_LOG_TAG_IOCTL_LOG 40
-#define EVENT_LOG_TAG_PFN_ERR 41
-#define EVENT_LOG_TAG_PFN_WARN 42
-#define EVENT_LOG_TAG_PFN_INFO 43
-#define EVENT_LOG_TAG_PFN_DEBUG 44
-#define EVENT_LOG_TAG_BEACON_LOG 45
-#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
-#define EVENT_LOG_TAG_TRACE_CHANSW 47
-#define EVENT_LOG_TAG_PCI_ERROR 48
-#define EVENT_LOG_TAG_PCI_TRACE 49
-#define EVENT_LOG_TAG_PCI_WARN 50
-#define EVENT_LOG_TAG_PCI_INFO 51
-#define EVENT_LOG_TAG_PCI_DBG 52
-#define EVENT_LOG_TAG_PCI_DATA 53
-#define EVENT_LOG_TAG_PCI_RING 54
-#define EVENT_LOG_TAG_MAX 55 /* Set to the same value of last tag, not last tag + 1 */
-/* Note: New event should be added/reserved in trunk before adding it to branches */
-
-/* Flags for tag control */
-#define EVENT_LOG_TAG_FLAG_NONE 0
-#define EVENT_LOG_TAG_FLAG_LOG 0x80
-#define EVENT_LOG_TAG_FLAG_PRINT 0x40
-#define EVENT_LOG_TAG_FLAG_MASK 0x3f
+#include <proto/event_log_set.h>
+#include <proto/event_log_tag.h>
/* logstrs header */
#define LOGSTRS_MAGIC 0x4C4F4753
/* We make sure that the block size will fit in a single packet
 * (allowing for a bit of overhead on each packet)
*/
-#define EVENT_LOG_MAX_BLOCK_SIZE 1400
-#define EVENT_LOG_PSM_BLOCK 0x200
-#define EVENT_LOG_BUS_BLOCK 0x200
-#define EVENT_LOG_DBG_BLOCK 0x100
+#define EVENT_LOG_MAX_BLOCK_SIZE 1400
+#define EVENT_LOG_WL_BLOCK_SIZE 0x200
+#define EVENT_LOG_PSM_BLOCK_SIZE 0x200
+#define EVENT_LOG_BUS_BLOCK_SIZE 0x200
+#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200
/*
* There are multiple levels of objects define here:
#define _EL_TOP_PTR struct event_log_top *
#endif /* EVENT_LOG_DUMPER */
-/* Each event log entry has a type. The type is the LAST word of the
- * event log. The printing code walks the event entries in reverse
- * order to find the first entry.
- */
-typedef union event_log_hdr {
- struct {
- uint8 tag; /* Event_log entry tag */
- uint8 count; /* Count of 4-byte entries */
- uint16 fmt_num; /* Format number */
- };
- uint32 t; /* Type cheat */
-} event_log_hdr_t;
-
/* Event log sets (a logical circular buffer) consist of one or more
* event_log_blocks. The blocks themselves form a logical circular
* list. The log entries are placed in each event_log_block until it
uint32 log_magic; /* MAGIC number for verification 'LOGS' */
} logstr_header_t;
+/*
+ * Use the following macros for generating log events.
+ *
+ * The FAST versions check whether the tag is enabled before evaluating the arguments and calling
+ * the event_log function; this adds 5 instructions. The COMPACT versions evaluate the arguments
+ * and call the event_log function unconditionally; the event_log function then skips logging
+ * if the tag is disabled.
+ *
+ * To support easy usage of existing debugging (e.g. msglevel) via macro re-definition there are
+ * two variants of these macros to help.
+ *
+ * First there are the CAST versions. The event_log function normally logs uint32 values or else
+ * they have to be cast to uint32. The CAST versions blindly cast for you so you don't have to edit
+ * any existing code.
+ *
+ * Second there are the PAREN_ARGS versions. These expect the logging format string and arguments
+ * to be enclosed in parentheses. This allows us to make the following mapping of an existing
+ * msglevel macro:
+ * #define WL_ERROR(args) EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
+ *
+ * The versions of the macros without FAST or COMPACT in their name are just synonyms for the
+ * COMPACT versions.
+ *
+ * You should use the COMPACT macro (or its synonym) in cases where there is some preceding logic
+ * that prevents the execution of the macro, e.g. WL_ERROR by definition rarely gets executed.
+ * Use the FAST macro in performance sensitive paths. The key concept here is that you should be
+ * assuming that your macro usage is compiled into ROM and can't be changed ... so choose wisely.
+ *
+ */
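/* Example of hooking an existing msglevel-style macro to the event log, and of
 * direct logging in a hot path (a minimal sketch; EVENT_LOG_TAG_WL_ERROR is the
 * tag used in the mapping example above and the format arguments are arbitrary):
 *
 *	#define WL_ERROR(args)	EVENT_LOG_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_ERROR, args)
 *	WL_ERROR(("wl%d: bad chanspec 0x%x\n", unit, chanspec));
 *
 *	// performance-sensitive path: check the tag before evaluating arguments
 *	EVENT_LOG_FAST_CAST(EVENT_LOG_TAG_WL_ERROR, "rxstatus 0x%x", rxs);
 */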
#ifndef EVENT_LOG_DUMPER
/* Null define if no tracing */
#define EVENT_LOG(format, ...)
+#define EVENT_LOG_FAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT(tag, fmt, ...)
+
+#define EVENT_LOG_CAST(tag, fmt, ...)
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...)
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...)
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs)
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs)
+
+#define EVENT_LOG_IS_LOG_ON(tag) 0
#else /* EVENT_LOG_COMPILE */
#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
+/* Casting low level macros */
+#define _EVENT_LOG_CAST0(tag, fmt_num) \
+ event_log0(tag, fmt_num)
+#define _EVENT_LOG_CAST1(tag, fmt_num, t1) \
+ event_log1(tag, fmt_num, (uint32)(t1))
+#define _EVENT_LOG_CAST2(tag, fmt_num, t1, t2) \
+ event_log2(tag, fmt_num, (uint32)(t1), (uint32)(t2))
+#define _EVENT_LOG_CAST3(tag, fmt_num, t1, t2, t3) \
+ event_log3(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3))
+#define _EVENT_LOG_CAST4(tag, fmt_num, t1, t2, t3, t4) \
+ event_log4(tag, fmt_num, (uint32)(t1), (uint32)(t2), (uint32)(t3), (uint32)(t4))
+
+/* The rest call the generic routine that takes a count */
+#define _EVENT_LOG_CAST5(tag, fmt_num, ...) _EVENT_LOG5(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST6(tag, fmt_num, ...) _EVENT_LOG6(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST7(tag, fmt_num, ...) _EVENT_LOG7(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST8(tag, fmt_num, ...) _EVENT_LOG8(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CAST9(tag, fmt_num, ...) _EVENT_LOG9(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTA(tag, fmt_num, ...) _EVENT_LOGA(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTB(tag, fmt_num, ...) _EVENT_LOGB(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTC(tag, fmt_num, ...) _EVENT_LOGC(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTD(tag, fmt_num, ...) _EVENT_LOGD(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTE(tag, fmt_num, ...) _EVENT_LOGE(tag, fmt_num, __VA_ARGS__)
+#define _EVENT_LOG_CASTF(tag, fmt_num, ...) _EVENT_LOGF(tag, fmt_num, __VA_ARGS__)
+
/* Hack to make the proper routine call when variadic macros get
* passed. Note the max of 15 arguments. More than that can't be
* handled by the event_log entries anyways so best to catch it at compile
#define _EVENT_LOG_VA_NUM_ARGS(F, _1, _2, _3, _4, _5, _6, _7, _8, _9, \
_A, _B, _C, _D, _E, _F, N, ...) F ## N
-#define _EVENT_LOG(tag, fmt, ...) \
+/* cast = _EVENT_LOG for no casting.
+ * cast = _EVENT_LOG_CAST for casting of fmt arguments to uint32.
+ * Only the first 4 arguments are cast to uint32; event_logn() is called
+ * if more than 4 arguments are present, and it internally assumes
+ * all arguments are uint32.
+ */
+#define _EVENT_LOG(cast, tag, fmt, ...) \
static char logstr[] __attribute__ ((section(".logstrs"))) = fmt; \
static uint32 fmtnum __attribute__ ((section(".lognums"))) = (uint32) &logstr; \
- _EVENT_LOG_VA_NUM_ARGS(_EVENT_LOG, ##__VA_ARGS__, \
+ _EVENT_LOG_VA_NUM_ARGS(cast, ##__VA_ARGS__, \
F, E, D, C, B, A, 9, 8, \
7, 6, 5, 4, 3, 2, 1, 0) \
- (tag, (int) &fmtnum , ## __VA_ARGS__); \
+ (tag, (int) &fmtnum , ## __VA_ARGS__)
#define EVENT_LOG_FAST(tag, fmt, ...) \
- if (event_log_tag_sets != NULL) { \
- uint8 tag_flag = *(event_log_tag_sets + tag); \
- if (tag_flag != 0) { \
- _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if (tag_flag != 0) { \
+ _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \
+ } \
} \
- }
+ } while (0)
#define EVENT_LOG_COMPACT(tag, fmt, ...) \
- if (1) { \
- _EVENT_LOG(tag, fmt , ## __VA_ARGS__); \
- }
+ do { \
+ _EVENT_LOG(_EVENT_LOG, tag, fmt , ## __VA_ARGS__); \
+ } while (0)
+
+/* Event log macro with casting to uint32 of arguments */
+#define EVENT_LOG_FAST_CAST(tag, fmt, ...) \
+ do { \
+ if (event_log_tag_sets != NULL) { \
+ uint8 tag_flag = *(event_log_tag_sets + tag); \
+ if (tag_flag != 0) { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \
+ } \
+ } \
+ } while (0)
+
+#define EVENT_LOG_COMPACT_CAST(tag, fmt, ...) \
+ do { \
+ _EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \
+ } while (0)
+
#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__)
+#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__)
+
+#define _EVENT_LOG_REMOVE_PAREN(...) __VA_ARGS__
+#define EVENT_LOG_REMOVE_PAREN(args) _EVENT_LOG_REMOVE_PAREN args
+
+#define EVENT_LOG_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_FAST_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_FAST_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+#define EVENT_LOG_COMPACT_CAST_PAREN_ARGS(tag, pargs) \
+ EVENT_LOG_COMPACT_CAST(tag, EVENT_LOG_REMOVE_PAREN(pargs))
+
+
#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
#define EVENT_DUMP event_log_buffer
extern void event_log4(int tag, int fmtNum, uint32 t1, uint32 t2, uint32 t3, uint32 t4);
extern void event_logn(int num_args, int tag, int fmtNum, ...);
-extern void event_log_time_sync(void);
+extern void event_log_time_sync(uint32 ms);
extern void event_log_buffer(int tag, uint8 *buf, int size);
#endif /* EVENT_LOG_DUMPER */
/*
* HND arm trap handling.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: hnd_armtrap.h 470663 2014-04-16 00:24:43Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_armtrap.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _hnd_armtrap_h_
#define TR_PC TR_REG(15)
#define TRAP_T_SIZE 80
+#define ASSERT_TRAP_SVC_NUMBER 255
#ifndef _LANGUAGE_ASSEMBLY
/*
* Console support for RTE - for host use only.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: hnd_cons.h 473343 2014-04-29 01:45:22Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_cons.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _hnd_cons_h_
#define _hnd_cons_h_
/*
* HND generic packet pool operation primitives
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktpool.h 591285 2015-10-07 11:56:29Z $
*/
#ifndef _hnd_pktpool_h_
#define _hnd_pktpool_h_
+#include <osl_ext.h>
+
#ifdef __cplusplus
extern "C" {
#endif
+/* mutex macros for thread safety */
+#ifdef HND_PKTPOOL_THREAD_SAFE
+#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTPOOL_MUTEX_DECL(mutex)
+#endif
+
#ifdef BCMPKTPOOL
#define POOL_ENAB(pool) ((pool) && (pool)->inited)
-#define SHARED_POOL (pktpool_shared)
#else /* BCMPKTPOOL */
#define POOL_ENAB(bus) 0
-#define SHARED_POOL ((struct pktpool *)NULL)
#endif /* BCMPKTPOOL */
-#ifdef BCMFRAGPOOL
-#define SHARED_FRAG_POOL (pktpool_shared_lfrag)
-#endif
-#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag)
-
-
#ifndef PKTPOOL_LEN_MAX
#define PKTPOOL_LEN_MAX 40
#endif /* PKTPOOL_LEN_MAX */
#define PKTPOOL_CB_MAX 3
+#define PKTPOOL_CB_MAX_AVL 4
+
/* forward declaration */
struct pktpool;
pktpool_cb_t cb;
void *arg;
} pktpool_cbinfo_t;
-/* call back fn extension to populate host address in pool pkt */
+
+/** PCIe SPLITRX related: callback fn extension to populate host address in pool pkt */
typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
typedef struct {
pktpool_cb_extn_t cb;
#endif /* BCMDBG_POOL */
typedef struct pktpool {
- bool inited; /* pktpool_init was successful */
- uint8 type; /* type of lbuf: basic, frag, etc */
- uint8 id; /* pktpool ID: index in registry */
- bool istx; /* direction: transmit or receive data path */
-
- void * freelist; /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
- uint16 avail; /* number of packets in pool's free list */
- uint16 len; /* number of packets managed by pool */
- uint16 maxlen; /* maximum size of pool <= PKTPOOL_LEN_MAX */
- uint16 plen; /* size of pkt buffer, excluding lbuf|lbuf_frag */
+ bool inited; /**< pktpool_init was successful */
+ uint8 type; /**< type of lbuf: basic, frag, etc */
+ uint8 id; /**< pktpool ID: index in registry */
+ bool istx; /**< direction: transmit or receive data path */
+ HND_PKTPOOL_MUTEX_DECL(mutex) /**< thread-safe mutex */
+
+ void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+ uint16 avail; /**< number of packets in pool's free list */
+ uint16 len; /**< number of packets managed by pool */
+ uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */
+ uint16 plen; /**< size of pkt buffer, excluding lbuf|lbuf_frag */
bool empty;
uint8 cbtoggle;
uint8 cbcnt;
uint8 ecbcnt;
- bool emptycb_disable;
+ uint8 emptycb_disable; /**< Value of type enum pktpool_empty_cb_state */
pktpool_cbinfo_t *availcb_excl;
- pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX_AVL];
pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
- pktpool_cbextn_info_t cbext;
+ pktpool_cbextn_info_t cbext; /**< PCIe SPLITRX related */
pktpool_cbextn_info_t rxcplidfn;
#ifdef BCMDBG_POOL
uint8 dbg_cbcnt;
pktpool_cbinfo_t dmarxfill;
} pktpool_t;
-extern pktpool_t *pktpool_shared;
-#ifdef BCMFRAGPOOL
-extern pktpool_t *pktpool_shared_lfrag;
-#endif
-extern pktpool_t *pktpool_shared_rxlfrag;
+
+pktpool_t *get_pktpools_registry(int id);
/* Incarnate a pktpool registry. On success returns total_pools. */
extern int pktpool_attach(osl_t *osh, uint32 total_pools);
#define PKTPOOL_MAXIMUM_ID (15)
/* Registry of pktpool(s) */
-extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1];
-
/* Pool ID to/from Pool Pointer converters */
-#define PKTPOOL_ID2PTR(id) (pktpools_registry[id])
+#define PKTPOOL_ID2PTR(id) (get_pktpools_registry(id))
#define PKTPOOL_PTR2ID(pp) (POOLID(pp))
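/* Example round-trip through the registry accessor (a minimal sketch; pool_id
 * is an arbitrary local variable and the ASSERT is illustrative only):
 *
 *	pktpool_t *pktp = PKTPOOL_ID2PTR(pool_id);	// get_pktpools_registry(pool_id)
 *	ASSERT(pktp == NULL || PKTPOOL_PTR2ID(pktp) == pool_id);
 */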
-
#ifdef BCMDBG_POOL
extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
#endif /* BCMDBG_POOL */
+#ifdef BCMPKTPOOL
+#define SHARED_POOL (pktpool_shared)
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL (pktpool_shared_lfrag)
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+
+/** PCIe SPLITRX related */
+#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag)
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+void hnd_pktpool_init(osl_t *osh);
+void hnd_pktpool_fill(pktpool_t *pktpool, bool minimal);
+void hnd_pktpool_refill(bool minimal);
+#else /* BCMPKTPOOL */
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
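/* Example guard before drawing from the shared pool (a minimal sketch; whether
 * a caller should inspect pktpool_t::avail directly is a design choice, shown
 * here only to illustrate POOL_ENAB() and SHARED_POOL):
 *
 *	if (POOL_ENAB(SHARED_POOL) && SHARED_POOL->avail > 0) {
 *		// draw packets from the shared pool
 *	}
 */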
+
#ifdef __cplusplus
}
#endif
/*
* HND generic pktq operation primitives
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hnd_pktq.h 591283 2015-10-07 11:52:00Z $
*/
#ifndef _hnd_pktq_h_
#define _hnd_pktq_h_
+#include <osl_ext.h>
+
#ifdef __cplusplus
extern "C" {
#endif
+/* mutex macros for thread safety */
+#ifdef HND_PKTQ_THREAD_SAFE
+#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
+#else
+#define HND_PKTQ_MUTEX_DECL(mutex)
+#endif
+
/* osl multi-precedence packet queue */
#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */
#ifndef PKTQ_LEN_DEFAULT
#endif
typedef struct pktq_prec {
- void *head; /* first packet to dequeue */
- void *tail; /* last packet to dequeue */
- uint16 len; /* number of queued packets */
- uint16 max; /* maximum number of queued packets */
+ void *head; /**< first packet to dequeue */
+ void *tail; /**< last packet to dequeue */
+ uint16 len; /**< number of queued packets */
+ uint16 max; /**< maximum number of queued packets */
} pktq_prec_t;
#ifdef PKTQ_LOG
typedef struct {
- uint32 requested; /* packets requested to be stored */
- uint32 stored; /* packets stored */
- uint32 saved; /* packets saved,
+ uint32 requested; /**< packets requested to be stored */
+ uint32 stored; /**< packets stored */
+ uint32 saved; /**< packets saved,
because a lowest priority queue has given away one packet
*/
- uint32 selfsaved; /* packets saved,
+ uint32 selfsaved; /**< packets saved,
because an older packet from the same queue has been dropped
*/
- uint32 full_dropped; /* packets dropped,
+ uint32 full_dropped; /**< packets dropped,
because pktq is full with higher precedence packets
*/
- uint32 dropped; /* packets dropped because pktq per that precedence is full */
- uint32 sacrificed; /* packets dropped,
+ uint32 dropped; /**< packets dropped because pktq per that precedence is full */
+ uint32 sacrificed; /**< packets dropped,
in order to save one from a queue of a highest priority
*/
- uint32 busy; /* packets droped because of hardware/transmission error */
- uint32 retry; /* packets re-sent because they were not received */
- uint32 ps_retry; /* packets retried again prior to moving power save mode */
- uint32 suppress; /* packets which were suppressed and not transmitted */
- uint32 retry_drop; /* packets finally dropped after retry limit */
- uint32 max_avail; /* the high-water mark of the queue capacity for packets -
+ uint32 busy; /**< packets dropped because of hardware/transmission error */
+ uint32 retry; /**< packets re-sent because they were not received */
+ uint32 ps_retry; /**< packets retried again prior to moving power save mode */
+ uint32 suppress; /**< packets which were suppressed and not transmitted */
+ uint32 retry_drop; /**< packets finally dropped after retry limit */
+ uint32 max_avail; /**< the high-water mark of the queue capacity for packets -
goes to zero as queue fills
*/
- uint32 max_used; /* the high-water mark of the queue utilisation for packets -
+ uint32 max_used; /**< the high-water mark of the queue utilisation for packets -
increases with use ('inverse' of max_avail)
*/
- uint32 queue_capacity; /* the maximum capacity of the queue */
- uint32 rtsfail; /* count of rts attempts that failed to receive cts */
- uint32 acked; /* count of packets sent (acked) successfully */
- uint32 txrate_succ; /* running total of phy rate of packets sent successfully */
- uint32 txrate_main; /* running totoal of primary phy rate of all packets */
- uint32 throughput; /* actual data transferred successfully */
- uint32 airtime; /* cumulative total medium access delay in useconds */
- uint32 _logtime; /* timestamp of last counter clear */
+ uint32 queue_capacity; /**< the maximum capacity of the queue */
+ uint32 rtsfail; /**< count of rts attempts that failed to receive cts */
+ uint32 acked; /**< count of packets sent (acked) successfully */
+ uint32 txrate_succ; /**< running total of phy rate of packets sent successfully */
+ uint32 txrate_main; /**< running total of primary phy rate of all packets */
+ uint32 throughput; /**< actual data transferred successfully */
+ uint32 airtime; /**< cumulative total medium access delay in useconds */
+ uint32 _logtime; /**< timestamp of last counter clear */
} pktq_counters_t;
typedef struct {
uint32 _prec_log;
- pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /* Counters per queue */
+ pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /**< Counters per queue */
} pktq_log_t;
#endif /* PKTQ_LOG */
#define PKTQ_COMMON \
- uint16 num_prec; /* number of precedences in use */ \
- uint16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */ \
- uint16 max; /* total max packets */ \
- uint16 len; /* total number of packets */
+ uint16 num_prec; /**< number of precedences in use */ \
+ uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \
+ uint16 max; /**< total max packets */ \
+ uint16 len; /**< total number of packets */
/* multi-priority pkt queue */
struct pktq {
PKTQ_COMMON
/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
struct pktq_prec q[PKTQ_MAX_PREC];
+ HND_PKTQ_MUTEX_DECL(mutex)
#ifdef PKTQ_LOG
pktq_log_t* pktqlog;
#endif
PKTQ_COMMON
/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
struct pktq_prec q[1];
+ HND_PKTQ_MUTEX_DECL(mutex)
};
#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
#define pktq_plen(pq, prec) ((pq)->q[prec].len)
-#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
-#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
-
#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktq_pavail(struct pktq *pq, int prec);
+extern bool pktq_pfull(struct pktq *pq, int prec);
+#else
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#endif /* HND_PKTQ_THREAD_SAFE */
extern void pktq_append(struct pktq *pq, int prec, struct spktq *list);
extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
#define pktq_len(pq) ((int)(pq)->len)
#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+#ifdef HND_PKTQ_THREAD_SAFE
+extern int pktq_avail(struct pktq *pq);
+extern bool pktq_full(struct pktq *pq);
+#else
#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
#define pktq_full(pq) ((pq)->len >= (pq)->max)
-#define pktq_empty(pq) ((pq)->len == 0)
+#endif /* HND_PKTQ_THREAD_SAFE */
/* operations for single precedence queues */
#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
#define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0)
#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
+#define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq)
+#define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq)
+#define pktqfull(pq) pktq_full((struct pktq *)(void *)pq)
+
+extern bool pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern bool pktq_deinit(struct pktq *pq);
-extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
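/* pktq_init() now returns a bool, e.g. so that mutex setup under
 * HND_PKTQ_THREAD_SAFE can be reported as failed. A minimal sketch of the
 * updated call pattern (the queue variable and BCME_ERROR return are
 * assumptions, not part of this header):
 *
 *	struct pktq txq;
 *	if (!pktq_init(&txq, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT))
 *		return BCME_ERROR;
 *	...
 *	pktq_deinit(&txq);
 */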
extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
/*
* HND SiliconBackplane PMU support.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: hndpmu.h 471127 2014-04-17 23:24:23Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndpmu.h 530150 2015-01-29 08:43:40Z $
*/
#ifndef _hndpmu_h_
extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
+extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag);
#endif /* _hndpmu_h_ */
/*
* Broadcom HND chip & on-chip-interconnect-related definitions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: hndsoc.h 473238 2014-04-28 19:14:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: hndsoc.h 517544 2014-11-26 00:40:42Z $
*/
#ifndef _HNDSOC_H
#define SI_MAXCORES 32 /* NorthStar has more cores */
#endif /* SI_MAXCORES */
+#define SI_MAXBR 4 /* Max bridges (this is arbitrary, for software
+ * convenience and could be changed if we
+ * make any larger chips)
+ */
+
#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
#define SI_FASTRAM_SWAPPED 0x19800000
#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */
#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
+#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */
+#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */
#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), high 32 bits
*/
+
+#define SI_BCM53573_NANDFLASH 0x30000000 /* 53573 NAND flash base */
+#define SI_BCM53573_NORFLASH 0x1c000000 /* 53573 NOR flash base */
+
+#define SI_BCM53573_NORFLASH_WINDOW 0x01000000 /* only support 16M direct access for
+ * 3-byte address modes in spi flash
+ */
+#define SI_BCM53573_BOOTDEV_MASK 0x3
+#define SI_BCM53573_BOOTDEV_NOR 0x0
+
+#define SI_BCM53573_DDRTYPE_MASK 0x10
+#define SI_BCM53573_DDRTYPE_DDR3 0x10
+
+/* APB bridge code */
+#define APB_BRIDGE_ID 0x135 /* APB Bridge 0, 1, etc. */
+
/* core codes */
#define NODEV_CORE_ID 0x700 /* Invalid coreid */
#define CC_CORE_ID 0x800 /* chipcommon core */
#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
#define GCI_CORE_ID 0x840 /* GCI Core */
#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */
+#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */
+#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */
+#define SYSMEM_CORE_ID 0x849 /* System memory core */
#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
#define EROM_CORE_ID 0x366 /* EROM core ID */
#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */
#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */
-#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4 fast clock request */
+#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */
#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */
#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
#define CCS_ERSRC_REQ_SHIFT 8
/*
* Linux OS Independent Layer
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: linux_osl.h 503131 2014-09-17 12:16:08Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linux_osl.h 601764 2015-11-24 03:47:41Z $
*/
#ifndef _linux_osl_h_
/* bcm_prefetch_32B */
static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
{
-#if defined(BCM47XX_CA9) && (__LINUX_ARM_ARCH__ >= 5)
- switch (cachelines_32B) {
- case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
- case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
- case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
- case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc");
- }
-#endif
}
/* microsecond delay */
extern uint osl_pcie_bus(osl_t *osh);
extern struct pci_dev *osl_pci_device(osl_t *osh);
+#define OSL_ACP_COHERENCE (1<<1L)
/* Pkttag flag should be part of public information */
typedef struct {
bool pkttag;
- bool mmbus; /* Bus supports memory-mapped register accesses */
- pktfree_cb_fn_t tx_fn; /* Callback function for PKTFREE */
- void *tx_ctx; /* Context to the callback function */
+ bool mmbus; /**< Bus supports memory-mapped register accesses */
+ pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */
+ void *tx_ctx; /**< Context to the callback function */
void *unused[3];
} osl_pubinfo_t;
#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
-#if defined(BCMPCIE)
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
-#define DMA_ALLOC_CONSISTENT_STATIC(osh, size, align, tot, pap, dmah, idx) \
- osl_dma_alloc_consistent_static((osh), (size), (align), (tot), (pap), (idx))
-#define DMA_FREE_CONSISTENT_STATIC(osh, va, size, pa, dmah, idx) \
- osl_dma_free_consistent_static((osh), (void*)(va), (size), (pa), (idx))
-
-extern void *osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align,
- uint *tot, dmaaddr_t *pap, uint16 idx);
-extern void osl_dma_free_consistent_static(osl_t *osh, void *va, uint size, dmaaddr_t pa,
- uint16 idx);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
-#endif /* BCMPCIE */
-
extern uint osl_dma_consistent_align(void);
extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
uint *tot, dmaaddr_t *pap);
osl_dma_unmap((osh), (pa), (size), (direction))
extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
hnddma_seg_map_t *txp_dmah);
-extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
/* API for DMA addressing capability */
#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
-#if (defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__))
+#define OSL_SMP_WMB() smp_wmb()
+
+/* API for CPU relax */
+extern void osl_cpu_relax(void);
+#define OSL_CPU_RELAX() osl_cpu_relax()
+
+#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \
+ (defined(STBLINUX) && defined(__ARM_ARCH_7A__)) || (defined(CONFIG_ARCH_MSM8996) || \
+ defined(CONFIG_SOC_EXYNOS8890))
extern void osl_cache_flush(void *va, uint size);
extern void osl_cache_inv(void *va, uint size);
extern void osl_prefetch(const void *ptr);
#ifdef __ARM_ARCH_7A__
extern int osl_arch_is_coherent(void);
#define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent()
+ extern int osl_acp_war_enab(void);
+ #define OSL_ACP_WAR_ENAB() osl_acp_war_enab()
#else
#define OSL_ARCH_IS_COHERENT() NULL
+ #define OSL_ACP_WAR_ENAB() NULL
#endif /* __ARM_ARCH_7A__ */
#else
#define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va)
#define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr)
#define OSL_ARCH_IS_COHERENT() NULL
+ #define OSL_ACP_WAR_ENAB() NULL
#endif
/* register access macros */
(uintptr)(r), sizeof(*(r)), (v)))
#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
(uintptr)(r), sizeof(*(r))))
-#elif defined(BCM47XX_ACP_WAR)
-extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
-
-#define OSL_READ_REG(osh, r) \
- ({\
- __typeof(*(r)) __osl_v; \
- osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
- __osl_v; \
- })
#endif
-#if defined(BCM47XX_ACP_WAR)
- #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
- #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
-#else
-
#if defined(BCMSDIO)
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
mmap_op else bus_op
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#endif
-#endif /* BCM47XX_ACP_WAR */
#define OSL_ERROR(bcmerror) osl_error(bcmerror)
extern int osl_error(int bcmerror);
/* register access macros */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \
+ defined(CONFIG_X86)
#define R_REG(osh, r) (\
SELECT_BUS_READ(osh, \
({ \
readw((volatile uint16*)(r)); break; \
case sizeof(uint32): __osl_v = \
readl((volatile uint32*)(r)); break; \
+ case sizeof(uint64): __osl_v = \
+ readq((volatile uint64*)(r)); break; \
} \
__osl_v; \
}), \
OSL_READ_REG(osh, r)) \
)
-
+#else
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && \
+ defined(CONFIG_X86)
#define W_REG(osh, r, v) do { \
SELECT_BUS_WRITE(osh, \
switch (sizeof(*(r))) { \
case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ case sizeof(uint64): writeq((uint64)(v), (volatile uint64*)(r)); break; \
}, \
(OSL_WRITE_REG(osh, r, v))); \
} while (0)
+#else
+#define W_REG(osh, r, v) do { \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ } while (0)
+#endif /* KERNEL_VERSION(3, 11, 1)) && defined(CONFIG_64BIT) && defined(CONFIG_X86) */
#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
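
/*
 * Illustrative example (not part of the original header), assuming a
 * hypothetical register block 'regs' with a uint32 'intmask' member:
 *
 *	uint32 mask = R_REG(osh, &regs->intmask);	// access width chosen by sizeof(*r)
 *	W_REG(osh, &regs->intmask, mask | 0x1);		// write back with bit 0 set
 *	OR_REG(osh, &regs->intmask, 0x2);		// same read-modify-write in one macro
 */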
#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
#else
+#ifdef BCM_OBJECT_TRACE
+#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
#define PKTGET(osh, len, send) osl_pktget((osh), (len))
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
#endif /* BCMDBG_CTRACE */
#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#if defined(BCM_OBJECT_TRACE)
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
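
/*
 * Illustrative example (not part of the original header); 'osh' and the
 * 2048-byte length are example values only:
 *
 *	void *p = PKTGET(osh, 2048, TRUE);	// allocate a native packet for tx
 *	if (p == NULL)
 *		return BCME_NOMEM;
 *	// ... fill the packet ...
 *	PKTFREE(osh, p, TRUE);			// release it again
 */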
#ifdef CONFIG_DHD_USE_STATIC_BUF
#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
-#define PKTORPHAN(skb) skb_orphan(skb)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#define PKTORPHAN(skb) osl_pkt_orphan_partial(skb)
+extern void osl_pkt_orphan_partial(struct sk_buff *skb);
#else
#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;})
#endif /* LINUX VERSION >= 3.6 */
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
#endif /* 2.6.22 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool)
#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
#else
#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
#endif /* BCMFA */
+#if defined(BCM_OBJECT_TRACE)
+extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
extern void *osl_pktget_static(osl_t *osh, uint len);
extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
extern void osl_pktclone(osl_t *osh, void **pkt);
struct bcmstrbuf;
extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
#else
-extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+#ifdef BCM_OBJECT_TRACE
+extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
+#else
extern void *osl_pktget(osl_t *osh, uint len);
extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
#endif /* BCMDBG_CTRACE */
extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
#ifdef BCMDBG_CTRACE
struct sec_mem_elem *sec_alloc_list_tail;
} sec_cma_info_t;
+/* The current STB 7445D1 doesn't use ACP and is non-coherent.
+ * These dummy values are added only so the build passes;
+ * change them when this is revisited.
+ */
+#if defined(STBLINUX)
+
+#if defined(__ARM_ARCH_7A__)
+#define ACP_WAR_ENAB() 0
+#define ACP_WIN_LIMIT 1
+#define arch_is_coherent() 0
+#endif /* __ARM_ARCH_7A__ */
+
+#endif /* STBLINUX */
+
#ifdef BCM_SECURE_DMA
#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset) \
osl_sec_dma_unmap((osh), (pa), (size), (direction), (p), (dmah), (pcma), (offset))
#define SECURE_DMA_UNMAP_ALL(osh, pcma) \
-osl_sec_dma_unmap_all((osh), (pcma))
-
+ osl_sec_dma_unmap_all((osh), (pcma))
#if defined(__ARM_ARCH_7A__)
-#define ACP_WAR_ENAB() 0
-#define ACP_WIN_LIMIT 0
-#define arch_is_coherent() 0
-
#define CMA_BUFSIZE_4K 4096
#define CMA_BUFSIZE_2K 2048
#define CMA_BUFSIZE_512 512
typedef struct sec_mem_elem {
size_t size;
int direction;
- phys_addr_t pa_cma; /* physical address */
- void *va; /* virtual address of driver pkt */
- dma_addr_t dma_handle; /* bus address assign by linux */
- void *vac; /* virtual address of cma buffer */
+ phys_addr_t pa_cma; /**< physical address */
+ void *va; /**< virtual address of driver pkt */
+ dma_addr_t dma_handle; /**< bus address assign by linux */
+ void *vac; /**< virtual address of cma buffer */
struct sec_mem_elem *next;
} sec_mem_elem_t;
extern void osl_sec_dma_unmap_all(osl_t *osh, void *ptr_cma_info);
#endif /* BCM_SECURE_DMA */
+
+typedef struct sk_buff_head PKT_LIST;
+#define PKTLIST_INIT(x) skb_queue_head_init((x))
+#define PKTLIST_ENQ(x, y) skb_queue_head((struct sk_buff_head *)(x), (struct sk_buff *)(y))
+#define PKTLIST_DEQ(x) skb_dequeue((struct sk_buff_head *)(x))
+#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
+#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
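
/*
 * Illustrative example (not part of the original header): queueing and
 * draining sk_buffs with the PKT_LIST wrappers above; 'rxq' and 'skb' are
 * hypothetical locals:
 *
 *	PKT_LIST rxq;
 *	PKTLIST_INIT(&rxq);
 *	PKTLIST_ENQ(&rxq, skb);
 *	while ((skb = PKTLIST_DEQ(&rxq)) != NULL)
 *		dev_kfree_skb_any(skb);
 *	PKTLIST_FINI(&rxq);
 */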
+
#endif /* _linux_osl_h_ */
* Linux-specific abstractions to gain some independence from linux kernel versions.
* Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linuxver.h 604758 2015-12-08 12:01:08Z $
*/
#ifndef _linuxver_h_
#define _linuxver_h_
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
+#endif
+
#include <typedefs.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#include <linux/autoconf.h>
#endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
-#include <linux/module.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
#endif
+#include <linux/module.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
(tsk_ctl)->proc_name = name; \
(tsk_ctl)->terminated = FALSE; \
(tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
- (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
- spin_lock_init(&((tsk_ctl)->spinlock)); \
- DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ if (IS_ERR((tsk_ctl)->p_task)) { \
+ (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \
+ DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ } else { \
+ (tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
+ spin_lock_init(&((tsk_ctl)->spinlock)); \
+ DBG_THR(("%s(): thread:%s:%lx started\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
+ } \
}
#define PROC_STOP(tsk_ctl) \
 * Override the latest kfifo functions with
 * older versions to work on older kernels
*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a) 1
#define kfifo_esize(a) 1
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_dentry->d_inode;
+}
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
+
#endif /* _linuxver_h_ */
/*
* Command line options parser.
*
- * $Copyright Open Broadcom Corporation$
- * $Id: miniopt.h 484281 2014-06-12 22:42:26Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $
*/
/*
* Trace messages sent over HBUS
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: msgtrace.h 439681 2013-11-27 15:39:50Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: msgtrace.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _MSGTRACE_H
/*
* OS Abstraction Layer
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: osl.h 503131 2014-09-17 12:16:08Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl.h 526460 2015-01-14 08:25:24Z $
*/
#ifndef _osl_h_
+#if defined(WL_UNITTEST)
+#include <utest_osl.h>
+#else
#include <linux_osl.h>
+#endif
#ifndef PKTDBG_TRACE
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#define OSL_SYSUPTIME_SUPPORT TRUE
#endif /* OSL_SYSUPTIME */
+#ifndef OSL_SYS_HALT
+#define OSL_SYS_HALT() do {} while (0)
+#endif
+
+#ifndef OSL_MEM_AVAIL
+#define OSL_MEM_AVAIL() (0xffffffff)
+#endif
+
#if !defined(PKTC) && !defined(PKTC_DONGLE)
#define PKTCGETATTR(skb) (0)
#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
#define PKTISFRAG(osh, lb) (0)
#define PKTFRAGISCHAINED(osh, i) (0)
/* TRIM Tail bytes from lfrag */
-#define PKTFRAG_TRIM_TAILBYTES(osh, p, len) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+
#ifdef BCM_SECURE_DMA
#define SECURE_DMA_ENAB(osh) (1)
#else
#endif
+
#endif /* _osl_h_ */
/*
* osl forward declarations
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id$
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl_decl.h 591283 2015-10-07 11:52:00Z $
*/
#ifndef _osl_decl_h_
/* osl handle type forward declaration */
typedef struct osl_info osl_t;
typedef struct osl_dmainfo osldma_t;
-
+extern unsigned int lmtest; /* low memory test */
#endif
--- /dev/null
+/*
+ * OS Abstraction Layer Extension - the APIs defined by the "extension" API
+ * are only supported by a subset of all operating systems.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: osl_ext.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _osl_ext_h_
+#define _osl_ext_h_
+
+
+/* ---- Include Files ---------------------------------------------------- */
+
+#if defined(TARGETOS_symbian)
+ #include <e32def.h>
+ #include <symbian_osl_ext.h>
+#elif defined(THREADX)
+ #include <threadx_osl_ext.h>
+#else
+ #define OSL_EXT_DISABLED
+#endif
+
+/* Include base operating system abstraction. */
+#include <osl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* -----------------------------------------------------------------------
+ * Generic OS types.
+ */
+typedef enum osl_ext_status_t
+{
+ OSL_EXT_SUCCESS,
+ OSL_EXT_ERROR,
+ OSL_EXT_TIMEOUT
+
+} osl_ext_status_t;
+#define OSL_EXT_STATUS_DECL(status) osl_ext_status_t status;
+
+#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1))
+typedef unsigned int osl_ext_time_ms_t;
+
+typedef unsigned int osl_ext_event_bits_t;
+
+typedef unsigned int osl_ext_interrupt_state_t;
+
+/* -----------------------------------------------------------------------
+ * Timers.
+ */
+typedef enum
+{
+ /* One-shot timer. */
+ OSL_EXT_TIMER_MODE_ONCE,
+
+ /* Periodic timer. */
+ OSL_EXT_TIMER_MODE_REPEAT
+
+} osl_ext_timer_mode_t;
+
+/* User registered callback and parameter to invoke when timer expires. */
+typedef void* osl_ext_timer_arg_t;
+typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg);
+
+
+/* -----------------------------------------------------------------------
+ * Tasks.
+ */
+
+/* Task entry argument. */
+typedef void* osl_ext_task_arg_t;
+
+/* Task entry function. */
+typedef void (*osl_ext_task_entry)(osl_ext_task_arg_t arg);
+
+/* Abstract task priority levels. */
+typedef enum
+{
+ OSL_EXT_TASK_IDLE_PRIORITY,
+ OSL_EXT_TASK_LOW_PRIORITY,
+ OSL_EXT_TASK_LOW_NORMAL_PRIORITY,
+ OSL_EXT_TASK_NORMAL_PRIORITY,
+ OSL_EXT_TASK_HIGH_NORMAL_PRIORITY,
+ OSL_EXT_TASK_HIGHEST_PRIORITY,
+ OSL_EXT_TASK_TIME_CRITICAL_PRIORITY,
+
+ /* This must be last. */
+ OSL_EXT_TASK_NUM_PRIORITES
+} osl_ext_task_priority_t;
+
+
+#ifndef OSL_EXT_DISABLED
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+
+/* --------------------------------------------------------------------------
+** Semaphore
+*/
+
+/****************************************************************************
+* Function: osl_ext_sem_create
+*
+* Purpose: Creates a counting semaphore object, which can subsequently be
+* used for thread notification.
+*
+* Parameters: name (in) Name to assign to the semaphore (must be unique).
+* init_cnt (in) Initial count that the semaphore should have.
+* sem (out) Newly created semaphore.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was created successfully, or an
+* error code if the semaphore could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_create(char *name, int init_cnt, osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_delete
+*
+* Purpose: Destroys a previously created semaphore object.
+*
+* Parameters: sem (mod) Semaphore object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was deleted successfully, or an
+* error code if the semaphore could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_delete(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_give
+*
+* Purpose: Increments the count associated with the semaphore. This will
+* cause one thread blocked on a take to wake up.
+*
+* Parameters: sem (mod) Semaphore object to give.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was given successfully, or an
+* error code if the semaphore could not be given.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_give(osl_ext_sem_t *sem);
+
+/****************************************************************************
+* Function: osl_ext_sem_take
+*
+* Purpose: Decrements the count associated with the semaphore. If the count
+* is less than zero, then the calling task will become blocked until
+* another thread does a give on the semaphore. This function will only
+* block the calling thread for timeout_msec milliseconds, before
+* returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: sem (mod) Semaphore object to take.
+* timeout_msec (in) Number of milliseconds to wait for the
+* semaphore to enter a state where it can be
+* taken.
+*
+* Returns: OSL_EXT_SUCCESS if the semaphore was taken successfully, or an
+* error code if the semaphore could not be taken.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec);
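
/*
 * Illustrative example (not part of the original header): a minimal
 * create/give/take sequence; the name, initial count and 100 ms timeout
 * are arbitrary example values.
 */
static osl_ext_sem_t example_sem;

static osl_ext_status_t
example_sem_usage(void)
{
	static char name[] = "example_sem";

	if (osl_ext_sem_create(name, 0, &example_sem) != OSL_EXT_SUCCESS)
		return OSL_EXT_ERROR;
	(void)osl_ext_sem_give(&example_sem);		/* post once */
	return osl_ext_sem_take(&example_sem, 100);	/* wait up to 100 ms */
}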
+
+
+/* --------------------------------------------------------------------------
+** Mutex
+*/
+
+/****************************************************************************
+* Function: osl_ext_mutex_create
+*
+* Purpose: Creates a mutex object, which can subsequently be used to control
+* mutual exclusion of resources.
+*
+* Parameters: name (in) Name to assign to the mutex (must be unique).
+* mutex (out) Mutex object to initialize.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was created successfully, or an
+* error code if the mutex could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_create(char *name, osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function: osl_ext_mutex_delete
+*
+* Purpose: Destroys a previously created mutex object.
+*
+* Parameters: mutex (mod) Mutex object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was deleted successfully, or an
+* error code if the mutex could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_delete(osl_ext_mutex_t *mutex);
+
+/****************************************************************************
+* Function: osl_ext_mutex_acquire
+*
+* Purpose: Acquires the indicated mutual exclusion object. If the object is
+* currently acquired by another task, then this function will wait
+* for timeout_msec milliseconds before returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: mutex (mod) Mutex object to acquire.
+* timeout_msec (in) Number of milliseconds to wait for the mutex.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was acquired successfully, or an
+* error code if the mutex could not be acquired.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_acquire(osl_ext_mutex_t *mutex, osl_ext_time_ms_t timeout_msec);
+
+/****************************************************************************
+* Function: osl_ext_mutex_release
+*
+* Purpose: Releases the indicated mutual exclusion object. This makes it
+* available for another task to acquire.
+*
+* Parameters: mutex (mod) Mutex object to release.
+*
+* Returns: OSL_EXT_SUCCESS if the mutex was released successfully, or an
+* error code if the mutex could not be released.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex);
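
/*
 * Illustrative example (not part of the original header): guarding a critical
 * section with the mutex API; OSL_EXT_TIME_FOREVER blocks until the mutex is
 * available.
 */
static osl_ext_status_t
example_mutex_usage(osl_ext_mutex_t *mutex)
{
	osl_ext_status_t status;

	status = osl_ext_mutex_acquire(mutex, OSL_EXT_TIME_FOREVER);
	if (status != OSL_EXT_SUCCESS)
		return status;
	/* ... touch the shared state here ... */
	return osl_ext_mutex_release(mutex);
}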
+
+
+/* --------------------------------------------------------------------------
+** Timers
+*/
+
+/****************************************************************************
+* Function: osl_ext_timer_create
+*
+* Purpose: Creates a timer object.
+*
+* Parameters: name (in) Name of timer.
+* timeout_msec (in) Invoke callback after this number of milliseconds.
+* mode (in) One-shot or periodic timer.
+* func (in) Callback function to invoke on timer expiry.
+* arg (in) Argument to callback function.
+* timer (out) Timer object to create.
+*
+* Note: The callback is invoked in interrupt context. The application is
+* responsible for providing a context switch for the callback if one is required.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an
+* error code if the timer could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_create(char *name, osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode,
+ osl_ext_timer_callback func, osl_ext_timer_arg_t arg, osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_timer_delete
+*
+* Purpose: Destroys a previously created timer object.
+*
+* Parameters: timer (mod) Timer object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was deleted successfully, or an
+* error code if the timer could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_timer_delete(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_timer_start
+*
+* Purpose: Start a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+* timeout_msec (in) Invoke callback after this number of milliseconds.
+* mode (in) One-shot or periodic timer.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was started successfully, or an
+* error code if the timer could not be started.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_start(osl_ext_timer_t *timer,
+ osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode);
+
+/****************************************************************************
+* Function: osl_ext_timer_stop
+*
+* Purpose: Stop a previously created timer object.
+*
+* Parameters: timer (in) Timer object.
+*
+* Returns: OSL_EXT_SUCCESS if the timer was stopped successfully, or an
+* error code if the timer could not be stopped.
+*****************************************************************************
+*/
+osl_ext_status_t
+osl_ext_timer_stop(osl_ext_timer_t *timer);
+
+/****************************************************************************
+* Function: osl_ext_time_get
+*
+* Purpose: Returns incrementing time counter.
+*
+* Parameters: None.
+*
+* Returns: Returns incrementing time counter in msec.
+*****************************************************************************
+*/
+osl_ext_time_ms_t osl_ext_time_get(void);
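
/*
 * Illustrative example (not part of the original header): arming a 500 ms
 * periodic timer.  Per the note above, the callback runs in interrupt
 * context, so it should only signal a task (e.g. give a semaphore).
 */
static void
example_timer_cb(osl_ext_timer_arg_t arg)
{
	/* signal a task here; do not do heavy work in interrupt context */
}

static osl_ext_status_t
example_timer_usage(osl_ext_timer_t *timer)
{
	static char name[] = "example_timer";

	return osl_ext_timer_create(name, 500, OSL_EXT_TIMER_MODE_REPEAT,
		example_timer_cb, NULL, timer);
}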
+
+/* --------------------------------------------------------------------------
+** Tasks
+*/
+
+/****************************************************************************
+* Function: osl_ext_task_create
+*
+* Purpose: Create a task.
+*
+* Parameters: name (in) Pointer to task string descriptor.
+* stack (in) Pointer to stack. NULL to allocate.
+* stack_size (in) Stack size - in bytes.
+* priority (in) Abstract task priority.
+* func (in) A pointer to the task entry point function.
+* arg (in) Value passed into task entry point function.
+* task (out) Task to create.
+*
+* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an
+* error code if the task could not be created.
+*****************************************************************************
+*/
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+ osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \
+ (arg), (task))
+
+osl_ext_status_t osl_ext_task_create_ex(char* name,
+ void *stack, unsigned int stack_size, osl_ext_task_priority_t priority,
+ osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg,
+ osl_ext_task_t *task);
+
+/****************************************************************************
+* Function: osl_ext_task_delete
+*
+* Purpose: Destroy a task.
+*
+* Parameters: task (mod) Task to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the task was deleted successfully, or an
+* error code if the task could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task);
+
+
+/****************************************************************************
+* Function: osl_ext_task_current
+*
+* Purpose: Returns the currently running task.
+*
+* Parameters: None.
+*
+* Returns: osl_ext_task_t of current running task.
+*****************************************************************************
+*/
+osl_ext_task_t *osl_ext_task_current(void);
+
+
+/****************************************************************************
+* Function: osl_ext_task_yield
+*
+* Purpose: Yield the CPU to other tasks of the same priority that are
+* ready-to-run.
+*
+* Parameters: None.
+*
+* Returns: OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_yield(void);
+
+
+/****************************************************************************
+* Function: osl_ext_task_enable_stack_check
+*
+* Purpose: Enable task stack checking.
+*
+* Parameters: None.
+*
+* Returns: OSL_EXT_SUCCESS if successful, else error code.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_task_enable_stack_check(void);
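
/*
 * Illustrative example (not part of the original header): spawning a
 * normal-priority worker task and letting the OSL allocate its stack
 * (stack pointer NULL); the name and 4 KB stack size are example values.
 */
static void
example_task_entry(osl_ext_task_arg_t arg)
{
	/* task body runs here */
}

static osl_ext_status_t
example_task_usage(osl_ext_task_t *task)
{
	static char name[] = "example_task";

	return osl_ext_task_create(name, NULL, 4096, OSL_EXT_TASK_NORMAL_PRIORITY,
		example_task_entry, NULL, task);
}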
+
+
+/* --------------------------------------------------------------------------
+** Queue
+*/
+
+/****************************************************************************
+* Function: osl_ext_queue_create
+*
+* Purpose: Create a queue.
+*
+* Parameters: name (in) Name to assign to the queue (must be unique).
+* buffer (in) Queue buffer. NULL to allocate.
+* size (in) Size of the queue.
+* queue (out) Newly created queue.
+*
+* Returns: OSL_EXT_SUCCESS if the queue was created successfully, or an
+* error code if the queue could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_create(char *name,
+ void *queue_buffer, unsigned int queue_size,
+ osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function: osl_ext_queue_delete
+*
+* Purpose: Destroys a previously created queue object.
+*
+* Parameters: queue (mod) Queue object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the queue was deleted successfully, or an
+* error code if the queue could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_delete(osl_ext_queue_t *queue);
+
+/****************************************************************************
+* Function: osl_ext_queue_send
+*
+* Purpose: Send/add data to the queue. This function will not block the
+* calling thread if the queue is full.
+*
+* Parameters: queue (mod) Queue object.
+* data (in) Data pointer to be queued.
+*
+* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an
+* error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function: osl_ext_queue_send_synchronous
+*
+* Purpose: Send/add data to the queue. This function will block the
+* calling thread until the data is dequeued.
+*
+* Parameters: queue (mod) Queue object.
+* data (in) Data pointer to be queued.
+*
+* Returns: OSL_EXT_SUCCESS if the data was queued successfully, or an
+* error code if the data could not be queued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_send_synchronous(osl_ext_queue_t *queue, void *data);
+
+/****************************************************************************
+* Function: osl_ext_queue_receive
+*
+* Purpose: Receive/remove data from the queue. This function will only
+* block the calling thread for timeout_msec milliseconds, before
+* returning with OSL_EXT_TIMEOUT.
+*
+* Parameters: queue (mod) Queue object.
+* timeout_msec (in) Number of milliseconds to wait for the
+* data from the queue.
+* data (out) Data pointer received/removed from the queue.
+*
+* Returns: OSL_EXT_SUCCESS if the data was dequeued successfully, or an
+* error code if the data could not be dequeued.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_receive(osl_ext_queue_t *queue,
+ osl_ext_time_ms_t timeout_msec, void **data);
+
+/****************************************************************************
+* Function: osl_ext_queue_count
+*
+* Purpose: Returns the number of items in the queue.
+*
+* Parameters: queue (mod) Queue object.
+* count (out) Number of items currently in the queue.
+*
+* Returns: OSL_EXT_SUCCESS if the count was returned successfully, or an
+* error code if the count is invalid.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count);
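
/*
 * Illustrative example (not part of the original header): posting a message
 * pointer and waiting up to 10 ms for one to arrive; error handling is
 * reduced to pass/fail.
 */
static osl_ext_status_t
example_queue_usage(osl_ext_queue_t *queue, void *msg)
{
	void *rx = NULL;

	if (osl_ext_queue_send(queue, msg) != OSL_EXT_SUCCESS)
		return OSL_EXT_ERROR;
	return osl_ext_queue_receive(queue, 10, &rx);
}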
+
+
+/* --------------------------------------------------------------------------
+** Event
+*/
+
+/****************************************************************************
+* Function: osl_ext_event_create
+*
+* Purpose: Creates an event object, which can subsequently be used to
+* notify and trigger tasks.
+*
+* Parameters: name (in) Name to assign to the event (must be unique).
+* event (out) Event object to initialize.
+*
+* Returns: OSL_EXT_SUCCESS if the event was created successfully, or an
+* error code if the event could not be created.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_create(char *name, osl_ext_event_t *event);
+
+/****************************************************************************
+* Function: osl_ext_event_delete
+*
+* Purpose: Destroys a previously created event object.
+*
+* Parameters: event (mod) Event object to destroy.
+*
+* Returns: OSL_EXT_SUCCESS if the event was deleted successfully, or an
+* error code if the event could not be deleted.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_delete(osl_ext_event_t *event);
+
+/****************************************************************************
+* Function: osl_ext_event_get
+*
+* Purpose: Get event from specified event object.
+*
+* Parameters: event (mod) Event object to get.
+* requested (in) Requested event to get.
+* timeout_msec (in) Number of milliseconds to wait for the event.
+* event_bits (out) Event bits retrieved.
+*
+* Returns: OSL_EXT_SUCCESS if the requested event bits were retrieved, or an
+* error code if the event could not be retrieved.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_get(osl_ext_event_t *event,
+ osl_ext_event_bits_t requested, osl_ext_time_ms_t timeout_msec,
+ osl_ext_event_bits_t *event_bits);
+
+/****************************************************************************
+* Function: osl_ext_event_set
+*
+* Purpose: Set event of specified event object.
+*
+* Parameters: event (mod) Event object to set.
+* event_bits (in) Event bits to set.
+*
+* Returns: OSL_EXT_SUCCESS if the event was set successfully, or an
+* error code if the event could not be set.
+*****************************************************************************
+*/
+osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event,
+ osl_ext_event_bits_t event_bits);
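
/*
 * Illustrative example (not part of the original header): one context sets a
 * hypothetical "rx done" bit, another waits for it without a timeout.
 */
#define EXAMPLE_EVT_RX_DONE	0x1	/* example event bit, not a driver constant */

static osl_ext_status_t
example_event_usage(osl_ext_event_t *event)
{
	osl_ext_event_bits_t bits = 0;

	(void)osl_ext_event_set(event, EXAMPLE_EVT_RX_DONE);
	return osl_ext_event_get(event, EXAMPLE_EVT_RX_DONE,
		OSL_EXT_TIME_FOREVER, &bits);
}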
+
+
+/* --------------------------------------------------------------------------
+** Interrupt
+*/
+
+/****************************************************************************
+* Function: osl_ext_interrupt_disable
+*
+* Purpose: Disable CPU interrupt.
+*
+* Parameters: None.
+*
+* Returns: The interrupt state before disabling, for use with osl_ext_interrupt_restore().
+*****************************************************************************
+*/
+osl_ext_interrupt_state_t osl_ext_interrupt_disable(void);
+
+
+/****************************************************************************
+* Function: osl_ext_interrupt_restore
+*
+* Purpose: Restore CPU interrupt state.
+*
+* Parameters: state (in) Interrupt state to restore returned from
+* osl_ext_interrupt_disable().
+*
+* Returns: None.
+*****************************************************************************
+*/
+void osl_ext_interrupt_restore(osl_ext_interrupt_state_t state);
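
/*
 * Illustrative example (not part of the original header): bracketing a short
 * critical section with disable/restore so that nested callers restore the
 * interrupt state they saw, rather than unconditionally re-enabling.
 */
static void
example_irq_usage(void)
{
	osl_ext_interrupt_state_t state = osl_ext_interrupt_disable();

	/* ... a few interrupt-unsafe instructions ... */
	osl_ext_interrupt_restore(state);
}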
+
+#else
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* Semaphore. */
+#define osl_ext_sem_t
+#define OSL_EXT_SEM_DECL(sem)
+
+/* Mutex. */
+#define osl_ext_mutex_t
+#define OSL_EXT_MUTEX_DECL(mutex)
+
+/* Timer. */
+#define osl_ext_timer_t
+#define OSL_EXT_TIMER_DECL(timer)
+
+/* Task. */
+#define osl_ext_task_t void
+#define OSL_EXT_TASK_DECL(task)
+
+/* Queue. */
+#define osl_ext_queue_t
+#define OSL_EXT_QUEUE_DECL(queue)
+
+/* Event. */
+#define osl_ext_event_t
+#define OSL_EXT_EVENT_DECL(event)
+
+/* ---- Variable Externs ------------------------------------------------- */
+/* ---- Function Prototypes ---------------------------------------------- */
+
+#define osl_ext_sem_create(name, init_cnt, sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_delete(sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_give(sem) (OSL_EXT_SUCCESS)
+#define osl_ext_sem_take(sem, timeout_msec) (OSL_EXT_SUCCESS)
+
+#define osl_ext_mutex_create(name, mutex) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_delete(mutex) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_acquire(mutex, timeout_msec) (OSL_EXT_SUCCESS)
+#define osl_ext_mutex_release(mutex) (OSL_EXT_SUCCESS)
+
+#define osl_ext_timer_create(name, timeout_msec, mode, func, arg, timer) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_timer_delete(timer) (OSL_EXT_SUCCESS)
+#define osl_ext_timer_start(timer, timeout_msec, mode) (OSL_EXT_SUCCESS)
+#define osl_ext_timer_stop(timer) (OSL_EXT_SUCCESS)
+#define osl_ext_time_get() (0)
+
+#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_task_delete(task) (OSL_EXT_SUCCESS)
+#define osl_ext_task_current() (NULL)
+#define osl_ext_task_yield() (OSL_EXT_SUCCESS)
+#define osl_ext_task_enable_stack_check() (OSL_EXT_SUCCESS)
+
+#define osl_ext_queue_create(name, queue_buffer, queue_size, queue) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_queue_delete(queue) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_send(queue, data) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_send_synchronous(queue, data) (OSL_EXT_SUCCESS)
+#define osl_ext_queue_receive(queue, timeout_msec, data) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_queue_count(queue, count) (OSL_EXT_SUCCESS)
+
+#define osl_ext_event_create(name, event) (OSL_EXT_SUCCESS)
+#define osl_ext_event_delete(event) (OSL_EXT_SUCCESS)
+#define osl_ext_event_get(event, requested, timeout_msec, event_bits) \
+ (OSL_EXT_SUCCESS)
+#define osl_ext_event_set(event, event_bits) (OSL_EXT_SUCCESS)
+
+#define osl_ext_interrupt_disable(void)
+#define osl_ext_interrupt_restore(state)
+
+#endif /* OSL_EXT_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _osl_ext_h_ */
* #include <packed_section_end.h>
*
*
- * $Copyright Open Broadcom Corporation$
- * $Id: packed_section_end.h 437241 2013-11-18 07:39:24Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $
*/
* #include <packed_section_end.h>
*
*
- * $Copyright Open Broadcom Corporation$
- * $Id: packed_section_start.h 437241 2013-11-18 07:39:24Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $
*/
/*
* pcicfg.h: PCI configuration constants and structures.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: pcicfg.h 506084 2014-10-02 15:34:59Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcicfg.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _h_pcicfg_
#define _h_pcicfg_
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* The following inside ifndef's so we don't collide with NTDDK.H */
-#ifndef PCI_MAX_BUS
-#define PCI_MAX_BUS 0x100
-#endif
-#ifndef PCI_MAX_DEVICES
-#define PCI_MAX_DEVICES 0x20
-#endif
-#ifndef PCI_MAX_FUNCTION
-#define PCI_MAX_FUNCTION 0x8
-#endif
-
-#ifndef PCI_INVALID_VENDORID
-#define PCI_INVALID_VENDORID 0xffff
-#endif
-#ifndef PCI_INVALID_DEVICEID
-#define PCI_INVALID_DEVICEID 0xffff
-#endif
-
-
-/* Convert between bus-slot-function-register and config addresses */
-
-#define PCICFG_BUS_SHIFT 16 /* Bus shift */
-#define PCICFG_SLOT_SHIFT 11 /* Slot shift */
-#define PCICFG_FUN_SHIFT 8 /* Function shift */
-#define PCICFG_OFF_SHIFT 0 /* Register shift */
-
-#define PCICFG_BUS_MASK 0xff /* Bus mask */
-#define PCICFG_SLOT_MASK 0x1f /* Slot mask */
-#define PCICFG_FUN_MASK 7 /* Function mask */
-#define PCICFG_OFF_MASK 0xff /* Bus mask */
-
-#define PCI_CONFIG_ADDR(b, s, f, o) \
- ((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT) \
- | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \
- | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \
- | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT))
-
-#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK)
-#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK)
-#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK)
-#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK)
-
-/* PCIE Config space accessing MACROS */
-
-#define PCIECFG_BUS_SHIFT 24 /* Bus shift */
-#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */
-#define PCIECFG_FUN_SHIFT 16 /* Function shift */
-#define PCIECFG_OFF_SHIFT 0 /* Register shift */
-
-#define PCIECFG_BUS_MASK 0xff /* Bus mask */
-#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */
-#define PCIECFG_FUN_MASK 7 /* Function mask */
-#define PCIECFG_OFF_MASK 0xfff /* Register mask */
-
-#define PCIE_CONFIG_ADDR(b, s, f, o) \
- ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \
- | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \
- | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \
- | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT))
-
-#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK)
-#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK)
-#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK)
-#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK)
-
-/* The actual config space */
-
-#define PCI_BAR_MAX 6
-
-#define PCI_ROM_BAR 8
-
-#define PCR_RSVDA_MAX 2
-
-/* Bits in PCI bars' flags */
-
-#define PCIBAR_FLAGS 0xf
-#define PCIBAR_IO 0x1
-#define PCIBAR_MEM1M 0x2
-#define PCIBAR_MEM64 0x4
-#define PCIBAR_PREFETCH 0x8
-#define PCIBAR_MEM32_MASK 0xFFFFFF80
-
-typedef struct _pci_config_regs {
- uint16 vendor;
- uint16 device;
- uint16 command;
- uint16 status;
- uint8 rev_id;
- uint8 prog_if;
- uint8 sub_class;
- uint8 base_class;
- uint8 cache_line_size;
- uint8 latency_timer;
- uint8 header_type;
- uint8 bist;
- uint32 base[PCI_BAR_MAX];
- uint32 cardbus_cis;
- uint16 subsys_vendor;
- uint16 subsys_id;
- uint32 baserom;
- uint32 rsvd_a[PCR_RSVDA_MAX];
- uint8 int_line;
- uint8 int_pin;
- uint8 min_gnt;
- uint8 max_lat;
- uint8 dev_dep[192];
-} pci_config_regs;
-
-#define SZPCR (sizeof (pci_config_regs))
-#define MINSZPCR 64 /* offsetof (dev_dep[0] */
-
-#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
/* pci config status reg has a bit to indicate that capability ptr is present */
#define PCI_CFG_MINGNT 0x3e
#define PCI_CFG_MAXLAT 0x3f
#define PCI_CFG_DEVCTRL 0xd8
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-
-
-
-/* Classes and subclasses */
-
-typedef enum {
- PCI_CLASS_OLD = 0,
- PCI_CLASS_DASDI,
- PCI_CLASS_NET,
- PCI_CLASS_DISPLAY,
- PCI_CLASS_MMEDIA,
- PCI_CLASS_MEMORY,
- PCI_CLASS_BRIDGE,
- PCI_CLASS_COMM,
- PCI_CLASS_BASE,
- PCI_CLASS_INPUT,
- PCI_CLASS_DOCK,
- PCI_CLASS_CPU,
- PCI_CLASS_SERIAL,
- PCI_CLASS_INTELLIGENT = 0xe,
- PCI_CLASS_SATELLITE,
- PCI_CLASS_CRYPT,
- PCI_CLASS_DSP,
- PCI_CLASS_XOR = 0xfe
-} pci_classes;
-
-typedef enum {
- PCI_DASDI_SCSI,
- PCI_DASDI_IDE,
- PCI_DASDI_FLOPPY,
- PCI_DASDI_IPI,
- PCI_DASDI_RAID,
- PCI_DASDI_OTHER = 0x80
-} pci_dasdi_subclasses;
-
-typedef enum {
- PCI_NET_ETHER,
- PCI_NET_TOKEN,
- PCI_NET_FDDI,
- PCI_NET_ATM,
- PCI_NET_OTHER = 0x80
-} pci_net_subclasses;
-
-typedef enum {
- PCI_DISPLAY_VGA,
- PCI_DISPLAY_XGA,
- PCI_DISPLAY_3D,
- PCI_DISPLAY_OTHER = 0x80
-} pci_display_subclasses;
-
-typedef enum {
- PCI_MMEDIA_VIDEO,
- PCI_MMEDIA_AUDIO,
- PCI_MMEDIA_PHONE,
- PCI_MEDIA_OTHER = 0x80
-} pci_mmedia_subclasses;
-
-typedef enum {
- PCI_MEMORY_RAM,
- PCI_MEMORY_FLASH,
- PCI_MEMORY_OTHER = 0x80
-} pci_memory_subclasses;
-
-typedef enum {
- PCI_BRIDGE_HOST,
- PCI_BRIDGE_ISA,
- PCI_BRIDGE_EISA,
- PCI_BRIDGE_MC,
- PCI_BRIDGE_PCI,
- PCI_BRIDGE_PCMCIA,
- PCI_BRIDGE_NUBUS,
- PCI_BRIDGE_CARDBUS,
- PCI_BRIDGE_RACEWAY,
- PCI_BRIDGE_OTHER = 0x80
-} pci_bridge_subclasses;
-
-typedef enum {
- PCI_COMM_UART,
- PCI_COMM_PARALLEL,
- PCI_COMM_MULTIUART,
- PCI_COMM_MODEM,
- PCI_COMM_OTHER = 0x80
-} pci_comm_subclasses;
-
-typedef enum {
- PCI_BASE_PIC,
- PCI_BASE_DMA,
- PCI_BASE_TIMER,
- PCI_BASE_RTC,
- PCI_BASE_PCI_HOTPLUG,
- PCI_BASE_OTHER = 0x80
-} pci_base_subclasses;
-
-typedef enum {
- PCI_INPUT_KBD,
- PCI_INPUT_PEN,
- PCI_INPUT_MOUSE,
- PCI_INPUT_SCANNER,
- PCI_INPUT_GAMEPORT,
- PCI_INPUT_OTHER = 0x80
-} pci_input_subclasses;
-
-typedef enum {
- PCI_DOCK_GENERIC,
- PCI_DOCK_OTHER = 0x80
-} pci_dock_subclasses;
-
-typedef enum {
- PCI_CPU_386,
- PCI_CPU_486,
- PCI_CPU_PENTIUM,
- PCI_CPU_ALPHA = 0x10,
- PCI_CPU_POWERPC = 0x20,
- PCI_CPU_MIPS = 0x30,
- PCI_CPU_COPROC = 0x40,
- PCI_CPU_OTHER = 0x80
-} pci_cpu_subclasses;
-
-typedef enum {
- PCI_SERIAL_IEEE1394,
- PCI_SERIAL_ACCESS,
- PCI_SERIAL_SSA,
- PCI_SERIAL_USB,
- PCI_SERIAL_FIBER,
- PCI_SERIAL_SMBUS,
- PCI_SERIAL_OTHER = 0x80
-} pci_serial_subclasses;
-
-typedef enum {
- PCI_INTELLIGENT_I2O
-} pci_intelligent_subclasses;
-
-typedef enum {
- PCI_SATELLITE_TV,
- PCI_SATELLITE_AUDIO,
- PCI_SATELLITE_VOICE,
- PCI_SATELLITE_DATA,
- PCI_SATELLITE_OTHER = 0x80
-} pci_satellite_subclasses;
-
-typedef enum {
- PCI_CRYPT_NETWORK,
- PCI_CRYPT_ENTERTAINMENT,
- PCI_CRYPT_OTHER = 0x80
-} pci_crypt_subclasses;
-
-typedef enum {
- PCI_DSP_DPIO,
- PCI_DSP_OTHER = 0x80
-} pci_dsp_subclasses;
-
-typedef enum {
- PCI_XOR_QDMA,
- PCI_XOR_OTHER = 0x80
-} pci_xor_subclasses;
-
-/* Overlay for a PCI-to-PCI bridge */
-
-#define PPB_RSVDA_MAX 2
-#define PPB_RSVDD_MAX 8
-
-typedef struct _ppb_config_regs {
- uint16 vendor;
- uint16 device;
- uint16 command;
- uint16 status;
- uint8 rev_id;
- uint8 prog_if;
- uint8 sub_class;
- uint8 base_class;
- uint8 cache_line_size;
- uint8 latency_timer;
- uint8 header_type;
- uint8 bist;
- uint32 rsvd_a[PPB_RSVDA_MAX];
- uint8 prim_bus;
- uint8 sec_bus;
- uint8 sub_bus;
- uint8 sec_lat;
- uint8 io_base;
- uint8 io_lim;
- uint16 sec_status;
- uint16 mem_base;
- uint16 mem_lim;
- uint16 pf_mem_base;
- uint16 pf_mem_lim;
- uint32 pf_mem_base_hi;
- uint32 pf_mem_lim_hi;
- uint16 io_base_hi;
- uint16 io_lim_hi;
- uint16 subsys_vendor;
- uint16 subsys_id;
- uint32 rsvd_b;
- uint8 rsvd_c;
- uint8 int_pin;
- uint16 bridge_ctrl;
- uint8 chip_ctrl;
- uint8 diag_ctrl;
- uint16 arb_ctrl;
- uint32 rsvd_d[PPB_RSVDD_MAX];
- uint8 dev_dep[192];
-} ppb_config_regs;
-
-/* Everything below is BRCM HND proprietary */
-
-
-/* Brcm PCI configuration registers */
-#define cap_list rsvd_a[0]
-#define bar0_window dev_dep[0x80 - 0x40]
-#define bar1_window dev_dep[0x84 - 0x40]
-#define sprom_control dev_dep[0x88 - 0x40]
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
/* PCI CAPABILITY DEFINES */
#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */
-#define PCI_LINK_CTRL 0xbc /* PCI link control register */
-#define PCI_DEV_STAT_CTRL2 0xd4 /* PCI device status control 2 register */
-#define PCIE_LTR_MAX_SNOOP 0x1b4 /* PCIE LTRMaxSnoopLatency */
-#define PCI_L1SS_CTRL 0x248 /* The L1 PM Substates Control register */
-#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control 2 register */
+#define PCI_L1SS_CTRL2		0x24c	/* The L1 PM Substates Control 2 register */
/* Private Registers */
#define PCI_STAT_CTRL 0xa80
#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */
#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */
#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */
+#define PCI_SECOND_BAR0_OFFSET (16 * 1024) /* secondary bar 0 window */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* On AI chips we have a second window to map DMP regs are mapped: */
-#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */
-
-/* PCI_INT_STATUS */
-#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */
-
-/* PCI_INT_MASK */
-#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */
-#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */
-#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* PCI_SPROM_CONTROL */
-#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */
-#define SPROM_LOCKED 0x08 /* SPROM Locked */
-#define SPROM_BLANK 0x04 /* indicating a blank SPROM */
-#define SPROM_WRITEEN 0x10 /* SPROM write enable */
-#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */
-#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */
-#define SPROM_OTPIN_USE 0x80 /* device OTP In use */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-/* Bits in PCI command and status regs */
-#define PCI_CMD_IO 0x00000001 /* I/O enable */
-#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */
-#define PCI_CMD_MASTER 0x00000004 /* Master enable */
-#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */
-#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? */
-#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palate */
-#define PCI_STAT_TA 0x08000000 /* target abort status */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
/* Header types */
#define PCI_HEADER_MULTI 0x80
#define write_pci_cfg_byte(a, val) do { \
uint32 tmpval; \
tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \
- val << BYTE_POS(a); \
+ val << BYTE_POS(a); \
OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
} while (0)
#define write_pci_cfg_word(a, val) do { \
uint32 tmpval; \
tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \
- val << WORD_POS(a); \
+ val << WORD_POS(a); \
OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
} while (0)
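/*
 * A minimal usage sketch for the read-modify-write helpers above (assumed
 * usage, not taken from the original sources). The macros expect an osl_t
 * handle named `osh` to be visible at the call site:
 *
 *	void example_cfg_write(osl_t *osh)
 *	{
 *		write_pci_cfg_byte(PCI_GPIO_OUT, 0x01);     // update low byte of the GPIO output reg
 *		write_pci_cfg_word(PCI_GPIO_OUTEN, 0x0001); // enable GPIO 0 as an output
 *	}
 */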
/*
* BCM43XX PCIE core hardware definitions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: pcie_core.h 483003 2014-06-05 19:57:46Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcie_core.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _PCIE_CORE_H
#define _PCIE_CORE_H
uint32 ltr_state; /* 0x1A0 */
uint32 pwr_int_status; /* 0x1A4 */
uint32 pwr_int_mask; /* 0x1A8 */
- uint32 PAD[21]; /* 0x1AC - 0x200 */
+ uint32 PAD[13]; /* 0x1AC - 0x1DF */
+ uint32 clk_ctl_st; /* 0x1E0 */
+ uint32 PAD[7]; /* 0x1E4 - 0x1FF */
pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */
pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */
pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */
#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */
#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */
#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */
+#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */
+#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */
#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
#define PCIE_MB_D2H_MB_MASK \
(PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \
- PCIE_MB_TOPCIE_D2H1_DB1 | PCIE_MB_TOPCIE_D2H1_DB1 | \
- PCIE_MB_TOPCIE_D2H2_DB1 | PCIE_MB_TOPCIE_D2H2_DB1 | \
- PCIE_MB_TOPCIE_D2H3_DB1 | PCIE_MB_TOPCIE_D2H3_DB1)
+ PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \
+ PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \
+ PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1)
/* SB to PCIE translation masks */
#define SBTOPCIE0_MASK 0xfc000000
#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
#define PCIECFGREG_DEVCONTROL 0xB4
+#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12
+#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT)
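+/*
+ * Illustrative decode of the MRRS field above (assumed usage, with an osl_t
+ * handle `osh` in scope): the 3-bit value n taken from the PCIe Device
+ * Control register encodes a maximum read request size of (128 << n) bytes,
+ * so n == 2 means 512 bytes.
+ *
+ *	uint32 devctl = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_DEVCONTROL, sizeof(uint32));
+ *	uint32 mrrs = (devctl & PCIECFGREG_DEVCONTROL_MRRS_MASK) >> PCIECFGREG_DEVCONTROL_MRRS_SHFT;
+ *	uint32 max_read_req = 128u << mrrs;
+ */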
/* SROM hardware region */
#define SROM_OFFSET_BAR1_CTRL 52
#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15
#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
-#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIF)
-#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIF)
-#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIF)
-#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIF)
-#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIF)
+#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
#ifdef BCMDRIVER
void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
#endif /* BCMDRIVER */
#endif /* _PCIE_CORE_H */
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental types and constants relating to 802.11
*
- * $Id: 802.11.h 495738 2014-08-08 03:36:17Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11.h 556559 2015-05-14 01:48:17Z $
*/
#ifndef _802_11_H_
struct ether_addr bssid; /* BSS ID */
uint16 seq; /* sequence control */
} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_management_header dot11_management_header_t;
#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */
/* Management frame payloads */
uint8 mode;
} BWL_POST_PACKED_STRUCT;
+/* These lengths assume 64 MU groups, as specified in 802.11ac-2013 */
+#define DOT11_ACTION_GID_MEMBERSHIP_LEN 8 /* bytes */
+#define DOT11_ACTION_GID_USER_POS_LEN 16 /* bytes */
+BWL_PRE_PACKED_STRUCT struct dot11_action_group_id {
+ uint8 category;
+ uint8 action;
+ uint8 membership_status[DOT11_ACTION_GID_MEMBERSHIP_LEN];
+ uint8 user_position[DOT11_ACTION_GID_USER_POS_LEN];
+} BWL_POST_PACKED_STRUCT;
+
#define SM_PWRSAVE_ENABLE 1
#define SM_PWRSAVE_MODE 2
uint8 action;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_action_frmhdr dot11_action_frmhdr_t;
#define DOT11_ACTION_FRMHDR_LEN 2
/** CSA IE data structure */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_extcap_ie dot11_extcap_ie_t;
-#define DOT11_EXTCAP_LEN_MAX 8
-
#define DOT11_EXTCAP_LEN_COEX 1
#define DOT11_EXTCAP_LEN_BT 3
#define DOT11_EXTCAP_LEN_IW 4
#define DOT11_EXTCAP_LEN_TDLS_WBW 8
#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8
-BWL_PRE_PACKED_STRUCT struct dot11_extcap {
- uint8 extcap[DOT11_EXTCAP_LEN_MAX];
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_extcap dot11_extcap_t;
-
/* TDLS Capabilities */
#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */
/* 802.11h/802.11k Measurement Request/Report IEs */
/* Measurement Type field */
-#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */
-#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */
-#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */
-#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */
-#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */
-#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */
-#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */
-#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */
-#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */
-#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */
-#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */
+#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_MCDIAGS 10 /* d11 measurement multicast diagnostics */
+#define DOT11_MEASURE_TYPE_CIVICLOC 11 /* d11 measurement location civic */
+#define DOT11_MEASURE_TYPE_LOC_ID 12 /* d11 measurement location identifier */
+#define DOT11_MEASURE_TYPE_DIRCHANQ 13 /* d11 measurement dir channel quality */
+#define DOT11_MEASURE_TYPE_DIRMEAS 14 /* d11 measurement directional */
+#define DOT11_MEASURE_TYPE_DIRSTATS 15 /* d11 measurement directional stats */
+#define DOT11_MEASURE_TYPE_FTMRANGE 16 /* d11 measurement Fine Timing */
+#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */
/* Measurement Request Modes */
#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */
/* length of Measure Request IE data not including variable len */
#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */
+BWL_PRE_PACKED_STRUCT struct dot11_meas_req_loc {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ BWL_PRE_PACKED_STRUCT union
+ {
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subject;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT lci;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subject;
+ uint8 type; /* type of civic location */
+ uint8 siu; /* service interval units */
+ uint16 si; /* service interval */
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT civic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint16 max_init_delay; /* maximum random initial delay */
+ uint8 min_ap_count;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT ftm_range;
+ } BWL_POST_PACKED_STRUCT req;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_meas_req_loc dot11_meas_req_loc_t;
+#define DOT11_MNG_IE_MREQ_MIN_LEN 4 /* d11 measurement request IE minimum length */
+#define DOT11_MNG_IE_MREQ_LCI_FIXED_LEN 4 /* d11 LCI measurement request IE fixed length */
+#define DOT11_MNG_IE_MREQ_CIVIC_FIXED_LEN 8 /* d11 civic measurement request IE fixed length */
+#define DOT11_MNG_IE_MREQ_FRNG_FIXED_LEN 6 /* d11 FTM range measurement request IE fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_lci_subelement {
+ uint8 subelement;
+ uint8 length;
+ uint8 lci_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_lci_subelement dot11_lci_subelement_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_civic_subelement {
+ uint8 type; /* type of civic location */
+ uint8 subelement;
+ uint8 length;
+ uint8 civic_data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_civic_subelement dot11_civic_subelement_t;
+
BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
uint8 id;
uint8 len;
uint16 duration;
uint8 map;
} BWL_POST_PACKED_STRUCT basic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 subelement;
+ uint8 length;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT lci;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 type; /* type of civic location */
+ uint8 subelement;
+ uint8 length;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT civic;
+ BWL_PRE_PACKED_STRUCT struct {
+ uint8 entry_count;
+ uint8 data[1];
+ } BWL_POST_PACKED_STRUCT ftm_range;
uint8 data[1];
} BWL_POST_PACKED_STRUCT rep;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_meas_rep dot11_meas_rep_t;
+#define DOT11_MNG_IE_MREP_MIN_LEN 5 /* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_LCI_FIXED_LEN 5 /* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_CIVIC_FIXED_LEN 6 /* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_BASIC_FIXED_LEN 15 /* d11 measurement report IE length */
+#define DOT11_MNG_IE_MREP_FRNG_FIXED_LEN 4
/* length of Measure Report IE data not including variable len */
#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */
#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
+
/** WME Information Element (IE) */
BWL_PRE_PACKED_STRUCT struct wme_ie {
uint8 oui[3];
*/
#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */
#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */
-/* 12 is unused */
+
+/* 12 is unused by STA but could be used by AP/GO */
+#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */
+
/* 32-39 are QSTA specific reasons added in 11e */
#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */
#define DOT11_RC_TIMEOUT 39 /* timeout */
-#define DOT11_RC_MAX 23 /* Reason codes > 23 are reserved */
+#define DOT11_RC_MESH_PEERING_CANCELLED 52
+#define DOT11_RC_MESH_MAX_PEERS 53
+#define DOT11_RC_MESH_CONFIG_POLICY_VIOLN 54
+#define DOT11_RC_MESH_CLOSE_RECVD 55
+#define DOT11_RC_MESH_MAX_RETRIES 56
+#define DOT11_RC_MESH_CONFIRM_TIMEOUT 57
+#define DOT11_RC_MESH_INVALID_GTK 58
+#define DOT11_RC_MESH_INCONSISTENT_PARAMS 59
+
+#define DOT11_RC_MESH_INVALID_SEC_CAP 60
+#define DOT11_RC_MESH_PATHERR_NOPROXYINFO 61
+#define DOT11_RC_MESH_PATHERR_NOFWINFO 62
+#define DOT11_RC_MESH_PATHERR_DSTUNREACH 63
+#define DOT11_RC_MESH_MBSSMAC_EXISTS 64
+#define DOT11_RC_MESH_CHANSWITCH_REGREQ 65
+#define DOT11_RC_MESH_CHANSWITCH_UNSPEC 66
+
+#define DOT11_RC_MAX 66 /* Reason codes > 66 are reserved */
#define DOT11_RC_TDLS_PEER_UNREACH 25
#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */
#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */
#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */
+
+#define DOT11_SC_ANTICLOG_TOCKEN_REQUIRED 76 /* Anti-clogging token required */
+#define DOT11_SC_INVALID_FINITE_CYCLIC_GRP 77 /* Invalid finite cyclic group */
+
#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting
* station does not support VHT features.
*/
#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */
#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */
#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */
-#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */
+#define DOT11_MNG_MESH_CONFIG 113 /* Mesh Configuration */
+#define DOT11_MNG_MESH_ID 114 /* Mesh ID */
+#define DOT11_MNG_MESH_PEER_MGMT_ID 117 /* Mesh PEER MGMT IE */
+
+#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */
+#define DOT11_MNG_EXT_PREQ_ID 130 /* Mesh PREQ IE */
+#define DOT11_MNG_EXT_PREP_ID 131 /* Mesh PREP IE */
+#define DOT11_MNG_EXT_PERR_ID 132 /* Mesh PERR IE */
#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */
#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */
+#define DOT11_MNG_EXT_BSSLOAD_ID 193 /* d11 mgmt VHT extended bss load id */
#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */
#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */
#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */
#define DOT11_MNG_AID_ID 197 /* Association ID IE */
#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */
-
+#define DOT11_MNG_FTM_PARAMS_ID 206
#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */
#define DOT11_MNG_PROPR_ID 221
#define DOT11_EXT_CAP_FMS 11
/* proxy ARP service support bit position */
#define DOT11_EXT_CAP_PROXY_ARP 12
+/* Civic Location */
+#define DOT11_EXT_CAP_CIVIC_LOC 14
+/* Geospatial Location */
+#define DOT11_EXT_CAP_LCI 15
/* Traffic Filter Service */
#define DOT11_EXT_CAP_TFS 16
/* WNM-Sleep Mode */
#define DOT11_EXT_CAP_WNM_NOTIF 46
/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62
+/* Fine timing measurement - D3.0 */
+#define DOT11_EXT_CAP_FTM_RESPONDER 70
+#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */
+#ifdef WL_FTM
+#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */
+#else
+#define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! */
+#endif
+
+/* extended capability */
+#ifndef DOT11_EXTCAP_LEN_MAX
+#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
+#endif
+
+BWL_PRE_PACKED_STRUCT struct dot11_extcap {
+ uint8 extcap[DOT11_EXTCAP_LEN_MAX];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_extcap dot11_extcap_t;
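+/*
+ * A short sketch of setting one capability bit in the array above
+ * (illustrative only, not part of the original header). Bit index 62
+ * (DOT11_EXT_CAP_OPER_MODE_NOTIF) lands in byte 62 >> 3 == 7, bit 62 & 7 == 6,
+ * which DOT11_EXTCAP_LEN_MAX is sized to cover:
+ *
+ *	dot11_extcap_t ec;
+ *	memset(&ec, 0, sizeof(ec));
+ *	ec.extcap[DOT11_EXT_CAP_OPER_MODE_NOTIF >> 3] |= 1 << (DOT11_EXT_CAP_OPER_MODE_NOTIF & 7);
+ */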
/* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */
#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */
#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */
#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */
+#define DOT11_ACTION_CAT_MESH 13 /* category for Mesh */
+#define DOT11_ACTION_CAT_SELFPROT 15 /* category for Mesh, self protected */
#define DOT11_ACTION_NOTIFICATION 17
#define DOT11_ACTION_CAT_VHT 21 /* VHT action */
#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */
#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */
#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */
+#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */
+#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */
/* Block Ack action types */
#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */
typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */
+/* BTM transition reason */
+#define DOT11_BSSTRANS_REASON_UNSPECIFIED 0
+#define DOT11_BSSTRANS_REASON_EXC_FRAME_LOSS 1
+#define DOT11_BSSTRANS_REASON_EXC_TRAFFIC_DELAY 2
+#define DOT11_BSSTRANS_REASON_INSUFF_QOS_CAPACITY 3
+#define DOT11_BSSTRANS_REASON_FIRST_ASSOC 4
+#define DOT11_BSSTRANS_REASON_LOAD_BALANCING 5
+#define DOT11_BSSTRANS_REASON_BETTER_AP_FOUND 6
+#define DOT11_BSSTRANS_REASON_DEAUTH_RX 7
+#define DOT11_BSSTRANS_REASON_8021X_EAP_AUTH_FAIL 8
+#define DOT11_BSSTRANS_REASON_4WAY_HANDSHK_FAIL 9
+#define DOT11_BSSTRANS_REASON_MANY_REPLAYCNT_FAIL 10
+#define DOT11_BSSTRANS_REASON_MANY_DATAMIC_FAIL 11
+#define DOT11_BSSTRANS_REASON_EXCEED_MAX_RETRANS 12
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DISASSOC_RX 13
+#define DOT11_BSSTRANS_REASON_MANY_BCAST_DEAUTH_RX 14
+#define DOT11_BSSTRANS_REASON_PREV_TRANSITION_FAIL 15
+#define DOT11_BSSTRANS_REASON_LOW_RSSI 16
+#define DOT11_BSSTRANS_REASON_ROAM_FROM_NON_80211 17
+#define DOT11_BSSTRANS_REASON_RX_BTM_REQ 18
+#define DOT11_BSSTRANS_REASON_PREF_LIST_INCLUDED 19
+#define DOT11_BSSTRANS_REASON_LEAVING_ESS 20
+
/** BSS Management Transition Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
uint8 category; /* category of action frame (10) */
#define DOT11_RRM_CAP_BSSAAD 31
#define DOT11_RRM_CAP_BSSAAC 32
#define DOT11_RRM_CAP_AI 33
+#define DOT11_RRM_CAP_FTM_RANGE 34
+#define DOT11_RRM_CAP_CIVIC_LOC 35
+#define DOT11_RRM_CAP_LAST 35
/* Operating Class (formerly "Regulatory Class") definitions */
#define DOT11_OP_CLASS_NONE 255
#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */
#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */
+/* Reporting Information (reporting condition) element definition */
+#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */
+#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */
+
/* Sub-element IDs for Beacon Report */
#define DOT11_RMREP_BCN_FRM_BODY 1
+#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */
/* Sub-element IDs for Frame Report */
#define DOT11_RMREP_FRAME_COUNT_REPORT 1
-/** Channel load request */
+/* Statistics Group Report: Group IDs */
+#define DOT11_RRM_STATS_GRP_ID_0 0
+
+/* Statistics Group Report: Group Data length */
+#define DOT11_RRM_STATS_RPT_LEN_GRP_ID_0 28
+
+/* Channel load request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
uint8 id;
uint8 len;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
+enum {
+ DOT11_FTM_LOCATION_SUBJ_LOCAL = 0, /* Where am I? */
+ DOT11_FTM_LOCATION_SUBJ_REMOTE = 1, /* Where are you? */
+ DOT11_FTM_LOCATION_SUBJ_THIRDPARTY = 2 /* Where is he/she? */
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_lci {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 subj;
+
+ /* Following 3 fields are unused. Keep for ROM compatibility. */
+ uint8 lat_res;
+ uint8 lon_res;
+ uint8 alt_res;
+
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_lci dot11_rmreq_ftm_lci_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_lci {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 lci_sub_id;
+ uint8 lci_sub_len;
+ /* optional LCI field */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_lci dot11_rmrep_ftm_lci_t;
+
+#define DOT11_FTM_LCI_SUBELEM_ID 0
+#define DOT11_FTM_LCI_SUBELEM_LEN 2
+#define DOT11_FTM_LCI_FIELD_LEN 16
+#define DOT11_FTM_LCI_UNKNOWN_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_civic {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 subj;
+ uint8 civloc_type;
+ uint8 siu; /* service interval units */
+ uint16 si; /* service interval */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_civic dot11_rmreq_ftm_civic_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_civic {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 civloc_type;
+ uint8 civloc_sub_id;
+ uint8 civloc_sub_len;
+ /* optional location civic field */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_civic dot11_rmrep_ftm_civic_t;
+
+#define DOT11_FTM_CIVIC_LOC_TYPE_RFC4776 0
+#define DOT11_FTM_CIVIC_SUBELEM_ID 0
+#define DOT11_FTM_CIVIC_SUBELEM_LEN 2
+#define DOT11_FTM_CIVIC_LOC_SI_NONE 0
+#define DOT11_FTM_CIVIC_TYPE_LEN 1
+#define DOT11_FTM_CIVIC_UNKNOWN_LEN 3
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_subel {
+ uint8 id;
+ uint8 len;
+ uint16 max_age;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_subel dot11_ftm_range_subel_t;
+#define DOT11_FTM_RANGE_SUBELEM_ID 4
+#define DOT11_FTM_RANGE_SUBELEM_LEN 2
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmreq_ftm_range {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint16 max_init_delay; /* maximum random initial delay */
+ uint8 min_ap_count;
+ uint8 data[1];
+ /* neighbor report sub-elements */
+ /* optional sub-elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmreq_ftm_range dot11_rmreq_ftm_range_t;
+#define DOT11_RMREQ_FTM_RANGE_LEN 8
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_entry {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint16 range;
+ uint16 max_err;
+ uint8 rsvd;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_entry dot11_ftm_range_entry_t;
+#define DOT11_FTM_RANGE_ENTRY_MAX_COUNT 15
+
+enum {
+ DOT11_FTM_RANGE_ERROR_AP_INCAPABLE = 3,
+ DOT11_FTM_RANGE_ERROR_AP_FAILED = 4,
+ DOT11_FTM_RANGE_ERROR_TX_FAILED = 8,
+ DOT11_FTM_RANGE_ERROR_MAX
+};
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_range_error_entry {
+ uint32 start_tsf; /* 4 lsb of tsf */
+ struct ether_addr bssid;
+ uint8 code;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_range_error_entry dot11_ftm_range_error_entry_t;
+#define DOT11_FTM_RANGE_ERROR_ENTRY_MAX_COUNT 11
+
+BWL_PRE_PACKED_STRUCT struct dot11_rmrep_ftm_range {
+ uint8 id;
+ uint8 len;
+ uint8 token;
+ uint8 mode;
+ uint8 type;
+ uint8 entry_count;
+ uint8 data[2]; /* includes pad */
+ /*
+ dot11_ftm_range_entry_t entries[entry_count];
+ uint8 error_count;
+ dot11_ftm_error_entry_t errors[error_count];
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rmrep_ftm_range dot11_rmrep_ftm_range_t;
+
+#define DOT11_FTM_RANGE_REP_MIN_LEN 6 /* No extra byte for error_count */
+#define DOT11_FTM_RANGE_ENTRY_CNT_MAX 15
+#define DOT11_FTM_RANGE_ERROR_CNT_MAX 11
+#define DOT11_FTM_RANGE_REP_FIXED_LEN 1 /* No extra byte for error_count */
/** Measurement pause request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
uint8 id;
#define DOT11_NGBR_BI_CAP_IMM_BA 0x0200
#define DOT11_NGBR_BI_MOBILITY 0x0400
#define DOT11_NGBR_BI_HT 0x0800
+#define DOT11_NGBR_BI_VHT 0x1000
+#define DOT11_NGBR_BI_FTM 0x2000
/** Neighbor Report element (11k & 11v) */
BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */
#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */
#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */
+#define DOT11_BSSTYPE_MESH 3 /* d11 Mesh */
#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */
#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */
#define APHY_SERVICE_NBITS 16 /* APHY service nbits */
#define APHY_TAIL_NBITS 6 /* APHY tail nbits */
#define APHY_CWMIN 15 /* APHY cwmin */
+#define APHY_PHYHDR_DUR 20 /* APHY PHY Header Duration */
/* 802.11 B PHY constants */
#define BPHY_SLOT_TIME 20 /* BPHY slot time */
#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */
#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */
#define BPHY_CWMIN 31 /* BPHY cwmin */
+#define BPHY_SHORT_PHYHDR_DUR 96 /* BPHY Short PHY Header Duration */
+#define BPHY_LONG_PHYHDR_DUR 192 /* BPHY Long PHY Header Duration */
/* 802.11 G constants */
#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */
#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00
#define VHT_SIGA1_NSTS_SHIFT 10
+#define VHT_SIGA1_MAX_USERPOS 3
#define VHT_SIGA1_PARTIAL_AID_MASK 0x3fe000
#define VHT_SIGA1_PARTIAL_AID_SHIFT 13
#define BRCM_PROP_OUI "\x00\x90\x4C"
+/* Action frame type for FTM Initiator Report */
+#define BRCM_FTM_VS_AF_TYPE 14
+enum {
+ BRCM_FTM_VS_INITIATOR_RPT_SUBTYPE = 1, /* FTM Initiator Report */
+ BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */
+};
+
/* Action frame type for RWL */
#define RWL_WIFI_DEFAULT 0
#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */
#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */
#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/*
- * This BRCM_PROP_OUI types is intended for use in events to embed additional
- * data, and would not be expected to appear on the air -- but having an IE
- * format allows IE frame data with extra data in events in that allows for
- * more flexible parsing.
- */
-#define BRCM_EVT_WL_BSS_INFO 64
-
-/**
- * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
- * DPT uses this format with type set to DPT_IE_TYPE
- */
-BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
- uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
- uint8 len; /* IE length */
- uint8 oui[3];
- uint8 type; /* type of this IE */
- uint16 cap; /* DPT capabilities */
-} BWL_POST_PACKED_STRUCT;
-typedef struct brcm_prop_ie_s brcm_prop_ie_t;
-
-#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */
-
-#define DPT_IE_TYPE 2
-#define BRCM_SYSCAP_IE_TYPE 3
-#define WET_TUNNEL_IE_TYPE 3
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
/* brcm syscap_ie cap */
#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */
#define BRF_BLOCKACK 0x8 /* BlockACK capable */
#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */
#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */
+#define BRF_MEDIA_CLIENT 0x20 /* re-use afterburner bit to indicate media client device */
#define GET_BRF_PROP_11N_MCS(brcm_ie) \
(!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
/* brcm_ie flags1 */
#define BRF1_AMSDU 0x1 /* A-MSDU capable */
+#define BRF1_WNM 0x2 /* WNM capable */
#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */
#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */
#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */
#define RELMCAST_BRCM_PROP_IE_TYPE 55
+/* BRCM BTC IE */
+BWL_PRE_PACKED_STRUCT struct btc_brcm_prop_ie {
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+	uint8 type;           /* type indicates what follows */
+ uint32 info;
+} BWL_POST_PACKED_STRUCT;
+typedef struct btc_brcm_prop_ie btc_brcm_prop_ie_t;
+
+#define BTC_INFO_BRCM_PROP_IE_TYPE 90
+#define BRCM_BTC_INFO_TYPE_LEN (sizeof(btc_brcm_prop_ie_t) - (2 * sizeof(uint8)))
+
/* ************* HT definitions. ************* */
#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */
#define MAX_MCS_NUM (128) /* max mcs number = 128 */
#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */
/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT rates bitmap */
+#define VHT_CAP_MCS_0_7_RATEMAP 0x00ff
+#define VHT_CAP_MCS_0_8_RATEMAP 0x01ff
+#define VHT_CAP_MCS_0_9_RATEMAP 0x03ff
+#define VHT_CAP_MCS_FULL_RATEMAP VHT_CAP_MCS_0_9_RATEMAP
+
+#define VHT_PROP_MCS_MAP_10_11 0
+#define VHT_PROP_MCS_MAP_UNUSED1 1
+#define VHT_PROP_MCS_MAP_UNUSED2 2
+#define VHT_PROP_MCS_MAP_NONE 3
+#define VHT_PROP_MCS_MAP_NONE_ALL 0xffff
+
+/* VHT prop rates bitmap */
+#define VHT_PROP_MCS_10_11_RATEMAP 0x0c00
+#define VHT_PROP_MCS_FULL_RATEMAP VHT_PROP_MCS_10_11_RATEMAP
+
+#if !defined(VHT_CAP_MCS_MAP_0_9_NSS3)
/* mcsmap with MCS0-9 for Nss = 3 */
#define VHT_CAP_MCS_MAP_0_9_NSS3 \
((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
(VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
(VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(3)))
+#endif /* !VHT_CAP_MCS_MAP_0_9_NSS3 */
#define VHT_CAP_MCS_MAP_NSS_MAX 8
/* Map the mcs code to mcs bit map */
#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
- ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? 0xff : \
- (mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \
- (mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0)
+ ((mcs_code == VHT_CAP_MCS_MAP_0_7) ? VHT_CAP_MCS_0_7_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_8) ? VHT_CAP_MCS_0_8_RATEMAP : \
+ (mcs_code == VHT_CAP_MCS_MAP_0_9) ? VHT_CAP_MCS_0_9_RATEMAP : 0)
+
+#define VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(mcs_code) \
+ ((mcs_code == VHT_PROP_MCS_MAP_10_11) ? VHT_PROP_MCS_10_11_RATEMAP : 0)
/* Map the mcs bit map to mcs code */
#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
- ((mcs_map == 0xff) ? VHT_CAP_MCS_MAP_0_7 : \
- (mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \
- (mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+ ((mcs_map == VHT_CAP_MCS_0_7_RATEMAP) ? VHT_CAP_MCS_MAP_0_7 : \
+ (mcs_map == VHT_CAP_MCS_0_8_RATEMAP) ? VHT_CAP_MCS_MAP_0_8 : \
+ (mcs_map == VHT_CAP_MCS_0_9_RATEMAP) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
+
+#define VHT_PROP_MCS_MAP_TO_PROP_MCS_CODE(mcs_map) \
+ (((mcs_map & 0xc00) == 0xc00) ? VHT_PROP_MCS_MAP_10_11 : VHT_PROP_MCS_MAP_NONE)
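+/*
+ * Worked example for the mapping macros above (illustrative; the values
+ * follow directly from the rate-map defines earlier in this block):
+ *
+ *	uint16 map  = VHT_MCS_CODE_TO_MCS_MAP(VHT_CAP_MCS_MAP_0_9);              // 0x03ff
+ *	uint8  code = VHT_MCS_MAP_TO_MCS_CODE(map);                              // VHT_CAP_MCS_MAP_0_9 again
+ *	uint16 prop = VHT_PROP_MCS_CODE_TO_PROP_MCS_MAP(VHT_PROP_MCS_MAP_10_11); // 0x0c00
+ */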
/** VHT Capabilities Supported Channel Width */
typedef enum vht_cap_chan_width {
#define RSN_AKM_PSK 2 /* Pre-shared Key */
#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are no longer used;
+ * they are kept here only to avoid build issues in the BISON/CARIBOU branches.
+ */
#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
-#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
/* OSEN authenticated key management suite */
#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
#define WCN_TYPE 4 /* WCN type */
-#ifdef BCMWAPI_WPI
-#define SMS4_KEY_LEN 16
-#define SMS4_WPI_CBC_MAC_LEN 16
-#endif
/* 802.11r protocol definitions */
} BWL_POST_PACKED_STRUCT;
typedef struct mmic_ie mmic_ie_t;
+/* 802.11r-2008, 11A.10.3 - RRB frame format */
+BWL_PRE_PACKED_STRUCT struct dot11_ft_rrb_frame {
+ uint8 frame_type; /* 1 for RRB */
+ uint8 packet_type; /* 0 for Request 1 for Response */
+ uint16 len;
+ uint8 cur_ap_addr[ETHER_ADDR_LEN];
+ uint8 data[1]; /* IEs Received/Sent in FT Action Req/Resp Frame */
+} BWL_POST_PACKED_STRUCT;
+
+typedef struct dot11_ft_rrb_frame dot11_ft_rrb_frame_t;
+
+#define DOT11_FT_RRB_FIXED_LEN 10
+#define DOT11_FT_REMOTE_FRAME_TYPE 1
+#define DOT11_FT_PACKET_REQ 0
+#define DOT11_FT_PACKET_RESP 1
+
#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
-#ifdef BCMWAPI_WAI
-#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */
-#define WAPI_VERSION 1 /* WAPI version */
-#define WAPI_VERSION_LEN 2 /* WAPI version length */
-#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */
-#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */
-#endif /* BCMWAPI_WAI */
/* ************* WMM Parameter definitions. ************* */
#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */
#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12
#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13
+/* FTM - fine timing measurement public action frames */
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_req {
+ uint8 category; /* category of action frame (4) */
+ uint8 action; /* public action (32) */
+ uint8 trigger; /* trigger/continue? */
+ /* optional lci, civic loc, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_req dot11_ftm_req_t;
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm {
+ uint8 category; /* category of action frame (4) */
+ uint8 action; /* public action (33) */
+ uint8 dialog; /* dialog token */
+ uint8 follow_up; /* follow up dialog token */
+ uint8 tod[6]; /* t1 - last depart timestamp */
+ uint8 toa[6]; /* t4 - last ack arrival timestamp */
+ uint8 tod_err[2]; /* t1 error */
+ uint8 toa_err[2]; /* t4 error */
+ /* optional lci report, civic loc report, ftm params */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm dot11_ftm_t;
+
+#define DOT11_FTM_ERR_NOT_CONT_OFFSET 0
+#define DOT11_FTM_ERR_NOT_CONT_MASK 0x0001
+#define DOT11_FTM_ERR_NOT_CONT_SHIFT 0
+#define DOT11_FTM_ERR_NOT_CONT(_err) (((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & \
+ DOT11_FTM_ERR_NOT_CONT_MASK) >> DOT11_FTM_ERR_NOT_CONT_SHIFT)
+#define DOT11_FTM_ERR_SET_NOT_CONT(_err, _val) do {\
+ uint8 _err2 = (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET]; \
+ _err2 &= ~DOT11_FTM_ERR_NOT_CONT_MASK; \
+ _err2 |= ((_val) << DOT11_FTM_ERR_NOT_CONT_SHIFT) & DOT11_FTM_ERR_NOT_CONT_MASK; \
+ (_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] = _err2; \
+} while (0)
+
+#define DOT11_FTM_ERR_MAX_ERR_OFFSET 0
+#define DOT11_FTM_ERR_MAX_ERR_MASK 0xfff7
+#define DOT11_FTM_ERR_MAX_ERR_SHIFT 1
+#define DOT11_FTM_ERR_MAX_ERR(_err) ((((_err)[1] << 7) | (_err)[0]) >> 1)
+#define DOT11_FTM_ERR_SET_MAX_ERR(_err, _val) do {\
+ uint16 _val2; \
+ _val2 = (((_val) << DOT11_FTM_ERR_MAX_ERR_SHIFT) |\
+ ((_err)[DOT11_FTM_ERR_NOT_CONT_OFFSET] & DOT11_FTM_ERR_NOT_CONT_MASK)); \
+ (_err)[0] = _val2 & 0xff; \
+ (_err)[1] = _val2 >> 8 & 0xff; \
+} while (0)
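+/*
+ * A small sketch of the error-field helpers above (assumed usage, not from
+ * the original sources): tod_err/toa_err are two-byte little-endian fields
+ * with the "not continuous" flag in bit 0 and the max error value above it.
+ *
+ *	uint8 tod_err[2] = {0, 0};
+ *	DOT11_FTM_ERR_SET_NOT_CONT(tod_err, 1);
+ *	DOT11_FTM_ERR_SET_MAX_ERR(tod_err, 0x20);
+ *	// DOT11_FTM_ERR_NOT_CONT(tod_err) now returns 1 and
+ *	// DOT11_FTM_ERR_MAX_ERR(tod_err) returns 0x20.
+ */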
+
+BWL_PRE_PACKED_STRUCT struct dot11_ftm_params {
+	uint8 id;              /* DOT11_MNG_FTM_PARAMS_ID 8.4.2.166 11mcd2.6/2014 - revisit */
+ uint8 len;
+ uint8 info[9];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_ftm_params dot11_ftm_params_t;
+#define DOT11_FTM_PARAMS_IE_LEN (sizeof(dot11_ftm_params_t) - 2)
+
+#define FTM_PARAMS_FIELD(_p, _off, _mask, _shift) (((_p)->info[(_off)] & (_mask)) >> (_shift))
+#define FTM_PARAMS_SET_FIELD(_p, _off, _mask, _shift, _val) do {\
+ uint8 _ptmp = (_p)->info[_off] & ~(_mask); \
+ (_p)->info[(_off)] = _ptmp | (((_val) << (_shift)) & (_mask)); \
+} while (0)
+
+#define FTM_PARAMS_STATUS_OFFSET 0
+#define FTM_PARAMS_STATUS_MASK 0x03
+#define FTM_PARAMS_STATUS_SHIFT 0
+#define FTM_PARAMS_STATUS(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_STATUS_OFFSET, \
+ FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT)
+#define FTM_PARAMS_SET_STATUS(_p, _status) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_STATUS_OFFSET, FTM_PARAMS_STATUS_MASK, FTM_PARAMS_STATUS_SHIFT, _status)
+
+#define FTM_PARAMS_VALUE_OFFSET 0
+#define FTM_PARAMS_VALUE_MASK 0x7c
+#define FTM_PARAMS_VALUE_SHIFT 2
+#define FTM_PARAMS_VALUE(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_VALUE_OFFSET, \
+ FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT)
+#define FTM_PARAMS_SET_VALUE(_p, _value) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_VALUE_OFFSET, FTM_PARAMS_VALUE_MASK, FTM_PARAMS_VALUE_SHIFT, _value)
+#define FTM_PARAMS_MAX_VALUE 32
+
+#define FTM_PARAMS_NBURSTEXP_OFFSET 1
+#define FTM_PARAMS_NBURSTEXP_MASK 0x0f
+#define FTM_PARAMS_NBURSTEXP_SHIFT 0
+#define FTM_PARAMS_NBURSTEXP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_NBURSTEXP_OFFSET, \
+ FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT)
+#define FTM_PARAMS_SET_NBURSTEXP(_p, _bexp) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_NBURSTEXP_OFFSET, FTM_PARAMS_NBURSTEXP_MASK, FTM_PARAMS_NBURSTEXP_SHIFT, \
+ _bexp)
+
+#define FTM_PARAMS_NBURST(_p) (1 << FTM_PARAMS_NBURSTEXP(_p))
+
+enum {
+ FTM_PARAMS_BURSTTMO_NOPREF = 15
+};
+
+#define FTM_PARAMS_BURSTTMO_OFFSET 1
+#define FTM_PARAMS_BURSTTMO_MASK 0xf0
+#define FTM_PARAMS_BURSTTMO_SHIFT 4
+#define FTM_PARAMS_BURSTTMO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_BURSTTMO_OFFSET, \
+ FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT)
+/* set timeout in params using _tmo where timeout = 2^(_tmo) * 250us */
+#define FTM_PARAMS_SET_BURSTTMO(_p, _tmo) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_BURSTTMO_OFFSET, FTM_PARAMS_BURSTTMO_MASK, FTM_PARAMS_BURSTTMO_SHIFT, (_tmo)+2)
+
+#define FTM_PARAMS_BURSTTMO_USEC(_val) ((1 << ((_val)-2)) * 250)
+#define FTM_PARAMS_BURSTTMO_VALID(_val) ((((_val) < 12 && (_val) > 1)) || \
+ (_val) == FTM_PARAMS_BURSTTMO_NOPREF)
+#define FTM_PARAMS_BURSTTMO_MAX_MSEC 128 /* 2^9 * 250us */
+#define FTM_PARAMS_BURSTTMO_MAX_USEC 128000 /* 2^9 * 250us */
+
+#define FTM_PARAMS_MINDELTA_OFFSET 2
+#define FTM_PARAMS_MINDELTA_USEC(_p) ((_p)->info[FTM_PARAMS_MINDELTA_OFFSET] * 100)
+#define FTM_PARAMS_SET_MINDELTA_USEC(_p, _delta) do { \
+ (_p)->info[FTM_PARAMS_MINDELTA_OFFSET] = (_delta) / 100; \
+} while (0)
+
+#define FTM_PARAMS_PARTIAL_TSF(_p) ((_p)->info[4] << 8 | (_p)->info[3])
+#define FTM_PARAMS_SET_PARTIAL_TSF(_p, _partial_tsf) do { \
+ (_p)->info[3] = (_partial_tsf) & 0xff; \
+ (_p)->info[4] = ((_partial_tsf) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_PARTIAL_TSF_MASK 0x0000000003fffc00ULL
+#define FTM_PARAMS_PARTIAL_TSF_SHIFT 10
+#define FTM_PARAMS_PARTIAL_TSF_BIT_LEN 16
+#define FTM_PARAMS_PARTIAL_TSF_MAX 0xffff
+
+#define FTM_PARAMS_ASAP_OFFSET 5
+#define FTM_PARAMS_ASAP_MASK 0x4
+#define FTM_PARAMS_ASAP_SHIFT 2
+#define FTM_PARAMS_ASAP(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_ASAP_OFFSET, \
+ FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT)
+#define FTM_PARAMS_SET_ASAP(_p, _asap) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_ASAP_OFFSET, FTM_PARAMS_ASAP_MASK, FTM_PARAMS_ASAP_SHIFT, _asap)
+
+#define FTM_PARAMS_FTM1_OFFSET 5
+#define FTM_PARAMS_FTM1_MASK 0x02
+#define FTM_PARAMS_FTM1_SHIFT 1
+#define FTM_PARAMS_FTM1(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTM1_OFFSET, \
+ FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT)
+#define FTM_PARAMS_SET_FTM1(_p, _ftm1) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_FTM1_OFFSET, FTM_PARAMS_FTM1_MASK, FTM_PARAMS_FTM1_SHIFT, _ftm1)
+
+#define FTM_PARAMS_FTMS_PER_BURST_OFFSET 5
+#define FTM_PARAMS_FTMS_PER_BURST_MASK 0xf8
+#define FTM_PARAMS_FTMS_PER_BURST_SHIFT 3
+#define FTM_PARAMS_FTMS_PER_BURST(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_FTMS_PER_BURST_OFFSET, \
+ FTM_PARAMS_FTMS_PER_BURST_MASK, FTM_PARAMS_FTMS_PER_BURST_SHIFT)
+#define FTM_PARAMS_SET_FTMS_PER_BURST(_p, _nftms) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_FTMS_PER_BURST_OFFSET, FTM_PARAMS_FTMS_PER_BURST_MASK, \
+ FTM_PARAMS_FTMS_PER_BURST_SHIFT, _nftms)
+
+#define FTM_PARAMS_CHAN_INFO_OFFSET 6
+#define FTM_PARAMS_CHAN_INFO_MASK 0xfc
+#define FTM_PARAMS_CHAN_INFO_SHIFT 2
+#define FTM_PARAMS_CHAN_INFO(_p) FTM_PARAMS_FIELD(_p, FTM_PARAMS_CHAN_INFO_OFFSET, \
+ FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT)
+#define FTM_PARAMS_SET_CHAN_INFO(_p, _ci) FTM_PARAMS_SET_FIELD(_p, \
+ FTM_PARAMS_CHAN_INFO_OFFSET, FTM_PARAMS_CHAN_INFO_MASK, FTM_PARAMS_CHAN_INFO_SHIFT, _ci)
+
+/* burst period - units of 100ms */
+#define FTM_PARAMS_BURST_PERIOD(_p) (((_p)->info[8] << 8) | (_p)->info[7])
+#define FTM_PARAMS_SET_BURST_PERIOD(_p, _bp) do {\
+ (_p)->info[7] = (_bp) & 0xff; \
+ (_p)->info[8] = ((_bp) >> 8) & 0xff; \
+} while (0)
+
+#define FTM_PARAMS_BURST_PERIOD_MS(_p) (FTM_PARAMS_BURST_PERIOD(_p) * 100)
+
+/* FTM status values - last updated from 11mcD4.0 */
+enum {
+ FTM_PARAMS_STATUS_RESERVED = 0,
+ FTM_PARAMS_STATUS_SUCCESSFUL = 1,
+ FTM_PARAMS_STATUS_INCAPABLE = 2,
+ FTM_PARAMS_STATUS_FAILED = 3,
+	/* Below are obsolete */
+ FTM_PARAMS_STATUS_OVERRIDDEN = 4,
+ FTM_PARAMS_STATUS_ASAP_INCAPABLE = 5,
+ FTM_PARAMS_STATUS_ASAP_FAILED = 6,
+ /* rest are reserved */
+};
+
+enum {
+ FTM_PARAMS_CHAN_INFO_NO_PREF = 0,
+ FTM_PARAMS_CHAN_INFO_RESERVE1 = 1,
+ FTM_PARAMS_CHAN_INFO_RESERVE2 = 2,
+ FTM_PARAMS_CHAN_INFO_RESERVE3 = 3,
+ FTM_PARAMS_CHAN_INFO_NON_HT_5 = 4,
+ FTM_PARAMS_CHAN_INFO_RESERVE5 = 5,
+ FTM_PARAMS_CHAN_INFO_NON_HT_10 = 6,
+ FTM_PARAMS_CHAN_INFO_RESERVE7 = 7,
+ FTM_PARAMS_CHAN_INFO_NON_HT_20 = 8, /* excludes 2.4G, and High rate DSSS */
+ FTM_PARAMS_CHAN_INFO_HT_MF_20 = 9,
+ FTM_PARAMS_CHAN_INFO_VHT_20 = 10,
+ FTM_PARAMS_CHAN_INFO_HT_MF_40 = 11,
+ FTM_PARAMS_CHAN_INFO_VHT_40 = 12,
+ FTM_PARAMS_CHAN_INFO_VHT_80 = 13,
+ FTM_PARAMS_CHAN_INFO_VHT_80_80 = 14,
+ FTM_PARAMS_CHAN_INFO_VHT_160_2_RFLOS = 15,
+ FTM_PARAMS_CHAN_INFO_VHT_160 = 16,
+ /* Reserved from 17 - 30 */
+ FTM_PARAMS_CHAN_INFO_DMG_2160 = 31,
+ /* Reserved from 32 - 63 */
+ FTM_PARAMS_CHAN_INFO_MAX = 63
+};
+
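+/*
+ * A fill-in sketch for the FTM parameters accessors above (illustrative
+ * only; the numeric choices are arbitrary examples, not defaults from any
+ * particular firmware):
+ *
+ *	dot11_ftm_params_t params;
+ *	memset(&params, 0, sizeof(params));
+ *	params.id = DOT11_MNG_FTM_PARAMS_ID;
+ *	params.len = DOT11_FTM_PARAMS_IE_LEN;
+ *	FTM_PARAMS_SET_STATUS(&params, FTM_PARAMS_STATUS_SUCCESSFUL);
+ *	FTM_PARAMS_SET_NBURSTEXP(&params, 2);      // 2^2 = 4 bursts
+ *	FTM_PARAMS_SET_BURSTTMO(&params, 5);       // 2^5 * 250us = 8 ms per burst
+ *	FTM_PARAMS_SET_FTMS_PER_BURST(&params, 4);
+ *	FTM_PARAMS_SET_ASAP(&params, 1);
+ *	FTM_PARAMS_SET_CHAN_INFO(&params, FTM_PARAMS_CHAN_INFO_VHT_80);
+ *	FTM_PARAMS_SET_BURST_PERIOD(&params, 10);  // 10 * 100 ms = 1 s between bursts
+ */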
/* 802.11u interworking access network options */
-#define IW_ANT_MASK 0x0f
-#define IW_INTERNET_MASK 0x10
-#define IW_ASRA_MASK 0x20
-#define IW_ESR_MASK 0x40
-#define IW_UESA_MASK 0x80
+#define IW_ANT_MASK 0x0f
+#define IW_INTERNET_MASK 0x10
+#define IW_ASRA_MASK 0x20
+#define IW_ESR_MASK 0x40
+#define IW_UESA_MASK 0x80
/* 802.11u interworking access network type */
-#define IW_ANT_PRIVATE_NETWORK 0
+#define IW_ANT_PRIVATE_NETWORK 0
#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1
#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2
-#define IW_ANT_FREE_PUBLIC_NETWORK 3
+#define IW_ANT_FREE_PUBLIC_NETWORK 3
#define IW_ANT_PERSONAL_DEVICE_NETWORK 4
#define IW_ANT_EMERGENCY_SERVICES_NETWORK 5
-#define IW_ANT_TEST_NETWORK 14
-#define IW_ANT_WILDCARD_NETWORK 15
+#define IW_ANT_TEST_NETWORK 14
+#define IW_ANT_WILDCARD_NETWORK 15
/* 802.11u advertisement protocol */
-#define ADVP_ANQP_PROTOCOL_ID 0
+#define ADVP_ANQP_PROTOCOL_ID 0
+#define ADVP_MIH_PROTOCOL_ID 1
/* 802.11u advertisement protocol masks */
#define ADVP_QRL_MASK 0x7f
/* 802.11u advertisement protocol values */
#define ADVP_QRL_REQUEST 0x00
#define ADVP_QRL_RESPONSE 0x7f
-#define ADVP_PAME_BI_DEPENDENT 0x00
-#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK
+#define ADVP_PAME_BI_DEPENDENT 0x00
+#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK
/* 802.11u ANQP information ID */
-#define ANQP_ID_QUERY_LIST 256
-#define ANQP_ID_CAPABILITY_LIST 257
-#define ANQP_ID_VENUE_NAME_INFO 258
-#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259
+#define ANQP_ID_QUERY_LIST 256
+#define ANQP_ID_CAPABILITY_LIST 257
+#define ANQP_ID_VENUE_NAME_INFO 258
+#define ANQP_ID_EMERGENCY_CALL_NUMBER_INFO 259
#define ANQP_ID_NETWORK_AUTHENTICATION_TYPE_INFO 260
-#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261
+#define ANQP_ID_ROAMING_CONSORTIUM_LIST 261
#define ANQP_ID_IP_ADDRESS_TYPE_AVAILABILITY_INFO 262
-#define ANQP_ID_NAI_REALM_LIST 263
-#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264
-#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265
-#define ANQP_ID_AP_CIVIC_LOCATION 266
-#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267
-#define ANQP_ID_DOMAIN_NAME_LIST 268
-#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269
-#define ANQP_ID_EMERGENCY_NAI 271
-#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797
+#define ANQP_ID_NAI_REALM_LIST 263
+#define ANQP_ID_G3PP_CELLULAR_NETWORK_INFO 264
+#define ANQP_ID_AP_GEOSPATIAL_LOCATION 265
+#define ANQP_ID_AP_CIVIC_LOCATION 266
+#define ANQP_ID_AP_LOCATION_PUBLIC_ID_URI 267
+#define ANQP_ID_DOMAIN_NAME_LIST 268
+#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269
+#define ANQP_ID_EMERGENCY_NAI 271
+#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797
/* 802.11u ANQP OUI */
-#define ANQP_OUI_SUBTYPE 9
+#define ANQP_OUI_SUBTYPE 9
/* 802.11u venue name */
-#define VENUE_LANGUAGE_CODE_SIZE 3
+#define VENUE_LANGUAGE_CODE_SIZE 3
#define VENUE_NAME_SIZE 255
/* 802.11u venue groups */
#define VENUE_OUTDOOR 11
/* 802.11u network authentication type indicator */
-#define NATI_UNSPECIFIED -1
-#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0
-#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1
-#define NATI_HTTP_HTTPS_REDIRECTION 2
-#define NATI_DNS_REDIRECTION 3
+#define NATI_UNSPECIFIED -1
+#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0
+#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1
+#define NATI_HTTP_HTTPS_REDIRECTION 2
+#define NATI_DNS_REDIRECTION 3
/* 802.11u IP address type availability - IPv6 */
-#define IPA_IPV6_SHIFT 0
-#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT)
+#define IPA_IPV6_SHIFT 0
+#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT)
#define IPA_IPV6_NOT_AVAILABLE 0x00
-#define IPA_IPV6_AVAILABLE 0x01
-#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02
+#define IPA_IPV6_AVAILABLE 0x01
+#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02
/* 802.11u IP address type availability - IPv4 */
-#define IPA_IPV4_SHIFT 2
-#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT)
+#define IPA_IPV4_SHIFT 2
+#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT)
#define IPA_IPV4_NOT_AVAILABLE 0x00
-#define IPA_IPV4_PUBLIC 0x01
+#define IPA_IPV4_PUBLIC 0x01
#define IPA_IPV4_PORT_RESTRICT 0x02
-#define IPA_IPV4_SINGLE_NAT 0x03
-#define IPA_IPV4_DOUBLE_NAT 0x04
-#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05
-#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06
-#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07
+#define IPA_IPV4_SINGLE_NAT 0x03
+#define IPA_IPV4_DOUBLE_NAT 0x04
+#define IPA_IPV4_PORT_RESTRICT_SINGLE_NAT 0x05
+#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06
+#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07
/* 802.11u NAI realm encoding */
-#define REALM_ENCODING_RFC4282 0
-#define REALM_ENCODING_UTF8 1
+#define REALM_ENCODING_RFC4282 0
+#define REALM_ENCODING_UTF8 1
/* 802.11u IANA EAP method type numbers */
#define REALM_EAP_TLS 13
#define REALM_EAP_EXPANDED 254
/* 802.11u authentication ID */
-#define REALM_EXPANDED_EAP 1
+#define REALM_EXPANDED_EAP 1
#define REALM_NON_EAP_INNER_AUTHENTICATION 2
#define REALM_INNER_AUTHENTICATION_EAP 3
-#define REALM_EXPANDED_INNER_EAP 4
-#define REALM_CREDENTIAL 5
+#define REALM_EXPANDED_INNER_EAP 4
+#define REALM_CREDENTIAL 5
#define REALM_TUNNELED_EAP_CREDENTIAL 6
-#define REALM_VENDOR_SPECIFIC_EAP 221
+#define REALM_VENDOR_SPECIFIC_EAP 221
/* 802.11u non-EAP inner authentication type */
-#define REALM_RESERVED_AUTH 0
+#define REALM_RESERVED_AUTH 0
#define REALM_PAP 1
#define REALM_CHAP 2
-#define REALM_MSCHAP 3
-#define REALM_MSCHAPV2 4
+#define REALM_MSCHAP 3
+#define REALM_MSCHAPV2 4
/* 802.11u credential type */
#define REALM_SIM 1
#define REALM_USIM 2
#define REALM_NFC 3
-#define REALM_HARDWARE_TOKEN 4
-#define REALM_SOFTOKEN 5
-#define REALM_CERTIFICATE 6
-#define REALM_USERNAME_PASSWORD 7
-#define REALM_SERVER_SIDE 8
-#define REALM_RESERVED_CRED 9
-#define REALM_VENDOR_SPECIFIC_CRED 10
+#define REALM_HARDWARE_TOKEN 4
+#define REALM_SOFTOKEN 5
+#define REALM_CERTIFICATE 6
+#define REALM_USERNAME_PASSWORD 7
+#define REALM_SERVER_SIDE 8
+#define REALM_RESERVED_CRED 9
+#define REALM_VENDOR_SPECIFIC_CRED 10
/* 802.11u 3GPP PLMN */
-#define G3PP_GUD_VERSION 0
-#define G3PP_PLMN_LIST_IE 0
+#define G3PP_GUD_VERSION 0
+#define G3PP_PLMN_LIST_IE 0
/** hotspot2.0 indication element (vendor specific) */
BWL_PRE_PACKED_STRUCT struct hs20_ie {
/** IEEE 802.11 Annex E */
typedef enum {
- DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */
- DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */
- DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */
- DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */
- DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */
- DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */
- DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */
+ DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */
+ DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */
+ DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */
+ DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */
+ DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */
+ DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */
+ DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */
DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */
DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */
- DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */
- DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */
+ DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */
+ DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */
DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */
DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */
- DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */
- DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */
- DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */
+ DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */
+ DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */
+ DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */
} dot11_op_class_t;
/* QoS map */
/*
* BT-AMP (BlueTooth Alternate Mac and Phy) 802.11 PAL (Protocol Adaptation Layer)
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: 802.11_bta.h 382882 2013-02-04 23:24:31Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11_bta.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _802_11_BTA_H_
/*
* 802.11e protocol header file
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: 802.11e.h 382883 2013-02-04 23:26:09Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.11e.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _802_11e_H_
#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
-#ifdef BCMCCX
-#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */
-#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */
-#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */
-#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */
-#endif /* BCMCCX */
/* 802.11e DELTS status code */
#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental types and constants relating to 802.1D
*
- * $Id: 802.1d.h 382882 2013-02-04 23:24:31Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.1d.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _802_1_D_
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental constants relating to 802.3
*
- * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: 802.3.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _802_3_h_
/*
- * Copyright (C) 2014, Broadcom Corporation
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * Copyright (C) 2016, Broadcom Corporation
* All Rights Reserved.
*
* This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
* or duplicated in any form, in whole or in part, without the prior
* written permission of Broadcom Corporation.
*
- * Fundamental constants relating to DHCP Protocol
*
- * $Id: bcmdhcp.h 382883 2013-02-04 23:26:09Z $
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id: bcmdhcp.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bcmdhcp_h_
/*
* Broadcom Ethernettype protocol definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmeth.h 445746 2013-12-30 12:57:26Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmeth.h 518342 2014-12-01 23:21:41Z $
*/
/*
* Broadcom Event protocol definitions
*
- * $Copyright Open Broadcom Corporation$
- *
* Dependencies: proto/bcmeth.h
*
- * $Id: bcmevent.h 505096 2014-09-26 12:49:04Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmevent.h 555154 2015-05-07 20:46:07Z $
*
*/
#define WLC_E_IBSS_ASSOC 39
#define WLC_E_RADIO 40
#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */
-#if defined(BCMCCX) && defined(CCX_SDK)
-#define WLC_E_CCX_ASSOC_START 42 /* CCX association start */
-#define WLC_E_CCX_ASSOC_ABORT 43 /* CCX association abort */
-#endif /* BCMCCX && CCX_SDK */
#define WLC_E_PROBREQ_MSG 44 /* probe request received */
#define WLC_E_SCAN_CONFIRM_IND 45
#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */
#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */
#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */
#define WLC_E_TRACE 52
-#ifdef WLBTAMP
-#define WLC_E_BTA_HCI_EVENT 53 /* BT-AMP HCI event */
-#endif
#define WLC_E_IF 54 /* I/F change (for dongle host notification) */
#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */
#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */
-#define WLC_E_PFN_SCAN_COMPLETE 57 /* PFN completed scan of network list */
-/* PFN best network batching event, re-use obsolete WLC_E_PFN_SCAN_COMPLETE */
-#define WLC_E_PFN_BEST_BATCHING 57
+#define WLC_E_PFN_BEST_BATCHING 57 /* PFN best network batching event */
#define WLC_E_EXTLOG_MSG 58
#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */
#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */
#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */
#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, reusing event 110 */
#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */
+#define WLC_E_MSCH 120 /* Multiple channel scheduler event */
#define WLC_E_CSA_START_IND 121
#define WLC_E_CSA_DONE_IND 122
#define WLC_E_CSA_FAILURE_IND 123
#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */
#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */
#define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */
-#define WLC_E_RMC_EVENT 139 /* RMC event */
-#define WLC_E_LAST 140 /* highest val + 1 for range checking */
-
-#if (WLC_E_LAST > 140)
-#error "WLC_E_LAST: Invalid value for last event; must be <= 140."
+#define WLC_E_AUTHORIZED 136 /* a STA has been authorized for traffic */
+#define WLC_E_PROBREQ_MSG_RX 137 /* probe req with wl_event_rx_frame_data_t header */
+#define WLC_E_PFN_SCAN_COMPLETE 138 /* PFN completed scan of network list */
+#define WLC_E_RMC_EVENT 139 /* RMC Event */
+#define WLC_E_DPSTA_INTF_IND 140 /* DPSTA interface indication */
+#define WLC_E_RRM 141 /* RRM Event */
+#define WLC_E_PFN_SSID_EXT 142 /* SSID EXT event */
+#define WLC_E_ROAM_EXP_EVENT 143 /* Expanded roam event */
+#define WLC_E_LAST 144 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 144)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 144."
#endif /* WLC_E_LAST */
/* define an API for getting the string name of an event */
extern const char *bcmevent_get_name(uint event_type);
+/* conversion between host and network order for events */
+extern void wl_event_to_host_order(wl_event_msg_t * evt);
+extern void wl_event_to_network_order(wl_event_msg_t * evt);
/* Event status codes */
#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */
#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */
#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */
-#ifdef BCMCCX
-#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */
-#endif /* BCMCCX */
#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
-
/* roam reason codes */
#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */
#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */
#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */
#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */
#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */
-#endif /* def BCMCCX */
#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */
#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */
-#endif /* def BCMCCX */
#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */
#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */
#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */
-#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
-#endif /* BCMCCX */
+#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */
/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
#define WLC_E_SUP_OTHER 0 /* Other reason */
#define WLC_E_IF_ROLE_WDS 2 /* WDS link */
#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */
#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */
-#ifdef WLBTAMP
-#define WLC_E_IF_ROLE_BTA_CREATOR 5 /* BT-AMP Creator */
-#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6 /* BT-AMP Acceptor */
-#endif
+#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
/* WLC_E_RSSI event data */
typedef struct wl_event_data_rssi {
#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
/* Reason codes for LINK */
-#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */
-#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
-#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
-#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
+#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
+
+
+/* WLC_E_NDIS_LINK event data */
+typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms {
+ struct ether_addr peer_mac; /* 6 bytes */
+ uint16 chanspec; /* 2 bytes */
+ uint32 link_speed; /* current datarate in units of 500 Kbit/s */
+ uint32 max_link_speed; /* max possible datarate for link in units of 500 Kbit/s */
+ int32 rssi; /* average rssi */
+} BWL_POST_PACKED_STRUCT ndis_link_parms_t;
/* reason codes for WLC_E_OVERLAY_REQ event */
#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */
#define TDLS_AF_CATEGORY 12
/* Wi-Fi Display (WFD) Vendor Specific Category */
/* used for WFD Tunneled Probe Request and Response */
-#define TDLS_VENDOR_SPECIFIC 127
+#define TDLS_VENDOR_SPECIFIC 127
/* TDLS Action Field Values */
-#define TDLS_ACTION_SETUP_REQ 0
-#define TDLS_ACTION_SETUP_RESP 1
-#define TDLS_ACTION_SETUP_CONFIRM 2
-#define TDLS_ACTION_TEARDOWN 3
-#define WLAN_TDLS_SET_PROBE_WFD_IE 11
-#define WLAN_TDLS_SET_SETUP_WFD_IE 12
+#define TDLS_ACTION_SETUP_REQ 0
+#define TDLS_ACTION_SETUP_RESP 1
+#define TDLS_ACTION_SETUP_CONFIRM 2
+#define TDLS_ACTION_TEARDOWN 3
+#define WLAN_TDLS_SET_PROBE_WFD_IE 11
+#define WLAN_TDLS_SET_SETUP_WFD_IE 12
+#define WLAN_TDLS_SET_WFD_ENABLED 13
+#define WLAN_TDLS_SET_WFD_DISABLED 14
#endif
wl_sd_tlv_t tlv[1]; /* service discovery TLV */
} BWL_POST_PACKED_STRUCT wl_event_sd_t;
+/* Note: proxd has a new API (ver 3.0) that deprecates the following */
+
/* Reason codes for WLC_E_PROXD */
#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */
#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */
#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */
#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */
#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */
+#define WLC_E_PROXD_TS_RESULTS 12 /* used by: initiator completed */
/* proxd_event data */
typedef struct ftm_sample {
int8 rssi; /* RSSI */
} ftm_sample_t;
+typedef struct ts_sample {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} ts_sample_t;
+
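/*
 * Editor's illustrative sketch (not part of the original header): assuming the
 * four timestamps above follow the usual two-way time-of-flight convention
 * (t1 = measurement frame leaves the initiator, t2 = it arrives at the target,
 * t3 = the response leaves the target, t4 = the response arrives back at the
 * initiator -- an assumption, since the header does not say), the round-trip
 * time excluding the target's turnaround delay could be derived like this.
 */
static uint32
ts_sample_rtt(const ts_sample_t *ts)
{
	/* RTT = (t4 - t1) - (t3 - t2); unsigned arithmetic tolerates counter wrap */
	return (ts->t4 - ts->t1) - (ts->t3 - ts->t2);
}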
typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
uint16 ver; /* version */
uint16 mode; /* mode: target/initiator */
uint32 modertt; /* Mode delta */
uint32 medianrtt; /* median RTT */
uint32 sdrtt; /* Standard deviation of RTT */
- int gdcalcresult; /* Software or Hardware Kind of redundant, but if */
+ int32 gdcalcresult; /* Software or Hardware; kind of redundant, but if */
/* frame type is VHT, then we should do it by hardware */
int16 avg_rssi; /* avg rssi across the ftm frames */
int16 validfrmcnt; /* Firmware's valid frame counts */
- char *peer_router_info; /* Peer router information if available in TLV, */
+ int32 peer_router_info; /* Peer router information if available in TLV, */
/* We will add this field later */
int32 var1; /* average of group delay */
int32 var2; /* average of threshold crossing */
ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */
} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_ts_results {
+ uint16 ver; /* version */
+ uint16 mode; /* mode: target/initiator */
+ uint16 method; /* method: rssi/TOF/AOA */
+ uint8 err_code; /* error classification */
+ uint8 TOF_type; /* one way or two way TOF */
+ uint16 ts_cnt; /* number of timestamp measurements */
+ ts_sample_t ts_buff[1]; /* Timestamps */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t;
+
/* Video Traffic Interference Monitor Event */
#define INTFER_EVENT_VERSION 1
struct ether_addr prim_ea; /* primary intf ether addr */
} wl_psta_primary_intf_event_t;
+/* WLC_E_DPSTA_INTF_IND event data */
+typedef enum {
+ WL_INTF_PSTA = 1,
+ WL_INTF_DWDS = 2
+} wl_dpsta_intf_type;
+
+typedef struct wl_dpsta_intf_event {
+ wl_dpsta_intf_type intf_type; /* dwds/psta intf register */
+} wl_dpsta_intf_event_t;
/* ********** NAN protocol events/subevents ********** */
#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
/* nan application events to the host driver */
-enum nan_app_events {
+typedef enum nan_app_events {
WL_NAN_EVENT_START = 1, /* NAN cluster started */
WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */
WL_NAN_EVENT_ROLE = 3, /* Role or State changed */
WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */
WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */
WL_NAN_EVENT_STOP = 11, /* NAN stopped */
- WL_NAN_EVENT_INVALID = 12, /* delimiter for max value */
-};
+ WL_NAN_EVENT_P2P = 12, /* NAN P2P EVENT */
+ WL_NAN_EVENT_WINDOW_BEGIN_P2P = 13, /* Event for the start of the P2P further availability window */
+ WL_NAN_EVENT_WINDOW_BEGIN_MESH = 14,
+ WL_NAN_EVENT_WINDOW_BEGIN_IBSS = 15,
+ WL_NAN_EVENT_WINDOW_BEGIN_RANGING = 16,
+ WL_NAN_EVENT_POST_DISC = 17, /* Event for post discovery data */
+ WL_NAN_EVENT_INVALID /* delimiter for max value */
+} nan_app_events_e;
+
#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
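/*
 * Editor's illustrative sketch (not part of the original header): the macro
 * above tests bit (evt - 1) of an event mask, so NAN event number N is
 * controlled by bit N-1. A hypothetical mask variable could be checked as
 * follows.
 */
static int
nan_start_event_enabled(uint32 nan_evt_mask)	/* hypothetical helper */
{
	/* true when the bit for WL_NAN_EVENT_START (= 1), i.e. bit 0, is set */
	return IS_NAN_EVT_ON(nan_evt_mask, WL_NAN_EVENT_START);
}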
/* ******************* end of NAN section *************** */
+#define MSCH_EVENTS_BUFFER_SIZE 2048
+
+/* Reason codes for WLC_E_MSCH */
+#define WLC_E_MSCH_START 0 /* start event check */
+#define WLC_E_MSCH_EXIT 1 /* exit event check */
+#define WLC_E_MSCH_REQ 2 /* request event */
+#define WLC_E_MSCH_CALLBACK 3 /* call back event */
+#define WLC_E_MSCH_MESSAGE 4 /* message event */
+#define WLC_E_MSCH_PROFILE_START 5
+#define WLC_E_MSCH_PROFILE_END 6
+#define WLC_E_MSCH_REQ_HANDLE 7
+#define WLC_E_MSCH_REQ_ENTITY 8
+#define WLC_E_MSCH_CHAN_CTXT 9
+#define WLC_E_MSCH_TIMESLOT 10
+#define WLC_E_MSCH_REQ_TIMING 11
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+} BWL_POST_PACKED_STRUCT msch_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_start_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint32 status;
+} BWL_POST_PACKED_STRUCT msch_start_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_message_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ char message[1]; /* message */
+} BWL_POST_PACKED_STRUCT msch_message_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_req_param_event_data {
+ uint16 flags; /* Describe various request properties */
+ uint8 req_type; /* Describe start and end time flexibility */
+ uint8 priority; /* Define the request priority */
+ uint32 start_time_l; /* Requested start time offset in us unit */
+ uint32 start_time_h;
+ uint32 duration; /* Requested duration in us unit */
+ uint32 interval; /* Requested periodic interval in us unit,
+ * 0 means non-periodic
+ */
+ union {
+ uint32 dur_flex; /* MSCH_REG_DUR_FLEX, min_dur = duration - dur_flex */
+ struct {
+ uint32 min_dur; /* min duration for traffic, maps to home_time */
+ uint32 max_away_dur; /* max acceptable away dur, maps to home_away_time */
+ uint32 lo_prio_time_l;
+ uint32 lo_prio_time_h;
+ uint32 lo_prio_interval; /* repeated low priority interval */
+ uint32 hi_prio_time_l;
+ uint32 hi_prio_time_h;
+ uint32 hi_prio_interval; /* repeated high priority interval */
+ } bf;
+ } flex;
+} BWL_POST_PACKED_STRUCT msch_req_param_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_timeslot_event_data {
+ uint32 p_timeslot;
+ uint32 p_prev;
+ uint32 p_next;
+ uint32 timeslot_id;
+ uint32 pre_start_time_l;
+ uint32 pre_start_time_h;
+ uint32 end_time_l;
+ uint32 end_time_h;
+ uint32 sch_dur_l;
+ uint32 sch_dur_h;
+ uint32 p_chan_ctxt;
+ uint32 fire_time_l;
+ uint32 fire_time_h;
+ uint32 state;
+} BWL_POST_PACKED_STRUCT msch_timeslot_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_req_timing_event_data {
+ uint32 p_req_timing;
+ uint32 p_prev;
+ uint32 p_next;
+ uint16 flags;
+ uint16 timeslot_ptr;
+ uint32 fire_time_l;
+ uint32 fire_time_h;
+ uint32 pre_start_time_l;
+ uint32 pre_start_time_h;
+ uint32 start_time_l;
+ uint32 start_time_h;
+ uint32 end_time_l;
+ uint32 end_time_h;
+ uint32 p_timeslot;
+} BWL_POST_PACKED_STRUCT msch_req_timing_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_chan_ctxt_event_data {
+ uint32 p_chan_ctxt;
+ uint32 p_prev;
+ uint32 p_next;
+ uint16 chanspec;
+ uint16 bf_sch_pending;
+ uint32 bf_link_prev;
+ uint32 bf_link_next;
+ uint32 onchan_time_l;
+ uint32 onchan_time_h;
+ uint32 actual_onchan_dur_l;
+ uint32 actual_onchan_dur_h;
+ uint32 pend_onchan_dur_l;
+ uint32 pend_onchan_dur_h;
+ uint16 req_entity_list_cnt;
+ uint16 req_entity_list_ptr;
+ uint16 bf_entity_list_cnt;
+ uint16 bf_entity_list_ptr;
+} BWL_POST_PACKED_STRUCT msch_chan_ctxt_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_prio_event_data {
+ uint32 is_lo;
+ uint32 time_l;
+ uint32 time_h;
+ uint32 p_entity;
+} BWL_POST_PACKED_STRUCT msch_prio_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_req_entity_event_data {
+ uint32 p_req_entity;
+ uint32 req_hdl_link_prev;
+ uint32 req_hdl_link_next;
+ uint32 chan_ctxt_link_prev;
+ uint32 chan_ctxt_link_next;
+ uint32 rt_specific_link_prev;
+ uint32 rt_specific_link_next;
+ uint16 chanspec;
+ uint16 req_param_ptr;
+ uint16 cur_slot_ptr;
+ uint16 pend_slot_ptr;
+ msch_prio_event_data_t lo_event;
+ msch_prio_event_data_t hi_event;
+ uint32 ts_change_dur_flex;
+ uint16 ts_change_flags;
+ uint16 chan_ctxt_ptr;
+ uint32 p_chan_ctxt;
+ uint32 p_req_hdl;
+ uint32 hi_cnt_l;
+ uint32 hi_cnt_h;
+ uint32 bf_last_serv_time_l;
+ uint32 bf_last_serv_time_h;
+} BWL_POST_PACKED_STRUCT msch_req_entity_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_req_handle_event_data {
+ uint32 p_req_handle;
+ uint32 p_prev;
+ uint32 p_next;
+ uint32 cb_func;
+ uint32 cb_ctxt;
+ uint16 req_param_ptr;
+ uint16 req_entity_list_cnt;
+ uint16 req_entity_list_ptr;
+ uint16 chan_cnt;
+ uint16 schd_chan_cnt;
+ uint16 chanspec_list_cnt;
+ uint16 chanspec_list_ptr;
+ uint16 pad;
+} BWL_POST_PACKED_STRUCT msch_req_handle_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_profile_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint32 free_req_hdl_list;
+ uint32 free_req_entity_list;
+ uint32 free_chan_ctxt_list;
+ uint32 free_timeslot_list;
+ uint32 free_chanspec_list;
+ uint16 cur_msch_timeslot_ptr;
+ uint16 pad;
+ uint32 p_cur_msch_timeslot;
+ uint32 cur_armed_timeslot;
+ uint32 cur_armed_req_timing;
+ uint32 ts_id;
+ uint32 service_interval;
+ uint32 max_lo_prio_interval;
+ uint16 flex_list_cnt;
+ uint16 msch_chanspec_alloc_cnt;
+ uint16 msch_req_entity_alloc_cnt;
+ uint16 msch_req_hdl_alloc_cnt;
+ uint16 msch_chan_ctxt_alloc_cnt;
+ uint16 msch_timeslot_alloc_cnt;
+ uint16 msch_req_hdl_list_cnt;
+ uint16 msch_req_hdl_list_ptr;
+ uint16 msch_chan_ctxt_list_cnt;
+ uint16 msch_chan_ctxt_list_ptr;
+ uint16 msch_timeslot_list_cnt;
+ uint16 msch_timeslot_list_ptr;
+ uint16 msch_req_timing_list_cnt;
+ uint16 msch_req_timing_list_ptr;
+ uint16 msch_start_flex_list_cnt;
+ uint16 msch_start_flex_list_ptr;
+ uint16 msch_both_flex_list_cnt;
+ uint16 msch_both_flex_list_ptr;
+} BWL_POST_PACKED_STRUCT msch_profile_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_req_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint16 chanspec_cnt;
+ uint16 chanspec_ptr;
+ uint16 req_param_ptr;
+ uint16 pad;
+} BWL_POST_PACKED_STRUCT msch_req_event_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct msch_callback_event_data {
+ uint32 time_lo; /* Request time */
+ uint32 time_hi;
+ uint16 type; /* callback type */
+ uint16 chanspec; /* actual chanspec, may differ from the requested one */
+ uint32 pre_start_time_l; /* time slot prestart time low 32bit */
+ uint32 pre_start_time_h; /* time slot prestart time high 32bit */
+ uint32 end_time_l; /* time slot end time low 32 bit */
+ uint32 end_time_h; /* time slot end time high 32 bit */
+ uint32 timeslot_id; /* unique time slot id */
+} BWL_POST_PACKED_STRUCT msch_callback_event_data_t;
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental constants relating to IP Protocol
*
- * $Id: bcmip.h 458522 2014-02-27 02:26:15Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmip.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bcmip_h_
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental constants relating to Neighbor Discovery Protocol
*
- * $Id: bcmipv6.h 439574 2013-11-27 06:37:37Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmipv6.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bcmipv6_h_
} BWL_POST_PACKED_STRUCT;
/* Neighbor Advertisement/Solicitation Packet Structure */
-BWL_PRE_PACKED_STRUCT struct nd_msg {
- struct icmp6_hdr icmph;
- struct ipv6_addr target;
+BWL_PRE_PACKED_STRUCT struct bcm_nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
} BWL_POST_PACKED_STRUCT;
/*
* Fundamental constants relating to TCP Protocol
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmtcp.h 458522 2014-02-27 02:26:15Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bcmtcp.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bcmtcp_h_
/*
- * Copyright (C) 2014, Broadcom Corporation
+ * Fundamental constants relating to UDP Protocol
+ *
+ * Copyright (C) 2016, Broadcom Corporation
* All Rights Reserved.
*
* This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
* or duplicated in any form, in whole or in part, without the prior
* written permission of Broadcom Corporation.
*
- * Fundamental constants relating to UDP Protocol
*
- * $Id: bcmudp.h 382882 2013-02-04 23:24:31Z $
+ * <<Broadcom-WL-IPTag/Proprietary:>>
+ *
+ * $Id: bcmudp.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bcmudp_h_
/*
* BT-AMP (BlueTooth Alternate Mac and Phy) HCI (Host/Controller Interface)
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bt_amp_hci.h 382882 2013-02-04 23:24:31Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: bt_amp_hci.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _bt_amp_hci_h
* IEEE Std 802.1X-2001
* IEEE 802.1X RADIUS Usage Guidelines
*
- * Copyright Open Broadcom Corporation
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
*
- * $Id: eapol.h 452703 2014-01-31 20:33:06Z $
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: eapol.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _eapol_h_
/*
* From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: ethernet.h 473238 2014-04-28 19:14:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: ethernet.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */
#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */
#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */
#define ETHER_TYPE_802_1X 0x888e /* 802.1x */
-#ifdef PLC
-#define ETHER_TYPE_88E1 0x88e1 /* GIGLE */
-#define ETHER_TYPE_8912 0x8912 /* GIGLE */
-#define ETHER_TYPE_GIGLED 0xffff /* GIGLE */
-#endif /* PLC */
#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */
#define ETHER_TYPE_WAI 0x88b4 /* WAI */
#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */
+#define ETHER_TYPE_RRB ETHER_TYPE_89_0D /* RRB 802.11r 2008 */
#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */
--- /dev/null
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _EVENT_LOG_SET_H_
+#define _EVENT_LOG_SET_H_
+
+/* Set a maximum number of sets here. It is not dynamic for
+ * efficiency of the EVENT_LOG calls.
+ */
+#define NUM_EVENT_LOG_SETS 8
+
+/* Define new event log sets here */
+#define EVENT_LOG_SET_BUS 0
+#define EVENT_LOG_SET_WL 1
+#define EVENT_LOG_SET_PSM 2
+#define EVENT_LOG_SET_ERROR 3
+#define EVENT_LOG_SET_MEM_API 4
+
+#endif /* _EVENT_LOG_SET_H_ */
--- /dev/null
+/*
+ * EVENT_LOG system definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: event_log.h 241182 2011-02-17 21:50:03Z $
+ */
+
+#ifndef _EVENT_LOG_TAG_H_
+#define _EVENT_LOG_TAG_H_
+
+#include <typedefs.h>
+
+/* Define new event log tags here */
+#define EVENT_LOG_TAG_NULL 0 /* Special null tag */
+#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */
+#define EVENT_LOG_TAG_BUS_OOB 2
+#define EVENT_LOG_TAG_BUS_STATE 3
+#define EVENT_LOG_TAG_BUS_PROTO 4
+#define EVENT_LOG_TAG_BUS_CTL 5
+#define EVENT_LOG_TAG_BUS_EVENT 6
+#define EVENT_LOG_TAG_BUS_PKT 7
+#define EVENT_LOG_TAG_BUS_FRAME 8
+#define EVENT_LOG_TAG_BUS_DESC 9
+#define EVENT_LOG_TAG_BUS_SETUP 10
+#define EVENT_LOG_TAG_BUS_MISC 11
+#define EVENT_LOG_TAG_SRSCAN 22
+#define EVENT_LOG_TAG_PWRSTATS_INFO 23
+#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
+#define EVENT_LOG_TAG_UCODE_FIFO 27
+#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28
+#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29
+#define EVENT_LOG_TAG_SCAN_ERROR 30
+#define EVENT_LOG_TAG_SCAN_WARN 31
+#define EVENT_LOG_TAG_MPF_ERR 32
+#define EVENT_LOG_TAG_MPF_WARN 33
+#define EVENT_LOG_TAG_MPF_INFO 34
+#define EVENT_LOG_TAG_MPF_DEBUG 35
+#define EVENT_LOG_TAG_EVENT_INFO 36
+#define EVENT_LOG_TAG_EVENT_ERR 37
+#define EVENT_LOG_TAG_PWRSTATS_ERROR 38
+#define EVENT_LOG_TAG_EXCESS_PM_ERROR 39
+#define EVENT_LOG_TAG_IOCTL_LOG 40
+#define EVENT_LOG_TAG_PFN_ERR 41
+#define EVENT_LOG_TAG_PFN_WARN 42
+#define EVENT_LOG_TAG_PFN_INFO 43
+#define EVENT_LOG_TAG_PFN_DEBUG 44
+#define EVENT_LOG_TAG_BEACON_LOG 45
+#define EVENT_LOG_TAG_WNM_BSSTRANS_INFO 46
+#define EVENT_LOG_TAG_TRACE_CHANSW 47
+#define EVENT_LOG_TAG_PCI_ERROR 48
+#define EVENT_LOG_TAG_PCI_TRACE 49
+#define EVENT_LOG_TAG_PCI_WARN 50
+#define EVENT_LOG_TAG_PCI_INFO 51
+#define EVENT_LOG_TAG_PCI_DBG 52
+#define EVENT_LOG_TAG_PCI_DATA 53
+#define EVENT_LOG_TAG_PCI_RING 54
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55
+#define EVENT_LOG_TAG_WL_ERROR 56
+#define EVENT_LOG_TAG_PHY_ERROR 57
+#define EVENT_LOG_TAG_OTP_ERROR 58
+#define EVENT_LOG_TAG_NOTIF_ERROR 59
+#define EVENT_LOG_TAG_MPOOL_ERROR 60
+#define EVENT_LOG_TAG_OBJR_ERROR 61
+#define EVENT_LOG_TAG_DMA_ERROR 62
+#define EVENT_LOG_TAG_PMU_ERROR 63
+#define EVENT_LOG_TAG_BSROM_ERROR 64
+#define EVENT_LOG_TAG_SI_ERROR 65
+#define EVENT_LOG_TAG_ROM_PRINTF 66
+#define EVENT_LOG_TAG_RATE_CNT 67
+#define EVENT_LOG_TAG_CTL_MGT_CNT 68
+#define EVENT_LOG_TAG_AMPDU_DUMP 69
+#define EVENT_LOG_TAG_MEM_ALLOC_SUCC 70
+#define EVENT_LOG_TAG_MEM_ALLOC_FAIL 71
+#define EVENT_LOG_TAG_MEM_FREE 72
+#define EVENT_LOG_TAG_WL_ASSOC_LOG 73
+#define EVENT_LOG_TAG_WL_PS_LOG 74
+#define EVENT_LOG_TAG_WL_ROAM_LOG 75
+#define EVENT_LOG_TAG_WL_MPC_LOG 76
+#define EVENT_LOG_TAG_WL_WSEC_LOG 77
+#define EVENT_LOG_TAG_WL_WSEC_DUMP 78
+#define EVENT_LOG_TAG_WL_MCNX_LOG 79
+#define EVENT_LOG_TAG_HEALTH_CHECK_ERROR 80
+#define EVENT_LOG_TAG_HNDRTE_EVENT_ERROR 81
+#define EVENT_LOG_TAG_ECOUNTERS_ERROR 82
+#define EVENT_LOG_TAG_WL_COUNTERS 83
+#define EVENT_LOG_TAG_ECOUNTERS_IPCSTATS 84
+#define EVENT_LOG_TAG_WL_P2P_LOG 85
+#define EVENT_LOG_TAG_SDIO_ERROR 86
+#define EVENT_LOG_TAG_SDIO_TRACE 87
+#define EVENT_LOG_TAG_SDIO_DBG 88
+#define EVENT_LOG_TAG_SDIO_PRHDRS 89
+#define EVENT_LOG_TAG_SDIO_PRPKT 90
+#define EVENT_LOG_TAG_SDIO_INFORM 91
+#define EVENT_LOG_TAG_MIMO_PS_ERROR 92
+#define EVENT_LOG_TAG_MIMO_PS_TRACE 93
+#define EVENT_LOG_TAG_MIMO_PS_INFO 94
+#define EVENT_LOG_TAG_BTCX_STATS 95
+#define EVENT_LOG_TAG_LEAKY_AP_STATS 96
+#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97
+#define EVENT_LOG_TAG_MIMO_PS_STATS 98
+#define EVENT_LOG_TAG_PWRSTATS_PHY 99
+#define EVENT_LOG_TAG_PWRSTATS_SCAN 100
+#define EVENT_LOG_TAG_PWRSTATS_AWDL 101
+#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102
+#define EVENT_LOG_TAG_LQM 103
+#define EVENT_LOG_TAG_TRACE_WL_INFO 104
+#define EVENT_LOG_TAG_TRACE_BTCOEX_INFO 105
+#define EVENT_LOG_TAG_MAX 105 /* Set to the same value as the last tag, not last tag + 1 */
+/* Note: New event should be added/reserved in trunk before adding it to branches */
+
+
+#define SD_PRHDRS(i, s, h, p, n, l)
+#define SD_PRPKT(m, b, n)
+#define SD_INFORM(args)
+
+/* Flags for tag control */
+#define EVENT_LOG_TAG_FLAG_NONE 0
+#define EVENT_LOG_TAG_FLAG_LOG 0x80
+#define EVENT_LOG_TAG_FLAG_PRINT 0x40
+#define EVENT_LOG_TAG_FLAG_SET_MASK 0x3f
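/*
 * Editor's illustrative sketch (not part of the original header): the three
 * flag values above appear to pack a per-tag control byte as two enable bits
 * plus a 6-bit log-set index. Assuming that layout (not stated explicitly
 * here), a control byte could be decoded as follows.
 */
static void
decode_tag_control(uint8 ctrl)	/* hypothetical helper */
{
	int log_enabled   = (ctrl & EVENT_LOG_TAG_FLAG_LOG) != 0;	/* deliver to a log set */
	int print_enabled = (ctrl & EVENT_LOG_TAG_FLAG_PRINT) != 0;	/* also print to console */
	int set_index     = ctrl & EVENT_LOG_TAG_FLAG_SET_MASK;	/* destination log set */

	(void)log_enabled; (void)print_enabled; (void)set_index;
}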
+
+/* Each event log entry has a type. The type is the LAST word of the
+ * event log entry. The printing code walks the event entries in reverse
+ * order to find the first entry.
+ */
+typedef union event_log_hdr {
+ struct {
+ uint8 tag; /* Event_log entry tag */
+ uint8 count; /* Count of 4-byte entries */
+ uint16 fmt_num; /* Format number */
+ };
+ uint32 t; /* Type cheat */
+} event_log_hdr_t;
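/*
 * Editor's illustrative sketch (not part of the original header): because the
 * header word is the LAST word of each entry and records how many 4-byte
 * payload words precede it, a reader positioned on the final header word can
 * step backwards entry by entry. Buffer bounds handling here is hypothetical.
 */
static void
walk_event_log_backwards(const uint32 *last_hdr_word, const uint32 *buf_start)
{
	const uint32 *cur = last_hdr_word;

	while (cur > buf_start) {
		event_log_hdr_t hdr;

		hdr.t = *cur;			/* the header is the last word of the entry */
		/* hdr.tag, hdr.count and hdr.fmt_num describe this entry's payload */
		cur -= (hdr.count + 1);		/* skip the payload words plus this header word */
	}
}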
+
+#endif /* _EVENT_LOG_TAG_H_ */
/*
- * $Copyright Open Broadcom Corporation$
- *
* Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
*
- * $Id: p2p.h 457033 2014-02-20 19:39:45Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: p2p.h 536785 2015-02-24 08:35:00Z $
*/
#ifndef _P2P_H_
#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
/* Action value for GAS Initial Response AF */
#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
- /* Action value for GAS Comback Request AF */
+ /* Action value for GAS Comeback Request AF */
#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
- /* Action value for GAS Comback Response AF */
+ /* Action value for GAS Comeback Response AF */
#define P2PSD_AD_EID 0x6c
/* Advertisement Protocol IE ID */
#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
/*
* SD-SPI Protocol Standard
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sdspi.h 382882 2013-02-04 23:24:31Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdspi.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _SD_SPI_H
#define _SD_SPI_H
/*
* 802.1Q VLAN protocol definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: vlan.h 382883 2013-02-04 23:26:09Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: vlan.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _vlan_h_
/*
* Fundamental types and constants relating to WPA
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wpa.h 492853 2014-07-23 17:20:34Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wpa.h 518342 2014-12-01 23:21:41Z $
*/
#ifndef _proto_wpa_h_
#define WPA_IE_FIXED_LEN 8
#define WPA_IE_TAG_FIXED_LEN 6
+#define BIP_OUI_TYPE WPA2_OUI "\x06"
+
typedef BWL_PRE_PACKED_STRUCT struct {
uint8 tag; /* TAG */
uint8 length; /* TAG length */
#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
#define WPA_CIPHER_BIP 6 /* BIP (Broadcast Integrity Protocol) */
#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */
-#ifdef BCMCCX
-#define WPA_CIPHER_CKIP 8 /* KP with no MIC */
-#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */
-#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */
-
-#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \
- (cipher) == WPA_CIPHER_CKIP_MMH || \
- (cipher) == WPA_CIPHER_WEP_MMH)
-#endif
-
-#ifdef BCMWAPI_WAI
-#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
-#define WAPI_CIPHER_SMS4 11
-#define WAPI_CSE_WPI_SMS4 1
-#endif /* BCMWAPI_WAI */
#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
(cipher) == WPA_CIPHER_WEP_40 || \
(cipher) == WPA_CIPHER_AES_CCM || \
(cipher) == WPA_CIPHER_TPK)
-#ifdef BCMWAPI_WAI
-#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
- (cipher) == WAPI_CSE_WPI_SMS4)
-
-/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
-#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
- WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
-
-#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
- WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
-#endif /* BCMWAPI_WAI */
/* WPA TKIP countermeasures parameters */
#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
#define WPA2_PMKID_COUNT_LEN 2
-#define RSN_GROUPMANAGE_CIPHER_LEN 4
-
-#ifdef BCMWAPI_WAI
-#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
-
-/* Other WAI definition */
-#define WAPI_WAI_REQUEST 0x00F1
-#define WAPI_UNICAST_REKEY 0x00F2
-#define WAPI_STA_AGING 0x00F3
-#define WAPI_MUTIL_REKEY 0x00F4
-#define WAPI_STA_STATS 0x00F5
-#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */
-#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */
-#endif /* BCMWAPI_WAI */
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
--- /dev/null
+/*
+ * WPS IE definitions
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id$
+ */
+
+#ifndef _WPS_
+#define _WPS_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Data Element Definitions */
+#define WPS_ID_AP_CHANNEL 0x1001
+#define WPS_ID_ASSOC_STATE 0x1002
+#define WPS_ID_AUTH_TYPE 0x1003
+#define WPS_ID_AUTH_TYPE_FLAGS 0x1004
+#define WPS_ID_AUTHENTICATOR 0x1005
+#define WPS_ID_CONFIG_METHODS 0x1008
+#define WPS_ID_CONFIG_ERROR 0x1009
+#define WPS_ID_CONF_URL4 0x100A
+#define WPS_ID_CONF_URL6 0x100B
+#define WPS_ID_CONN_TYPE 0x100C
+#define WPS_ID_CONN_TYPE_FLAGS 0x100D
+#define WPS_ID_CREDENTIAL 0x100E
+#define WPS_ID_DEVICE_NAME 0x1011
+#define WPS_ID_DEVICE_PWD_ID 0x1012
+#define WPS_ID_E_HASH1 0x1014
+#define WPS_ID_E_HASH2 0x1015
+#define WPS_ID_E_SNONCE1 0x1016
+#define WPS_ID_E_SNONCE2 0x1017
+#define WPS_ID_ENCR_SETTINGS 0x1018
+#define WPS_ID_ENCR_TYPE 0x100F
+#define WPS_ID_ENCR_TYPE_FLAGS 0x1010
+#define WPS_ID_ENROLLEE_NONCE 0x101A
+#define WPS_ID_FEATURE_ID 0x101B
+#define WPS_ID_IDENTITY 0x101C
+#define WPS_ID_IDENTITY_PROOF 0x101D
+#define WPS_ID_KEY_WRAP_AUTH 0x101E
+#define WPS_ID_KEY_IDENTIFIER 0x101F
+#define WPS_ID_MAC_ADDR 0x1020
+#define WPS_ID_MANUFACTURER 0x1021
+#define WPS_ID_MSG_TYPE 0x1022
+#define WPS_ID_MODEL_NAME 0x1023
+#define WPS_ID_MODEL_NUMBER 0x1024
+#define WPS_ID_NW_INDEX 0x1026
+#define WPS_ID_NW_KEY 0x1027
+#define WPS_ID_NW_KEY_INDEX 0x1028
+#define WPS_ID_NEW_DEVICE_NAME 0x1029
+#define WPS_ID_NEW_PWD 0x102A
+#define WPS_ID_OOB_DEV_PWD 0x102C
+#define WPS_ID_OS_VERSION 0x102D
+#define WPS_ID_POWER_LEVEL 0x102F
+#define WPS_ID_PSK_CURRENT 0x1030
+#define WPS_ID_PSK_MAX 0x1031
+#define WPS_ID_PUBLIC_KEY 0x1032
+#define WPS_ID_RADIO_ENABLED 0x1033
+#define WPS_ID_REBOOT 0x1034
+#define WPS_ID_REGISTRAR_CURRENT 0x1035
+#define WPS_ID_REGISTRAR_ESTBLSHD 0x1036
+#define WPS_ID_REGISTRAR_LIST 0x1037
+#define WPS_ID_REGISTRAR_MAX 0x1038
+#define WPS_ID_REGISTRAR_NONCE 0x1039
+#define WPS_ID_REQ_TYPE 0x103A
+#define WPS_ID_RESP_TYPE 0x103B
+#define WPS_ID_RF_BAND 0x103C
+#define WPS_ID_R_HASH1 0x103D
+#define WPS_ID_R_HASH2 0x103E
+#define WPS_ID_R_SNONCE1 0x103F
+#define WPS_ID_R_SNONCE2 0x1040
+#define WPS_ID_SEL_REGISTRAR 0x1041
+#define WPS_ID_SERIAL_NUM 0x1042
+#define WPS_ID_SC_STATE 0x1044
+#define WPS_ID_SSID 0x1045
+#define WPS_ID_TOT_NETWORKS 0x1046
+#define WPS_ID_UUID_E 0x1047
+#define WPS_ID_UUID_R 0x1048
+#define WPS_ID_VENDOR_EXT 0x1049
+#define WPS_ID_VERSION 0x104A
+#define WPS_ID_X509_CERT_REQ 0x104B
+#define WPS_ID_X509_CERT 0x104C
+#define WPS_ID_EAP_IDENTITY 0x104D
+#define WPS_ID_MSG_COUNTER 0x104E
+#define WPS_ID_PUBKEY_HASH 0x104F
+#define WPS_ID_REKEY_KEY 0x1050
+#define WPS_ID_KEY_LIFETIME 0x1051
+#define WPS_ID_PERM_CFG_METHODS 0x1052
+#define WPS_ID_SEL_REG_CFG_METHODS 0x1053
+#define WPS_ID_PRIM_DEV_TYPE 0x1054
+#define WPS_ID_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ID_PORTABLE_DEVICE 0x1056
+#define WPS_ID_AP_SETUP_LOCKED 0x1057
+#define WPS_ID_APP_LIST 0x1058
+#define WPS_ID_EAP_TYPE 0x1059
+#define WPS_ID_INIT_VECTOR 0x1060
+#define WPS_ID_KEY_PROVIDED_AUTO 0x1061
+#define WPS_ID_8021X_ENABLED 0x1062
+#define WPS_ID_WEP_TRANSMIT_KEY 0x1064
+#define WPS_ID_REQ_DEV_TYPE 0x106A
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WFA_VENDOR_EXT_ID "\x00\x37\x2A"
+#define WPS_WFA_SUBID_VERSION2 0x00
+#define WPS_WFA_SUBID_AUTHORIZED_MACS 0x01
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE 0x02
+#define WPS_WFA_SUBID_REQ_TO_ENROLL 0x03
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
+#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05
+
+
+/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
+#define MS_VENDOR_EXT_ID "\x00\x01\x37"
+#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */
+#define WPS_MS_ID_TRANSPORT_UUID 0x1002 /* Transport UUID TLV */
+
+/* Vertical Pairing Identifier TLV Definitions */
+#define WPS_MS_VPI_TRANSPORT_NONE 0x00 /* None */
+#define WPS_MS_VPI_TRANSPORT_DPWS 0x01 /* Devices Profile for Web Services */
+#define WPS_MS_VPI_TRANSPORT_UPNP 0x02 /* uPnP */
+#define WPS_MS_VPI_TRANSPORT_SDNWS 0x03 /* Secure Devices Profile for Web Services */
+#define WPS_MS_VPI_NO_PROFILE_REQ 0x00 /* Wi-Fi profile not requested.
+ * Not supported in Windows 7
+ */
+#define WPS_MS_VPI_PROFILE_REQ 0x01 /* Wi-Fi profile requested. */
+
+/* sizes of the fixed size elements */
+#define WPS_ID_AP_CHANNEL_S 2
+#define WPS_ID_ASSOC_STATE_S 2
+#define WPS_ID_AUTH_TYPE_S 2
+#define WPS_ID_AUTH_TYPE_FLAGS_S 2
+#define WPS_ID_AUTHENTICATOR_S 8
+#define WPS_ID_CONFIG_METHODS_S 2
+#define WPS_ID_CONFIG_ERROR_S 2
+#define WPS_ID_CONN_TYPE_S 1
+#define WPS_ID_CONN_TYPE_FLAGS_S 1
+#define WPS_ID_DEVICE_PWD_ID_S 2
+#define WPS_ID_ENCR_TYPE_S 2
+#define WPS_ID_ENCR_TYPE_FLAGS_S 2
+#define WPS_ID_FEATURE_ID_S 4
+#define WPS_ID_MAC_ADDR_S 6
+#define WPS_ID_MSG_TYPE_S 1
+#define WPS_ID_SC_STATE_S 1
+#define WPS_ID_RF_BAND_S 1
+#define WPS_ID_OS_VERSION_S 4
+#define WPS_ID_VERSION_S 1
+#define WPS_ID_SEL_REGISTRAR_S 1
+#define WPS_ID_SEL_REG_CFG_METHODS_S 2
+#define WPS_ID_REQ_TYPE_S 1
+#define WPS_ID_RESP_TYPE_S 1
+#define WPS_ID_AP_SETUP_LOCKED_S 1
+
+/* WSC 2.0, WFA Vendor Extension Subelements */
+#define WPS_WFA_SUBID_VERSION2_S 1
+#define WPS_WFA_SUBID_NW_KEY_SHAREABLE_S 1
+#define WPS_WFA_SUBID_REQ_TO_ENROLL_S 1
+#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME_S 1
+#define WPS_WFA_SUBID_REG_CFG_METHODS_S 2
+
+/* Association states */
+#define WPS_ASSOC_NOT_ASSOCIATED 0
+#define WPS_ASSOC_CONN_SUCCESS 1
+#define WPS_ASSOC_CONFIG_FAIL 2
+#define WPS_ASSOC_ASSOC_FAIL 3
+#define WPS_ASSOC_IP_FAIL 4
+
+/* Authentication types */
+#define WPS_AUTHTYPE_OPEN 0x0001
+#define WPS_AUTHTYPE_WPAPSK 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_SHARED 0x0004 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA 0x0008 /* Deprecated in WSC 2.0 */
+#define WPS_AUTHTYPE_WPA2 0x0010
+#define WPS_AUTHTYPE_WPA2PSK 0x0020
+
+/* Config methods */
+#define WPS_CONFMET_USBA 0x0001 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_ETHERNET 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_CONFMET_LABEL 0x0004
+#define WPS_CONFMET_DISPLAY 0x0008
+#define WPS_CONFMET_EXT_NFC_TOK 0x0010
+#define WPS_CONFMET_INT_NFC_TOK 0x0020
+#define WPS_CONFMET_NFC_INTF 0x0040
+#define WPS_CONFMET_PBC 0x0080
+#define WPS_CONFMET_KEYPAD 0x0100
+/* WSC 2.0 */
+#define WPS_CONFMET_VIRT_PBC 0x0280
+#define WPS_CONFMET_PHY_PBC 0x0480
+#define WPS_CONFMET_VIRT_DISPLAY 0x2008
+#define WPS_CONFMET_PHY_DISPLAY 0x4008
+
+/* WPS error messages */
+#define WPS_ERROR_NO_ERROR 0
+#define WPS_ERROR_OOB_INT_READ_ERR 1
+#define WPS_ERROR_DECRYPT_CRC_FAIL 2
+#define WPS_ERROR_CHAN24_NOT_SUPP 3
+#define WPS_ERROR_CHAN50_NOT_SUPP 4
+#define WPS_ERROR_SIGNAL_WEAK 5 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_AUTH_FAIL 6 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NW_ASSOC_FAIL 7 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_NO_DHCP_RESP 8 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAILED_DHCP_CONF 9 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_IP_ADDR_CONFLICT 10 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_FAIL_CONN_REGISTRAR 11
+#define WPS_ERROR_MULTI_PBC_DETECTED 12
+#define WPS_ERROR_ROGUE_SUSPECTED 13
+#define WPS_ERROR_DEVICE_BUSY 14
+#define WPS_ERROR_SETUP_LOCKED 15
+#define WPS_ERROR_MSG_TIMEOUT 16 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_REG_SESSION_TIMEOUT 17 /* Deprecated in WSC 2.0 */
+#define WPS_ERROR_DEV_PWD_AUTH_FAIL 18
+#define WPS_ERROR_60GHZ_NOT_SUPPORT 19
+#define WPS_ERROR_PKH_MISMATCH 20 /* Public Key Hash Mismatch */
+
+/* Connection types */
+#define WPS_CONNTYPE_ESS 0x01
+#define WPS_CONNTYPE_IBSS 0x02
+
+/* Device password ID */
+#define WPS_DEVICEPWDID_DEFAULT 0x0000
+#define WPS_DEVICEPWDID_USER_SPEC 0x0001
+#define WPS_DEVICEPWDID_MACHINE_SPEC 0x0002
+#define WPS_DEVICEPWDID_REKEY 0x0003
+#define WPS_DEVICEPWDID_PUSH_BTN 0x0004
+#define WPS_DEVICEPWDID_REG_SPEC 0x0005
+#define WPS_DEVICEPWDID_IBSS 0x0006
+#define WPS_DEVICEPWDID_NFC_CHO 0x0007 /* NFC-Connection-Handover */
+#define WPS_DEVICEPWDID_WFDS 0x0008 /* Wi-Fi Direct Services Specification */
+
+/* Encryption type */
+#define WPS_ENCRTYPE_NONE 0x0001
+#define WPS_ENCRTYPE_WEP 0x0002 /* Deprecated in WSC 2.0 */
+#define WPS_ENCRTYPE_TKIP 0x0004 /* Deprecated in version 2.0. TKIP can only
+ * be advertised on the AP when Mixed Mode
+ * is enabled (Encryption Type is 0x000c).
+ */
+#define WPS_ENCRTYPE_AES 0x0008
+
+
+/* WPS Message Types */
+#define WPS_ID_BEACON 0x01
+#define WPS_ID_PROBE_REQ 0x02
+#define WPS_ID_PROBE_RESP 0x03
+#define WPS_ID_MESSAGE_M1 0x04
+#define WPS_ID_MESSAGE_M2 0x05
+#define WPS_ID_MESSAGE_M2D 0x06
+#define WPS_ID_MESSAGE_M3 0x07
+#define WPS_ID_MESSAGE_M4 0x08
+#define WPS_ID_MESSAGE_M5 0x09
+#define WPS_ID_MESSAGE_M6 0x0A
+#define WPS_ID_MESSAGE_M7 0x0B
+#define WPS_ID_MESSAGE_M8 0x0C
+#define WPS_ID_MESSAGE_ACK 0x0D
+#define WPS_ID_MESSAGE_NACK 0x0E
+#define WPS_ID_MESSAGE_DONE 0x0F
+
+/* WSP private ID for local use */
+#define WPS_PRIVATE_ID_IDENTITY (WPS_ID_MESSAGE_DONE + 1)
+#define WPS_PRIVATE_ID_WPS_START (WPS_ID_MESSAGE_DONE + 2)
+#define WPS_PRIVATE_ID_FAILURE (WPS_ID_MESSAGE_DONE + 3)
+#define WPS_PRIVATE_ID_FRAG (WPS_ID_MESSAGE_DONE + 4)
+#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5)
+#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6)
+
+
+/* Device Type categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_CAT_COMPUTER 1
+#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2
+#define WPS_DEVICE_TYPE_CAT_PRINTER 3
+#define WPS_DEVICE_TYPE_CAT_CAMERA 4
+#define WPS_DEVICE_TYPE_CAT_STORAGE 5
+#define WPS_DEVICE_TYPE_CAT_NW_INFRA 6
+#define WPS_DEVICE_TYPE_CAT_DISPLAYS 7
+#define WPS_DEVICE_TYPE_CAT_MM_DEVICES 8
+#define WPS_DEVICE_TYPE_CAT_GAME_DEVICES 9
+#define WPS_DEVICE_TYPE_CAT_TELEPHONE 10
+#define WPS_DEVICE_TYPE_CAT_AUDIO_DEVICES 11 /* WSC 2.0 */
+
+/* Device Type sub categories for primary and secondary device types */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_PC 1
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_SERVER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MEDIA_CTR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_UM_PC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NOTEBOOK 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_DESKTOP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_MID 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_COMP_NETBOOK 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_Keyboard 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_MOUSE 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_JOYSTICK 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TRACKBALL 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_GAM_CTRL 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_REMOTE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_TOUCHSCREEN 7 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BIO_READER 8 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_INP_BAR_READER 9 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_PRINTER 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_SCANNER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_FAX 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_COPIER 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PRTR_ALLINONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_DGTL_STILL 1
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_VIDEO_CAM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_WEB_CAM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_CAM_SECU_CAM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_STOR_NAS 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_AP 1
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_ROUTER 2
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_SWITCH 3
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_GATEWAY 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_NW_BRIDGE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_TV 1
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PIC_FRAME 2
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_PROJECTOR 3
+#define WPS_DEVICE_TYPE_SUB_CAT_DISP_MONITOR 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_DAR 1
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVR 2
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MCX 3
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_STB 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_MS_ME 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_MM_PVP 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX 1
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_XBOX_360 2
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PS 3
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_GC 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_GAM_PGD 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_WM 1
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PSM 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_PDM 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SSM 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_PHONE_SDM 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_TUNER 1 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_SPEAKERS 2 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_PMP 3 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HEADSET 4 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HPHONE 5 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */
+#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */
+
+
+/* Device request/response type */
+#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_MSGTYPE_REGISTRAR 0x02
+#define WPS_MSGTYPE_AP_WLAN_MGR 0x03
+
+/* RF Band */
+#define WPS_RFBAND_24GHZ 0x01
+#define WPS_RFBAND_50GHZ 0x02
+
+/* Simple Config state */
+#define WPS_SCSTATE_UNCONFIGURED 0x01
+#define WPS_SCSTATE_CONFIGURED 0x02
+#define WPS_SCSTATE_OFF 11
+
+/* WPS Vendor extension key */
+#define WPS_OUI_HEADER_LEN 2
+#define WPS_OUI_HEADER_SIZE 4
+#define WPS_OUI_FIXED_HEADER_OFF 16
+#define WPS_WFA_SUBID_V2_OFF 3
+#define WPS_WFA_V2_OFF 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WPS_ */
--- /dev/null
+/*
+ * HND Run Time Environment ioctl.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: rte_ioctl.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _rte_ioctl_h_
+#define _rte_ioctl_h_
+
+/* RTE IOCTL definitions for generic ether devices */
+#define RTEGHWADDR 0x8901
+#define RTESHWADDR 0x8902
+#define RTEGMTU 0x8903
+#define RTEGSTATS 0x8904
+#define RTEGALLMULTI 0x8905
+#define RTESALLMULTI 0x8906
+#define RTEGPROMISC 0x8907
+#define RTESPROMISC 0x8908
+#define RTESMULTILIST 0x8909
+#define RTEGUP 0x890A
+#define RTEGPERMADDR 0x890B
+#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */
+#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */
+
+#define RTE_IOCTL_QUERY 0x00
+#define RTE_IOCTL_SET 0x01
+#define RTE_IOCTL_OVL_IDX_MASK 0x1e
+#define RTE_IOCTL_OVL_RSV 0x20
+#define RTE_IOCTL_OVL 0x40
+#define RTE_IOCTL_OVL_IDX_SHIFT 1
+
+enum hnd_ioctl_cmd {
+ HND_RTE_DNGL_IS_SS = 1, /* true if device connected at super speed */
+
+ /* PCIEDEV specific wl <--> bus ioctls */
+ BUS_GET_VAR = 2,
+ BUS_SET_VAR = 3,
+ BUS_FLUSH_RXREORDER_Q = 4,
+ BUS_SET_LTR_STATE = 5,
+ BUS_FLUSH_CHAINED_PKTS = 6,
+ BUS_SET_COPY_COUNT = 7
+};
+
+#define SDPCMDEV_SET_MAXTXPKTGLOM 1
+
+typedef struct memuse_info {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ uint32 tot; /* Total memory */
+ uint32 text_len; /* Size of Text segment memory */
+ uint32 data_len; /* Size of Data segment memory */
+ uint32 bss_len; /* Size of BSS segment memory */
+
+ uint32 arena_size; /* Total Heap size */
+ uint32 arena_free; /* Heap memory available or free */
+ uint32 inuse_size; /* Heap memory currently in use */
+ uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */
+ uint32 inuse_overhead; /* tally of allocated mem_t blocks */
+ uint32 inuse_total; /* Heap in-use + Heap overhead memory */
+} memuse_info_t;
+
+#endif /* _rte_ioctl_h_ */
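A minimal sketch of the heap arithmetic implied by the memuse_info_t field comments above; how the structure gets populated (for example via a bus-level query) is outside this header, and the helper name and "mu" argument are hypothetical:

static uint32 heap_used_percent(const memuse_info_t *mu)
{
	/* Hypothetical helper: inuse_total already includes allocator overhead,
	 * per the field comments, so compare it against the total arena size.
	 */
	if (mu->arena_size == 0)
		return 0;
	return (mu->inuse_total * 100) / mu->arena_size;
}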
* JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
* GPIO interface, extbus, and support for serial and parallel flashes.
*
- * $Id: sbchipc.h 474281 2014-04-30 18:24:55Z $
+ * $Id: sbchipc.h 574579 2015-07-27 15:36:37Z $
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
*/
#ifndef _SBCHIPC_H
uint32 pmuintmask1; /* 0x704 */
uint32 PAD[14];
uint32 pmuintstatus; /* 0x740 */
+ uint32 PAD[15];
+ uint32 pmuintctrl0; /* 0x780 */
} pmuregs_t;
typedef struct eci_prerev35 {
uint32 pmuintmask1; /* 0x704 */
uint32 PAD[14];
uint32 pmuintstatus; /* 0x740 */
- uint32 PAD[47];
+ uint32 PAD[15];
+ uint32 pmuintctrl0; /* 0x780 */
+ uint32 PAD[31];
uint16 sromotp[512]; /* 0x800 */
-#ifdef NFLASH_SUPPORT
+#ifdef CCNFLASH_SUPPORT
/* Nand flash MLC controller registers (corerev >= 38) */
uint32 nand_revision; /* 0xC00 */
uint32 nand_cmd_start;
uint32 nand_cache_data;
uint32 nand_ctrl_config;
uint32 nand_ctrl_status;
-#endif /* NFLASH_SUPPORT */
+#endif /* CCNFLASH_SUPPORT */
uint32 gci_corecaps0; /* GCI starting at 0xC00 */
uint32 gci_corecaps1;
uint32 gci_corecaps2;
#define CC_CLKC_M2 0x9c
#define CC_CLKC_M3 0xa0
#define CC_CLKDIV 0xa4
+#define CC_CAP_EXT 0xac
#define CC_SYS_CLK_CTL 0xc0
+#define CC_CLKDIV2 0xf0
#define CC_CLK_CTL_ST SI_CLK_CTL_ST
#define PMU_CTL 0x600
#define PMU_CAP 0x604
#define PMU_RES_DEP_MASK 0x624
#define RSRCUPDWNTIME 0x628
#define PMUREG_RESREQ_MASK 0x68c
+#define PMUREG_RESREQ_TIMER 0x688
+#define PMUREG_RESREQ_MASK1 0x6f4
+#define PMUREG_RESREQ_TIMER1 0x6f0
#define EXT_LPO_AVAIL 0x100
#define LPO_SEL (1 << 0)
#define CC_EXT_LPO_PU 0x200000
#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17
#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000
#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16
+#define CC_BP_IND_ACCESS_START_SHIFT 9
+#define CC_BP_IND_ACCESS_START_MASK (1 << CC_BP_IND_ACCESS_START_SHIFT)
+#define CC_BP_IND_ACCESS_RDWR_SHIFT 8
+#define CC_BP_IND_ACCESS_RDWR_MASK (1 << CC_BP_IND_ACCESS_RDWR_SHIFT)
+#define CC_BP_IND_ACCESS_ERROR_SHIFT 10
+#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT)
#ifdef SR_DEBUG
#define SUBCORE_POWER_ON 0x0001
#define MEMLPLDO_POWER_ON_CHK 0x00200000
#endif /* SR_DEBUG */
-#ifdef NFLASH_SUPPORT
+#ifdef CCNFLASH_SUPPORT
/* NAND flash support */
#define CC_NAND_REVISION 0xC00
#define CC_NAND_CMD_START 0xC04
#define CC_NAND_DEVID 0xC60
#define CC_NAND_DEVID_EXT 0xC64
#define CC_NAND_INTFC_STATUS 0xC6C
-#endif /* NFLASH_SUPPORT */
+#endif /* CCNFLASH_SUPPORT */
/* chipid */
-#define CID_ID_MASK 0x0000ffff /* Chip Id mask */
-#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */
-#define CID_REV_SHIFT 16 /* Chip Revision shift */
-#define CID_PKG_MASK 0x00f00000 /* Package Option mask */
-#define CID_PKG_SHIFT 20 /* Package Option shift */
-#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */
+#define CID_ID_MASK 0x0000ffff /**< Chip Id mask */
+#define CID_REV_MASK 0x000f0000 /**< Chip Revision mask */
+#define CID_REV_SHIFT 16 /**< Chip Revision shift */
+#define CID_PKG_MASK 0x00f00000 /**< Package Option mask */
+#define CID_PKG_SHIFT 20 /**< Package Option shift */
+#define CID_CC_MASK 0x0f000000 /**< CoreCount (corerev >= 4) */
#define CID_CC_SHIFT 24
-#define CID_TYPE_MASK 0xf0000000 /* Chip Type */
+#define CID_TYPE_MASK 0xf0000000 /**< Chip Type */
#define CID_TYPE_SHIFT 28
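A minimal sketch of decoding the chipid register with the CID_* fields above; the uint32 typedef is assumed from the driver's own headers, and the function and argument names are hypothetical:

static void decode_chipid(uint32 chipid, uint32 *id, uint32 *rev, uint32 *pkg)
{
	*id = chipid & CID_ID_MASK; /* chip id, low 16 bits */
	*rev = (chipid & CID_REV_MASK) >> CID_REV_SHIFT; /* chip revision */
	*pkg = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT; /* package option */
}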
/* capabilities */
-#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */
-#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */
-#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */
-#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */
-#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */
-#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */
-#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */
-#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */
-#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */
-#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */
-#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */
-#define CC_CAP_PWR_CTL 0x00040000 /* Power control */
-#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */
-#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */
-#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */
-#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */
-#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */
-#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */
-#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */
-#define CC_CAP_ECI 0x20000000 /* ECI Present, rev >= 21 */
-#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */
-#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */
-
-#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */
-#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */
+#define CC_CAP_UARTS_MASK 0x00000003 /**< Number of UARTs */
+#define CC_CAP_MIPSEB 0x00000004 /**< MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL 0x00000018 /**< UARTs clock select */
+#define CC_CAP_UINTCLK 0x00000008 /**< UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO 0x00000020 /**< UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK 0x000000c0 /**< External bus mask */
+#define CC_CAP_EXTBUS_NONE 0x00000000 /**< No ExtBus present */
+#define CC_CAP_EXTBUS_FULL 0x00000040 /**< ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG 0x00000080 /**< ExtBus: ProgIf only */
+#define CC_CAP_FLASH_MASK 0x00000700 /**< Type of flash */
+#define CC_CAP_PLL_MASK 0x00038000 /**< Type of PLL */
+#define CC_CAP_PWR_CTL 0x00040000 /**< Power control */
+#define CC_CAP_OTPSIZE 0x00380000 /**< OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT 19 /**< OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE 5 /**< OTP Size base */
+#define CC_CAP_JTAGP 0x00400000 /**< JTAG Master Present */
+#define CC_CAP_ROM 0x00800000 /**< Internal boot rom active */
+#define CC_CAP_BKPLN64 0x08000000 /**< 64-bit backplane */
+#define CC_CAP_PMU 0x10000000 /**< PMU Present, rev >= 20 */
+#define CC_CAP_ECI 0x20000000 /**< ECI Present, rev >= 21 */
+#define CC_CAP_SROM 0x40000000 /**< Srom Present, rev >= 32 */
+#define CC_CAP_NFLASH 0x80000000 /**< Nand flash present, rev >= 35 */
+
+#define CC_CAP2_SECI 0x00000001 /**< SECI Present, rev >= 36 */
+#define CC_CAP2_GSIO 0x00000002 /**< GSIO (spi/i2c) present, rev >= 37 */
/* capabilities extension */
-#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /* SECI present */
-#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /* GSIO present */
-#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /* GCI present */
-#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /* AOB present */
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /**< SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /**< GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /**< GCI present */
+#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /**< AOB present */
+#define CC_CAP_EXT_SWD_PRESENT 0x00000400 /**< SWD present */
/* WL Channel Info to BT via GCI - bits 40 - 47 */
-#define GCI_WL_CHN_INFO_MASK (0xFF00)
+#define GCI_WL_CHN_INFO_MASK (0xFF00)
+/* bits [51:48] - reserved for wlan TX pwr index */
+/* bits [55:52] btc mode indication */
+#define GCI_WL_BTC_MODE_SHIFT (20)
+#define GCI_WL_BTC_MODE_MASK (0xF << GCI_WL_BTC_MODE_SHIFT)
+#define GCI_WL_ANT_BIT_MASK (0x00c0)
+#define GCI_WL_ANT_SHIFT_BITS (6)
/* PLL type */
#define PLL_NONE 0x00000000
-#define PLL_TYPE1 0x00010000 /* 48MHz base, 3 dividers */
-#define PLL_TYPE2 0x00020000 /* 48MHz, 4 dividers */
-#define PLL_TYPE3 0x00030000 /* 25MHz, 2 dividers */
-#define PLL_TYPE4 0x00008000 /* 48MHz, 4 dividers */
-#define PLL_TYPE5 0x00018000 /* 25MHz, 4 dividers */
-#define PLL_TYPE6 0x00028000 /* 100/200 or 120/240 only */
-#define PLL_TYPE7 0x00038000 /* 25MHz, 4 dividers */
+#define PLL_TYPE1 0x00010000 /**< 48MHz base, 3 dividers */
+#define PLL_TYPE2 0x00020000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE3 0x00030000 /**< 25MHz, 2 dividers */
+#define PLL_TYPE4 0x00008000 /**< 48MHz, 4 dividers */
+#define PLL_TYPE5 0x00018000 /**< 25MHz, 4 dividers */
+#define PLL_TYPE6 0x00028000 /**< 100/200 or 120/240 only */
+#define PLL_TYPE7 0x00038000 /**< 25MHz, 4 dividers */
/* ILP clock */
#define ILP_CLOCK 32000
#define HT_CLOCK 80000000
/* corecontrol */
-#define CC_UARTCLKO 0x00000001 /* Drive UART with internal clock */
-#define CC_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
-#define CC_ASYNCGPIO 0x00000004 /* 1=generate GPIO interrupt without backplane clock */
-#define CC_UARTCLKEN 0x00000008 /* enable UART Clock (corerev > = 21 */
+#define CC_UARTCLKO 0x00000001 /**< Drive UART with internal clock */
+#define CC_SE 0x00000002 /**< sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO 0x00000004 /**< 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN 0x00000008 /**< enable UART Clock (corerev >= 21) */
+
+/* retention_ctl */
+#define RCTL_MEM_RET_SLEEP_LOG_SHIFT 29
+#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT)
/* 4321 chipcontrol */
#define CHIPCTRL_4321A0_DEFAULT 0x3a4
#define CHIPCTRL_4321A1_DEFAULT 0x0a4
-#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */
+#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */
/* Fields in the otpstatus register in rev >= 21 */
#define OTPS_OL_MASK 0x000000ff
-#define OTPS_OL_MFG 0x00000001 /* manuf row is locked */
-#define OTPS_OL_OR1 0x00000002 /* otp redundancy row 1 is locked */
-#define OTPS_OL_OR2 0x00000004 /* otp redundancy row 2 is locked */
-#define OTPS_OL_GU 0x00000008 /* general use region is locked */
+#define OTPS_OL_MFG 0x00000001 /**< manuf row is locked */
+#define OTPS_OL_OR1 0x00000002 /**< otp redundancy row 1 is locked */
+#define OTPS_OL_OR2 0x00000004 /**< otp redundancy row 2 is locked */
+#define OTPS_OL_GU 0x00000008 /**< general use region is locked */
#define OTPS_GUP_MASK 0x00000f00
#define OTPS_GUP_SHIFT 8
-#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
-#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
-#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
-#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
+#define OTPS_GUP_HW 0x00000100 /**< h/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200 /**< s/w subregion is programmed */
+#define OTPS_GUP_CI 0x00000400 /**< chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800 /**< fuse subregion is programmed */
#define OTPS_READY 0x00001000
-#define OTPS_RV(x) (1 << (16 + (x))) /* redundancy entry valid */
+#define OTPS_RV(x) (1 << (16 + (x))) /**< redundancy entry valid */
#define OTPS_RV_MASK 0x0fff0000
#define OTPS_PROGOK 0x40000000
/* Jtagm characteristics that appeared at a given corerev */
-#define JTAGM_CREV_OLD 10 /* Old command set, 16bit max IR */
-#define JTAGM_CREV_IRP 22 /* Able to do pause-ir */
-#define JTAGM_CREV_RTI 28 /* Able to do return-to-idle */
+#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */
+#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */
+#define JTAGM_CREV_RTI 28 /**< Able to do return-to-idle */
/* jtagcmd */
#define JCMD_START 0x80000000
#define JCMD_BUSY 0x80000000
#define JCMD_STATE_MASK 0x60000000
-#define JCMD_STATE_TLR 0x00000000 /* Test-logic-reset */
-#define JCMD_STATE_PIR 0x20000000 /* Pause IR */
-#define JCMD_STATE_PDR 0x40000000 /* Pause DR */
-#define JCMD_STATE_RTI 0x60000000 /* Run-test-idle */
+#define JCMD_STATE_TLR 0x00000000 /**< Test-logic-reset */
+#define JCMD_STATE_PIR 0x20000000 /**< Pause IR */
+#define JCMD_STATE_PDR 0x40000000 /**< Pause DR */
+#define JCMD_STATE_RTI 0x60000000 /**< Run-test-idle */
#define JCMD0_ACC_MASK 0x0000f000
#define JCMD0_ACC_IRDR 0x00000000
#define JCMD0_ACC_DR 0x00001000
#define JCMD0_ACC_IRPDR 0x00004000
#define JCMD0_ACC_PDR 0x00005000
#define JCMD0_IRW_MASK 0x00000f00
-#define JCMD_ACC_MASK 0x000f0000 /* Changes for corerev 11 */
+#define JCMD_ACC_MASK 0x000f0000 /**< Changes for corerev 11 */
#define JCMD_ACC_IRDR 0x00000000
#define JCMD_ACC_DR 0x00010000
#define JCMD_ACC_IR 0x00020000
#define JCMD_ACC_IRPDR 0x00040000
#define JCMD_ACC_PDR 0x00050000
#define JCMD_ACC_PIR 0x00060000
-#define JCMD_ACC_IRDR_I 0x00070000 /* rev 28: return to run-test-idle */
-#define JCMD_ACC_DR_I 0x00080000 /* rev 28: return to run-test-idle */
+#define JCMD_ACC_IRDR_I 0x00070000 /**< rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I 0x00080000 /**< rev 28: return to run-test-idle */
#define JCMD_IRW_MASK 0x00001f00
#define JCMD_IRW_SHIFT 8
#define JCMD_DRW_MASK 0x0000003f
/* jtagctrl */
-#define JCTRL_FORCE_CLK 4 /* Force clock */
-#define JCTRL_EXT_EN 2 /* Enable external targets */
-#define JCTRL_EN 1 /* Enable Jtag master */
+#define JCTRL_FORCE_CLK 4 /**< Force clock */
+#define JCTRL_EXT_EN 2 /**< Enable external targets */
+#define JCTRL_EN 1 /**< Enable Jtag master */
-#define JCTRL_TAPSEL_BIT 0x00000008 /* JtagMasterCtrl tap_sel bit */
+#define JCTRL_TAPSEL_BIT 0x00000008 /**< JtagMasterCtrl tap_sel bit */
+/* swdmasterctrl */
+#define SWDCTRL_INT_EN 8 /**< Enable internal targets */
+#define SWDCTRL_FORCE_CLK 4 /**< Force clock */
+#define SWDCTRL_OVJTAG 2 /**< Enable shared SWD/JTAG pins */
+#define SWDCTRL_EN 1 /**< Enable Jtag master */
/* Fields in clkdiv */
-#define CLKD_SFLASH 0x0f000000
+#define CLKD_SFLASH 0x1f000000
#define CLKD_SFLASH_SHIFT 24
#define CLKD_OTP 0x000f0000
#define CLKD_OTP_SHIFT 16
#define CLKD_UART 0x000000ff
#define CLKD2_SROM 0x00000003
+#define CLKD2_SWD 0xf8000000
+#define CLKD2_SWD_SHIFT 27
/* intstatus/intmask */
-#define CI_GPIO 0x00000001 /* gpio intr */
-#define CI_EI 0x00000002 /* extif intr (corerev >= 3) */
-#define CI_TEMP 0x00000004 /* temp. ctrl intr (corerev >= 15) */
-#define CI_SIRQ 0x00000008 /* serial IRQ intr (corerev >= 15) */
-#define CI_ECI 0x00000010 /* eci intr (corerev >= 21) */
-#define CI_PMU 0x00000020 /* pmu intr (corerev >= 21) */
-#define CI_UART 0x00000040 /* uart intr (corerev >= 21) */
-#define CI_WDRESET 0x80000000 /* watchdog reset occurred */
+#define CI_GPIO 0x00000001 /**< gpio intr */
+#define CI_EI 0x00000002 /**< extif intr (corerev >= 3) */
+#define CI_TEMP 0x00000004 /**< temp. ctrl intr (corerev >= 15) */
+#define CI_SIRQ 0x00000008 /**< serial IRQ intr (corerev >= 15) */
+#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */
+#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */
+#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */
+#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */
/* slow_clk_ctl */
-#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
-#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
-#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
-#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
-#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
-#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
+#define SCC_SS_MASK 0x00000007 /**< slow clock source mask */
+#define SCC_SS_LPO 0x00000000 /**< source of slow clock is LPO */
+#define SCC_SS_XTAL 0x00000001 /**< source of slow clock is crystal */
+#define SCC_SS_PCI 0x00000002 /**< source of slow clock is PCI */
+#define SCC_LF 0x00000200 /**< LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SCC_LP 0x00000400 /**< LPOPowerDown, 1: LPO is disabled,
* 0: LPO is enabled
*/
-#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
+#define SCC_FS 0x00000800 /**< ForceSlowClk, 1: sb/cores running on slow clock,
* 0: power logic control
*/
-#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
+#define SCC_IP 0x00001000 /**< IgnorePllOffReq, 1/0: power logic ignores/honors
* PLL clock disable requests from core
*/
-#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
+#define SCC_XC 0x00002000 /**< XtalControlEn, 1/0: power logic does/doesn't
* disable crystal when appropriate
*/
-#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
-#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_XP 0x00004000 /**< XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK 0xffff0000 /**< ClockDivider (SlowClk = 1/(4+divisor)) */
#define SCC_CD_SHIFT 16
/* system_clk_ctl */
-#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
-#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
-#define SYCC_FP 0x00000004 /* ForcePLLOn */
-#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */
-#define SYCC_HR 0x00000010 /* Force HT */
-#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
+#define SYCC_IE 0x00000001 /**< ILPen: Enable Idle Low Power */
+#define SYCC_AE 0x00000002 /**< ALPen: Enable Active Low Power */
+#define SYCC_FP 0x00000004 /**< ForcePLLOn */
+#define SYCC_AR 0x00000008 /**< Force ALP (or HT if ALPen is not set) */
+#define SYCC_HR 0x00000010 /**< Force HT */
+#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1))) */
#define SYCC_CD_SHIFT 16
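A minimal sketch of the divider arithmetic stated in the SCC_CD_MASK and SYCC_CD_MASK comments above; the source-clock arguments and function names are hypothetical, and the ILP formula is assumed here to be relative to the ALP clock:

static uint32 slowclk_hz(uint32 slow_clk_ctl, uint32 src_hz)
{
	uint32 div = (slow_clk_ctl & SCC_CD_MASK) >> SCC_CD_SHIFT;
	return src_hz / (4 + div); /* SlowClk = source / (4 + divisor) */
}

static uint32 ilpclk_hz(uint32 system_clk_ctl, uint32 alp_hz)
{
	uint32 div = (system_clk_ctl & SYCC_CD_MASK) >> SYCC_CD_SHIFT;
	return alp_hz / (4 * (div + 1)); /* ILP = ALP / (4 * (divisor + 1)), ALP-relative assumed */
}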
/* Indirect backplane access */
#define BPIA_ERROR 0x00000400
/* pcmcia/prog/flash_config */
-#define CF_EN 0x00000001 /* enable */
-#define CF_EM_MASK 0x0000000e /* mode */
+#define CF_EN 0x00000001 /**< enable */
+#define CF_EM_MASK 0x0000000e /**< mode */
#define CF_EM_SHIFT 1
-#define CF_EM_FLASH 0 /* flash/asynchronous mode */
-#define CF_EM_SYNC 2 /* synchronous mode */
-#define CF_EM_PCMCIA 4 /* pcmcia mode */
-#define CF_DS 0x00000010 /* destsize: 0=8bit, 1=16bit */
-#define CF_BS 0x00000020 /* byteswap */
-#define CF_CD_MASK 0x000000c0 /* clock divider */
+#define CF_EM_FLASH 0 /**< flash/asynchronous mode */
+#define CF_EM_SYNC 2 /**< synchronous mode */
+#define CF_EM_PCMCIA 4 /**< pcmcia mode */
+#define CF_DS 0x00000010 /**< destsize: 0=8bit, 1=16bit */
+#define CF_BS 0x00000020 /**< byteswap */
+#define CF_CD_MASK 0x000000c0 /**< clock divider */
#define CF_CD_SHIFT 6
-#define CF_CD_DIV2 0x00000000 /* backplane/2 */
-#define CF_CD_DIV3 0x00000040 /* backplane/3 */
-#define CF_CD_DIV4 0x00000080 /* backplane/4 */
-#define CF_CE 0x00000100 /* clock enable */
-#define CF_SB 0x00000200 /* size/bytestrobe (synch only) */
+#define CF_CD_DIV2 0x00000000 /**< backplane/2 */
+#define CF_CD_DIV3 0x00000040 /**< backplane/3 */
+#define CF_CD_DIV4 0x00000080 /**< backplane/4 */
+#define CF_CE 0x00000100 /**< clock enable */
+#define CF_SB 0x00000200 /**< size/bytestrobe (synch only) */
/* pcmcia_memwait */
-#define PM_W0_MASK 0x0000003f /* waitcount0 */
-#define PM_W1_MASK 0x00001f00 /* waitcount1 */
+#define PM_W0_MASK 0x0000003f /**< waitcount0 */
+#define PM_W1_MASK 0x00001f00 /**< waitcount1 */
#define PM_W1_SHIFT 8
-#define PM_W2_MASK 0x001f0000 /* waitcount2 */
+#define PM_W2_MASK 0x001f0000 /**< waitcount2 */
#define PM_W2_SHIFT 16
-#define PM_W3_MASK 0x1f000000 /* waitcount3 */
+#define PM_W3_MASK 0x1f000000 /**< waitcount3 */
#define PM_W3_SHIFT 24
/* pcmcia_attrwait */
-#define PA_W0_MASK 0x0000003f /* waitcount0 */
-#define PA_W1_MASK 0x00001f00 /* waitcount1 */
+#define PA_W0_MASK 0x0000003f /**< waitcount0 */
+#define PA_W1_MASK 0x00001f00 /**< waitcount1 */
#define PA_W1_SHIFT 8
-#define PA_W2_MASK 0x001f0000 /* waitcount2 */
+#define PA_W2_MASK 0x001f0000 /**< waitcount2 */
#define PA_W2_SHIFT 16
-#define PA_W3_MASK 0x1f000000 /* waitcount3 */
+#define PA_W3_MASK 0x1f000000 /**< waitcount3 */
#define PA_W3_SHIFT 24
/* pcmcia_iowait */
-#define PI_W0_MASK 0x0000003f /* waitcount0 */
-#define PI_W1_MASK 0x00001f00 /* waitcount1 */
+#define PI_W0_MASK 0x0000003f /**< waitcount0 */
+#define PI_W1_MASK 0x00001f00 /**< waitcount1 */
#define PI_W1_SHIFT 8
-#define PI_W2_MASK 0x001f0000 /* waitcount2 */
+#define PI_W2_MASK 0x001f0000 /**< waitcount2 */
#define PI_W2_SHIFT 16
-#define PI_W3_MASK 0x1f000000 /* waitcount3 */
+#define PI_W3_MASK 0x1f000000 /**< waitcount3 */
#define PI_W3_SHIFT 24
/* prog_waitcount */
-#define PW_W0_MASK 0x0000001f /* waitcount0 */
-#define PW_W1_MASK 0x00001f00 /* waitcount1 */
+#define PW_W0_MASK 0x0000001f /**< waitcount0 */
+#define PW_W1_MASK 0x00001f00 /**< waitcount1 */
#define PW_W1_SHIFT 8
-#define PW_W2_MASK 0x001f0000 /* waitcount2 */
+#define PW_W2_MASK 0x001f0000 /**< waitcount2 */
#define PW_W2_SHIFT 16
-#define PW_W3_MASK 0x1f000000 /* waitcount3 */
+#define PW_W3_MASK 0x1f000000 /**< waitcount3 */
#define PW_W3_SHIFT 24
#define PW_W0 0x0000000c
#define PW_W3 0x01000000
/* flash_waitcount */
-#define FW_W0_MASK 0x0000003f /* waitcount0 */
-#define FW_W1_MASK 0x00001f00 /* waitcount1 */
+#define FW_W0_MASK 0x0000003f /**< waitcount0 */
+#define FW_W1_MASK 0x00001f00 /**< waitcount1 */
#define FW_W1_SHIFT 8
-#define FW_W2_MASK 0x001f0000 /* waitcount2 */
+#define FW_W2_MASK 0x001f0000 /**< waitcount2 */
#define FW_W2_SHIFT 16
-#define FW_W3_MASK 0x1f000000 /* waitcount3 */
+#define FW_W3_MASK 0x1f000000 /**< waitcount3 */
#define FW_W3_SHIFT 24
/* When Srom support present, fields in sromcontrol */
#define PCTL_ILP_DIV_MASK 0xffff0000
#define PCTL_ILP_DIV_SHIFT 16
#define PCTL_LQ_REQ_EN 0x00008000
-#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */
-#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */
+#define PCTL_PLL_PLLCTL_UPD 0x00000400 /**< rev 2 */
+#define PCTL_NOILP_ON_WAIT 0x00000200 /**< rev 1 */
#define PCTL_HT_REQ_EN 0x00000100
#define PCTL_ALP_REQ_EN 0x00000080
#define PCTL_XTALFREQ_MASK 0x0000007c
#define GPIO_ONTIME_SHIFT 16
/* clockcontrol_n */
-#define CN_N1_MASK 0x3f /* n1 control */
-#define CN_N2_MASK 0x3f00 /* n2 control */
+#define CN_N1_MASK 0x3f /**< n1 control */
+#define CN_N2_MASK 0x3f00 /**< n2 control */
#define CN_N2_SHIFT 8
-#define CN_PLLC_MASK 0xf0000 /* pll control */
+#define CN_PLLC_MASK 0xf0000 /**< pll control */
#define CN_PLLC_SHIFT 16
/* clockcontrol_sb/pci/uart */
-#define CC_M1_MASK 0x3f /* m1 control */
-#define CC_M2_MASK 0x3f00 /* m2 control */
+#define CC_M1_MASK 0x3f /**< m1 control */
+#define CC_M2_MASK 0x3f00 /**< m2 control */
#define CC_M2_SHIFT 8
-#define CC_M3_MASK 0x3f0000 /* m3 control */
+#define CC_M3_MASK 0x3f0000 /**< m3 control */
#define CC_M3_SHIFT 16
-#define CC_MC_MASK 0x1f000000 /* mux control */
+#define CC_MC_MASK 0x1f000000 /**< mux control */
#define CC_MC_SHIFT 24
/* N3M Clock control magic field values */
-#define CC_F6_2 0x02 /* A factor of 2 in */
-#define CC_F6_3 0x03 /* 6-bit fields like */
-#define CC_F6_4 0x05 /* N1, M1 or M3 */
+#define CC_F6_2 0x02 /**< A factor of 2 in */
+#define CC_F6_3 0x03 /**< 6-bit fields like */
+#define CC_F6_4 0x05 /**< N1, M1 or M3 */
#define CC_F6_5 0x09
#define CC_F6_6 0x11
#define CC_F6_7 0x21
-#define CC_F5_BIAS 5 /* 5-bit fields get this added */
+#define CC_F5_BIAS 5 /**< 5-bit fields get this added */
#define CC_MC_BYPASS 0x08
#define CC_MC_M1 0x04
#define CC_MC_M1M3 0x11
/* Type 2 Clock control magic field values */
-#define CC_T2_BIAS 2 /* n1, n2, m1 & m3 bias */
-#define CC_T2M2_BIAS 3 /* m2 bias */
+#define CC_T2_BIAS 2 /**< n1, n2, m1 & m3 bias */
+#define CC_T2M2_BIAS 3 /**< m2 bias */
#define CC_T2MC_M1BYP 1
#define CC_T2MC_M2BYP 2
#define CC_T2MC_M3BYP 4
/* Type 6 Clock control magic field values */
-#define CC_T6_MMASK 1 /* bits of interest in m */
-#define CC_T6_M0 120000000 /* sb clock for m = 0 */
-#define CC_T6_M1 100000000 /* sb clock for m = 1 */
+#define CC_T6_MMASK 1 /**< bits of interest in m */
+#define CC_T6_M0 120000000 /**< sb clock for m = 0 */
+#define CC_T6_M1 100000000 /**< sb clock for m = 1 */
#define SB2MIPS_T6(sb) (2 * (sb))
/* Common clock base */
-#define CC_CLOCK_BASE1 24000000 /* Half the clock freq */
-#define CC_CLOCK_BASE2 12500000 /* Alternate crystal on some PLLs */
+#define CC_CLOCK_BASE1 24000000 /**< Half the clock freq */
+#define CC_CLOCK_BASE2 12500000 /**< Alternate crystal on some PLLs */
/* Clock control values for 200MHz in 5350 */
#define CLKC_5350_N 0x0311
#define CLKC_5350_M 0x04020009
/* Flash types in the chipcommon capabilities register */
-#define FLASH_NONE 0x000 /* No flash */
-#define SFLASH_ST 0x100 /* ST serial flash */
-#define SFLASH_AT 0x200 /* Atmel serial flash */
+#define FLASH_NONE 0x000 /**< No flash */
+#define SFLASH_ST 0x100 /**< ST serial flash */
+#define SFLASH_AT 0x200 /**< Atmel serial flash */
#define NFLASH 0x300
-#define PFLASH 0x700 /* Parallel flash */
+#define PFLASH 0x700 /**< Parallel flash */
#define QSPIFLASH_ST 0x800
#define QSPIFLASH_AT 0x900
/* Bits in the ExtBus config registers */
-#define CC_CFG_EN 0x0001 /* Enable */
-#define CC_CFG_EM_MASK 0x000e /* Extif Mode */
-#define CC_CFG_EM_ASYNC 0x0000 /* Async/Parallel flash */
-#define CC_CFG_EM_SYNC 0x0002 /* Synchronous */
-#define CC_CFG_EM_PCMCIA 0x0004 /* PCMCIA */
-#define CC_CFG_EM_IDE 0x0006 /* IDE */
-#define CC_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
-#define CC_CFG_CD_MASK 0x00e0 /* Sync: Clock divisor, rev >= 20 */
-#define CC_CFG_CE 0x0100 /* Sync: Clock enable, rev >= 20 */
-#define CC_CFG_SB 0x0200 /* Sync: Size/Bytestrobe, rev >= 20 */
-#define CC_CFG_IS 0x0400 /* Extif Sync Clk Select, rev >= 20 */
+#define CC_CFG_EN 0x0001 /**< Enable */
+#define CC_CFG_EM_MASK 0x000e /**< Extif Mode */
+#define CC_CFG_EM_ASYNC 0x0000 /**< Async/Parallel flash */
+#define CC_CFG_EM_SYNC 0x0002 /**< Synchronous */
+#define CC_CFG_EM_PCMCIA 0x0004 /**< PCMCIA */
+#define CC_CFG_EM_IDE 0x0006 /**< IDE */
+#define CC_CFG_DS 0x0010 /**< Data size, 0=8bit, 1=16bit */
+#define CC_CFG_CD_MASK 0x00e0 /**< Sync: Clock divisor, rev >= 20 */
+#define CC_CFG_CE 0x0100 /**< Sync: Clock enable, rev >= 20 */
+#define CC_CFG_SB 0x0200 /**< Sync: Size/Bytestrobe, rev >= 20 */
+#define CC_CFG_IS 0x0400 /**< Extif Sync Clk Select, rev >= 20 */
/* ExtBus address space */
-#define CC_EB_BASE 0x1a000000 /* Chipc ExtBus base address */
-#define CC_EB_PCMCIA_MEM 0x1a000000 /* PCMCIA 0 memory base address */
-#define CC_EB_PCMCIA_IO 0x1a200000 /* PCMCIA 0 I/O base address */
-#define CC_EB_PCMCIA_CFG 0x1a400000 /* PCMCIA 0 config base address */
-#define CC_EB_IDE 0x1a800000 /* IDE memory base */
-#define CC_EB_PCMCIA1_MEM 0x1a800000 /* PCMCIA 1 memory base address */
-#define CC_EB_PCMCIA1_IO 0x1aa00000 /* PCMCIA 1 I/O base address */
-#define CC_EB_PCMCIA1_CFG 0x1ac00000 /* PCMCIA 1 config base address */
-#define CC_EB_PROGIF 0x1b000000 /* ProgIF Async/Sync base address */
+#define CC_EB_BASE 0x1a000000 /**< Chipc ExtBus base address */
+#define CC_EB_PCMCIA_MEM 0x1a000000 /**< PCMCIA 0 memory base address */
+#define CC_EB_PCMCIA_IO 0x1a200000 /**< PCMCIA 0 I/O base address */
+#define CC_EB_PCMCIA_CFG 0x1a400000 /**< PCMCIA 0 config base address */
+#define CC_EB_IDE 0x1a800000 /**< IDE memory base */
+#define CC_EB_PCMCIA1_MEM 0x1a800000 /**< PCMCIA 1 memory base address */
+#define CC_EB_PCMCIA1_IO 0x1aa00000 /**< PCMCIA 1 I/O base address */
+#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */
+#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */
/* Start/busy bit in flashcontrol */
#define SFLASH_OPCODE 0x000000ff
#define SFLASH_ACTION 0x00000700
-#define SFLASH_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */
+#define SFLASH_CS_ACTIVE 0x00001000 /**< Chip Select Active, rev >= 20 */
#define SFLASH_START 0x80000000
#define SFLASH_BUSY SFLASH_START
/* flashcontrol action codes */
-#define SFLASH_ACT_OPONLY 0x0000 /* Issue opcode only */
-#define SFLASH_ACT_OP1D 0x0100 /* opcode + 1 data byte */
-#define SFLASH_ACT_OP3A 0x0200 /* opcode + 3 addr bytes */
-#define SFLASH_ACT_OP3A1D 0x0300 /* opcode + 3 addr & 1 data bytes */
-#define SFLASH_ACT_OP3A4D 0x0400 /* opcode + 3 addr & 4 data bytes */
-#define SFLASH_ACT_OP3A4X4D 0x0500 /* opcode + 3 addr, 4 don't care & 4 data bytes */
-#define SFLASH_ACT_OP3A1X4D 0x0700 /* opcode + 3 addr, 1 don't care & 4 data bytes */
+#define SFLASH_ACT_OPONLY 0x0000 /**< Issue opcode only */
+#define SFLASH_ACT_OP1D 0x0100 /**< opcode + 1 data byte */
+#define SFLASH_ACT_OP3A 0x0200 /**< opcode + 3 addr bytes */
+#define SFLASH_ACT_OP3A1D 0x0300 /**< opcode + 3 addr & 1 data bytes */
+#define SFLASH_ACT_OP3A4D 0x0400 /**< opcode + 3 addr & 4 data bytes */
+#define SFLASH_ACT_OP3A4X4D 0x0500 /**< opcode + 3 addr, 4 don't care & 4 data bytes */
+#define SFLASH_ACT_OP3A1X4D 0x0700 /**< opcode + 3 addr, 1 don't care & 4 data bytes */
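One plausible use of the action codes above, sketched from the SFLASH_START/SFLASH_BUSY bits: combine an action code with an opcode in the low byte, write it to flashcontrol with the start bit set, then poll until busy clears. The pointer argument and helper name are hypothetical; real driver code would go through its own register accessors:

static void sflash_cmd(volatile uint32 *flashcontrol, uint32 action_opcode)
{
	/* Hypothetical sketch: action_opcode is an SFLASH_ACT_* value OR'ed with
	 * a flash opcode in the SFLASH_OPCODE field.
	 */
	*flashcontrol = SFLASH_START | action_opcode;
	while (*flashcontrol & SFLASH_BUSY)
		; /* spin until the start/busy bit clears */
}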
/* flashcontrol action+opcodes for ST flashes */
-#define SFLASH_ST_WREN 0x0006 /* Write Enable */
-#define SFLASH_ST_WRDIS 0x0004 /* Write Disable */
-#define SFLASH_ST_RDSR 0x0105 /* Read Status Register */
-#define SFLASH_ST_WRSR 0x0101 /* Write Status Register */
-#define SFLASH_ST_READ 0x0303 /* Read Data Bytes */
-#define SFLASH_ST_PP 0x0302 /* Page Program */
-#define SFLASH_ST_SE 0x02d8 /* Sector Erase */
-#define SFLASH_ST_BE 0x00c7 /* Bulk Erase */
-#define SFLASH_ST_DP 0x00b9 /* Deep Power-down */
-#define SFLASH_ST_RES 0x03ab /* Read Electronic Signature */
-#define SFLASH_ST_CSA 0x1000 /* Keep chip select asserted */
-#define SFLASH_ST_SSE 0x0220 /* Sub-sector Erase */
-
-#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */
-#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */
+#define SFLASH_ST_WREN 0x0006 /**< Write Enable */
+#define SFLASH_ST_WRDIS 0x0004 /**< Write Disable */
+#define SFLASH_ST_RDSR 0x0105 /**< Read Status Register */
+#define SFLASH_ST_WRSR 0x0101 /**< Write Status Register */
+#define SFLASH_ST_READ 0x0303 /**< Read Data Bytes */
+#define SFLASH_ST_PP 0x0302 /**< Page Program */
+#define SFLASH_ST_SE 0x02d8 /**< Sector Erase */
+#define SFLASH_ST_BE 0x00c7 /**< Bulk Erase */
+#define SFLASH_ST_DP 0x00b9 /**< Deep Power-down */
+#define SFLASH_ST_RES 0x03ab /**< Read Electronic Signature */
+#define SFLASH_ST_CSA 0x1000 /**< Keep chip select asserted */
+#define SFLASH_ST_SSE 0x0220 /**< Sub-sector Erase */
+
+#define SFLASH_MXIC_RDID 0x0390 /**< Read Manufacture ID */
+#define SFLASH_MXIC_MFID 0xc2 /**< MXIC Manufacture ID */
/* Status register bits for ST flashes */
-#define SFLASH_ST_WIP 0x01 /* Write In Progress */
-#define SFLASH_ST_WEL 0x02 /* Write Enable Latch */
-#define SFLASH_ST_BP_MASK 0x1c /* Block Protect */
+#define SFLASH_ST_WIP 0x01 /**< Write In Progress */
+#define SFLASH_ST_WEL 0x02 /**< Write Enable Latch */
+#define SFLASH_ST_BP_MASK 0x1c /**< Block Protect */
#define SFLASH_ST_BP_SHIFT 2
-#define SFLASH_ST_SRWD 0x80 /* Status Register Write Disable */
+#define SFLASH_ST_SRWD 0x80 /**< Status Register Write Disable */
/* flashcontrol action+opcodes for Atmel flashes */
#define SFLASH_AT_READ 0x07e8
* a 8250, 16450, or 16550(A).
*/
-#define UART_RX 0 /* In: Receive buffer (DLAB=0) */
-#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */
-#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */
-#define UART_IER 1 /* In/Out: Interrupt Enable Register (DLAB=0) */
-#define UART_DLM 1 /* Out: Divisor Latch High (DLAB=1) */
-#define UART_IIR 2 /* In: Interrupt Identity Register */
-#define UART_FCR 2 /* Out: FIFO Control Register */
-#define UART_LCR 3 /* Out: Line Control Register */
-#define UART_MCR 4 /* Out: Modem Control Register */
-#define UART_LSR 5 /* In: Line Status Register */
-#define UART_MSR 6 /* In: Modem Status Register */
-#define UART_SCR 7 /* I/O: Scratch Register */
-#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
-#define UART_LCR_WLEN8 0x03 /* Word length: 8 bits */
-#define UART_MCR_OUT2 0x08 /* MCR GPIO out 2 */
-#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
-#define UART_LSR_RX_FIFO 0x80 /* Receive FIFO error */
-#define UART_LSR_TDHR 0x40 /* Data-hold-register empty */
-#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
-#define UART_LSR_BREAK 0x10 /* Break interrupt */
-#define UART_LSR_FRAMING 0x08 /* Framing error */
-#define UART_LSR_PARITY 0x04 /* Parity error */
-#define UART_LSR_OVERRUN 0x02 /* Overrun error */
-#define UART_LSR_RXRDY 0x01 /* Receiver ready */
-#define UART_FCR_FIFO_ENABLE 1 /* FIFO control register bit controlling FIFO enable/disable */
+#define UART_RX 0 /**< In: Receive buffer (DLAB=0) */
+#define UART_TX 0 /**< Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /**< Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER 1 /**< In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM 1 /**< Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR 2 /**< In: Interrupt Identity Register */
+#define UART_FCR 2 /**< Out: FIFO Control Register */
+#define UART_LCR 3 /**< Out: Line Control Register */
+#define UART_MCR 4 /**< Out: Modem Control Register */
+#define UART_LSR 5 /**< In: Line Status Register */
+#define UART_MSR 6 /**< In: Modem Status Register */
+#define UART_SCR 7 /**< I/O: Scratch Register */
+#define UART_LCR_DLAB 0x80 /**< Divisor latch access bit */
+#define UART_LCR_WLEN8 0x03 /**< Word length: 8 bits */
+#define UART_MCR_OUT2 0x08 /**< MCR GPIO out 2 */
+#define UART_MCR_LOOP 0x10 /**< Enable loopback test mode */
+#define UART_LSR_RX_FIFO 0x80 /**< Receive FIFO error */
+#define UART_LSR_TDHR 0x40 /**< Data-hold-register empty */
+#define UART_LSR_THRE 0x20 /**< Transmit-hold-register empty */
+#define UART_LSR_BREAK 0x10 /**< Break interrupt */
+#define UART_LSR_FRAMING 0x08 /**< Framing error */
+#define UART_LSR_PARITY 0x04 /**< Parity error */
+#define UART_LSR_OVERRUN 0x02 /**< Overrun error */
+#define UART_LSR_RXRDY 0x01 /**< Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1 /**< FIFO control register bit controlling FIFO enable/disable */
/* Interrupt Identity Register (IIR) bits */
-#define UART_IIR_FIFO_MASK 0xc0 /* IIR FIFO disable/enabled mask */
-#define UART_IIR_INT_MASK 0xf /* IIR interrupt ID source */
-#define UART_IIR_MDM_CHG 0x0 /* Modem status changed */
-#define UART_IIR_NOINT 0x1 /* No interrupt pending */
-#define UART_IIR_THRE 0x2 /* THR empty */
-#define UART_IIR_RCVD_DATA 0x4 /* Received data available */
-#define UART_IIR_RCVR_STATUS 0x6 /* Receiver status */
-#define UART_IIR_CHAR_TIME 0xc /* Character time */
+#define UART_IIR_FIFO_MASK 0xc0 /**< IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK 0xf /**< IIR interrupt ID source */
+#define UART_IIR_MDM_CHG 0x0 /**< Modem status changed */
+#define UART_IIR_NOINT 0x1 /**< No interrupt pending */
+#define UART_IIR_THRE 0x2 /**< THR empty */
+#define UART_IIR_RCVD_DATA 0x4 /**< Received data available */
+#define UART_IIR_RCVR_STATUS 0x6 /**< Receiver status */
+#define UART_IIR_CHAR_TIME 0xc /**< Character time */
/* Interrupt Enable Register (IER) bits */
-#define UART_IER_PTIME 128 /* Programmable THRE Interrupt Mode Enable */
-#define UART_IER_EDSSI 8 /* enable modem status interrupt */
-#define UART_IER_ELSI 4 /* enable receiver line status interrupt */
-#define UART_IER_ETBEI 2 /* enable transmitter holding register empty interrupt */
-#define UART_IER_ERBFI 1 /* enable data available interrupt */
+#define UART_IER_PTIME 128 /**< Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI 8 /**< enable modem status interrupt */
+#define UART_IER_ELSI 4 /**< enable receiver line status interrupt */
+#define UART_IER_ETBEI 2 /**< enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI 1 /**< enable data available interrupt */
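A polled-transmit sketch against the UART_* offsets above (standard 16550-style behaviour): wait for the transmit-hold register to empty, then write the byte. The "uart_base" pointer and helper name are hypothetical, and byte-wide register spacing is assumed:

static void uart_putc(volatile uint8 *uart_base, uint8 c)
{
	while (!(uart_base[UART_LSR] & UART_LSR_THRE))
		; /* wait for transmit-hold-register empty */
	uart_base[UART_TX] = c;
}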
/* pmustatus */
#define PST_SLOW_WR_PENDING 0x0400
#define PCAP_VC_SHIFT 21
#define PCAP_CC_MASK 0x1e000000
#define PCAP_CC_SHIFT 25
-#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */
+#define PCAP5_PC_MASK 0x003e0000 /**< PMU corerev >= 5 */
#define PCAP5_PC_SHIFT 17
#define PCAP5_VC_MASK 0x07c00000
#define PCAP5_VC_SHIFT 22
#define PRRT_HT_REQ 0x2000
#define PRRT_HQ_REQ 0x4000
+/* PMU Int Control register bits */
+#define PMU_INTC_ALP_REQ 0x1
+#define PMU_INTC_HT_REQ 0x2
+#define PMU_INTC_HQ_REQ 0x4
+
/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
#define RSRC_INTR_MASK_TIMER_INT_0 1
#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
+#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080
+#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000
+
/* PMU chip control2 register */
#define PMU_CHIPCTL2 2
#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18)
#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19)
#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20)
#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21)
+#define PMU_CC2_MASK_WL_DEV_WAKE (1 << 22)
+#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25)
+
/* PMU chip control3 register */
#define PMU_CHIPCTL3 3
#define PMU_CC3_ENABLE_RF_SHIFT 22
#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23
+/* PMU chip control4 register */
+#define PMU_CHIPCTL4 4
+
+/* 53537 series moved switch_type and gmac_if_type to CC4 [15:14] and [13:12] */
+#define PMU_CC4_IF_TYPE_MASK 0x00003000
+#define PMU_CC4_IF_TYPE_RMII 0x00000000
+#define PMU_CC4_IF_TYPE_MII 0x00001000
+#define PMU_CC4_IF_TYPE_RGMII 0x00002000
+
+#define PMU_CC4_SW_TYPE_MASK 0x0000c000
+#define PMU_CC4_SW_TYPE_EPHY 0x00000000
+#define PMU_CC4_SW_TYPE_EPHYMII 0x00004000
+#define PMU_CC4_SW_TYPE_EPHYRMII 0x00008000
+#define PMU_CC4_SW_TYPE_RGMII 0x0000c000
+
/* PMU chip control5 register */
#define PMU_CHIPCTL5 5
#define PMU_CHIPCTL7 7
#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25)
#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27)
+/* 53537 series have gmac1 gmac_if_type in cc7 [7:6] (default 0b01) */
+#define PMU_CC7_IF_TYPE_MASK 0x000000c0
+#define PMU_CC7_IF_TYPE_RMII 0x00000000
+#define PMU_CC7_IF_TYPE_MII 0x00000040
+#define PMU_CC7_IF_TYPE_RGMII 0x00000080
/* PMU corerev and chip specific PLL controls.
#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
-#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /**< recommended for 4319 */
#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
#define PMU1_PLL0_PLLCTL6 6
#define PMU1_PLL0_PLLCTL7 7
-
#define PMU1_PLL0_PLLCTL8 8
-#define PMU1_PLLCTL8_OPENLOOP_MASK 0x2
+
+#define PMU1_PLLCTL8_OPENLOOP_MASK (1 << 1)
+#define PMU_PLL4350_OPENLOOP_MASK (1 << 7)
/* PMU rev 2 control words */
#define PMU2_PHY_PLL_PLLCTL 4
/* 4706 PMU */
#define PMU4706_MAINPLL_PLL0 0
-#define PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */
+#define PMU6_4706_PROCPLL_OFF 4 /**< The CPU PLL */
#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000
#define PMU6_4706_PROC_P2DIV_SHIFT 16
#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000
#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
-#define PMU15_ARM_96MHZ 96000000 /* 96 Mhz */
-#define PMU15_ARM_98MHZ 98400000 /* 98.4 Mhz */
-#define PMU15_ARM_97MHZ 97000000 /* 97 Mhz */
+#define PMU15_ARM_96MHZ 96000000 /**< 96 MHz */
+#define PMU15_ARM_98MHZ 98400000 /**< 98.4 MHz */
+#define PMU15_ARM_97MHZ 97000000 /**< 97 MHz */
#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */
/* 5354 resources */
-#define RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */
-#define RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */
-#define RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */
-#define RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
-#define RES5354_ILP_REQUEST 4 /* 0x00010 */
-#define RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */
-#define RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */
-#define RES5354_ROM_SWITCH 7 /* 0x00080 */
-#define RES5354_PA_REF_LDO 8 /* 0x00100 */
-#define RES5354_RADIO_LDO 9 /* 0x00200 */
-#define RES5354_AFE_LDO 10 /* 0x00400 */
-#define RES5354_PLL_LDO 11 /* 0x00800 */
-#define RES5354_BG_FILTBYP 12 /* 0x01000 */
-#define RES5354_TX_FILTBYP 13 /* 0x02000 */
-#define RES5354_RX_FILTBYP 14 /* 0x04000 */
-#define RES5354_XTAL_PU 15 /* 0x08000 */
-#define RES5354_XTAL_EN 16 /* 0x10000 */
-#define RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */
-#define RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */
-#define RES5354_BB_PLL_PU 19 /* 0x80000 */
+#define RES5354_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES5354_BB_SWITCHER_PWM 1 /**< 0x00002 */
+#define RES5354_BB_SWITCHER_BURST 2 /**< 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */
+#define RES5354_ILP_REQUEST 4 /**< 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM 5 /**< 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST 6 /**< 0x00040 */
+#define RES5354_ROM_SWITCH 7 /**< 0x00080 */
+#define RES5354_PA_REF_LDO 8 /**< 0x00100 */
+#define RES5354_RADIO_LDO 9 /**< 0x00200 */
+#define RES5354_AFE_LDO 10 /**< 0x00400 */
+#define RES5354_PLL_LDO 11 /**< 0x00800 */
+#define RES5354_BG_FILTBYP 12 /**< 0x01000 */
+#define RES5354_TX_FILTBYP 13 /**< 0x02000 */
+#define RES5354_RX_FILTBYP 14 /**< 0x04000 */
+#define RES5354_XTAL_PU 15 /**< 0x08000 */
+#define RES5354_XTAL_EN 16 /**< 0x10000 */
+#define RES5354_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES5354_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES5354_BB_PLL_PU 19 /**< 0x80000 */
/* 5357 Chip specific ChipControl register bits */
#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
#define CCTRL43228_EXTPA_C1 (1<<9) /* core0 extPA in ChipControl 1, bit 1 */
/* 4328 resources */
-#define RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */
-#define RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */
-#define RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */
-#define RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
-#define RES4328_ILP_REQUEST 4 /* 0x00010 */
-#define RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */
-#define RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */
-#define RES4328_ROM_SWITCH 7 /* 0x00080 */
-#define RES4328_PA_REF_LDO 8 /* 0x00100 */
-#define RES4328_RADIO_LDO 9 /* 0x00200 */
-#define RES4328_AFE_LDO 10 /* 0x00400 */
-#define RES4328_PLL_LDO 11 /* 0x00800 */
-#define RES4328_BG_FILTBYP 12 /* 0x01000 */
-#define RES4328_TX_FILTBYP 13 /* 0x02000 */
-#define RES4328_RX_FILTBYP 14 /* 0x04000 */
-#define RES4328_XTAL_PU 15 /* 0x08000 */
-#define RES4328_XTAL_EN 16 /* 0x10000 */
-#define RES4328_BB_PLL_FILTBYP 17 /* 0x20000 */
-#define RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */
-#define RES4328_BB_PLL_PU 19 /* 0x80000 */
+#define RES4328_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES4328_BB_SWITCHER_PWM 1 /**< 0x00002 */
+#define RES4328_BB_SWITCHER_BURST 2 /**< 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */
+#define RES4328_ILP_REQUEST 4 /**< 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM 5 /**< 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST 6 /**< 0x00040 */
+#define RES4328_ROM_SWITCH 7 /**< 0x00080 */
+#define RES4328_PA_REF_LDO 8 /**< 0x00100 */
+#define RES4328_RADIO_LDO 9 /**< 0x00200 */
+#define RES4328_AFE_LDO 10 /**< 0x00400 */
+#define RES4328_PLL_LDO 11 /**< 0x00800 */
+#define RES4328_BG_FILTBYP 12 /**< 0x01000 */
+#define RES4328_TX_FILTBYP 13 /**< 0x02000 */
+#define RES4328_RX_FILTBYP 14 /**< 0x04000 */
+#define RES4328_XTAL_PU 15 /**< 0x08000 */
+#define RES4328_XTAL_EN 16 /**< 0x10000 */
+#define RES4328_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES4328_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES4328_BB_PLL_PU 19 /**< 0x80000 */
/* 4325 A0/A1 resources */
-#define RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */
-#define RES4325_CBUCK_BURST 1 /* 0x00000002 */
-#define RES4325_CBUCK_PWM 2 /* 0x00000004 */
-#define RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */
-#define RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */
-#define RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */
-#define RES4325_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4325_ABUCK_BURST 7 /* 0x00000080 */
-#define RES4325_ABUCK_PWM 8 /* 0x00000100 */
-#define RES4325_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4325_OTP_PU 10 /* 0x00000400 */
-#define RES4325_LNLDO3_PU 11 /* 0x00000800 */
-#define RES4325_LNLDO4_PU 12 /* 0x00001000 */
-#define RES4325_XTAL_PU 13 /* 0x00002000 */
-#define RES4325_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4325_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4325_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4325_HT_AVAIL 21 /* 0x00200000 */
+#define RES4325_BUCK_BOOST_BURST 0 /**< 0x00000001 */
+#define RES4325_CBUCK_BURST 1 /**< 0x00000002 */
+#define RES4325_CBUCK_PWM 2 /**< 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST 3 /**< 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM 4 /**< 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM 5 /**< 0x00000020 */
+#define RES4325_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4325_ABUCK_BURST 7 /**< 0x00000080 */
+#define RES4325_ABUCK_PWM 8 /**< 0x00000100 */
+#define RES4325_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4325_OTP_PU 10 /**< 0x00000400 */
+#define RES4325_LNLDO3_PU 11 /**< 0x00000800 */
+#define RES4325_LNLDO4_PU 12 /**< 0x00001000 */
+#define RES4325_XTAL_PU 13 /**< 0x00002000 */
+#define RES4325_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4325_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4325_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4325_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4325_HT_AVAIL 21 /**< 0x00200000 */
/* 4325 B0/C0 resources */
-#define RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4325B0_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4325B0_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4325B0_CLDO_PU 4 /* 0x00000010 */
+#define RES4325B0_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4325B0_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4325B0_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4325B0_CLDO_PU 4 /**< 0x00000010 */
/* 4325 C1 resources */
-#define RES4325C1_LNLDO2_PU 12 /* 0x00001000 */
+#define RES4325C1_LNLDO2_PU 12 /**< 0x00001000 */
/* 4325 chip-specific ChipStatus register bits */
#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
+#define CST4325_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
#define CST4325_SDIO_USB_MODE_MASK 0x00000004
#define CST4325_SDIO_USB_MODE_SHIFT 2
#define CST4325_RCAL_VALID_MASK 0x00000008
#define CST4325_RCAL_VALID_SHIFT 3
#define CST4325_RCAL_VALUE_MASK 0x000001f0
#define CST4325_RCAL_VALUE_SHIFT 4
-#define CST4325_PMUTOP_2B_MASK 0x00000200 /* 1 for 2b, 0 for to 2a */
+#define CST4325_PMUTOP_2B_MASK 0x00000200 /**< 1 for 2b, 0 for to 2a */
#define CST4325_PMUTOP_2B_SHIFT 9
-#define RES4329_RESERVED0 0 /* 0x00000001 */
-#define RES4329_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4329_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4329_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4329_CLDO_PU 4 /* 0x00000010 */
-#define RES4329_PALDO_PU 5 /* 0x00000020 */
-#define RES4329_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4329_RESERVED7 7 /* 0x00000080 */
-#define RES4329_RESERVED8 8 /* 0x00000100 */
-#define RES4329_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4329_OTP_PU 10 /* 0x00000400 */
-#define RES4329_RESERVED11 11 /* 0x00000800 */
-#define RES4329_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4329_XTAL_PU 13 /* 0x00002000 */
-#define RES4329_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4329_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4329_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4329_HT_AVAIL 21 /* 0x00200000 */
+#define RES4329_RESERVED0 0 /**< 0x00000001 */
+#define RES4329_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4329_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4329_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4329_CLDO_PU 4 /**< 0x00000010 */
+#define RES4329_PALDO_PU 5 /**< 0x00000020 */
+#define RES4329_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4329_RESERVED7 7 /**< 0x00000080 */
+#define RES4329_RESERVED8 8 /**< 0x00000100 */
+#define RES4329_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4329_OTP_PU 10 /**< 0x00000400 */
+#define RES4329_RESERVED11 11 /**< 0x00000800 */
+#define RES4329_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4329_XTAL_PU 13 /**< 0x00002000 */
+#define RES4329_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4329_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4329_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4329_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4329_HT_AVAIL 21 /**< 0x00200000 */
#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
+#define CST4329_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
#define CST4329_SPI_SDIO_MODE_SHIFT 2
/* 4312 chip-specific ChipStatus register bits */
#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4312_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4312_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4312_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4312_OTP_BAD 3 /* OTP is broken, SPROM is present */
+#define CST4312_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD 3 /**< OTP is broken, SPROM is present */
/* 4312 resources (all PMU chips with little memory constraint) */
-#define RES4312_SWITCHER_BURST 0 /* 0x00000001 */
-#define RES4312_SWITCHER_PWM 1 /* 0x00000002 */
-#define RES4312_PA_REF_LDO 2 /* 0x00000004 */
-#define RES4312_CORE_LDO_BURST 3 /* 0x00000008 */
-#define RES4312_CORE_LDO_PWM 4 /* 0x00000010 */
-#define RES4312_RADIO_LDO 5 /* 0x00000020 */
-#define RES4312_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4312_BG_FILTBYP 7 /* 0x00000080 */
-#define RES4312_TX_FILTBYP 8 /* 0x00000100 */
-#define RES4312_RX_FILTBYP 9 /* 0x00000200 */
-#define RES4312_XTAL_PU 10 /* 0x00000400 */
-#define RES4312_ALP_AVAIL 11 /* 0x00000800 */
-#define RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */
-#define RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */
-#define RES4312_HT_AVAIL 14 /* 0x00004000 */
+#define RES4312_SWITCHER_BURST 0 /**< 0x00000001 */
+#define RES4312_SWITCHER_PWM 1 /**< 0x00000002 */
+#define RES4312_PA_REF_LDO 2 /**< 0x00000004 */
+#define RES4312_CORE_LDO_BURST 3 /**< 0x00000008 */
+#define RES4312_CORE_LDO_PWM 4 /**< 0x00000010 */
+#define RES4312_RADIO_LDO 5 /**< 0x00000020 */
+#define RES4312_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4312_BG_FILTBYP 7 /**< 0x00000080 */
+#define RES4312_TX_FILTBYP 8 /**< 0x00000100 */
+#define RES4312_RX_FILTBYP 9 /**< 0x00000200 */
+#define RES4312_XTAL_PU 10 /**< 0x00000400 */
+#define RES4312_ALP_AVAIL 11 /**< 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP 12 /**< 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP 13 /**< 0x00002000 */
+#define RES4312_HT_AVAIL 14 /**< 0x00004000 */
/* 4322 resources */
#define RES4322_RF_LDO 0
#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
#define CST4322_SPROM_OTP_SEL_SHIFT 6
-#define CST4322_NO_SPROM_OTP 0 /* no OTP, no SPROM */
-#define CST4322_SPROM_PRESENT 1 /* SPROM is present */
-#define CST4322_OTP_PRESENT 2 /* OTP is present */
+#define CST4322_NO_SPROM_OTP 0 /**< no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT 1 /**< SPROM is present */
+#define CST4322_OTP_PRESENT 2 /**< OTP is present */
#define CST4322_PCI_OR_USB 0x00000100
#define CST4322_BOOT_MASK 0x00000600
#define CST4322_BOOT_SHIFT 9
-#define CST4322_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST4322_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST4322_BOOT_FROM_FLASH 2 /* boot from FLASH */
+#define CST4322_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST4322_BOOT_FROM_FLASH 2 /**< boot from FLASH */
#define CST4322_BOOT_FROM_INVALID 3
#define CST4322_ILP_DIV_EN 0x00000800
#define CST4322_FLASH_TYPE_MASK 0x00001000
#define CST4322_FLASH_TYPE_SHIFT 12
-#define CST4322_FLASH_TYPE_SHIFT_ST 0 /* ST serial FLASH */
-#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /* ATMEL flash */
+#define CST4322_FLASH_TYPE_SHIFT_ST 0 /**< ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /**< ATMEL flash */
#define CST4322_ARM_TAP_SEL 0x00002000
#define CST4322_RES_INIT_MODE_MASK 0x0000c000
#define CST4322_RES_INIT_MODE_SHIFT 14
-#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /* resinitmode: ILP available */
-#define CST4322_RES_INIT_MODE_ILPREQ 1 /* resinitmode: ILP request */
-#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /* resinitmode: ALP available */
-#define CST4322_RES_INIT_MODE_HTAVAIL 3 /* resinitmode: HT available */
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /**< resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ 1 /**< resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /**< resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL 3 /**< resinitmode: HT available */
#define CST4322_PCIPLLCLK_GATING 0x00010000
#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
#define CST4322_PCI_CARDBUS_MODE 0x00040000
#define RES43236_HT_SI_AVAIL 5
/* 43236 chip-specific ChipControl register bits */
-#define CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL43236_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */
-#define CCTRL43236_EXT_LNA (1<<2) /* 0 disable */
-#define CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */
-#define CCTRL43236_GSIO (1<<4) /* 0 disable */
+#define CCTRL43236_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL43236_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA (1<<2) /**< 0 disable */
+#define CCTRL43236_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO (1<<4) /**< 0 disable */
/* 43236 Chip specific ChipStatus register bits */
#define CST43236_SFLASH_MASK 0x00000040
#define CST43236_OTP_SEL_MASK 0x00000080
#define CST43236_OTP_SEL_SHIFT 7
-#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
-#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
+#define CST43236_HSIC_MASK 0x00000100 /**< USB/HSIC */
+#define CST43236_BP_CLK 0x00000200 /**< 120/96Mbps */
#define CST43236_BOOT_MASK 0x00001800
#define CST43236_BOOT_SHIFT 11
-#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
+#define CST43236_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */
#define CST43236_BOOT_FROM_INVALID 3
/* 43237 resources */
#define RES43237_HT_SI_AVAIL 5
/* 43237 chip-specific ChipControl register bits */
-#define CCTRL43237_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL43237_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */
-#define CCTRL43237_EXT_LNA (1<<2) /* 0 disable */
-#define CCTRL43237_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */
-#define CCTRL43237_GSIO (1<<4) /* 0 disable */
+#define CCTRL43237_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL43237_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA (1<<2) /**< 0 disable */
+#define CCTRL43237_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO (1<<4) /**< 0 disable */
/* 43237 Chip specific ChipStatus register bits */
#define CST43237_SFLASH_MASK 0x00000040
#define CST43237_OTP_SEL_MASK 0x00000080
#define CST43237_OTP_SEL_SHIFT 7
-#define CST43237_HSIC_MASK 0x00000100 /* USB/HSIC */
-#define CST43237_BP_CLK 0x00000200 /* 120/96Mbps */
+#define CST43237_HSIC_MASK 0x00000100 /**< USB/HSIC */
+#define CST43237_BP_CLK 0x00000200 /**< 120/96Mbps */
#define CST43237_BOOT_MASK 0x00001800
#define CST43237_BOOT_SHIFT 11
-#define CST43237_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST43237_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST43237_BOOT_FROM_FLASH 2 /* boot from FLASH */
+#define CST43237_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST43237_BOOT_FROM_FLASH 2 /**< boot from FLASH */
#define CST43237_BOOT_FROM_INVALID 3
/* 43239 resources */
#define CST43239_SFLASH_MASK 0x00000004
#define CST43239_RES_INIT_MODE_SHIFT 7
#define CST43239_RES_INIT_MODE_MASK 0x000001f0
-#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /* SDIO || gSPI */
-#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /* USB || USBDA */
-#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /* SDIO */
-#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /* gSPI */
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /**< SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /**< USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /**< SDIO */
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /**< gSPI */
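/*
 * Illustrative sketch, not a driver helper: it shows how the 43239 chip-status
 * macros above combine; bit 15 selects SDIO/gSPI over USB, and bit 0 then
 * distinguishes gSPI from plain SDIO.
 */
static inline int
cst43239_is_gspi_sketch(uint32 chipstatus)
{
	/* SDIO/gSPI host mode (bit 15 set) and the gSPI variant of it (bit 0 set) */
	return CST43239_CHIPMODE_SDIOD(chipstatus) && CST43239_CHIPMODE_GSPI(chipstatus);
}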
/* 4324 resources */
/* 43242 use same PMU as 4324 */
#define CST4324_RES_INIT_MODE_SHIFT 10
#define CST4324_RES_INIT_MODE_MASK 0x00000c00
#define CST4324_CHIPMODE_MASK 0x7
-#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /* SDIO || gSPI */
-#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /* USB || USBDA */
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /**< SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /**< USB || USBDA */
/* 43242 Chip specific ChipStatus register bits */
#define CST43242_SFLASH_MASK 0x00000008
#define RES4331_HT_SI_AVAIL 5
/* 4331 chip-specific ChipControl register bits */
-#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */
-#define CCTRL4331_EXT_LNA_G (1<<2) /* 0 disable */
-#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
-#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
-#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
-#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL4331_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G (1<<2) /**< 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /**< sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN (1<<4) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /**< set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /**< use sprom_cs pin as PCIE mdio interface */
#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
-#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
-#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
-#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
-#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
-#define CCTRL4331_EXTPA_EN2 (1<<12) /* 0 ext pa disable, 1 ext pa enabled */
-#define CCTRL4331_EXT_LNA_A (1<<13) /* 0 disable */
-#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
-#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
-#define CCTRL4331_EXTPA_ANA_EN (1<<24) /* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /**< override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /**< override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /**< pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /**< pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2 (1<<12) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A (1<<13) /**< 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /**< enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /**< enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN (1<<24) /**< 0 ext pa disable, 1 ext pa enabled */
/* 4331 Chip specific ChipStatus register bits */
-#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */
+#define CST4331_XTAL_FREQ 0x00000001 /**< crystal frequency 20/40 MHz */
#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
#define CST4331_SPROM_OTP_SEL_SHIFT 1
#define CST4331_SPROM_PRESENT 0x00000002
#define CST4331_LDO_PAR 0x00000010
/* 4315 resource */
-#define RES4315_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4315_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4315_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4315_CLDO_PU 4 /* 0x00000010 */
-#define RES4315_PALDO_PU 5 /* 0x00000020 */
-#define RES4315_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4315_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4315_OTP_PU 10 /* 0x00000400 */
-#define RES4315_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4315_XTAL_PU 13 /* 0x00002000 */
-#define RES4315_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4315_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4315_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4315_HT_AVAIL 21 /* 0x00200000 */
+#define RES4315_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4315_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4315_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4315_CLDO_PU 4 /**< 0x00000010 */
+#define RES4315_PALDO_PU 5 /**< 0x00000020 */
+#define RES4315_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4315_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4315_OTP_PU 10 /**< 0x00000400 */
+#define RES4315_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4315_XTAL_PU 13 /**< 0x00002000 */
+#define RES4315_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4315_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4315_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4315_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4315_HT_AVAIL 21 /**< 0x00200000 */
/* 4315 chip-specific ChipStatus register bits */
-#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /* gpio [7:6], SDIO CIS selection */
-#define CST4315_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
-#define CST4315_SPROM_SEL 0x00000001 /* use SPROM, OTP is powered up */
-#define CST4315_OTP_SEL 0x00000002 /* use OTP, OTP is powered up */
-#define CST4315_OTP_PWRDN 0x00000003 /* use SPROM, OTP is powered down */
-#define CST4315_SDIO_MODE 0x00000004 /* gpio [8], sdio/usb mode */
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /**< gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL 0x00000001 /**< use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL 0x00000002 /**< use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN 0x00000003 /**< use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE 0x00000004 /**< gpio [8], sdio/usb mode */
#define CST4315_RCAL_VALID 0x00000008
#define CST4315_RCAL_VALUE_MASK 0x000001f0
#define CST4315_RCAL_VALUE_SHIFT 4
-#define CST4315_PALDO_EXTPNP 0x00000200 /* PALDO is configured with external PNP */
+#define CST4315_PALDO_EXTPNP 0x00000200 /**< PALDO is configured with external PNP */
#define CST4315_CBUCK_MODE_MASK 0x00000c00
#define CST4315_CBUCK_MODE_BURST 0x00000400
#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
/* 4319 resources */
-#define RES4319_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4319_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4319_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4319_CLDO_PU 4 /* 0x00000010 */
-#define RES4319_PALDO_PU 5 /* 0x00000020 */
-#define RES4319_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4319_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4319_OTP_PU 10 /* 0x00000400 */
-#define RES4319_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4319_XTAL_PU 13 /* 0x00002000 */
-#define RES4319_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4319_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4319_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4319_HT_AVAIL 21 /* 0x00200000 */
+#define RES4319_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4319_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4319_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4319_CLDO_PU 4 /**< 0x00000010 */
+#define RES4319_PALDO_PU 5 /**< 0x00000020 */
+#define RES4319_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4319_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4319_OTP_PU 10 /**< 0x00000400 */
+#define RES4319_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4319_XTAL_PU 13 /**< 0x00002000 */
+#define RES4319_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4319_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4319_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4319_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4319_HT_AVAIL 21 /**< 0x00200000 */
/* 4319 chip-specific ChipStatus register bits */
#define CST4319_SPI_CPULESSUSB 0x00000001
#define CST4319_SPI_CLK_POL 0x00000002
#define CST4319_SPI_CLK_PH 0x00000008
-#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /**< gpio [7:6], SDIO CIS selection */
#define CST4319_SPROM_OTP_SEL_SHIFT 6
-#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
-#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
+#define CST4319_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */
+#define CST4319_SPROM_SEL 0x00000040 /**< use SPROM, OTP is powered up */
#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
-#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
+#define CST4319_SDIO_USB_MODE 0x00000100 /**< gpio [8], sdio/usb mode */
#define CST4319_REMAP_SEL_MASK 0x00000600
#define CST4319_ILPDIV_EN 0x00000800
#define CST4319_XTAL_PD_POL 0x00001000
#define CST4319_LPO_SEL 0x00002000
#define CST4319_RES_INIT_MODE 0x0000c000
-#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
+#define CST4319_PALDO_EXTPNP 0x00010000 /**< PALDO is configured with external PNP */
#define CST4319_CBUCK_MODE_MASK 0x00060000
#define CST4319_CBUCK_MODE_BURST 0x00020000
#define CST4319_CBUCK_MODE_LPBURST 0x00060000
#define RES4330_5g_LOGEN_PWRSW_PU 27
/* 4330 chip-specific ChipStatus register bits */
-#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
-#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
-#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
-#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
-#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
-#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /**< SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /**< USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /**< SDIO */
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /**< gSPI */
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /**< USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /**< USB Direct Access */
#define CST4330_OTP_PRESENT 0x00000010
#define CST4330_LPO_AUTODET_EN 0x00000020
#define CST4330_ARMREMAP_0 0x00000040
-#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
+#define CST4330_SPROM_PRESENT 0x00000080 /**< takes priority over OTP if both set */
#define CST4330_ILPDIV_EN 0x00000100
#define CST4330_LPO_SEL 0x00000200
#define CST4330_RES_INIT_MODE_SHIFT 10
#define PMU_VREG4_LPLDO2_1p15V 1
#define PMU_VREG4_LPLDO2_1p20V 2
#define PMU_VREG4_LPLDO2_1p10V 3
-#define PMU_VREG4_LPLDO2_0p90V 4 /* 4 - 7 is 0.90V */
+#define PMU_VREG4_LPLDO2_0p90V 4 /**< 4 - 7 is 0.90V */
#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27
#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1
#define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */
#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
#define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */
-#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
/* 4324 Chip specific ChipControl1 register bits */
#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire */
#define CCTRL_43143_SECI (1<<0)
#define CCTRL_43143_BT_LEGACY (1<<1)
-#define CCTRL_43143_I2S_MODE (1<<2) /* 0: SDIO enabled */
-#define CCTRL_43143_I2S_MASTER (1<<3) /* 0: I2S MCLK input disabled */
-#define CCTRL_43143_I2S_FULL (1<<4) /* 0: I2S SDIN and SPDIF_TX inputs disabled */
-#define CCTRL_43143_GSIO (1<<5) /* 0: sFlash enabled */
-#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /* 0: disabled */
+#define CCTRL_43143_I2S_MODE (1<<2) /**< 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER (1<<3) /**< 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL (1<<4) /**< 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO (1<<5) /**< 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /**< 0: disabled */
#define CCTRL_43143_RF_SWCTRL_0 (1<<6)
#define CCTRL_43143_RF_SWCTRL_1 (2<<6)
#define CCTRL_43143_RF_SWCTRL_2 (4<<6)
-#define CCTRL_43143_RF_XSWCTRL (1<<9) /* 0: UART enabled */
-#define CCTRL_43143_HOST_WAKE0 (1<<11) /* 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_RF_XSWCTRL (1<<9) /**< 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0 (1<<11) /**< 1: SDIO separate interrupt output from GPIO4 */
#define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */
/* 43143 resources, based on pmu_params.xls V1.19 */
-#define RES43143_EXT_SWITCHER_PWM 0 /* 0x00001 */
-#define RES43143_XTAL_PU 1 /* 0x00002 */
-#define RES43143_ILP_REQUEST 2 /* 0x00004 */
-#define RES43143_ALP_AVAIL 3 /* 0x00008 */
-#define RES43143_WL_CORE_READY 4 /* 0x00010 */
-#define RES43143_BBPLL_PWRSW_PU 5 /* 0x00020 */
-#define RES43143_HT_AVAIL 6 /* 0x00040 */
-#define RES43143_RADIO_PU 7 /* 0x00080 */
-#define RES43143_MACPHY_CLK_AVAIL 8 /* 0x00100 */
-#define RES43143_OTP_PU 9 /* 0x00200 */
-#define RES43143_LQ_AVAIL 10 /* 0x00400 */
+#define RES43143_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES43143_XTAL_PU 1 /**< 0x00002 */
+#define RES43143_ILP_REQUEST 2 /**< 0x00004 */
+#define RES43143_ALP_AVAIL 3 /**< 0x00008 */
+#define RES43143_WL_CORE_READY 4 /**< 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU 5 /**< 0x00020 */
+#define RES43143_HT_AVAIL 6 /**< 0x00040 */
+#define RES43143_RADIO_PU 7 /**< 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL 8 /**< 0x00100 */
+#define RES43143_OTP_PU 9 /**< 0x00200 */
+#define RES43143_LQ_AVAIL 10 /**< 0x00400 */
#define PMU43143_XTAL_CORE_SIZE_MASK 0x3F
/* 4706 flashstrconfig reg bits */
#define FLSTRCF4706_MASK 0x000000ff
-#define FLSTRCF4706_SF1 0x00000001 /* 2nd serial flash present */
-#define FLSTRCF4706_PF1 0x00000002 /* 2nd parallel flash present */
-#define FLSTRCF4706_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */
-#define FLSTRCF4706_NF1 0x00000008 /* 2nd NAND flash present */
-#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /* Valid value mask */
-#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */
-#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */
+#define FLSTRCF4706_SF1 0x00000001 /**< 2nd serial flash present */
+#define FLSTRCF4706_PF1 0x00000002 /**< 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE 0x00000004 /**< 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1 0x00000008 /**< 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /**< Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /**< 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /**< 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /**< 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /**< 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /**< 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /**< 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /**< 256MB */
/* 4360 Chip specific ChipControl register bits */
#define CCTRL4360_I2C_MODE (1 << 0)
#define RES4360_BBPLLPWRSW_PU 6
#define RES4360_HT_AVAIL 7
#define RES4360_OTP_PU 8
+#define RES4360_AVB_PLL_PWRSW_PU 9
+#define RES4360_PCIE_TL_CLK_AVAIL 10
#define CST4360_XTAL_40MZ 0x00000001
#define CST4360_SFLASH 0x00000002
#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29)
+/* 4365 PMU resources */
+#define RES4365_REGULATOR_PU 0
+#define RES4365_XTALLDO_PU 1
+#define RES4365_XTAL_PU 2
+#define RES4365_CPU_PLLLDO_PU 3
+#define RES4365_CPU_PLL_PU 4
+#define RES4365_WL_CORE_RDY 5
+#define RES4365_ILP_REQ 6
+#define RES4365_ALP_AVAIL 7
+#define RES4365_HT_AVAIL 8
+#define RES4365_BB_PLLLDO_PU 9
+#define RES4365_BB_PLL_PU 10
+#define RES4365_MINIMU_PU 11
+#define RES4365_RADIO_PU 12
+#define RES4365_MACPHY_CLK_AVAIL 13
+
/* 4349 related */
#define RES4349_LPLDO_PU 0
#define RES4349_BG_PU 1
#define RES4349_MACPHY_CLKAVAIL 30
#define CR4_4349_RAM_BASE (0x180000)
-#define CC4_4349_SR_ASM_ADDR (0x48)
+#define CR4_4349_RAM_BASE_FROM_REV_9 (0x160000)
+
+/* SR binary offset is at 8K */
+#define CC_SR1_4349_SR_ASM_ADDR (0x10)
#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
#define CST4349_SPROM_PRESENT 0x00000010
+#define CC2_4349_VDDM_PWRSW_EN_MASK (1 << 20)
+#define CC2_4349_VDDM_PWRSW_EN_SHIFT (20)
+#define CC2_4349_SDIO_AOS_WAKEUP_MASK (1 << 24)
+#define CC2_4349_SDIO_AOS_WAKEUP_SHIFT (24)
+
+
+#define CC6_4349_PCIE_CLKREQ_WAKEUP_MASK (1 << 4)
+#define CC6_4349_PCIE_CLKREQ_WAKEUP_SHIFT (4)
+#define CC6_4349_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6)
+#define CC6_4349_PMU_WAKEUP_ALPAVAIL_SHIFT (6)
+#define CC6_4349_PMU_EN_EXT_PERST_MASK (1 << 13)
+#define CC6_4349_PMU_ENABLE_L2REFCLKPAD_PWRDWN (1 << 15)
+#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16)
+#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25)
+
+
/* 43430 PMU resources based on pmu_params.xls */
#define RES43430_LPLDO_PU 0
#define CST43430_TRIM_EN 0x00800000
#define CST43430_DIN_PACKAGE_OPTION 0x10000000
+#define PMU_MACCORE_0_RES_REQ_TIMER 0x19000000
+#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F
+
+#define PMU_MACCORE_1_RES_REQ_TIMER 0x19000000
+#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F
+
/* defines to detect active host interface in use */
#define CHIP_HOSTIF_PCIEMODE 0x1
#define CHIP_HOSTIF_USBMODE 0x2
#define CST4335_CHIPMODE_MASK 0xF
#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */
#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */
-#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /* HSIC || USBDA */
+#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /**< HSIC || USBDA */
#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */
/* 4335 Chip specific ChipControl1 register bits */
#define CR4_4350_RAM_BASE (0x180000)
#define CR4_4360_RAM_BASE (0x0)
#define CR4_43602_RAM_BASE (0x180000)
+#define CA7_4365_RAM_BASE (0x200000)
+
/* 4335 chip OTP present & OTP select bits. */
#define SPROM4335_OTP_SELECT 0x00000010
#define MUXENAB4350_UART_MASK (0x0000000f)
#define MUXENAB4350_UART_SHIFT 0
-#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */
#define MUXENAB4350_HOSTWAKE_SHIFT 4
#define CC_GCI_CHIPCTRL_06 (6)
#define CC_GCI_CHIPCTRL_07 (7)
#define CC_GCI_CHIPCTRL_08 (8)
+#define CC_GCI_CHIPCTRL_11 (11)
#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
#define CC_GCI_06_JTAG_SEL_SHIFT 4
#define CC4335_FNSEL_TRI (15)
/* GCI Core Control Reg */
-#define GCI_CORECTRL_SR_MASK (1 << 0) /* SECI block Reset */
-#define GCI_CORECTRL_RSL_MASK (1 << 1) /* ResetSECILogic */
-#define GCI_CORECTRL_ES_MASK (1 << 2) /* EnableSECI */
-#define GCI_CORECTRL_FSL_MASK (1 << 3) /* Force SECI Out Low */
-#define GCI_CORECTRL_SOM_MASK (7 << 4) /* SECI Op Mode */
-#define GCI_CORECTRL_US_MASK (1 << 7) /* Update SECI */
-#define GCI_CORECTRL_BOS_MASK (1 << 8) /* Break On Sleep */
+#define GCI_CORECTRL_SR_MASK (1 << 0) /**< SECI block Reset */
+#define GCI_CORECTRL_RSL_MASK (1 << 1) /**< ResetSECILogic */
+#define GCI_CORECTRL_ES_MASK (1 << 2) /**< EnableSECI */
+#define GCI_CORECTRL_FSL_MASK (1 << 3) /**< Force SECI Out Low */
+#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */
+#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */
+#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */
/* 4345 pins
* note: only the values set as default/used are added here.
/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
-#define GCI_INTSTATUS_RBI (1 << 0) /* Rx Break Interrupt */
-#define GCI_INTSTATUS_UB (1 << 1) /* UART Break Interrupt */
-#define GCI_INTSTATUS_SPE (1 << 2) /* SECI Parity Error Interrupt */
-#define GCI_INTSTATUS_SFE (1 << 3) /* SECI Framing Error Interrupt */
-#define GCI_INTSTATUS_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
-#define GCI_INTSTATUS_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
-#define GCI_INTSTATUS_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
-#define GCI_INTSTATUS_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
-#define GCI_INTSTATUS_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
-#define GCI_INTSTATUS_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
-#define GCI_INTSTATUS_GPIOINT (1 << 25) /* GCIGpioInt */
-#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /* GCIGpioWake */
+#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTSTATUS_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */
/* 4335 GCI IntMask Register bits. */
-#define GCI_INTMASK_RBI (1 << 0) /* Rx Break Interrupt */
-#define GCI_INTMASK_UB (1 << 1) /* UART Break Interrupt */
-#define GCI_INTMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */
-#define GCI_INTMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */
-#define GCI_INTMASK_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
-#define GCI_INTMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
-#define GCI_INTMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
-#define GCI_INTMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
-#define GCI_INTMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
-#define GCI_INTMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
-#define GCI_INTMASK_GPIOINT (1 << 25) /* GCIGpioInt */
-#define GCI_INTMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */
+#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_INTMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_INTMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
/* 4335 GCI WakeMask Register bits. */
-#define GCI_WAKEMASK_RBI (1 << 0) /* Rx Break Interrupt */
-#define GCI_WAKEMASK_UB (1 << 1) /* UART Break Interrupt */
-#define GCI_WAKEMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */
-#define GCI_WAKEMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */
-#define GCI_WAKE_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
-#define GCI_WAKEMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
-#define GCI_WAKEMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
-#define GCI_WAKEMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
-#define GCI_WAKEMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
-#define GCI_WAKEMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
-#define GCI_WAKEMASK_GPIOINT (1 << 25) /* GCIGpioInt */
-#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */
+#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */
+#define GCI_WAKEMASK_UB (1 << 1) /**< UART Break Interrupt */
+#define GCI_WAKEMASK_SPE (1 << 2) /**< SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE (1 << 3) /**< SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI (1 << 9) /**< SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF (1 << 10) /**< SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE (1 << 11) /**< SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
#define GCI_WAKE_ON_GCI_GPIO1 1
#define GCI_WAKE_ON_GCI_GPIO2 2
#define MUXENAB4335_UART_MASK (0x0000000f)
#define MUXENAB4335_UART_SHIFT 0
-#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */
+#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /**< configure GPIO for SDIO host_wake */
#define MUXENAB4335_HOSTWAKE_SHIFT 4
#define MUXENAB4335_GETIX(val, name) \
((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
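/*
 * Illustrative sketch, not a driver helper: the token-pasted MASK/SHIFT pair in
 * MUXENAB4335_GETIX() isolates the named field, and the trailing "- 1" converts
 * the 1-based field value into a 0-based index.
 */
static inline uint32
muxenab4335_hostwake_gpio_sketch(uint32 muxenab)
{
	/* e.g. muxenab == 0x50: (0x50 & 0xf0) >> 4 is 5, minus 1 selects index 4 */
	return MUXENAB4335_GETIX(muxenab, HOSTWAKE);
}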
/*
* Broadcom SiliconBackplane hardware register definitions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbconfig.h 456346 2014-02-18 16:48:52Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $
*/
#ifndef _SBCONFIG_H
/* enumeration in SB is based on the premise that cores are contiguous in the
* enumeration space.
*/
-#define SB_BUS_SIZE 0x10000 /* Each bus gets 64Kbytes for cores */
+#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */
#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
-#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /* Max cores per bus */
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */
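/*
 * Illustrative sketch, not a driver helper; it assumes SI_ENUM_BASE and
 * SI_CORE_SIZE are pulled in from the usual headers: with contiguous core
 * enumeration, a core's register window is the 64 KB bus base plus whole
 * core-sized steps.
 */
static inline uint32
sb_core_regs_base_sketch(uint32 bus, uint32 coreidx)
{
	/* SB_BUS_BASE(bus) is SI_ENUM_BASE + bus * SB_BUS_SIZE */
	return SB_BUS_BASE(bus) + coreidx * SI_CORE_SIZE;
}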
/*
* Sonics Configuration Space Registers.
*/
-#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */
-#define SBCONFIGSIZE 256 /* sizeof (sbconfig_t) */
+#define SBCONFIGOFF 0xf00 /**< core sbconfig regs are top 256 bytes of regs */
+#define SBCONFIGSIZE 256 /**< sizeof (sbconfig_t) */
#define SBIPSFLAG 0x08
#define SBTPSFLAG 0x18
-#define SBTMERRLOGA 0x48 /* sonics >= 2.3 */
-#define SBTMERRLOG 0x50 /* sonics >= 2.3 */
+#define SBTMERRLOGA 0x48 /**< sonics >= 2.3 */
+#define SBTMERRLOG 0x50 /**< sonics >= 2.3 */
#define SBADMATCH3 0x60
#define SBADMATCH2 0x68
#define SBADMATCH1 0x70
typedef volatile struct _sbconfig {
uint32 PAD[2];
- uint32 sbipsflag; /* initiator port ocp slave flag */
+ uint32 sbipsflag; /**< initiator port ocp slave flag */
uint32 PAD[3];
- uint32 sbtpsflag; /* target port ocp slave flag */
+ uint32 sbtpsflag; /**< target port ocp slave flag */
uint32 PAD[11];
- uint32 sbtmerrloga; /* (sonics >= 2.3) */
+ uint32 sbtmerrloga; /**< (sonics >= 2.3) */
uint32 PAD;
- uint32 sbtmerrlog; /* (sonics >= 2.3) */
+ uint32 sbtmerrlog; /**< (sonics >= 2.3) */
uint32 PAD[3];
- uint32 sbadmatch3; /* address match3 */
+ uint32 sbadmatch3; /**< address match3 */
uint32 PAD;
- uint32 sbadmatch2; /* address match2 */
+ uint32 sbadmatch2; /**< address match2 */
uint32 PAD;
- uint32 sbadmatch1; /* address match1 */
+ uint32 sbadmatch1; /**< address match1 */
uint32 PAD[7];
- uint32 sbimstate; /* initiator agent state */
- uint32 sbintvec; /* interrupt mask */
- uint32 sbtmstatelow; /* target state */
- uint32 sbtmstatehigh; /* target state */
- uint32 sbbwa0; /* bandwidth allocation table0 */
+ uint32 sbimstate; /**< initiator agent state */
+ uint32 sbintvec; /**< interrupt mask */
+ uint32 sbtmstatelow; /**< target state */
+ uint32 sbtmstatehigh; /**< target state */
+ uint32 sbbwa0; /**< bandwidth allocation table0 */
uint32 PAD;
- uint32 sbimconfiglow; /* initiator configuration */
- uint32 sbimconfighigh; /* initiator configuration */
- uint32 sbadmatch0; /* address match0 */
+ uint32 sbimconfiglow; /**< initiator configuration */
+ uint32 sbimconfighigh; /**< initiator configuration */
+ uint32 sbadmatch0; /**< address match0 */
uint32 PAD;
- uint32 sbtmconfiglow; /* target configuration */
- uint32 sbtmconfighigh; /* target configuration */
- uint32 sbbconfig; /* broadcast configuration */
+ uint32 sbtmconfiglow; /**< target configuration */
+ uint32 sbtmconfighigh; /**< target configuration */
+ uint32 sbbconfig; /**< broadcast configuration */
uint32 PAD;
- uint32 sbbstate; /* broadcast state */
+ uint32 sbbstate; /**< broadcast state */
uint32 PAD[3];
- uint32 sbactcnfg; /* activate configuration */
+ uint32 sbactcnfg; /**< activate configuration */
uint32 PAD[3];
- uint32 sbflagst; /* current sbflags */
+ uint32 sbflagst; /**< current sbflags */
uint32 PAD[3];
- uint32 sbidlow; /* identification */
- uint32 sbidhigh; /* identification */
+ uint32 sbidlow; /**< identification */
+ uint32 sbidhigh; /**< identification */
} sbconfig_t;
#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
/* sbipsflag */
-#define SBIPS_INT1_MASK 0x3f /* which sbflags get routed to mips interrupt 1 */
+#define SBIPS_INT1_MASK 0x3f /**< which sbflags get routed to mips interrupt 1 */
#define SBIPS_INT1_SHIFT 0
-#define SBIPS_INT2_MASK 0x3f00 /* which sbflags get routed to mips interrupt 2 */
+#define SBIPS_INT2_MASK 0x3f00 /**< which sbflags get routed to mips interrupt 2 */
#define SBIPS_INT2_SHIFT 8
-#define SBIPS_INT3_MASK 0x3f0000 /* which sbflags get routed to mips interrupt 3 */
+#define SBIPS_INT3_MASK 0x3f0000 /**< which sbflags get routed to mips interrupt 3 */
#define SBIPS_INT3_SHIFT 16
-#define SBIPS_INT4_MASK 0x3f000000 /* which sbflags get routed to mips interrupt 4 */
+#define SBIPS_INT4_MASK 0x3f000000 /**< which sbflags get routed to mips interrupt 4 */
#define SBIPS_INT4_SHIFT 24
/* sbtpsflag */
-#define SBTPS_NUM0_MASK 0x3f /* interrupt sbFlag # generated by this core */
-#define SBTPS_F0EN0 0x40 /* interrupt is always sent on the backplane */
+#define SBTPS_NUM0_MASK 0x3f /**< interrupt sbFlag # generated by this core */
+#define SBTPS_F0EN0 0x40 /**< interrupt is always sent on the backplane */
/* sbtmerrlog */
-#define SBTMEL_CM 0x00000007 /* command */
-#define SBTMEL_CI 0x0000ff00 /* connection id */
-#define SBTMEL_EC 0x0f000000 /* error code */
-#define SBTMEL_ME 0x80000000 /* multiple error */
+#define SBTMEL_CM 0x00000007 /**< command */
+#define SBTMEL_CI 0x0000ff00 /**< connection id */
+#define SBTMEL_EC 0x0f000000 /**< error code */
+#define SBTMEL_ME 0x80000000 /**< multiple error */
/* sbimstate */
-#define SBIM_PC 0xf /* pipecount */
-#define SBIM_AP_MASK 0x30 /* arbitration policy */
-#define SBIM_AP_BOTH 0x00 /* use both timeslaces and token */
-#define SBIM_AP_TS 0x10 /* use timesliaces only */
-#define SBIM_AP_TK 0x20 /* use token only */
-#define SBIM_AP_RSV 0x30 /* reserved */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
+#define SBIM_PC 0xf /**< pipecount */
+#define SBIM_AP_MASK 0x30 /**< arbitration policy */
+#define SBIM_AP_BOTH 0x00 /**< use both timeslices and token */
+#define SBIM_AP_TS 0x10 /**< use timeslices only */
+#define SBIM_AP_TK 0x20 /**< use token only */
+#define SBIM_AP_RSV 0x30 /**< reserved */
+#define SBIM_IBE 0x20000 /**< inbanderror */
+#define SBIM_TO 0x40000 /**< timeout */
+#define SBIM_BY 0x01800000 /**< busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /**< reject (sonics >= 2.3) */
/* sbtmstatelow */
-#define SBTML_RESET 0x0001 /* reset */
-#define SBTML_REJ_MASK 0x0006 /* reject field */
-#define SBTML_REJ 0x0002 /* reject */
-#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */
+#define SBTML_RESET 0x0001 /**< reset */
+#define SBTML_REJ_MASK 0x0006 /**< reject field */
+#define SBTML_REJ 0x0002 /**< reject */
+#define SBTML_TMPREJ 0x0004 /**< temporary reject, for error recovery */
-#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */
+#define SBTML_SICF_SHIFT 16 /**< Shift to locate the SI control flags in sbtml */
/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
+#define SBTMH_SERR 0x0001 /**< serror */
+#define SBTMH_INT 0x0002 /**< interrupt */
+#define SBTMH_BUSY 0x0004 /**< busy */
+#define SBTMH_TO 0x0020 /**< timeout (sonics >= 2.3) */
-#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */
+#define SBTMH_SISF_SHIFT 16 /**< Shift to locate the SI status flags in sbtmh */
/* sbbwa0 */
-#define SBBWA_TAB0_MASK 0xffff /* lookup table 0 */
-#define SBBWA_TAB1_MASK 0xffff /* lookup table 1 */
+#define SBBWA_TAB0_MASK 0xffff /**< lookup table 0 */
+#define SBBWA_TAB1_MASK 0xffff /**< lookup table 1 */
#define SBBWA_TAB1_SHIFT 16
/* sbimconfiglow */
-#define SBIMCL_STO_MASK 0x7 /* service timeout */
-#define SBIMCL_RTO_MASK 0x70 /* request timeout */
+#define SBIMCL_STO_MASK 0x7 /**< service timeout */
+#define SBIMCL_RTO_MASK 0x70 /**< request timeout */
#define SBIMCL_RTO_SHIFT 4
-#define SBIMCL_CID_MASK 0xff0000 /* connection id */
+#define SBIMCL_CID_MASK 0xff0000 /**< connection id */
#define SBIMCL_CID_SHIFT 16
/* sbimconfighigh */
-#define SBIMCH_IEM_MASK 0xc /* inband error mode */
-#define SBIMCH_TEM_MASK 0x30 /* timeout error mode */
+#define SBIMCH_IEM_MASK 0xc /**< inband error mode */
+#define SBIMCH_TEM_MASK 0x30 /**< timeout error mode */
#define SBIMCH_TEM_SHIFT 4
-#define SBIMCH_BEM_MASK 0xc0 /* bus error mode */
+#define SBIMCH_BEM_MASK 0xc0 /**< bus error mode */
#define SBIMCH_BEM_SHIFT 6
/* sbadmatch0 */
-#define SBAM_TYPE_MASK 0x3 /* address type */
-#define SBAM_AD64 0x4 /* reserved */
-#define SBAM_ADINT0_MASK 0xf8 /* type0 size */
+#define SBAM_TYPE_MASK 0x3 /**< address type */
+#define SBAM_AD64 0x4 /**< reserved */
+#define SBAM_ADINT0_MASK 0xf8 /**< type0 size */
#define SBAM_ADINT0_SHIFT 3
-#define SBAM_ADINT1_MASK 0x1f8 /* type1 size */
+#define SBAM_ADINT1_MASK 0x1f8 /**< type1 size */
#define SBAM_ADINT1_SHIFT 3
-#define SBAM_ADINT2_MASK 0x1f8 /* type2 size */
+#define SBAM_ADINT2_MASK 0x1f8 /**< type2 size */
#define SBAM_ADINT2_SHIFT 3
-#define SBAM_ADEN 0x400 /* enable */
-#define SBAM_ADNEG 0x800 /* negative decode */
-#define SBAM_BASE0_MASK 0xffffff00 /* type0 base address */
+#define SBAM_ADEN 0x400 /**< enable */
+#define SBAM_ADNEG 0x800 /**< negative decode */
+#define SBAM_BASE0_MASK 0xffffff00 /**< type0 base address */
#define SBAM_BASE0_SHIFT 8
-#define SBAM_BASE1_MASK 0xfffff000 /* type1 base address for the core */
+#define SBAM_BASE1_MASK 0xfffff000 /**< type1 base address for the core */
#define SBAM_BASE1_SHIFT 12
-#define SBAM_BASE2_MASK 0xffff0000 /* type2 base address for the core */
+#define SBAM_BASE2_MASK 0xffff0000 /**< type2 base address for the core */
#define SBAM_BASE2_SHIFT 16
/* sbtmconfiglow */
-#define SBTMCL_CD_MASK 0xff /* clock divide */
-#define SBTMCL_CO_MASK 0xf800 /* clock offset */
+#define SBTMCL_CD_MASK 0xff /**< clock divide */
+#define SBTMCL_CO_MASK 0xf800 /**< clock offset */
#define SBTMCL_CO_SHIFT 11
-#define SBTMCL_IF_MASK 0xfc0000 /* interrupt flags */
+#define SBTMCL_IF_MASK 0xfc0000 /**< interrupt flags */
#define SBTMCL_IF_SHIFT 18
-#define SBTMCL_IM_MASK 0x3000000 /* interrupt mode */
+#define SBTMCL_IM_MASK 0x3000000 /**< interrupt mode */
#define SBTMCL_IM_SHIFT 24
/* sbtmconfighigh */
-#define SBTMCH_BM_MASK 0x3 /* busy mode */
-#define SBTMCH_RM_MASK 0x3 /* retry mode */
+#define SBTMCH_BM_MASK 0x3 /**< busy mode */
+#define SBTMCH_RM_MASK 0x3 /**< retry mode */
#define SBTMCH_RM_SHIFT 2
-#define SBTMCH_SM_MASK 0x30 /* stop mode */
+#define SBTMCH_SM_MASK 0x30 /**< stop mode */
#define SBTMCH_SM_SHIFT 4
-#define SBTMCH_EM_MASK 0x300 /* sb error mode */
+#define SBTMCH_EM_MASK 0x300 /**< sb error mode */
#define SBTMCH_EM_SHIFT 8
-#define SBTMCH_IM_MASK 0xc00 /* int mode */
+#define SBTMCH_IM_MASK 0xc00 /**< int mode */
#define SBTMCH_IM_SHIFT 10
/* sbbconfig */
-#define SBBC_LAT_MASK 0x3 /* sb latency */
-#define SBBC_MAX0_MASK 0xf0000 /* maxccntr0 */
+#define SBBC_LAT_MASK 0x3 /**< sb latency */
+#define SBBC_MAX0_MASK 0xf0000 /**< maxccntr0 */
#define SBBC_MAX0_SHIFT 16
-#define SBBC_MAX1_MASK 0xf00000 /* maxccntr1 */
+#define SBBC_MAX1_MASK 0xf00000 /**< maxccntr1 */
#define SBBC_MAX1_SHIFT 20
/* sbbstate */
-#define SBBS_SRD 0x1 /* st reg disable */
-#define SBBS_HRD 0x2 /* hold reg disable */
+#define SBBS_SRD 0x1 /**< st reg disable */
+#define SBBS_HRD 0x2 /**< hold reg disable */
/* sbidlow */
-#define SBIDL_CS_MASK 0x3 /* config space */
-#define SBIDL_AR_MASK 0x38 /* # address ranges supported */
+#define SBIDL_CS_MASK 0x3 /**< config space */
+#define SBIDL_AR_MASK 0x38 /**< # address ranges supported */
#define SBIDL_AR_SHIFT 3
-#define SBIDL_SYNCH 0x40 /* sync */
-#define SBIDL_INIT 0x80 /* initiator */
-#define SBIDL_MINLAT_MASK 0xf00 /* minimum backplane latency */
+#define SBIDL_SYNCH 0x40 /**< sync */
+#define SBIDL_INIT 0x80 /**< initiator */
+#define SBIDL_MINLAT_MASK 0xf00 /**< minimum backplane latency */
#define SBIDL_MINLAT_SHIFT 8
-#define SBIDL_MAXLAT 0xf000 /* maximum backplane latency */
+#define SBIDL_MAXLAT 0xf000 /**< maximum backplane latency */
#define SBIDL_MAXLAT_SHIFT 12
-#define SBIDL_FIRST 0x10000 /* this initiator is first */
-#define SBIDL_CW_MASK 0xc0000 /* cycle counter width */
+#define SBIDL_FIRST 0x10000 /**< this initiator is first */
+#define SBIDL_CW_MASK 0xc0000 /**< cycle counter width */
#define SBIDL_CW_SHIFT 18
-#define SBIDL_TP_MASK 0xf00000 /* target ports */
+#define SBIDL_TP_MASK 0xf00000 /**< target ports */
#define SBIDL_TP_SHIFT 20
-#define SBIDL_IP_MASK 0xf000000 /* initiator ports */
+#define SBIDL_IP_MASK 0xf000000 /**< initiator ports */
#define SBIDL_IP_SHIFT 24
-#define SBIDL_RV_MASK 0xf0000000 /* sonics backplane revision code */
+#define SBIDL_RV_MASK 0xf0000000 /**< sonics backplane revision code */
#define SBIDL_RV_SHIFT 28
-#define SBIDL_RV_2_2 0x00000000 /* version 2.2 or earlier */
-#define SBIDL_RV_2_3 0x10000000 /* version 2.3 */
+#define SBIDL_RV_2_2 0x00000000 /**< version 2.2 or earlier */
+#define SBIDL_RV_2_3 0x10000000 /**< version 2.3 */
/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
+#define SBIDH_RC_MASK 0x000f /**< revision code */
+#define SBIDH_RCE_MASK 0x7000 /**< revision code extension field */
#define SBIDH_RCE_SHIFT 8
#define SBCOREREV(sbidh) \
((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
+#define SBIDH_CC_MASK 0x8ff0 /**< core code */
#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
+#define SBIDH_VC_MASK 0xffff0000 /**< vendor code */
#define SBIDH_VC_SHIFT 16
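/*
 * Illustrative sketch, not a driver helper; the sample value is made up:
 * SBCOREREV() merges the revision-code extension field with the low revision
 * nibble of sbidhigh.
 */
static inline uint32
sb_corerev_sketch(uint32 sbidhigh)
{
	/* e.g. sbidhigh == 0x2005: ((0x2000 >> 8) | 0x5) == 0x25, i.e. core rev 37 */
	return SBCOREREV(sbidhigh);
}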
-#define SB_COMMIT 0xfd8 /* update buffered registers value */
+#define SB_COMMIT 0xfd8 /**< update buffered registers value */
/* vendor codes */
-#define SB_VEND_BCM 0x4243 /* Broadcom's SB vendor code */
+#define SB_VEND_BCM 0x4243 /**< Broadcom's SB vendor code */
#endif /* _SBCONFIG_H */
* Generic Broadcom Home Networking Division (HND) DMA engine HW interface
* This supports the following chips: BCM42xx, 44xx, 47xx .
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbhnddma.h 452424 2014-01-30 09:43:39Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbhnddma.h 530150 2015-01-29 08:43:40Z $
*/
#ifndef _sbhnddma_h_
/* 32 bits addressing */
-/* dma registers per channel(xmt or rcv) */
+/** dma registers per channel(xmt or rcv) */
typedef volatile struct {
- uint32 control; /* enable, et al */
- uint32 addr; /* descriptor ring base address (4K aligned) */
- uint32 ptr; /* last descriptor posted to chip */
- uint32 status; /* current active descriptor, et al */
+ uint32 control; /**< enable, et al */
+ uint32 addr; /**< descriptor ring base address (4K aligned) */
+ uint32 ptr; /**< last descriptor posted to chip */
+ uint32 status; /**< current active descriptor, et al */
} dma32regs_t;
typedef volatile struct {
- dma32regs_t xmt; /* dma tx channel */
- dma32regs_t rcv; /* dma rx channel */
+ dma32regs_t xmt; /**< dma tx channel */
+ dma32regs_t rcv; /**< dma rx channel */
} dma32regp_t;
typedef volatile struct { /* diag access */
- uint32 fifoaddr; /* diag address */
- uint32 fifodatalow; /* low 32bits of data */
- uint32 fifodatahigh; /* high 32bits of data */
- uint32 pad; /* reserved */
+ uint32 fifoaddr; /**< diag address */
+ uint32 fifodatalow; /**< low 32bits of data */
+ uint32 fifodatahigh; /**< high 32bits of data */
+ uint32 pad; /**< reserved */
} dma32diag_t;
-/*
+/**
* DMA Descriptor
* Descriptors are only read by the hardware, never written back.
*/
typedef volatile struct {
- uint32 ctrl; /* misc control bits & bufcount */
- uint32 addr; /* data buffer address */
+ uint32 ctrl; /**< misc control bits & bufcount */
+ uint32 addr; /**< data buffer address */
} dma32dd_t;
-/*
- * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
- */
+/** Each descriptor ring must be 4096-byte aligned, and fit within a single 4096-byte page. */
#define D32RINGALIGN_BITS 12
#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
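/*
 * Illustrative compile-time check, not part of the driver; it assumes
 * dma32dd_t stays two uint32 fields (8 bytes, no padding), so a 4 KB ring
 * holds at most 4096 / 8 == 512 32-bit descriptors.
 */
typedef char d32maxdd_is_512_sketch[(D32MAXDD == 512) ? 1 : -1];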
/* transmit channel control */
-#define XC_XE ((uint32)1 << 0) /* transmit enable */
-#define XC_SE ((uint32)1 << 1) /* transmit suspend request */
-#define XC_LE ((uint32)1 << 2) /* loopback enable */
-#define XC_FL ((uint32)1 << 4) /* flush request */
-#define XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */
+#define XC_XE ((uint32)1 << 0) /**< transmit enable */
+#define XC_SE ((uint32)1 << 1) /**< transmit suspend request */
+#define XC_LE ((uint32)1 << 2) /**< loopback enable */
+#define XC_FL ((uint32)1 << 4) /**< flush request */
+#define XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */
#define XC_MR_SHIFT 6
-#define XC_PD ((uint32)1 << 11) /* parity check disable */
-#define XC_AE ((uint32)3 << 16) /* address extension bits */
+#define XC_PD ((uint32)1 << 11) /**< parity check disable */
+#define XC_AE ((uint32)3 << 16) /**< address extension bits */
#define XC_AE_SHIFT 16
-#define XC_BL_MASK 0x001C0000 /* BurstLen bits */
+#define XC_BL_MASK 0x001C0000 /**< BurstLen bits */
#define XC_BL_SHIFT 18
-#define XC_PC_MASK 0x00E00000 /* Prefetch control */
+#define XC_PC_MASK 0x00E00000 /**< Prefetch control */
#define XC_PC_SHIFT 21
-#define XC_PT_MASK 0x03000000 /* Prefetch threshold */
+#define XC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define XC_PT_SHIFT 24
-/* Multiple outstanding reads */
+/** Multiple outstanding reads */
#define DMA_MR_1 0
#define DMA_MR_2 1
#define DMA_MR_4 2
#define DMA_MR_20 6
#define DMA_MR_32 7
-/* DMA Burst Length in bytes */
+/** DMA Burst Length in bytes */
#define DMA_BL_16 0
#define DMA_BL_32 1
#define DMA_BL_64 2
#define DMA_BL_512 5
#define DMA_BL_1024 6
-/* Prefetch control */
+/** Prefetch control */
#define DMA_PC_0 0
#define DMA_PC_4 1
#define DMA_PC_8 2
#define DMA_PC_16 3
/* others: reserved */
-/* Prefetch threshold */
+/** Prefetch threshold */
#define DMA_PT_1 0
#define DMA_PT_2 1
#define DMA_PT_4 2
#define DMA_PT_8 3
/* transmit descriptor table pointer */
-#define XP_LD_MASK 0xfff /* last valid descriptor */
+#define XP_LD_MASK 0xfff /**< last valid descriptor */
/* transmit channel status */
-#define XS_CD_MASK 0x0fff /* current descriptor pointer */
-#define XS_XS_MASK 0xf000 /* transmit state */
+#define XS_CD_MASK 0x0fff /**< current descriptor pointer */
+#define XS_XS_MASK 0xf000 /**< transmit state */
#define XS_XS_SHIFT 12
-#define XS_XS_DISABLED 0x0000 /* disabled */
-#define XS_XS_ACTIVE 0x1000 /* active */
-#define XS_XS_IDLE 0x2000 /* idle wait */
-#define XS_XS_STOPPED 0x3000 /* stopped */
-#define XS_XS_SUSP 0x4000 /* suspend pending */
-#define XS_XE_MASK 0xf0000 /* transmit errors */
+#define XS_XS_DISABLED 0x0000 /**< disabled */
+#define XS_XS_ACTIVE 0x1000 /**< active */
+#define XS_XS_IDLE 0x2000 /**< idle wait */
+#define XS_XS_STOPPED 0x3000 /**< stopped */
+#define XS_XS_SUSP 0x4000 /**< suspend pending */
+#define XS_XE_MASK 0xf0000 /**< transmit errors */
#define XS_XE_SHIFT 16
-#define XS_XE_NOERR 0x00000 /* no error */
-#define XS_XE_DPE 0x10000 /* descriptor protocol error */
-#define XS_XE_DFU 0x20000 /* data fifo underrun */
-#define XS_XE_BEBR 0x30000 /* bus error on buffer read */
-#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */
-#define XS_AD_MASK 0xfff00000 /* active descriptor */
+#define XS_XE_NOERR 0x00000 /**< no error */
+#define XS_XE_DPE 0x10000 /**< descriptor protocol error */
+#define XS_XE_DFU 0x20000 /**< data fifo underrun */
+#define XS_XE_BEBR 0x30000 /**< bus error on buffer read */
+#define XS_XE_BEDA 0x40000 /**< bus error on descriptor access */
+#define XS_AD_MASK 0xfff00000 /**< active descriptor */
#define XS_AD_SHIFT 20
/* receive channel control */
-#define RC_RE ((uint32)1 << 0) /* receive enable */
-#define RC_RO_MASK 0xfe /* receive frame offset */
+#define RC_RE ((uint32)1 << 0) /**< receive enable */
+#define RC_RO_MASK 0xfe /**< receive frame offset */
#define RC_RO_SHIFT 1
-#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */
-#define RC_SH ((uint32)1 << 9) /* separate rx header descriptor enable */
-#define RC_OC ((uint32)1 << 10) /* overflow continue */
-#define RC_PD ((uint32)1 << 11) /* parity check disable */
-#define RC_AE ((uint32)3 << 16) /* address extension bits */
+#define RC_FM ((uint32)1 << 8) /**< direct fifo receive (pio) mode */
+#define RC_SH ((uint32)1 << 9) /**< separate rx header descriptor enable */
+#define RC_OC ((uint32)1 << 10) /**< overflow continue */
+#define RC_PD ((uint32)1 << 11) /**< parity check disable */
+#define RC_AE ((uint32)3 << 16) /**< address extension bits */
#define RC_AE_SHIFT 16
-#define RC_BL_MASK 0x001C0000 /* BurstLen bits */
+#define RC_BL_MASK 0x001C0000 /**< BurstLen bits */
#define RC_BL_SHIFT 18
-#define RC_PC_MASK 0x00E00000 /* Prefetch control */
+#define RC_PC_MASK 0x00E00000 /**< Prefetch control */
#define RC_PC_SHIFT 21
-#define RC_PT_MASK 0x03000000 /* Prefetch threshold */
+#define RC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define RC_PT_SHIFT 24
/* receive descriptor table pointer */
-#define RP_LD_MASK 0xfff /* last valid descriptor */
+#define RP_LD_MASK 0xfff /**< last valid descriptor */
/* receive channel status */
-#define RS_CD_MASK 0x0fff /* current descriptor pointer */
-#define RS_RS_MASK 0xf000 /* receive state */
+#define RS_CD_MASK 0x0fff /**< current descriptor pointer */
+#define RS_RS_MASK 0xf000 /**< receive state */
#define RS_RS_SHIFT 12
-#define RS_RS_DISABLED 0x0000 /* disabled */
-#define RS_RS_ACTIVE 0x1000 /* active */
-#define RS_RS_IDLE 0x2000 /* idle wait */
-#define RS_RS_STOPPED 0x3000 /* reserved */
-#define RS_RE_MASK 0xf0000 /* receive errors */
+#define RS_RS_DISABLED 0x0000 /**< disabled */
+#define RS_RS_ACTIVE 0x1000 /**< active */
+#define RS_RS_IDLE 0x2000 /**< idle wait */
+#define RS_RS_STOPPED 0x3000 /**< reserved */
+#define RS_RE_MASK 0xf0000 /**< receive errors */
#define RS_RE_SHIFT 16
-#define RS_RE_NOERR 0x00000 /* no error */
-#define RS_RE_DPE 0x10000 /* descriptor protocol error */
-#define RS_RE_DFO 0x20000 /* data fifo overflow */
-#define RS_RE_BEBW 0x30000 /* bus error on buffer write */
-#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */
-#define RS_AD_MASK 0xfff00000 /* active descriptor */
+#define RS_RE_NOERR 0x00000 /**< no error */
+#define RS_RE_DPE 0x10000 /**< descriptor protocol error */
+#define RS_RE_DFO 0x20000 /**< data fifo overflow */
+#define RS_RE_BEBW 0x30000 /**< bus error on buffer write */
+#define RS_RE_BEDA 0x40000 /**< bus error on descriptor access */
+#define RS_AD_MASK 0xfff00000 /**< active descriptor */
#define RS_AD_SHIFT 20
/* fifoaddr */
-#define FA_OFF_MASK 0xffff /* offset */
-#define FA_SEL_MASK 0xf0000 /* select */
+#define FA_OFF_MASK 0xffff /**< offset */
+#define FA_SEL_MASK 0xf0000 /**< select */
#define FA_SEL_SHIFT 16
-#define FA_SEL_XDD 0x00000 /* transmit dma data */
-#define FA_SEL_XDP 0x10000 /* transmit dma pointers */
-#define FA_SEL_RDD 0x40000 /* receive dma data */
-#define FA_SEL_RDP 0x50000 /* receive dma pointers */
-#define FA_SEL_XFD 0x80000 /* transmit fifo data */
-#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-#define FA_SEL_RFD 0xc0000 /* receive fifo data */
-#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-#define FA_SEL_RSD 0xe0000 /* receive frame status data */
-#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+#define FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define FA_SEL_RDD 0x40000 /**< receive dma data */
+#define FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
/* descriptor control flags */
-#define CTRL_BC_MASK 0x00001fff /* buffer byte count, real data len must <= 4KB */
-#define CTRL_AE ((uint32)3 << 16) /* address extension bits */
+#define CTRL_BC_MASK 0x00001fff /**< buffer byte count, real data len must be <= 4KB */
+#define CTRL_AE ((uint32)3 << 16) /**< address extension bits */
#define CTRL_AE_SHIFT 16
-#define CTRL_PARITY ((uint32)3 << 18) /* parity bit */
-#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */
-#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */
-#define CTRL_EOF ((uint32)1 << 30) /* end of frame */
-#define CTRL_SOF ((uint32)1 << 31) /* start of frame */
+#define CTRL_PARITY ((uint32)3 << 18) /**< parity bit */
+#define CTRL_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define CTRL_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define CTRL_EOF ((uint32)1 << 30) /**< end of frame */
+#define CTRL_SOF ((uint32)1 << 31) /**< start of frame */
-/* control flags in the range [27:20] are core-specific and not defined here */
+/** control flags in the range [27:20] are core-specific and not defined here */
#define CTRL_CORE_MASK 0x0ff00000
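/*
 * Editorial sketch (not part of the original header): filling one dma32dd_t
 * for a single-buffer frame using the control flags above. "buf_pa" and
 * "len" are assumed to be supplied by the caller, with len <= CTRL_BC_MASK.
 */
static void dma32_fill_dd(dma32dd_t *dd, uint32 buf_pa, uint len, bool last)
{
	uint32 ctrl = (len & CTRL_BC_MASK) | CTRL_SOF | CTRL_EOF | CTRL_IOC;

	if (last)
		ctrl |= CTRL_EOT;	/* hardware wraps back to the table start */
	dd->addr = buf_pa;
	dd->ctrl = ctrl;
}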
/* 64 bits addressing */
-/* dma registers per channel(xmt or rcv) */
+/** dma registers per channel (xmt or rcv) */
typedef volatile struct {
- uint32 control; /* enable, et al */
- uint32 ptr; /* last descriptor posted to chip */
- uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
- uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
- uint32 status0; /* current descriptor, xmt state */
- uint32 status1; /* active descriptor, xmt error */
+ uint32 control; /**< enable, et al */
+ uint32 ptr; /**< last descriptor posted to chip */
+ uint32 addrlow; /**< descriptor ring base address low 32-bits (8K aligned) */
+ uint32 addrhigh; /**< descriptor ring base address bits 63:32 (8K aligned) */
+ uint32 status0; /**< current descriptor, xmt state */
+ uint32 status1; /**< active descriptor, xmt error */
} dma64regs_t;
typedef volatile struct {
- dma64regs_t tx; /* dma64 tx channel */
- dma64regs_t rx; /* dma64 rx channel */
+ dma64regs_t tx; /**< dma64 tx channel */
+ dma64regs_t rx; /**< dma64 rx channel */
} dma64regp_t;
-typedef volatile struct { /* diag access */
- uint32 fifoaddr; /* diag address */
- uint32 fifodatalow; /* low 32bits of data */
- uint32 fifodatahigh; /* high 32bits of data */
- uint32 pad; /* reserved */
+typedef volatile struct { /**< diag access */
+ uint32 fifoaddr; /**< diag address */
+ uint32 fifodatalow; /**< low 32bits of data */
+ uint32 fifodatahigh; /**< high 32bits of data */
+ uint32 pad; /**< reserved */
} dma64diag_t;
-/*
+/**
* DMA Descriptor
* Descriptors are only read by the hardware, never written back.
*/
typedef volatile struct {
- uint32 ctrl1; /* misc control bits */
- uint32 ctrl2; /* buffer count and address extension */
- uint32 addrlow; /* memory address of the date buffer, bits 31:0 */
- uint32 addrhigh; /* memory address of the date buffer, bits 63:32 */
+ uint32 ctrl1; /**< misc control bits */
+ uint32 ctrl2; /**< buffer count and address extension */
+ uint32 addrlow; /**< memory address of the data buffer, bits 31:0 */
+ uint32 addrhigh; /**< memory address of the data buffer, bits 63:32 */
} dma64dd_t;
-/*
+/**
 * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
*/
#define D64RINGALIGN_BITS 13
#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
-/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
+/** for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t))
-/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+/**
+ * for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
* 64K boundary
*/
#define D64RINGBOUNDARY_LARGE (1 << 16)
#endif
/* transmit channel control */
-#define D64_XC_XE 0x00000001 /* transmit enable */
-#define D64_XC_SE 0x00000002 /* transmit suspend request */
-#define D64_XC_LE 0x00000004 /* loopback enable */
-#define D64_XC_FL 0x00000010 /* flush request */
-#define D64_XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */
+#define D64_XC_XE 0x00000001 /**< transmit enable */
+#define D64_XC_SE 0x00000002 /**< transmit suspend request */
+#define D64_XC_LE 0x00000004 /**< loopback enable */
+#define D64_XC_FL 0x00000010 /**< flush request */
+#define D64_XC_MR_MASK 0x000001C0 /**< Multiple outstanding reads */
#define D64_XC_MR_SHIFT 6
-#define D64_XC_PD 0x00000800 /* parity check disable */
-#define D64_XC_AE 0x00030000 /* address extension bits */
+#define D64_XC_PD 0x00000800 /**< parity check disable */
+#define D64_XC_AE 0x00030000 /**< address extension bits */
#define D64_XC_AE_SHIFT 16
-#define D64_XC_BL_MASK 0x001C0000 /* BurstLen bits */
+#define D64_XC_BL_MASK 0x001C0000 /**< BurstLen bits */
#define D64_XC_BL_SHIFT 18
-#define D64_XC_PC_MASK 0x00E00000 /* Prefetch control */
+#define D64_XC_PC_MASK 0x00E00000 /**< Prefetch control */
#define D64_XC_PC_SHIFT 21
-#define D64_XC_PT_MASK 0x03000000 /* Prefetch threshold */
+#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define D64_XC_PT_SHIFT 24
/* transmit descriptor table pointer */
-#define D64_XP_LD_MASK 0x00001fff /* last valid descriptor */
+#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */
/* transmit channel status */
-#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /* current descriptor pointer */
-#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
+#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /**< current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /**< transmit state */
#define D64_XS0_XS_SHIFT 28
-#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
-#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
-#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
-#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
-#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /* active descriptor */
-#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
+#define D64_XS0_XS_DISABLED 0x00000000 /**< disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /**< active */
+#define D64_XS0_XS_IDLE 0x20000000 /**< idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /**< stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /**< active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /**< transmit errors */
#define D64_XS1_XE_SHIFT 28
-#define D64_XS1_XE_NOERR 0x00000000 /* no error */
-#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
-#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
-#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
-#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
-#define D64_XS1_XE_COREE 0x50000000 /* core error */
+#define D64_XS1_XE_NOERR 0x00000000 /**< no error */
+#define D64_XS1_XE_DPE 0x10000000 /**< descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /**< data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /**< data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /**< core error */
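/*
 * Editorial sketch (not from the original source): decoding the 64-bit
 * transmit engine state from status0. "regs" is assumed to be a mapped
 * dma64regs_t; production driver code would read it through its register
 * accessor macros rather than a plain dereference.
 */
static bool dma64_tx_idle(dma64regs_t *regs)
{
	return ((regs->status0 & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE);
}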
/* receive channel control */
-#define D64_RC_RE 0x00000001 /* receive enable */
-#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
+#define D64_RC_RE 0x00000001 /**< receive enable */
+#define D64_RC_RO_MASK 0x000000fe /**< receive frame offset */
#define D64_RC_RO_SHIFT 1
-#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
-#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
-#define D64_RC_SHIFT 9 /* separate rx header descriptor enable */
-#define D64_RC_OC 0x00000400 /* overflow continue */
-#define D64_RC_PD 0x00000800 /* parity check disable */
-#define D64_RC_GE 0x00004000 /* Glom enable */
-#define D64_RC_AE 0x00030000 /* address extension bits */
+#define D64_RC_FM 0x00000100 /**< direct fifo receive (pio) mode */
+#define D64_RC_SH 0x00000200 /**< separate rx header descriptor enable */
+#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */
+#define D64_RC_OC 0x00000400 /**< overflow continue */
+#define D64_RC_PD 0x00000800 /**< parity check disable */
+#define D64_RC_SA 0x00002000 /**< select active */
+#define D64_RC_GE 0x00004000 /**< Glom enable */
+#define D64_RC_AE 0x00030000 /**< address extension bits */
#define D64_RC_AE_SHIFT 16
-#define D64_RC_BL_MASK 0x001C0000 /* BurstLen bits */
+#define D64_RC_BL_MASK 0x001C0000 /**< BurstLen bits */
#define D64_RC_BL_SHIFT 18
-#define D64_RC_PC_MASK 0x00E00000 /* Prefetch control */
+#define D64_RC_PC_MASK 0x00E00000 /**< Prefetch control */
#define D64_RC_PC_SHIFT 21
-#define D64_RC_PT_MASK 0x03000000 /* Prefetch threshold */
+#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define D64_RC_PT_SHIFT 24
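/*
 * Editorial sketch (not from the original source): building a 64-bit receive
 * control word that enables the engine, applies a caller-chosen receive frame
 * offset ("hwrxoff") and keeps receiving after an overflow.
 */
static uint32 dma64_rxctl_example(uint hwrxoff)
{
	return (D64_RC_RE | D64_RC_OC |
	        (((uint32)hwrxoff << D64_RC_RO_SHIFT) & D64_RC_RO_MASK));
}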
/* flags for dma controller */
-#define DMA_CTRL_PEN (1 << 0) /* partity enable */
-#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
-#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
-#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
+#define DMA_CTRL_PEN (1 << 0) /**< parity enable */
+#define DMA_CTRL_ROC (1 << 1) /**< rx overflow continue */
+#define DMA_CTRL_RXMULTI (1 << 2) /**< allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED (1 << 3) /**< Unframed Rx/Tx data */
#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
-#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /* DMA avoidance WAR for 4331 */
-#define DMA_CTRL_RXSINGLE (1 << 6) /* always single buffer */
-#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /* DMA Rx glome is enabled */
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glom is enabled */
/* receive descriptor table pointer */
-#define D64_RP_LD_MASK 0x00001fff /* last valid descriptor */
+#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */
/* receive channel status */
-#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /* current descriptor pointer */
-#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
+#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /**< current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /**< receive state */
#define D64_RS0_RS_SHIFT 28
-#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
-#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
-#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
-#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
-#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
-#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
+#define D64_RS0_RS_DISABLED 0x00000000 /**< disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /**< active */
+#define D64_RS0_RS_IDLE 0x20000000 /**< idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /**< stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /**< suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /**< active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /**< receive errors */
#define D64_RS1_RE_SHIFT 28
-#define D64_RS1_RE_NOERR 0x00000000 /* no error */
-#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
-#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
-#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
-#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
-#define D64_RS1_RE_COREE 0x50000000 /* core error */
+#define D64_RS1_RE_NOERR 0x00000000 /**< no error */
+#define D64_RS1_RE_DPO 0x10000000 /**< descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /**< data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /**< data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /**< descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /**< core error */
/* fifoaddr */
-#define D64_FA_OFF_MASK 0xffff /* offset */
-#define D64_FA_SEL_MASK 0xf0000 /* select */
+#define D64_FA_OFF_MASK 0xffff /**< offset */
+#define D64_FA_SEL_MASK 0xf0000 /**< select */
#define D64_FA_SEL_SHIFT 16
-#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
-#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
-#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
-#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
-#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
-#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
-#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
-#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+#define D64_FA_SEL_XDD 0x00000 /**< transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /**< transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /**< receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /**< receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /**< transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /**< transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /**< receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /**< receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /**< receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /**< receive frame status pointers */
/* descriptor control flags 1 */
-#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
-#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /* buirst size control */
-#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */
-#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */
-#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */
-#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */
+#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */
+#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */
+#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */
+#define D64_CTRL1_EOF ((uint32)1 << 30) /**< end of frame */
+#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */
/* descriptor control flags 2 */
-#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. real data len must <= 16KB */
-#define D64_CTRL2_AE 0x00030000 /* address extension bits */
+#define D64_CTRL2_BC_MASK 0x00007fff /**< buffer byte count. real data len must be <= 16KB */
+#define D64_CTRL2_AE 0x00030000 /**< address extension bits */
#define D64_CTRL2_AE_SHIFT 16
#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
-/* control flags in the range [27:20] are core-specific and not defined here */
+/** control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK 0x0ff00000
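/*
 * Editorial sketch (not part of the original header): filling one dma64dd_t
 * for a single-buffer frame. "pa_lo"/"pa_hi" form the 64-bit buffer address
 * and "len" must not exceed D64_CTRL2_BC_MASK; both are assumed to be
 * provided by the caller.
 */
static void dma64_fill_dd(dma64dd_t *dd, uint32 pa_lo, uint32 pa_hi,
	uint len, bool last)
{
	uint32 c1 = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC;

	if (last)
		c1 |= D64_CTRL1_EOT;
	dd->addrlow = pa_lo;
	dd->addrhigh = pa_hi;
	dd->ctrl2 = (len & D64_CTRL2_BC_MASK);
	dd->ctrl1 = c1;
}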
-#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
-#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
-#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1, d11corerev >= 22 */
-#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
+#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */
-/* receive frame status */
+/** receive frame status */
typedef volatile struct {
uint16 len;
uint16 flags;
/*
* BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbpcmcia.h 446298 2014-01-03 11:30:17Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbpcmcia.h 521344 2014-12-17 10:03:55Z $
*/
#ifndef _SBPCMCIA_H
#define SRI_BLANK 0x04
#define SRI_OTP 0x80
-#if !defined(LINUX_POSTMOGRIFY_REMOVAL)
-/* CIS stuff */
-
-/* The CIS stops where the FCRs start */
-#define CIS_SIZE PCMCIA_FCR
-#define CIS_SIZE_12K 1154 /* Maximum h/w + s/w sub region size for 12k OTP */
-
-/* CIS tuple length field max */
-#define CIS_TUPLE_LEN_MAX 0xff
-
-/* Standard tuples we know about */
-
-#define CISTPL_NULL 0x00
-#define CISTPL_VERS_1 0x15 /* CIS ver, manf, dev & ver strings */
-#define CISTPL_MANFID 0x20 /* Manufacturer and device id */
-#define CISTPL_FUNCID 0x21 /* Function identification */
-#define CISTPL_FUNCE 0x22 /* Function extensions */
-#define CISTPL_CFTABLE 0x1b /* Config table entry */
-#define CISTPL_END 0xff /* End of the CIS tuple chain */
-
-/* Function identifier provides context for the function extentions tuple */
-#define CISTPL_FID_SDIO 0x0c /* Extensions defined by SDIO spec */
-
-/* Function extensions for LANs (assumed for extensions other than SDIO) */
-#define LAN_TECH 1 /* Technology type */
-#define LAN_SPEED 2 /* Raw bit rate */
-#define LAN_MEDIA 3 /* Transmission media */
-#define LAN_NID 4 /* Node identification (aka MAC addr) */
-#define LAN_CONN 5 /* Connector standard */
-
-
-/* CFTable */
-#define CFTABLE_REGWIN_2K 0x08 /* 2k reg windows size */
-#define CFTABLE_REGWIN_4K 0x10 /* 4k reg windows size */
-#define CFTABLE_REGWIN_8K 0x20 /* 8k reg windows size */
-
-/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll
- * take one for HNBU, and use "extensions" (a la FUNCE) within it.
- */
-
-#define CISTPL_BRCM_HNBU 0x80
-
-/* Subtypes of BRCM_HNBU: */
-
-#define HNBU_SROMREV 0x00 /* A byte with sromrev, 1 if not present */
-#define HNBU_CHIPID 0x01 /* Two 16bit values: PCI vendor & device id */
-#define HNBU_BOARDREV 0x02 /* One byte board revision */
-#define HNBU_PAPARMS 0x03 /* PA parameters: 8 (sromrev == 1)
- * or 9 (sromrev > 1) bytes
- */
-#define HNBU_OEM 0x04 /* Eight bytes OEM data (sromrev == 1) */
-#define HNBU_CC 0x05 /* Default country code (sromrev == 1) */
-#define HNBU_AA 0x06 /* Antennas available */
-#define HNBU_AG 0x07 /* Antenna gain */
-#define HNBU_BOARDFLAGS 0x08 /* board flags (2 or 4 bytes) */
-#define HNBU_LEDS 0x09 /* LED set */
-#define HNBU_CCODE 0x0a /* Country code (2 bytes ascii + 1 byte cctl)
- * in rev 2
- */
-#define HNBU_CCKPO 0x0b /* 2 byte cck power offsets in rev 3 */
-#define HNBU_OFDMPO 0x0c /* 4 byte 11g ofdm power offsets in rev 3 */
-#define HNBU_GPIOTIMER 0x0d /* 2 bytes with on/off values in rev 3 */
-#define HNBU_PAPARMS5G 0x0e /* 5G PA params */
-#define HNBU_ANT5G 0x0f /* 4328 5G antennas available/gain */
-#define HNBU_RDLID 0x10 /* 2 byte USB remote downloader (RDL) product Id */
-#define HNBU_RSSISMBXA2G 0x11 /* 4328 2G RSSI mid pt sel & board switch arch,
- * 2 bytes, rev 3.
- */
-#define HNBU_RSSISMBXA5G 0x12 /* 4328 5G RSSI mid pt sel & board switch arch,
- * 2 bytes, rev 3.
- */
-#define HNBU_XTALFREQ 0x13 /* 4 byte Crystal frequency in kilohertz */
-#define HNBU_TRI2G 0x14 /* 4328 2G TR isolation, 1 byte */
-#define HNBU_TRI5G 0x15 /* 4328 5G TR isolation, 3 bytes */
-#define HNBU_RXPO2G 0x16 /* 4328 2G RX power offset, 1 byte */
-#define HNBU_RXPO5G 0x17 /* 4328 5G RX power offset, 1 byte */
-#define HNBU_BOARDNUM 0x18 /* board serial number, independent of mac addr */
-#define HNBU_MACADDR 0x19 /* mac addr override for the standard CIS LAN_NID */
-#define HNBU_RDLSN 0x1a /* 2 bytes; serial # advertised in USB descriptor */
-#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */
-#define HNBU_LEDDC 0x1c /* 2 bytes; LED duty cycle */
-#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */
-#define HNBU_PAPARMS_SSLPNPHY 0x1e /* SSLPNPHY PA params */
-#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */
-#define HNBU_RDLRNDIS 0x20 /* 1 byte; 1 = RDL advertises RNDIS config */
-#define HNBU_CHAINSWITCH 0x21 /* 2 byte; txchain, rxchain */
-#define HNBU_REGREV 0x22 /* 1 byte; */
-#define HNBU_FEM 0x23 /* 2 or 4 byte: 11n frontend specification */
-#define HNBU_PAPARMS_C0 0x24 /* 8 or 30 bytes: 11n pa paramater for chain 0 */
-#define HNBU_PAPARMS_C1 0x25 /* 8 or 30 bytes: 11n pa paramater for chain 1 */
-#define HNBU_PAPARMS_C2 0x26 /* 8 or 30 bytes: 11n pa paramater for chain 2 */
-#define HNBU_PAPARMS_C3 0x27 /* 8 or 30 bytes: 11n pa paramater for chain 3 */
-#define HNBU_PO_CCKOFDM 0x28 /* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */
-#define HNBU_PO_MCS2G 0x29 /* 8 bytes: mcs2g power offset */
-#define HNBU_PO_MCS5GM 0x2a /* 8 bytes: mcs5g mid band power offset */
-#define HNBU_PO_MCS5GLH 0x2b /* 16 bytes: mcs5g low-high band power offset */
-#define HNBU_PO_CDD 0x2c /* 2 bytes: cdd2g/5g power offset */
-#define HNBU_PO_STBC 0x2d /* 2 bytes: stbc2g/5g power offset */
-#define HNBU_PO_40M 0x2e /* 2 bytes: 40Mhz channel 2g/5g power offset */
-#define HNBU_PO_40MDUP 0x2f /* 2 bytes: 40Mhz channel dup 2g/5g power offset */
-
-#define HNBU_RDLRWU 0x30 /* 1 byte; 1 = RDL advertises Remote Wake-up */
-#define HNBU_WPS 0x31 /* 1 byte; GPIO pin for WPS button */
-#define HNBU_USBFS 0x32 /* 1 byte; 1 = USB advertises FS mode only */
-#define HNBU_BRMIN 0x33 /* 4 byte bootloader min resource mask */
-#define HNBU_BRMAX 0x34 /* 4 byte bootloader max resource mask */
-#define HNBU_PATCH 0x35 /* bootloader patch addr(2b) & data(4b) pair */
-#define HNBU_CCKFILTTYPE 0x36 /* CCK digital filter selection options */
-#define HNBU_OFDMPO5G 0x37 /* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */
-#define HNBU_ELNA2G 0x38
-#define HNBU_ELNA5G 0x39
-#define HNBU_TEMPTHRESH 0x3A /* 2 bytes
- * byte1 tempthresh
- * byte2 period(msb 4 bits) | hysterisis(lsb 4 bits)
- */
-#define HNBU_UUID 0x3B /* 16 Bytes Hex */
-
-#define HNBU_USBEPNUM 0x40 /* USB endpoint numbers */
-
-/* POWER PER RATE for SROM V9 */
-#define HNBU_CCKBW202GPO 0x41 /* 2 bytes each
- * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps)
- * cckbw202gpo cckbw20ul2gpo
- */
-
-#define HNBU_LEGOFDMBW202GPO 0x42 /* 4 bytes each
- * OFDM power offsets for 20 MHz Legacy rates
- * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
- * legofdmbw202gpo legofdmbw20ul2gpo
- */
-
-#define HNBU_LEGOFDMBW205GPO 0x43 /* 4 bytes each
- * 5G band: OFDM power offsets for 20 MHz Legacy rates
- * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
- * low subband : legofdmbw205glpo legofdmbw20ul2glpo
- * mid subband :legofdmbw205gmpo legofdmbw20ul2gmpo
- * high subband :legofdmbw205ghpo legofdmbw20ul2ghpo
- */
-
-#define HNBU_MCS2GPO 0x44 /* 4 bytes each
- * mcs 0-7 power-offset. LSB nibble: m0, MSB nibble: m7
- * mcsbw202gpo mcsbw20ul2gpo mcsbw402gpo
- */
-#define HNBU_MCS5GLPO 0x45 /* 4 bytes each
- * 5G low subband mcs 0-7 power-offset.
- * LSB nibble: m0, MSB nibble: m7
- * mcsbw205glpo mcsbw20ul5glpo mcsbw405glpo
- */
-#define HNBU_MCS5GMPO 0x46 /* 4 bytes each
- * 5G mid subband mcs 0-7 power-offset.
- * LSB nibble: m0, MSB nibble: m7
- * mcsbw205gmpo mcsbw20ul5gmpo mcsbw405gmpo
- */
-#define HNBU_MCS5GHPO 0x47 /* 4 bytes each
- * 5G high subband mcs 0-7 power-offset.
- * LSB nibble: m0, MSB nibble: m7
- * mcsbw205ghpo mcsbw20ul5ghpo mcsbw405ghpo
- */
-#define HNBU_MCS32PO 0x48 /* 2 bytes total
- * mcs-32 power offset for each band/subband.
- * LSB nibble: 2G band, MSB nibble:
- * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo
- */
-#define HNBU_LEG40DUPPO 0x49 /* 2 bytes total
- * Additional power offset for Legacy Dup40 transmissions.
- * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh.
- * LSB nibble: 2G band, MSB nibble: 5G band high subband.
- * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo
- */
-
-#define HNBU_PMUREGS 0x4a /* Variable length (5 bytes for each register)
- * The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and
- * ResourceDependency Table registers.
- */
-
-#define HNBU_PATCH2 0x4b /* bootloader TCAM patch addr(4b) & data(4b) pair .
- * This is required for socram rev 15 onwards.
- */
-
-#define HNBU_USBRDY 0x4c /* Variable length (upto 5 bytes)
- * This is to indicate the USB/HSIC host controller
- * that the device is ready for enumeration.
- */
-
-#define HNBU_USBREGS 0x4d /* Variable length
- * The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2
- * registers during the USB initialization.
- */
-
-#define HNBU_BLDR_TIMEOUT 0x4e /* 2 bytes used for HSIC bootloader to reset chip
- * on connect timeout.
- * The Delay after USBConnect for timeout till dongle receives
- * get_descriptor request.
- */
-#define HNBU_USBFLAGS 0x4f
-#define HNBU_PATCH_AUTOINC 0x50
-#define HNBU_MDIO_REGLIST 0x51
-#define HNBU_MDIOEX_REGLIST 0x52
-/* Unified OTP: tupple to embed USB manfid inside SDIO CIS */
-#define HNBU_UMANFID 0x53
-#define HNBU_PUBKEY 0x54 /* 128 byte; publick key to validate downloaded FW */
-#define HNBU_WOWLGPIO 0x55 /* 1 byte bit 7 initial polarity, bit 6..0 gpio pin */
-#define HNBU_MUXENAB 0x56 /* 1 byte to enable mux options */
-#define HNBU_GCI_CCR 0x57 /* GCI Chip control register */
-
-#define HNBU_FEM_CFG 0x58 /* FEM config */
-#define HNBU_ACPA_C0 0x59 /* ACPHY PA parameters: chain 0 */
-#define HNBU_ACPA_C1 0x5a /* ACPHY PA parameters: chain 1 */
-#define HNBU_ACPA_C2 0x5b /* ACPHY PA parameters: chain 2 */
-#define HNBU_MEAS_PWR 0x5c
-#define HNBU_PDOFF 0x5d
-#define HNBU_ACPPR_2GPO 0x5e /* ACPHY Power-per-rate 2gpo */
-#define HNBU_ACPPR_5GPO 0x5f /* ACPHY Power-per-rate 5gpo */
-#define HNBU_ACPPR_SBPO 0x60 /* ACPHY Power-per-rate sbpo */
-#define HNBU_NOISELVL 0x61
-#define HNBU_RXGAIN_ERR 0x62
-#define HNBU_AGBGA 0x63
-#define HNBU_USBDESC_COMPOSITE 0x64 /* USB WLAN/BT composite descriptor */
-#define HNBU_PATCH_AUTOINC8 0x65 /* Auto increment patch entry for 8 byte patching */
-#define HNBU_PATCH8 0x66 /* Patch entry for 8 byte patching */
-#define HNBU_ACRXGAINS_C0 0x67 /* ACPHY rxgains: chain 0 */
-#define HNBU_ACRXGAINS_C1 0x68 /* ACPHY rxgains: chain 1 */
-#define HNBU_ACRXGAINS_C2 0x69 /* ACPHY rxgains: chain 2 */
-#define HNBU_TXDUTY 0x6a /* Tx duty cycle for ACPHY 5g 40/80 Mhz */
-#define HNBU_USBUTMI_CTL 0x6b /* 2 byte USB UTMI/LDO Control */
-#define HNBU_PDOFF_2G 0x6c
-#define HNBU_USBSSPHY_UTMI_CTL0 0x6d /* 4 byte USB SSPHY UTMI Control */
-#define HNBU_USBSSPHY_UTMI_CTL1 0x6e /* 4 byte USB SSPHY UTMI Control */
-#define HNBU_USBSSPHY_UTMI_CTL2 0x6f /* 4 byte USB SSPHY UTMI Control */
-#define HNBU_USBSSPHY_SLEEP0 0x70 /* 2 byte USB SSPHY sleep */
-#define HNBU_USBSSPHY_SLEEP1 0x71 /* 2 byte USB SSPHY sleep */
-#define HNBU_USBSSPHY_SLEEP2 0x72 /* 2 byte USB SSPHY sleep */
-#define HNBU_USBSSPHY_SLEEP3 0x73 /* 2 byte USB SSPHY sleep */
-#define HNBU_USBSSPHY_MDIO 0x74 /* USB SSPHY INIT regs setting */
-#define HNBU_USB30PHY_NOSS 0x75 /* USB30 NO Super Speed */
-#define HNBU_USB30PHY_U1U2 0x76 /* USB30 PHY U1U2 Enable */
-#define HNBU_USB30PHY_REGS 0x77 /* USB30 PHY REGs update */
-
-#define HNBU_SROM3SWRGN 0x80 /* 78 bytes; srom rev 3 s/w region without crc8
- * plus extra info appended.
- */
-#define HNBU_RESERVED 0x81 /* Reserved for non-BRCM post-mfg additions */
-#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */
-#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */
-#define HNBU_ACPAPARAM 0x84 /* ACPHY PAPARAM */
-#define HNBU_ACPA_CCK 0x86 /* ACPHY PA trimming parameters: CCK */
-#define HNBU_ACPA_40 0x87 /* ACPHY PA trimming parameters: 40 */
-#define HNBU_ACPA_80 0x88 /* ACPHY PA trimming parameters: 80 */
-#define HNBU_ACPA_4080 0x89 /* ACPHY PA trimming parameters: 40/80 */
-#define HNBU_SUBBAND5GVER 0x8a /* subband5gver */
-#define HNBU_PAPARAMBWVER 0x8b /* paparambwver */
-
-#define HNBU_MCS5Gx1PO 0x8c
-#define HNBU_ACPPR_SB8080_PO 0x8d
-
-
-#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */
/* sbtmstatelow */
#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
*
* SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
*
- * $Copyright Open 2003 Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbsdio.h 383835 2013-02-07 23:32:39Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SBSDIO_H
* Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
* device core support
*
- * $Copyright Open 2005 Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsdpcmdev.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _sbsdpcmdev_h_
/* HW frame tag */
#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
-#if !defined(NDISVER) || (NDISVER < 0x0630)
#define SDPCM_HWEXT_LEN 8
-#else
-#define SDPCM_HWEXT_LEN 0
-#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
#endif /* _sbsdpcmdev_h_ */
/*
* BCM47XX Sonics SiliconBackplane embedded ram core
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbsocram.h 481602 2014-05-29 22:43:34Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsocram.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SBSOCRAM_H
--- /dev/null
+/*
+ * SiliconBackplane System Memory core
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbsysmem.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SBSYSMEM_H
+#define _SBSYSMEM_H
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* sysmem core registers */
+typedef volatile struct sysmemregs {
+ uint32 coreinfo;
+ uint32 bwalloc;
+ uint32 extracoreinfo;
+ uint32 biststat;
+ uint32 bankidx;
+ uint32 standbyctrl;
+
+ uint32 errlogstatus;
+ uint32 errlogaddr;
+
+ uint32 cambankidx;
+ uint32 cambankstandbyctrl;
+ uint32 cambankpatchctrl;
+ uint32 cambankpatchtblbaseaddr;
+ uint32 cambankcmdreg;
+ uint32 cambankdatareg;
+ uint32 cambankmaskreg;
+ uint32 PAD[1];
+ uint32 bankinfo;
+ uint32 PAD[15];
+ uint32 extmemconfig;
+ uint32 extmemparitycsr;
+ uint32 extmemparityerrdata;
+ uint32 extmemparityerrcnt;
+ uint32 extmemwrctrlandsize;
+ uint32 PAD[84];
+ uint32 workaround;
+ uint32 pwrctl;
+ uint32 PAD[133];
+ uint32 sr_control;
+ uint32 sr_status;
+ uint32 sr_address;
+ uint32 sr_data;
+} sysmemregs_t;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+
+/* Register offsets */
+#define SR_COREINFO 0x00
+#define SR_BWALLOC 0x04
+#define SR_BISTSTAT 0x0c
+#define SR_BANKINDEX 0x10
+#define SR_BANKSTBYCTL 0x14
+#define SR_PWRCTL 0x1e8
+
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* port type[18:16] */
+#define SRCI_PT_SHIFT 16
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
+#define SRCI_PT_OCP_OCP 0
+#define SRCI_PT_AXI_OCP 1
+#define SRCI_PT_ARM7AHB_OCP 2
+#define SRCI_PT_CM3AHB_OCP 3
+#define SRCI_PT_AXI_AXI 4
+#define SRCI_PT_AHB_AXI 5
+
+#define SRCI_LSS_MASK 0x00f00000
+#define SRCI_LSS_SHIFT 20
+#define SRCI_LRS_MASK 0x0f000000
+#define SRCI_LRS_SHIFT 24
+
+/* In corerev 0, the memory size is 2 to the power of
+ * the base (16) plus the contents of the memsize field plus 1.
+ */
+#define SRCI_MS0_MASK 0xf
+#define SR_MS0_BASE 16
+
+/*
+ * In corerev 1 the bank size is 2 to the power of (the bank size field plus 14),
+ * and the memory size is the number of banks times the bank size.
+ * The same applies to the rom size.
+ */
+#define SRCI_ROMNB_MASK 0xf000
+#define SRCI_ROMNB_SHIFT 12
+#define SRCI_ROMBSZ_MASK 0xf00
+#define SRCI_ROMBSZ_SHIFT 8
+#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_SHIFT 4
+#define SRCI_SRBSZ_MASK 0xf
+#define SRCI_SRBSZ_SHIFT 0
+
+#define SR_BSZ_BASE 14
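/*
 * Editorial sketch (not part of the original file): deriving the sysmem RAM
 * size from the coreinfo register for corerev >= 1, following the comment
 * above: bank size = 2 ^ (bank-size field + SR_BSZ_BASE), and the total is
 * the number of banks times the bank size.
 */
static uint32 sysmem_ramsize_example(uint32 coreinfo)
{
	uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
	uint bsz = (coreinfo & SRCI_SRBSZ_MASK) >> SRCI_SRBSZ_SHIFT;

	return ((uint32)nb << (SR_BSZ_BASE + bsz));
}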
+
+/* Standby control register */
+#define SRSC_SBYOVR_MASK 0x80000000
+#define SRSC_SBYOVR_SHIFT 31
+#define SRSC_SBYOVRVAL_MASK 0x60000000
+#define SRSC_SBYOVRVAL_SHIFT 29
+#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_SHIFT 24
+
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010
+#define SRPC_PMU_STBYDIS_SHIFT 4
+#define SRPC_STBYOVRVAL_MASK 0x00000008
+#define SRPC_STBYOVRVAL_SHIFT 3
+#define SRPC_STBYOVR_MASK 0x00000007
+#define SRPC_STBYOVR_SHIFT 0
+
+/* Extra core capability register */
+#define SRECC_NUM_BANKS_MASK 0x000000F0
+#define SRECC_NUM_BANKS_SHIFT 4
+#define SRECC_BANKSIZE_MASK 0x0000000F
+#define SRECC_BANKSIZE_SHIFT 0
+
+#define SRECC_BANKSIZE(value) (1 << (value))
+
+/* CAM bank patch control */
+#define SRCBPC_PATCHENABLE 0x80000000
+
+#define SRP_ADDRESS 0x0001FFFC
+#define SRP_VALID 0x8000
+
+/* CAM bank command reg */
+#define SRCMD_WRITE 0x00020000
+#define SRCMD_READ 0x00010000
+#define SRCMD_DONE 0x80000000
+
+#define SRCMD_DONE_DLY 1000
+
+/* bankidx and bankinfo reg defines */
+#define SYSMEM_BANKINFO_SZMASK 0x7f
+#define SYSMEM_BANKIDX_ROM_MASK 0x100
+
+#define SYSMEM_BANKIDX_MEMTYPE_SHIFT 8
+/* sysmem bankinfo memtype */
+#define SYSMEM_MEMTYPE_RAM 0
+#define SYSMEM_MEMTYPE_R0M 1
+#define SYSMEM_MEMTYPE_DEVRAM 2
+
+#define SYSMEM_BANKINFO_REG 0x40
+#define SYSMEM_BANKIDX_REG 0x10
+#define SYSMEM_BANKINFO_STDBY_MASK 0x400
+#define SYSMEM_BANKINFO_STDBY_TIMER 0x800
+
+#define SYSMEM_BANKINFO_DEVRAMSEL_SHIFT 13
+#define SYSMEM_BANKINFO_DEVRAMSEL_MASK 0x2000
+#define SYSMEM_BANKINFO_DEVRAMPRO_SHIFT 14
+#define SYSMEM_BANKINFO_DEVRAMPRO_MASK 0x4000
+#define SYSMEM_BANKINFO_SLPSUPP_SHIFT 15
+#define SYSMEM_BANKINFO_SLPSUPP_MASK 0x8000
+#define SYSMEM_BANKINFO_RETNTRAM_SHIFT 16
+#define SYSMEM_BANKINFO_RETNTRAM_MASK 0x00010000
+#define SYSMEM_BANKINFO_PDASZ_SHIFT 17
+#define SYSMEM_BANKINFO_PDASZ_MASK 0x003E0000
+#define SYSMEM_BANKINFO_DEVRAMREMAP_SHIFT 24
+#define SYSMEM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
+
+/* extracoreinfo register */
+#define SYSMEM_DEVRAMBANK_MASK 0xF000
+#define SYSMEM_DEVRAMBANK_SHIFT 12
+
+/* bank info to calculate bank size */
+#define SYSMEM_BANKINFO_SZBASE 8192
+#define SYSMEM_BANKSIZE_SHIFT 13 /* SYSMEM_BANKINFO_SZBASE */
+
+#endif /* _SBSYSMEM_H */
* SDIO spec header file
* Protocol and standard (common) device definitions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sdio.h 416730 2013-08-06 09:33:19Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdio.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SDIO_H
* SDIO Host Controller Spec header file
* Register map and definitions for the Standard Host Controller
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sdioh.h 345499 2012-07-18 06:59:05Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SDIOH_H
* Structure used by apps whose drivers access SDIO drivers.
* Pulled out separately so dhdu and wlu can both use it.
*
- * $ Copyright Open Broadcom Corporation $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sdiovar.h 241182 2011-02-17 21:50:03Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sdiovar.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _sdiovar_h_
* Misc utility routines for accessing the SOC Interconnects
* of Broadcom HNBU chips.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: siutils.h 481602 2014-05-29 22:43:34Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils.h 530150 2015-01-29 08:43:40Z $
*/
#ifndef _siutils_h_
#endif /* SR_DEBUG */
-/*
+/**
* Data structure to export all chip specific common variables
* public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
*/
struct si_pub {
- uint socitype; /* SOCI_SB, SOCI_AI */
-
- uint bustype; /* SI_BUS, PCI_BUS */
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
- int ccrev; /* chip common core rev */
- uint32 cccaps; /* chip common capabilities */
- uint32 cccaps_ext; /* chip common capabilities extension */
- int pmurev; /* pmu core rev */
- uint32 pmucaps; /* pmu capabilities */
- uint boardtype; /* board type */
+ uint socitype; /**< SOCI_SB, SOCI_AI */
+
+ uint bustype; /**< SI_BUS, PCI_BUS */
+ uint buscoretype; /**< PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+ uint buscorerev; /**< buscore rev */
+ uint buscoreidx; /**< buscore index */
+ int ccrev; /**< chip common core rev */
+ uint32 cccaps; /**< chip common capabilities */
+ uint32 cccaps_ext; /**< chip common capabilities extension */
+ int pmurev; /**< pmu core rev */
+ uint32 pmucaps; /**< pmu capabilities */
+ uint boardtype; /**< board type */
uint boardrev; /* board rev */
- uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
- uint chip; /* chip number */
- uint chiprev; /* chip revision */
- uint chippkg; /* chip package option */
- uint32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
+ uint boardvendor; /**< board vendor */
+ uint boardflags; /**< board flags */
+ uint boardflags2; /**< board flags2 */
+ uint chip; /**< chip number */
+ uint chiprev; /**< chip revision */
+ uint chippkg; /**< chip package option */
+ uint32 chipst; /**< chip status */
+ bool issim; /**< chip is in simulation or emulation */
+ uint socirev; /**< SOC interconnect rev */
bool pci_pr32414;
};
* (the "current core").
* Use si_setcore() or si_setcoreidx() to change the association to another core.
*/
-#define SI_OSH NULL /* Use for si_kattach when no osh is available */
+#define SI_OSH NULL /**< Use for si_kattach when no osh is available */
#define BADIDX (SI_MAXCORES + 1)
/* clkctl xtal what flags */
-#define XTAL 0x1 /* primary crystal oscillator (2050) */
-#define PLL 0x2 /* main chip pll */
+#define XTAL 0x1 /**< primary crystal oscillator (2050) */
+#define PLL 0x2 /**< main chip pll */
/* clkctl clk mode */
-#define CLK_FAST 0 /* force fast (pll) clock */
-#define CLK_DYNAMIC 2 /* enable dynamic clock control */
+#define CLK_FAST 0 /**< force fast (pll) clock */
+#define CLK_DYNAMIC 2 /**< enable dynamic clock control */
/* GPIO usage priorities */
-#define GPIO_DRV_PRIORITY 0 /* Driver */
-#define GPIO_APP_PRIORITY 1 /* Application */
-#define GPIO_HI_PRIORITY 2 /* Highest priority. Ignore GPIO reservation */
+#define GPIO_DRV_PRIORITY 0 /**< Driver */
+#define GPIO_APP_PRIORITY 1 /**< Application */
+#define GPIO_HI_PRIORITY 2 /**< Highest priority. Ignore GPIO reservation */
/* GPIO pull up/down */
#define GPIO_PULLUP 0
#define GPIO_PULLDN 1
/* GPIO event regtype */
-#define GPIO_REGEVT 0 /* GPIO register event */
-#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */
-#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */
+#define GPIO_REGEVT 0 /**< GPIO register event */
+#define GPIO_REGEVT_INTMSK 1 /**< GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL 2 /**< GPIO register event int polarity */
/* device path */
-#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */
+#define SI_DEVPATH_BUFSZ 16 /**< min buffer size in bytes */
/* SI routine enumeration: to be used by update function with multiple hooks */
#define SI_DOATTACH 1
-#define SI_PCIDOWN 2 /* wireless interface is down */
-#define SI_PCIUP 3 /* wireless interface is up */
+#define SI_PCIDOWN 2 /**< wireless interface is down */
+#define SI_PCIUP 3 /**< wireless interface is up */
#ifdef SR_DEBUG
#define PMU_RES 31
#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
#endif
-typedef void (*gpio_handler_t)(uint32 stat, void *arg);
typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+
/* External BT Coex enable mask */
#define CC_BTCOEX_EN_MASK 0x01
/* External PA enable mask */
#define GPIO_OUT_7_EN_MASK 0x80
+
/* CR4 specific defines used by the host driver */
#define SI_CR4_CAP (0x04)
#define SI_CR4_BANKIDX (0x40)
#define SICF_CPUHALT (0x0020)
#define ARMCR4_BSZ_MASK 0x3f
#define ARMCR4_BSZ_MULT 8192
-
+#define SI_BPIND_1BYTE 0x1
+#define SI_BPIND_2BYTE 0x3
+#define SI_BPIND_4BYTE 0xF
#include <osl_decl.h>
/* === exported functions === */
extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
extern uint si_corerev(si_t *sih);
extern void *si_osh(si_t *sih);
extern void si_setosh(si_t *sih, osl_t *osh);
+extern uint si_backplane_access(si_t *sih, uint addr, uint size,
+ uint *val, bool read);
extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val);
extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
+extern void si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih);
+extern void si_d11rsdb_core1_alt_reg_clk_en(si_t *sih);
extern bool si_iscoreup(si_t *sih);
extern uint si_numcoreunits(si_t *sih, uint coreid);
extern uint si_numd11coreunits(si_t *sih);
extern uint32 si_gpiotimerval(si_t *sih, uint32 mask, uint32 val);
extern void si_btcgpiowar(si_t *sih);
extern bool si_deviceremoved(si_t *sih);
+extern uint32 si_sysmem_size(si_t *sih);
extern uint32 si_socram_size(si_t *sih);
extern uint32 si_socdevram_size(si_t *sih);
extern uint32 si_socram_srmem_size(si_t *sih);
extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
-/* GPIO event handlers */
-extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
-extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
-extern void si_gpio_handler_process(si_t *sih);
-
/* GCI interrupt handlers */
extern void si_gci_handler_process(si_t *sih);
extern void si_pci_pmestatclr(si_t *sih);
extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+extern void si_deepsleep_count(si_t *sih, bool arm_wakeup);
#ifdef BCMSDIO
#define CIS_OTP 2
/* Fab-id information */
-#define DEFAULT_FAB 0x0 /* Original/first fab used for this chip */
-#define CSM_FAB7 0x1 /* CSM Fab7 chip */
-#define TSMC_FAB12 0x2 /* TSMC Fab12/Fab14 chip */
-#define SMIC_FAB4 0x3 /* SMIC Fab4 chip */
+#define DEFAULT_FAB 0x0 /**< Original/first fab used for this chip */
+#define CSM_FAB7 0x1 /**< CSM Fab7 chip */
+#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */
+#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */
extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
extern uint16 si_fabid(si_t *sih);
extern void si_chipcontrl_epa4331(si_t *sih, bool on);
extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
extern void si_chipcontrl_srom4360(si_t *sih, bool on);
+extern void si_clk_srom4365(si_t *sih);
/* Enable BT-COEX & Ex-PA for 4313 */
extern void si_epa_4313war(si_t *sih);
extern void si_btc_enable_chipcontrol(si_t *sih);
extern void si_btcombo_p250_4313_war(si_t *sih);
extern void si_btcombo_43228_war(si_t *sih);
extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
+extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag);
extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
extern uint si_pll_reset(si_t *sih);
/* === debug routines === */
extern bool si_taclear(si_t *sih, bool details);
+#if defined(BCMDBG_PHYDUMP)
+struct bcmstrbuf;
+extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b);
+#endif
#if defined(BCMDBG_PHYDUMP)
extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+extern int si_bpind_access(si_t *sih, uint32 addr_high, uint32 addr_low,
+ int32* data, bool read);
#ifdef SR_DEBUG
extern void si_dump_pmu(si_t *sih, void *pmu_var);
extern void si_pmu_keep_on(si_t *sih, int32 int_val);
#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START)
#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START)
-#define PMUREG(si, member) \
- (AOB_ENAB(si) ? \
- si_corereg_addr(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
- OFFSETOF(pmuregs_t, member)): \
- si_corereg_addr(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member)))
-
#define pmu_corereg(si, cc_idx, member, mask, val) \
(AOB_ENAB(si) ? \
- si_pmu_corereg(si, si_findcoreidx(sih, PMU_CORE_ID, 0), \
+ si_pmu_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
OFFSETOF(pmuregs_t, member), mask, val): \
si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
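/*
 * Usage sketch (not part of the header): pmu_corereg() hides whether the PMU
 * registers live in a separate PMU core (AOB) or in chipcommon.  The helper
 * name is hypothetical, and 'min_res_mask' is assumed to be a member of both
 * pmuregs_t and chipcregs_t.
 */
static inline void si_pmu_set_min_res_mask(si_t *sih, uint32 new_mask)
{
	/* mask ~0 replaces the whole register with new_mask */
	pmu_corereg(sih, SI_CC_IDX, min_res_mask, ~0, new_mask);
}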
/* GCI Macros */
#define ALLONES_32 0xFFFFFFFF
-#define GCI_CCTL_SECIRST_OFFSET 0 /* SeciReset */
-#define GCI_CCTL_RSTSL_OFFSET 1 /* ResetSeciLogic */
-#define GCI_CCTL_SECIEN_OFFSET 2 /* EnableSeci */
-#define GCI_CCTL_FSL_OFFSET 3 /* ForceSeciOutLow */
-#define GCI_CCTL_SMODE_OFFSET 4 /* SeciOpMode, 6:4 */
-#define GCI_CCTL_US_OFFSET 7 /* UpdateSeci */
-#define GCI_CCTL_BRKONSLP_OFFSET 8 /* BreakOnSleep */
-#define GCI_CCTL_SILOWTOUT_OFFSET 9 /* SeciInLowTimeout, 10:9 */
-#define GCI_CCTL_RSTOCC_OFFSET 11 /* ResetOffChipCoex */
-#define GCI_CCTL_ARESEND_OFFSET 12 /* AutoBTSigResend */
-#define GCI_CCTL_FGCR_OFFSET 16 /* ForceGciClkReq */
-#define GCI_CCTL_FHCRO_OFFSET 17 /* ForceHWClockReqOff */
-#define GCI_CCTL_FREGCLK_OFFSET 18 /* ForceRegClk */
-#define GCI_CCTL_FSECICLK_OFFSET 19 /* ForceSeciClk */
-#define GCI_CCTL_FGCA_OFFSET 20 /* ForceGciClkAvail */
-#define GCI_CCTL_FGCAV_OFFSET 21 /* ForceGciClkAvailValue */
-#define GCI_CCTL_SCS_OFFSET 24 /* SeciClkStretch, 31:24 */
+#define GCI_CCTL_SECIRST_OFFSET 0 /**< SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET 1 /**< ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET 2 /**< EnableSeci */
+#define GCI_CCTL_FSL_OFFSET 3 /**< ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET 4 /**< SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET 7 /**< UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET 8 /**< BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET 9 /**< SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET 11 /**< ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET 12 /**< AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET 16 /**< ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET 17 /**< ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET 18 /**< ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET 19 /**< ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */
#define GCI_MODE_UART 0x0
#define GCI_MODE_SECI 0x1
#define GCI_GPIOIDX_OFFSET 16
-#define GCI_LTECX_SECI_ID 0 /* SECI port for LTECX */
+#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */
/* To access per GCI bit registers */
#define GCI_REG_WIDTH 32
#define PMU_OOB 0x2
#define D11_OOB 0x3
#define SDIOD_OOB 0x4
-#define PMU_OOB_BIT (0x10 | PMU_OOB)
+#define WLAN_OOB 0x5
+#define PMU_OOB_BIT 0x12
#endif /* REROUTE_OOBINT */
+extern void si_pll_sr_reinit(si_t *sih);
+extern void si_pll_closeloop(si_t *sih);
#endif /* _siutils_h_ */
--- /dev/null
+/*
+ * SPI device spec header file
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: spid.h 514727 2014-11-12 03:02:48Z $
+ */
+
+#ifndef _SPI_H
+#define _SPI_H
+
+/*
+ * Brcm SPI Device Register Map.
+ *
+ */
+
+typedef volatile struct {
+ uint8 config; /* 0x00, len, endian, clock, speed, polarity, wakeup */
+ uint8 response_delay; /* 0x01, read response delay in bytes (corerev < 3) */
+ uint8 status_enable; /* 0x02, status-enable, intr with status, response_delay
+ * function selection, command/data error check
+ */
+ uint8 reset_bp; /* 0x03, reset on wlan/bt backplane reset (corerev >= 1) */
+ uint16 intr_reg; /* 0x04, Intr status register */
+ uint16 intr_en_reg; /* 0x06, Intr mask register */
+ uint32 status_reg; /* 0x08, RO, Status bits of last spi transfer */
+ uint16 f1_info_reg; /* 0x0c, RO, enabled, ready for data transfer, blocksize */
+ uint16 f2_info_reg; /* 0x0e, RO, enabled, ready for data transfer, blocksize */
+ uint16 f3_info_reg; /* 0x10, RO, enabled, ready for data transfer, blocksize */
+ uint32 test_read; /* 0x14, RO 0xfeedbead signature */
+ uint32 test_rw; /* 0x18, RW */
+ uint8 resp_delay_f0; /* 0x1c, read resp delay bytes for F0 (corerev >= 3) */
+ uint8 resp_delay_f1; /* 0x1d, read resp delay bytes for F1 (corerev >= 3) */
+ uint8 resp_delay_f2; /* 0x1e, read resp delay bytes for F2 (corerev >= 3) */
+ uint8 resp_delay_f3; /* 0x1f, read resp delay bytes for F3 (corerev >= 3) */
+} spi_regs_t;
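/*
 * Illustrative sketch (not part of the header): the SPID_* offsets defined
 * below are expected to mirror the spi_regs_t layout above, e.g. status_reg
 * at byte 0x08.  A compile-time check (typedef name hypothetical, offsetof
 * from <stddef.h>) would make that correspondence explicit:
 */
typedef char spid_status_reg_offset_is_0x08[
	(offsetof(spi_regs_t, status_reg) == 0x08) ? 1 : -1];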
+
+/* SPI device register offsets */
+#define SPID_CONFIG 0x00
+#define SPID_RESPONSE_DELAY 0x01
+#define SPID_STATUS_ENABLE 0x02
+#define SPID_RESET_BP 0x03 /* (corerev >= 1) */
+#define SPID_INTR_REG 0x04 /* 16 bits - Interrupt status */
+#define SPID_INTR_EN_REG 0x06 /* 16 bits - Interrupt mask */
+#define SPID_STATUS_REG 0x08 /* 32 bits */
+#define SPID_F1_INFO_REG 0x0C /* 16 bits */
+#define SPID_F2_INFO_REG 0x0E /* 16 bits */
+#define SPID_F3_INFO_REG 0x10 /* 16 bits */
+#define SPID_TEST_READ 0x14 /* 32 bits */
+#define SPID_TEST_RW 0x18 /* 32 bits */
+#define SPID_RESP_DELAY_F0 0x1c /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F1 0x1d /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F2 0x1e /* 8 bits (corerev >= 3) */
+#define SPID_RESP_DELAY_F3 0x1f /* 8 bits (corerev >= 3) */
+
+/* Bit masks for SPID_CONFIG device register */
+#define WORD_LENGTH_32 0x1 /* 0/1 16/32 bit word length */
+#define ENDIAN_BIG 0x2 /* 0/1 Little/Big Endian */
+#define CLOCK_PHASE 0x4 /* 0/1 clock phase delay */
+#define CLOCK_POLARITY 0x8 /* 0/1 Idle state clock polarity is low/high */
+#define HIGH_SPEED_MODE 0x10 /* 1/0 High Speed mode / Normal mode */
+#define INTR_POLARITY 0x20 /* 1/0 Interrupt active polarity is high/low */
+#define WAKE_UP 0x80 /* 0/1 Wake-up command from Host to WLAN */
+
+/* Bit mask for SPID_RESPONSE_DELAY device register */
+#define RESPONSE_DELAY_MASK 0xFF /* Configurable rd response delay in multiples of 8 bits */
+
+/* Bit mask for SPID_STATUS_ENABLE device register */
+#define STATUS_ENABLE 0x1 /* 1/0 Status sent/not sent to host after read/write */
+#define INTR_WITH_STATUS 0x2 /* 0/1 Do-not / do-interrupt if status is sent */
+#define RESP_DELAY_ALL 0x4 /* Applicability of resp delay to F1 or all func's read */
+#define DWORD_PKT_LEN_EN 0x8 /* Packet len denoted in dwords instead of bytes */
+#define CMD_ERR_CHK_EN 0x20 /* Command error check enable */
+#define DATA_ERR_CHK_EN 0x40 /* Data error check enable */
+
+/* Bit mask for SPID_RESET_BP device register */
+#define RESET_ON_WLAN_BP_RESET 0x4 /* enable reset for WLAN backplane */
+#define RESET_ON_BT_BP_RESET 0x8 /* enable reset for BT backplane */
+#define RESET_SPI 0x80 /* reset the above enabled logic */
+
+/* Bit mask for SPID_INTR_REG device register */
+#define DATA_UNAVAILABLE 0x0001 /* Requested data not available; Clear by writing a "1" */
+#define F2_F3_FIFO_RD_UNDERFLOW 0x0002
+#define F2_F3_FIFO_WR_OVERFLOW 0x0004
+#define COMMAND_ERROR 0x0008 /* Cleared by writing 1 */
+#define DATA_ERROR 0x0010 /* Cleared by writing 1 */
+#define F2_PACKET_AVAILABLE 0x0020
+#define F3_PACKET_AVAILABLE 0x0040
+#define F1_OVERFLOW 0x0080 /* Due to last write. Bkplane has pending write requests */
+#define MISC_INTR0 0x0100
+#define MISC_INTR1 0x0200
+#define MISC_INTR2 0x0400
+#define MISC_INTR3 0x0800
+#define MISC_INTR4 0x1000
+#define F1_INTR 0x2000
+#define F2_INTR 0x4000
+#define F3_INTR 0x8000
+
+/* Bit mask for 32bit SPID_STATUS_REG device register */
+#define STATUS_DATA_NOT_AVAILABLE 0x00000001
+#define STATUS_UNDERFLOW 0x00000002
+#define STATUS_OVERFLOW 0x00000004
+#define STATUS_F2_INTR 0x00000008
+#define STATUS_F3_INTR 0x00000010
+#define STATUS_F2_RX_READY 0x00000020
+#define STATUS_F3_RX_READY 0x00000040
+#define STATUS_HOST_CMD_DATA_ERR 0x00000080
+#define STATUS_F2_PKT_AVAILABLE 0x00000100
+#define STATUS_F2_PKT_LEN_MASK 0x000FFE00
+#define STATUS_F2_PKT_LEN_SHIFT 9
+#define STATUS_F3_PKT_AVAILABLE 0x00100000
+#define STATUS_F3_PKT_LEN_MASK 0xFFE00000
+#define STATUS_F3_PKT_LEN_SHIFT 21
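/*
 * Illustrative sketch (not part of the header): decoding the F2 packet length
 * from a 32-bit gSPI status word using the masks above.  The helper name is
 * hypothetical.
 */
static inline uint32 spi_status_f2_pkt_len(uint32 status)
{
	if (!(status & STATUS_F2_PKT_AVAILABLE))
		return 0;	/* no F2 packet pending */
	return (status & STATUS_F2_PKT_LEN_MASK) >> STATUS_F2_PKT_LEN_SHIFT;
}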
+
+/* Bit mask for 16 bits SPID_F1_INFO_REG device register */
+#define F1_ENABLED 0x0001
+#define F1_RDY_FOR_DATA_TRANSFER 0x0002
+#define F1_MAX_PKT_SIZE 0x01FC
+
+/* Bit mask for 16 bits SPID_F2_INFO_REG device register */
+#define F2_ENABLED 0x0001
+#define F2_RDY_FOR_DATA_TRANSFER 0x0002
+#define F2_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 16 bits SPID_F3_INFO_REG device register */
+#define F3_ENABLED 0x0001
+#define F3_RDY_FOR_DATA_TRANSFER 0x0002
+#define F3_MAX_PKT_SIZE 0x3FFC
+
+/* Bit mask for 32 bits SPID_TEST_READ device register read in 16bit LE mode */
+#define TEST_RO_DATA_32BIT_LE 0xFEEDBEAD
+
+/* Maximum number of I/O funcs */
+#define SPI_MAX_IOFUNCS 4
+
+#define SPI_MAX_PKT_LEN (2048*4)
+
+/* Misc defines */
+#define SPI_FUNC_0 0
+#define SPI_FUNC_1 1
+#define SPI_FUNC_2 2
+#define SPI_FUNC_3 3
+
+#define WAIT_F2RXFIFORDY 100
+#define WAIT_F2RXFIFORDY_DELAY 20
+
+#endif /* _SPI_H */
/*
* TRX image file header format.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: trxhdr.h 349211 2012-08-07 09:45:24Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: trxhdr.h 520026 2014-12-10 01:29:40Z $
*/
#ifndef _TRX_HDR_H
#define TRX_V2_MAX_OFFSETS 5
#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32))
#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32))
-#define TRX_VER(trx) (trx->flag_version>>16)
+#define TRX_VER(trx) ((trx)->flag_version>>16)
#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1)
#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2)
/* For V2, return size of V2 size: others, return V1 size */
/*
- * $Copyright Open Broadcom Corporation$
- * $Id: typedefs.h 484281 2014-06-12 22:42:26Z $
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: typedefs.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _TYPEDEFS_H_
* a duplicate typedef error; there is no way to "undefine" a typedef.
* We know when it's per-port code because each file defines LINUX_PORT at the top.
*/
-#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
#define TYPEDEF_UINT
#ifndef TARGETENV_android
#define TYPEDEF_USHORT
#endif
#endif /* == 2.6.18 */
#endif /* __KERNEL__ */
-#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
/* Do not support the (u)int64 types with strict ansi for GNU C */
#if defined(__KERNEL__)
/* See note above */
-#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
#include <linux/types.h> /* sys/types.h and linux/types.h are oil and water */
-#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
#else
/*
-* $Copyright Open 2009 Broadcom Corporation$
-* $Id: wlfc_proto.h 499510 2014-08-28 23:40:47Z $
-*
-*/
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlfc_proto.h 542895 2015-03-22 14:13:12Z $
+ *
+ */
+
+/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
+
#ifndef __wlfc_proto_definitions_h__
#define __wlfc_proto_definitions_h__
| 3 | 2 | (count, handle, prec_bmp)| Set the credit depth for a MAC dstn
---------------------------------------------------------------------------
| 4 | 4+ | see pkttag comments | TXSTATUS
- | | | TX status & timestamps | Present only when pkt timestamp is enabled
+ | | 12 | TX status & timestamps | Present only when pkt timestamp is enabled
---------------------------------------------------------------------------
| 5 | 4 | see pkttag comments | PKKTTAG [host->firmware]
---------------------------------------------------------------------------
#define WLFC_CTL_TYPE_MAC_CLOSE 2
#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3
#define WLFC_CTL_TYPE_TXSTATUS 4
-#define WLFC_CTL_TYPE_PKTTAG 5
+#define WLFC_CTL_TYPE_PKTTAG 5 /**< host<->dongle */
#define WLFC_CTL_TYPE_MACDESC_ADD 6
#define WLFC_CTL_TYPE_MACDESC_DEL 7
#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11
-#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 /**< host->dongle */
#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14
-
#define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15
#define WLFC_CTL_TYPE_RX_STAMP 16
+#define WLFC_CTL_TYPE_TX_STATUS_STAMP 17 /**< obsolete */
#define WLFC_CTL_TYPE_TRANS_ID 18
#define WLFC_CTL_TYPE_COMP_TXSTATUS 19
#define WLFC_CTL_TYPE_FILLER 255
-#define WLFC_CTL_VALUE_LEN_MACDESC 8 /* handle, interface, MAC */
+#define WLFC_CTL_VALUE_LEN_MACDESC 8 /**< handle, interface, MAC */
-#define WLFC_CTL_VALUE_LEN_MAC 1 /* MAC-handle */
+#define WLFC_CTL_VALUE_LEN_MAC 1 /**< MAC-handle */
#define WLFC_CTL_VALUE_LEN_RSSI 1
#define WLFC_CTL_VALUE_LEN_INTERFACE 1
#define WLFC_CTL_VALUE_LEN_TXSTATUS 4
#define WLFC_CTL_VALUE_LEN_PKTTAG 4
+#define WLFC_CTL_VALUE_LEN_TIMESTAMP 12 /**< 4-byte rate info + two 4-byte TSF timestamps */
#define WLFC_CTL_VALUE_LEN_SEQ 2
+/* The high bits of the ratespec reported in the timestamp carry various status flags */
+#define WLFC_TSFLAGS_RX_RETRY (1 << 31)
+#define WLFC_TSFLAGS_PM_ENABLED (1 << 30)
+#define WLFC_TSFLAGS_MASK (WLFC_TSFLAGS_RX_RETRY | WLFC_TSFLAGS_PM_ENABLED)
+
/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
-#define WLFC_PKTFLAG_PKTFROMHOST 0x01 /* packet originated from hot side */
-#define WLFC_PKTFLAG_PKT_REQUESTED 0x02 /* packet requsted by firmware side */
-#define WLFC_PKTFLAG_PKT_FORCELOWRATE 0x04 /* force low rate for this packet */
+#define WLFC_PKTFLAG_PKTFROMHOST 0x01
+#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
#define WL_TXSTATUS_STATUS_SHIFT 24
#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
WL_TXSTATUS_STATUS_MASK)
+/**
+ * Bit 31 of the 32-bit packet tag is defined as 'generation ID'. It is set by the host to the
+ * "current" generation, and by the firmware to the "expected" generation, toggling on suppress. The
+ * firmware accepts a packet when the generation matches; on reset (startup) both "current" and
+ * "expected" are set to 0.
+ */
#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */
#define WL_TXSTATUS_GENERATION_SHIFT 31
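/*
 * Illustrative sketch (not part of the header): reading and toggling the
 * generation bit described above.  Helper names are hypothetical.
 */
static inline uint32 pkttag_get_generation(uint32 pkttag)
{
	return (pkttag >> WL_TXSTATUS_GENERATION_SHIFT) & WL_TXSTATUS_GENERATION_MASK;
}

static inline uint32 pkttag_toggle_generation(uint32 pkttag)
{
	/* flips "current"/"expected" on suppress, as the comment above describes */
	return pkttag ^ ((uint32)WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT);
}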
((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x)& WL_TXSTATUS_FREERUNCTR_MASK)
+/* Seq number is part of an AMSDU */
+#define WL_SEQ_AMSDU_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_AMSDU_SHIFT 14
+#define WL_SEQ_SET_AMSDU(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT)) | \
+ (((val) & WL_SEQ_AMSDU_MASK) << WL_SEQ_AMSDU_SHIFT))
+#define WL_SEQ_GET_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \
+ WL_SEQ_AMSDU_MASK)
+
+/* Seq number is valid coming from FW */
#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */
#define WL_SEQ_FROMFW_SHIFT 13
#define WL_SEQ_SET_FROMFW(x, val) ((x) = \
#define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \
WL_SEQ_FROMFW_MASK)
+/**
+ * Proptxstatus related.
+ *
+ * A packet from the bus layer (DHD for SDIO, pciedev for PCIe) is re-using a
+ * seq number that was previously suppressed, so the FW should not assign a
+ * new one.
+ */
#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */
#define WL_SEQ_FROMDRV_SHIFT 12
#define WL_SEQ_SET_FROMDRV(x, val) ((x) = \
#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \
WL_SEQ_NUM_MASK)
+#define WL_SEQ_AMSDU_SUPPR_MASK ((WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT) | \
+ (WL_SEQ_AMSDU_MASK << WL_SEQ_AMSDU_SHIFT) | \
+ (WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT))
+
/* 32 STA should be enough??, 6 bits; Must be power of 2 */
#define WLFC_MAC_DESC_TABLE_SIZE 32
#define WLFC_MAX_IFNUM 16
/* b[7:5] -reuse guard, b[4:0] -value */
#define WLFC_MAC_DESC_GET_LOOKUP_INDEX(x) ((x) & 0x1f)
+#define WLFC_PKTFLAG_SET_PKTREQUESTED(x) (x) |= \
+ (WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
+ ~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
+
#define WLFC_MAX_PENDING_DATALEN 120
/* host is free to discard the packet */
#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
/* Firmware tossed after retries */
#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4
+/* Firmware wrongly reported the packet as suppressed previously; now fixed up to acked */
+#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5
#define WLFC_D11_STATUS_INTERPRET(txs) \
- (((txs)->status.suppr_ind != TX_STATUS_SUPR_NONE) ? \
- WLFC_CTL_PKTFLAG_D11SUPPRESS : \
- ((txs)->status.was_acked ? \
- WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK))
+ ((txs)->status.was_acked ? WLFC_CTL_PKTFLAG_DISCARD : \
+ (TXS_SUPR_MAGG_DONE((txs)->status.suppr_ind) ? \
+ WLFC_CTL_PKTFLAG_DISCARD_NOACK : WLFC_CTL_PKTFLAG_D11SUPPRESS))
+
#ifdef PROP_TXSTATUS_DEBUG
#define WLFC_DBGMESG(x) printf x
#define WLFC_TYPE_TRANS_ID_LEN 6
#define WLFC_MODE_HANGER 1 /* use hanger */
-#define WLFC_MODE_AFQ 2 /* use afq */
+#define WLFC_MODE_AFQ 2 /* use afq (At Firmware Queue) */
#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */
/*
* Custom OID/ioctl definitions for
+ *
+ *
* Broadcom 802.11abg Networking Device Driver
*
* Definitions subject to change without notice.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl.h 504503 2014-09-24 11:28:56Z $
+ * $Id: wlioctl.h 609280 2016-01-01 06:31:38Z $
*/
#ifndef _wlioctl_h_
#include <bcmwifi_channels.h>
#include <bcmwifi_rates.h>
#include <devctrl_if/wlioctl_defs.h>
+#include <proto/bcmipv6.h>
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#include <bcm_mpool_pub.h>
#include <bcmcdc.h>
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct {
+ uint32 num;
+ chanspec_t list[1];
+} chanspec_list_t;
+
+#define RSN_KCK_LENGTH 16
+#define RSN_KEK_LENGTH 16
+
#ifndef INTF_NAME_SIZ
#define INTF_NAME_SIZ 16
} rem_ioctl_t;
#define REMOTE_SIZE sizeof(rem_ioctl_t)
-typedef struct {
- uint32 num;
- chanspec_t list[1];
-} chanspec_list_t;
/* DFS Forced param */
typedef struct wl_dfs_forced_params {
/* association decision information */
typedef struct {
- bool assoc_approved; /* (re)association approved */
- uint16 reject_reason; /* reason code for rejecting association */
+ bool assoc_approved; /**< (re)association approved */
+ uint16 reject_reason; /**< reason code for rejecting association */
struct ether_addr da;
-#if 0 && (NDISVER >= 0x0620)
- LARGE_INTEGER sys_time; /* current system time */
-#else
- int64 sys_time; /* current system time */
-#endif
+ int64 sys_time; /**< current system time */
} assoc_decision_t;
+#define DFS_SCAN_S_IDLE -1
+#define DFS_SCAN_S_RADAR_FREE 0
+#define DFS_SCAN_S_RADAR_FOUND 1
+#define DFS_SCAN_S_INPROGESS 2
+#define DFS_SCAN_S_SCAN_ABORTED 3
+#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4
+#define DFS_SCAN_S_MAX 5
+
+
#define ACTION_FRAME_SIZE 1800
typedef struct wl_action_frame {
typedef struct ssid_info
{
- uint8 ssid_len; /* the length of SSID */
- uint8 ssid[32]; /* SSID string */
+ uint8 ssid_len; /**< the length of SSID */
+ uint8 ssid[32]; /**< SSID string */
} ssid_info_t;
typedef struct wl_af_params {
struct ether_addr da;
} wl_sa_query_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
/* require default structure packing */
#define BWL_DEFAULT_PACKING
#include <packed_section_start.h>
} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t;
typedef BWL_PRE_PACKED_STRUCT struct {
- uint32 version; /* version field */
+ uint32 version; /**< version field */
uint32 config_mask;
uint32 reset_mask;
wlc_prot_dynbwsw_config_t config_params;
} BWL_POST_PACKED_STRUCT obss_config_params_t;
+/* bsscfg type */
+typedef enum bsscfg_type_t {
+ BSSCFG_TYPE_GENERIC = 0, /**< default */
+ BSSCFG_TYPE_P2P = 1, /**< The BSS is for p2p link */
+ BSSCFG_TYPE_BTA = 2,
+ BSSCFG_TYPE_TDLS = 4,
+ BSSCFG_TYPE_AWDL = 5,
+ BSSCFG_TYPE_PROXD = 6,
+ BSSCFG_TYPE_NAN = 7,
+ BSSCFG_TYPE_MAX
+} bsscfg_type_t;
+
+/* bsscfg subtype */
+enum {
+ BSSCFG_GENERIC_STA = 1, /* GENERIC */
+ BSSCFG_GENERIC_AP = 2, /* GENERIC */
+ BSSCFG_P2P_GC = 3, /* P2P */
+ BSSCFG_P2P_GO = 4, /* P2P */
+ BSSCFG_P2P_DISC = 5, /* P2P */
+};
+
+typedef struct wlc_bsscfg_info {
+ uint32 type;
+ uint32 subtype;
+} wlc_bsscfg_info_t;
+
+
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Legacy structure to help keep backward compatible wl tool and tray app */
-#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */
+#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */
typedef struct wl_bss_info_107 {
- uint32 version; /* version field */
- uint32 length; /* byte length of data in this record,
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
* starting at version and including IEs
*/
struct ether_addr BSSID;
- uint16 beacon_period; /* units are Kusec */
- uint16 capability; /* Capability information */
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
uint8 SSID_len;
uint8 SSID[32];
struct {
- uint count; /* # rates in this set */
- uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- uint8 channel; /* Channel no. */
- uint16 atim_window; /* units are Kusec */
- uint8 dtim_period; /* DTIM period */
- int16 RSSI; /* receive signal strength (in dBm) */
- int8 phy_noise; /* noise (in dBm) */
- uint32 ie_length; /* byte length of Information Elements */
+ uint count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ uint8 channel; /**< Channel no. */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+ uint32 ie_length; /**< byte length of Information Elements */
/* variable length Information Elements */
} wl_bss_info_107_t;
* Per-BSS information structure.
*/
-#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */
+#define LEGACY2_WL_BSS_INFO_VERSION 108 /**< old version of wl_bss_info struct */
/* BSS info structure
* Applications MUST CHECK ie_offset field and length field to access IEs and
* next bss_info structure in a vector (in wl_scan_results_t)
*/
typedef struct wl_bss_info_108 {
- uint32 version; /* version field */
- uint32 length; /* byte length of data in this record,
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
* starting at version and including IEs
*/
struct ether_addr BSSID;
- uint16 beacon_period; /* units are Kusec */
- uint16 capability; /* Capability information */
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
uint8 SSID_len;
uint8 SSID[32];
struct {
- uint count; /* # rates in this set */
- uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- chanspec_t chanspec; /* chanspec for bss */
- uint16 atim_window; /* units are Kusec */
- uint8 dtim_period; /* DTIM period */
- int16 RSSI; /* receive signal strength (in dBm) */
- int8 phy_noise; /* noise (in dBm) */
-
- uint8 n_cap; /* BSS is 802.11N Capable */
- uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
- uint8 ctl_ch; /* 802.11N BSS control channel number */
- uint32 reserved32[1]; /* Reserved for expansion of BSS properties */
- uint8 flags; /* flags */
- uint8 reserved[3]; /* Reserved for expansion of BSS properties */
- uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
-
- uint16 ie_offset; /* offset at which IEs start, from beginning */
- uint32 ie_length; /* byte length of Information Elements */
+ uint count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint32 nbss_cap; /**< 802.11N BSS Capabilities (based on HT_CAP_*) */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint32 reserved32[1]; /**< Reserved for expansion of BSS properties */
+ uint8 flags; /**< flags */
+ uint8 reserved[3]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint32 ie_length; /**< byte length of Information Elements */
/* Add new fields here */
/* variable length Information Elements */
} wl_bss_info_108_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-#define WL_BSS_INFO_VERSION 109 /* current version of wl_bss_info struct */
+#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */
/* BSS info structure
* Applications MUST CHECK ie_offset field and length field to access IEs and
* next bss_info structure in a vector (in wl_scan_results_t)
*/
typedef struct wl_bss_info {
- uint32 version; /* version field */
- uint32 length; /* byte length of data in this record,
+ uint32 version; /**< version field */
+ uint32 length; /**< byte length of data in this record,
* starting at version and including IEs
*/
struct ether_addr BSSID;
- uint16 beacon_period; /* units are Kusec */
- uint16 capability; /* Capability information */
+ uint16 beacon_period; /**< units are Kusec */
+ uint16 capability; /**< Capability information */
uint8 SSID_len;
uint8 SSID[32];
struct {
- uint count; /* # rates in this set */
- uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- chanspec_t chanspec; /* chanspec for bss */
- uint16 atim_window; /* units are Kusec */
- uint8 dtim_period; /* DTIM period */
- int16 RSSI; /* receive signal strength (in dBm) */
- int8 phy_noise; /* noise (in dBm) */
-
- uint8 n_cap; /* BSS is 802.11N Capable */
- uint32 nbss_cap; /* 802.11N+AC BSS Capabilities */
- uint8 ctl_ch; /* 802.11N BSS control channel number */
- uint8 padding1[3]; /* explicit struct alignment padding */
- uint16 vht_rxmcsmap; /* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint16 vht_txmcsmap; /* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint8 flags; /* flags */
- uint8 vht_cap; /* BSS is vht capable */
- uint8 reserved[2]; /* Reserved for expansion of BSS properties */
- uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
-
- uint16 ie_offset; /* offset at which IEs start, from beginning */
- uint32 ie_length; /* byte length of Information Elements */
- int16 SNR; /* average SNR of during frame reception */
+ uint count; /**< # rates in this set */
+ uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
+ } rateset; /**< supported rates */
+ chanspec_t chanspec; /**< chanspec for bss */
+ uint16 atim_window; /**< units are Kusec */
+ uint8 dtim_period; /**< DTIM period */
+ int16 RSSI; /**< receive signal strength (in dBm) */
+ int8 phy_noise; /**< noise (in dBm) */
+
+ uint8 n_cap; /**< BSS is 802.11N Capable */
+ uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
+ uint8 ctl_ch; /**< 802.11N BSS control channel number */
+ uint8 padding1[3]; /**< explicit struct alignment padding */
+ uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint8 flags; /**< flags */
+ uint8 vht_cap; /**< BSS is vht capable */
+ uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
+ uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
+
+ uint16 ie_offset; /**< offset at which IEs start, from beginning */
+ uint32 ie_length; /**< byte length of Information Elements */
+ int16 SNR; /**< average SNR of during frame reception */
+ uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
+ uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
+ uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
/* Add new fields here */
/* variable length Information Elements */
} wl_bss_info_t;
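/*
 * Illustrative sketch (not part of the header): per the comment above,
 * applications must use ie_offset and ie_length to reach the variable-length
 * IEs that follow a wl_bss_info_t record.  The helper name is hypothetical.
 */
static inline const uint8 *wl_bss_info_ies(const wl_bss_info_t *bi, uint32 *ies_len)
{
	*ies_len = bi->ie_length;			/* byte length of the IEs */
	return (const uint8 *)bi + bi->ie_offset;	/* IEs start ie_offset bytes from 'version' */
}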
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WL_GSCAN_BSS_INFO_VERSION 1 /* current version of wl_gscan_bss_info struct */
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
+
+typedef struct wl_gscan_bss_info {
+ uint32 timestamp[2];
+ wl_bss_info_t info;
+ /* Do not add any more members below, fixed */
+ /* and variable length Information Elements to follow */
+} wl_gscan_bss_info_t;
+
typedef struct wl_bsscfg {
uint32 bsscfg_idx;
uint32 if_flags;
uint32 ap;
struct ether_addr mac_addr;
+ uint32 wlc_index;
} wl_if_add_t;
typedef struct wl_bss_config {
uint32 chanspec;
} wl_bss_config_t;
-#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /* User application will randomly select
+#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select
* radar channel.
*/
-#define DLOAD_HANDLER_VER 1 /* Downloader version */
-#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */
-#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */
+#define DLOAD_HANDLER_VER 1 /**< Downloader version */
+#define DLOAD_FLAG_VER_MASK 0xf000 /**< Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT 12 /**< Downloader version shift */
-#define DL_CRC_NOT_INUSE 0x0001
+#define DL_CRC_NOT_INUSE 0x0001
+#define DL_BEGIN 0x0002
+#define DL_END 0x0004
/* generic download types & flags */
enum {
};
typedef struct wl_clm_dload_info wl_clm_dload_info_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
typedef struct wlc_ssid {
uint32 SSID_len;
uchar SSID[DOT11_MAX_SSID_LEN];
} wlc_ssid_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
+typedef struct wlc_ssid_ext {
+ bool hidden;
+ uint32 SSID_len;
+ uchar SSID[DOT11_MAX_SSID_LEN];
+} wlc_ssid_ext_t;
+
#define MAX_PREFERRED_AP_NUM 5
typedef struct wlc_fastssidinfo {
wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM];
} wlc_fastssidinfo_t;
+#ifdef CUSTOMER_HW_31_1
+
+#define AP_NORM 0
+#define AP_STEALTH 1
+#define STREET_PASS_AP 2
+
+#define NSC_MAX_TGT_SSID 20
+typedef struct nsc_ssid_entry_list {
+ wlc_ssid_t ssid_info;
+ int ssid_type;
+} nsc_ssid_entry_list_t;
+
+typedef struct nsc_ssid_list {
+ uint32 num_entries; /* N wants 150 */
+ nsc_ssid_entry_list_t ssid_entry[1];
+} nsc_ssid_list_t;
+
+#define NSC_TGT_SSID_BUFSZ (sizeof(nsc_ssid_entry_list_t) * \
+ (NSC_MAX_TGT_SSID - 1) + sizeof(nsc_ssid_list_t))
+
+/* Default values from N */
+#define NSC_SCPATT_ARRSZ 32
+
+/* scan types */
+#define UNI_SCAN 0
+#define SP_SCAN_ACTIVE 1
+#define SP_SCAN_PASSIVE 2
+#define DOZE 3
+
+/* what we found */
+typedef struct nsc_scan_results {
+ wlc_ssid_t ssid;
+ struct ether_addr mac;
+ int scantype;
+ uint16 channel;
+} nsc_scan_results_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct nsc_af_body {
+ uint8 type; /* should be 0x7f */
+ uint8 oui[DOT11_OUI_LEN]; /* just like it says */
+ uint8 subtype;
+ uint8 ielen; /* */
+ uint8 data[1]; /* variable */
+} BWL_POST_PACKED_STRUCT nsc_af_body_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct nsc_sdlist {
+ uint8 scantype;
+ uint16 duration;
+ uint16 channel; /* SP only */
+ uint8 ssid_index; /* SP only */
+ uint16 rate; /* SP only */
+} BWL_POST_PACKED_STRUCT nsc_sdlist_t;
+
+typedef struct nsc_scandes {
+ uint32 num_entries; /* number of list entries */
+ nsc_sdlist_t sdlist[1]; /* variable */
+} nsc_scandes_t;
+
+#define NSC_MAX_SDLIST_ENTRIES 8
+#define NSC_SDDESC_BUFSZ (sizeof(nsc_sdlist_t) * \
+ (NSC_MAX_SDLIST_ENTRIES - 1) + sizeof(nsc_scandes_t))
+
+#define SCAN_ARR_END (NSC_MAX_SDLIST_ENTRIES)
+#endif /* CUSTOMER_HW_31_1 */
+
typedef BWL_PRE_PACKED_STRUCT struct wnm_url {
uint8 len;
uint8 data[1];
} BWL_POST_PACKED_STRUCT wnm_url_t;
+#define WNM_BSS_SELECT_TYPE_RSSI 0
+#define WNM_BSS_SELECT_TYPE_CU 1
+
+#define WNM_BSSLOAD_MONITOR_VERSION 1
+typedef struct wnm_bssload_monitor_cfg {
+ uint8 version;
+ uint8 band;
+ uint8 duration; /* duration, between 1 and 20 sec */
+} wnm_bssload_monitor_cfg_t;
+
+#define BSS_MAXTABLE_SIZE 10
+#define WNM_BSS_SELECT_FACTOR_VERSION 1
+typedef struct wnm_bss_select_factor_params {
+ uint8 low;
+ uint8 high;
+ uint8 factor;
+ uint8 pad;
+} wnm_bss_select_factor_params_t;
+
+typedef struct wnm_bss_select_factor_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+ uint16 pad;
+ uint16 count;
+ wnm_bss_select_factor_params_t params[1];
+} wnm_bss_select_factor_cfg_t;
+
+#define WNM_BSS_SELECT_WEIGHT_VERSION 1
+typedef struct wnm_bss_select_weight_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+ uint16 weight; /* weightage for each type, between 0 and 100 */
+} wnm_bss_select_weight_cfg_t;
+
+#define WNM_ROAM_TRIGGER_VERSION 1
+typedef struct wnm_roam_trigger_cfg {
+ uint8 version;
+ uint8 band;
+ uint16 type;
+ int16 trigger; /* trigger for each type in new roam algorithm */
+} wnm_roam_trigger_cfg_t;
+
typedef struct chan_scandata {
uint8 txpower;
uint8 pad;
- chanspec_t channel; /* Channel num, bw, ctrl_sb and band */
+ chanspec_t channel; /**< Channel num, bw, ctrl_sb and band */
uint32 channel_mintime;
uint32 channel_maxtime;
} chan_scandata_t;
#define WLC_EXTDSCAN_MAX_SSID 5
typedef struct wl_extdscan_params {
- int8 nprobes; /* 0, passive, otherwise active */
- int8 split_scan; /* split scan */
- int8 band; /* band */
+ int8 nprobes; /**< 0, passive, otherwise active */
+ int8 split_scan; /**< split scan */
+ int8 band; /**< band */
int8 pad;
wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
- uint32 tx_rate; /* in 500ksec units */
- wl_scan_type_t scan_type; /* enum */
+ uint32 tx_rate; /**< in 500kbps units */
+ wl_scan_type_t scan_type; /**< enum */
int32 channel_num;
- chan_scandata_t channel_list[1]; /* list of chandata structs */
+ chan_scandata_t channel_list[1]; /**< list of chandata structs */
} wl_extdscan_params_t;
#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
#define WL_SCAN_PARAMS_SSID_MAX 10
typedef struct wl_scan_params {
- wlc_ssid_t ssid; /* default: {0, ""} */
- struct ether_addr bssid; /* default: bcast */
- int8 bss_type; /* default: any,
+ wlc_ssid_t ssid; /**< default: {0, ""} */
+ struct ether_addr bssid; /**< default: bcast */
+ int8 bss_type; /**< default: any,
* DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
*/
- uint8 scan_type; /* flags, 0 use default */
- int32 nprobes; /* -1 use default, number of probes per channel */
- int32 active_time; /* -1 use default, dwell time per channel for
+ uint8 scan_type; /**< flags, 0 use default */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
* active scanning
*/
- int32 passive_time; /* -1 use default, dwell time per channel
+ int32 passive_time; /**< -1 use default, dwell time per channel
* for passive scanning
*/
- int32 home_time; /* -1 use default, dwell time for the home channel
+ int32 home_time; /**< -1 use default, dwell time for the home channel
* between channel scans
*/
- int32 channel_num; /* count of channels and ssids that follow
+ int32 channel_num; /**< count of channels and ssids that follow
*
* low half is count of channels in channel_list, 0
* means default (use all available channels)
* parameter portion is assumed, otherwise ssid in
* the fixed portion is ignored
*/
- uint16 channel_list[1]; /* list of chanspecs */
+ uint16 channel_list[1]; /**< list of chanspecs */
} wl_scan_params_t;
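/*
 * Illustrative sketch (not part of the header): per the channel_num comment
 * above, the low half carries the channel count and the ssid count is packed
 * into the upper half.  The 16-bit split is an assumption here; the exact
 * shift/mask macros are not shown in this excerpt.
 */
static inline int32 wl_scan_params_pack_counts(uint16 nchannels, uint16 nssids)
{
	return (int32)(((uint32)nssids << 16) | nchannels);
}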
/* size of wl_scan_params not including variable length array */
/* 3 fields + size of wl_scan_params, not including variable length array */
#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
typedef struct wl_scan_results {
uint32 buflen;
wl_bss_info_t bss_info[1];
} wl_scan_results_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* size of wl_scan_results not including variable length array */
#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
#define ESCAN_REQ_VERSION 1
+/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_params {
uint32 version;
uint16 action;
#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
+/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_result {
uint32 buflen;
uint32 version;
#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
+typedef struct wl_gscan_result {
+ uint32 buflen;
+ uint32 version;
+ wl_gscan_bss_info_t bss_info[1];
+} wl_gscan_result_t;
+
+#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t))
+
/* incremental scan results struct */
typedef struct wl_iscan_results {
uint32 status;
typedef struct scanol_params {
uint32 version;
- uint32 flags; /* offload scanning flags */
- int32 active_time; /* -1 use default, dwell time per channel for active scanning */
- int32 passive_time; /* -1 use default, dwell time per channel for passive scanning */
- int32 idle_rest_time; /* -1 use default, time idle between scan cycle */
+ uint32 flags; /**< offload scanning flags */
+ int32 active_time; /**< -1 use default, dwell time per channel for active scanning */
+ int32 passive_time; /**< -1 use default, dwell time per channel for passive scanning */
+ int32 idle_rest_time; /**< -1 use default, time idle between scan cycle */
int32 idle_rest_time_multiplier;
int32 active_rest_time;
int32 active_rest_time_multiplier;
int32 scan_cycle_active_rest_multiplier;
int32 max_rest_time;
int32 max_scan_cycles;
- int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
int32 scan_start_delay;
uint32 nchannels;
uint32 ssid_count;
struct ether_addr bssid;
struct ether_addr mac;
} wl_probe_params_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WL_MAXRATES_IN_SET 16 /* max # of rates in a rateset */
+#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */
typedef struct wl_rateset {
- uint32 count; /* # rates in this set */
- uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ uint32 count; /**< # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
} wl_rateset_t;
typedef struct wl_rateset_args {
- uint32 count; /* # rates in this set */
- uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ uint32 count; /**< # rates in this set */
+ uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
} wl_rateset_args_t;
#define TXBF_RATE_OFDM_ALL 8
typedef struct wl_txbf_rateset {
- uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /* one for each stream */
- uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /* one for each stream */
- uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /* one for each stream */
- uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /* one for each stream */
- uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+ uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /**< one for each stream */
+ uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /**< one for each stream */
+ uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /**< one for each stream */
+ uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /**< one for each stream */
+ uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /**< bitmap of ofdm rates that enables txbf */
uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
uint8 txbf_rate_ofdm_cnt;
uint8 txbf_rate_ofdm_cnt_bcm;
/* used for association with a specific BSSID and chanspec list */
typedef struct wl_assoc_params {
- struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */
- uint16 bssid_cnt; /* 0: use chanspec_num, and the single bssid,
+ struct ether_addr bssid; /**< 00:00:00:00:00:00: broadcast scan */
+ uint16 bssid_cnt; /**< 0: use chanspec_num, and the single bssid,
* otherwise count of chanspecs in chanspec_list
* AND paired bssids following chanspec_list
* also, chanspec_num has to be set to zero
* for bssid list to be used
*/
- int32 chanspec_num; /* 0: all available channels,
+ int32 chanspec_num; /**< 0: all available channels,
* otherwise count of chanspecs in chanspec_list
*/
- chanspec_t chanspec_list[1]; /* list of chanspecs */
+ chanspec_t chanspec_list[1]; /**< list of chanspecs */
} wl_assoc_params_t;
#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
/* used for join with or without a specific bssid and channel list */
typedef struct wl_join_params {
wlc_ssid_t ssid;
- wl_assoc_params_t params; /* optional field, but it must include the fixed portion
+ wl_assoc_params_t params; /**< optional field, but it must include the fixed portion
* of the wl_assoc_params_t struct when it does present.
*/
} wl_join_params_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \
WL_ASSOC_PARAMS_FIXED_SIZE)
/* scan params for extended join */
typedef struct wl_join_scan_params {
- uint8 scan_type; /* 0 use default, active or passive scan */
- int32 nprobes; /* -1 use default, number of probes per channel */
- int32 active_time; /* -1 use default, dwell time per channel for
+ uint8 scan_type; /**< 0 use default, active or passive scan */
+ int32 nprobes; /**< -1 use default, number of probes per channel */
+ int32 active_time; /**< -1 use default, dwell time per channel for
* active scanning
*/
- int32 passive_time; /* -1 use default, dwell time per channel
+ int32 passive_time; /**< -1 use default, dwell time per channel
* for passive scanning
*/
- int32 home_time; /* -1 use default, dwell time for the home channel
+ int32 home_time; /**< -1 use default, dwell time for the home channel
* between channel scans
*/
} wl_join_scan_params_t;
/* extended join params */
typedef struct wl_extjoin_params {
- wlc_ssid_t ssid; /* {0, ""}: wildcard scan */
+ wlc_ssid_t ssid; /**< {0, ""}: wildcard scan */
wl_join_scan_params_t scan;
- wl_join_assoc_params_t assoc; /* optional field, but it must include the fixed portion
+ wl_join_assoc_params_t assoc; /**< optional field, but it must include the fixed portion
* of the wl_join_assoc_params_t struct when it does
* present.
*/
#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
-#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
-#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
+#define ANT_SELCFG_MAX 4 /**< max number of antenna configurations */
+#define MAX_STREAMS_SUPPORTED 4 /**< max number of streams supported */
typedef struct {
- uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
- uint8 num_antcfg; /* number of available antenna configurations */
+ uint8 ant_config[ANT_SELCFG_MAX]; /**< antenna configuration */
+ uint8 num_antcfg; /**< number of available antenna configurations */
} wlc_antselcfg_t;
typedef struct {
- uint32 duration; /* millisecs spent sampling this channel */
- uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
- /* move if cur bss moves channels) */
- uint32 congest_obss; /* traffic not in our bss */
- uint32 interference; /* millisecs detecting a non 802.11 interferer. */
- uint32 timestamp; /* second timestamp */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /**< second timestamp */
} cca_congest_t;
typedef struct {
- chanspec_t chanspec; /* Which channel? */
- uint8 num_secs; /* How many secs worth of data */
- cca_congest_t secs[1]; /* Data */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ cca_congest_t secs[1]; /**< Data */
} cca_congest_channel_req_t;
+typedef struct {
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest; /**< millisecs detecting busy CCA */
+ uint32 timestamp; /**< second timestamp */
+} cca_congest_simple_t;
+
+typedef struct {
+ uint16 status;
+ uint16 id;
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 len;
+ union {
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ int noise; /**< noise floor */
+ };
+} cca_chan_qual_event_t;
+
/* interference sources */
enum interference_source {
- ITFR_NONE = 0, /* interference */
- ITFR_PHONE, /* wireless phone */
- ITFR_VIDEO_CAMERA, /* wireless video camera */
- ITFR_MICROWAVE_OVEN, /* microwave oven */
- ITFR_BABY_MONITOR, /* wireless baby monitor */
- ITFR_BLUETOOTH, /* bluetooth */
- ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /* wireless camera or baby monitor */
- ITFR_BLUETOOTH_OR_BABY_MONITOR, /* bluetooth or baby monitor */
- ITFR_VIDEO_CAMERA_OR_PHONE, /* video camera or phone */
- ITFR_UNIDENTIFIED /* interference from unidentified source */
+ ITFR_NONE = 0, /**< interference */
+ ITFR_PHONE, /**< wireless phone */
+ ITFR_VIDEO_CAMERA, /**< wireless video camera */
+ ITFR_MICROWAVE_OVEN, /**< microwave oven */
+ ITFR_BABY_MONITOR, /**< wireless baby monitor */
+ ITFR_BLUETOOTH, /**< bluetooth */
+ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /**< wireless camera or baby monitor */
+ ITFR_BLUETOOTH_OR_BABY_MONITOR, /**< bluetooth or baby monitor */
+ ITFR_VIDEO_CAMERA_OR_PHONE, /**< video camera or phone */
+ ITFR_UNIDENTIFIED /**< interference from unidentified source */
};
/* structure for interference source report */
typedef struct {
- uint32 flags; /* flags. bit definitions below */
- uint32 source; /* last detected interference source */
- uint32 timestamp; /* second timestamp on interferenced flag change */
+ uint32 flags; /**< flags. bit definitions below */
+ uint32 source; /**< last detected interference source */
+ uint32 timestamp; /**< second timestamp on interferenced flag change */
} interference_source_rep_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
+#define WLC_CNTRY_BUF_SZ 4 /**< Country string is 3 bytes + NUL */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
typedef struct wl_country {
- char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in
+ char country_abbrev[WLC_CNTRY_BUF_SZ]; /**< nul-terminated country code used in
* the Country IE
*/
- int32 rev; /* revision specifier for ccode
+ int32 rev; /**< revision specifier for ccode
* on set, -1 indicates unspecified.
* on get, rev >= 0
*/
- char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code.
+ char ccode[WLC_CNTRY_BUF_SZ]; /**< nul-terminated built-in country code.
* variable length, but fixed size in
* struct allows simple allocation for
* expected country strings <= 3 chars.
*/
} wl_country_t;
+#define CCODE_INFO_VERSION 1
+
+typedef enum wl_ccode_role {
+ WLC_CCODE_ROLE_ACTIVE = 0,
+ WLC_CCODE_ROLE_HOST,
+ WLC_CCODE_ROLE_80211D_ASSOC,
+ WLC_CCODE_ROLE_80211D_SCAN,
+ WLC_CCODE_ROLE_DEFAULT,
+ WLC_CCODE_LAST
+} wl_ccode_role_t;
+#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST
+
+typedef struct wl_ccode_entry {
+ uint16 reserved;
+ uint8 band;
+ uint8 role;
+ char ccode[WLC_CNTRY_BUF_SZ];
+} wl_ccode_entry_t;
+
+typedef struct wl_ccode_info {
+ uint16 version;
+ uint16 count; /* Number of ccodes entries in the set */
+ wl_ccode_entry_t ccodelist[1];
+} wl_ccode_info_t;
+#define WL_CCODE_INFO_FIXED_LEN OFFSETOF(wl_ccode_info_t, ccodelist)
+
typedef struct wl_channels_in_country {
uint32 buflen;
uint32 band;
int8 type;
int8 flags;
chanspec_t chanspec;
- uint32 token; /* token for this measurement */
- uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
- uint32 tsf_l; /* TSF low 32-bits */
- uint32 dur; /* TUs */
+ uint32 token; /**< token for this measurement */
+ uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /**< TSF low 32-bits */
+ uint32 dur; /**< TUs */
} wl_rm_req_elt_t;
typedef struct wl_rm_req {
- uint32 token; /* overall measurement set token */
- uint32 count; /* number of measurement requests */
- void *cb; /* completion callback function: may be NULL */
- void *cb_arg; /* arg to completion callback function */
- wl_rm_req_elt_t req[1]; /* variable length block of requests */
+ uint32 token; /**< overall measurement set token */
+ uint32 count; /**< number of measurement requests */
+ void *cb; /**< completion callback function: may be NULL */
+ void *cb_arg; /**< arg to completion callback function */
+ wl_rm_req_elt_t req[1]; /**< variable length block of requests */
} wl_rm_req_t;
#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req)
int8 type;
int8 flags;
chanspec_t chanspec;
- uint32 token; /* token for this measurement */
- uint32 tsf_h; /* TSF high 32-bits of Measurement start time */
- uint32 tsf_l; /* TSF low 32-bits */
- uint32 dur; /* TUs */
- uint32 len; /* byte length of data block */
- uint8 data[1]; /* variable length data block */
+ uint32 token; /**< token for this measurement */
+ uint32 tsf_h; /**< TSF high 32-bits of Measurement start time */
+ uint32 tsf_l; /**< TSF low 32-bits */
+ uint32 dur; /**< TUs */
+ uint32 len; /**< byte length of data block */
+ uint8 data[1]; /**< variable length data block */
} wl_rm_rep_elt_t;
-#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding data block */
+#define WL_RM_REP_ELT_FIXED_LEN 24 /**< length excluding data block */
#define WL_RPI_REP_BIN_NUM 8
typedef struct wl_rm_rpi_rep {
} wl_rm_rpi_rep_t;
typedef struct wl_rm_rep {
- uint32 token; /* overall measurement set token */
- uint32 len; /* length of measurement report block */
- wl_rm_rep_elt_t rep[1]; /* variable length block of reports */
+ uint32 token; /**< overall measurement set token */
+ uint32 len; /**< length of measurement report block */
+ wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */
} wl_rm_rep_t;
#define WL_RM_REP_FIXED_LEN 8
-#ifdef BCMCCX
-
-#define LEAP_USER_MAX 32
-#define LEAP_DOMAIN_MAX 32
-#define LEAP_PASSWORD_MAX 32
-
-typedef struct wl_leap_info {
- wlc_ssid_t ssid;
- uint8 user_len;
- uchar user[LEAP_USER_MAX];
- uint8 password_len;
- uchar password[LEAP_PASSWORD_MAX];
- uint8 domain_len;
- uchar domain[LEAP_DOMAIN_MAX];
-} wl_leap_info_t;
-
-typedef struct wl_leap_list {
- uint32 buflen;
- uint32 version;
- uint32 count;
- wl_leap_info_t leap_info[1];
-} wl_leap_list_t;
-#endif /* BCMCCX */
typedef enum sup_auth_status {
/* Basic supplicant authentication states */
WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
/* Waiting to receive handshake msg M3 */
WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
- WLC_SUP_KEYXCHANGE_PREP_M4, /* Preparing to send handshake msg M4 */
- WLC_SUP_KEYXCHANGE_WAIT_G1, /* Waiting to receive handshake msg G1 */
- WLC_SUP_KEYXCHANGE_PREP_G2 /* Preparing to send handshake msg G2 */
+ WLC_SUP_KEYXCHANGE_PREP_M4, /**< Preparing to send handshake msg M4 */
+ WLC_SUP_KEYXCHANGE_WAIT_G1, /**< Waiting to receive handshake msg G1 */
+ WLC_SUP_KEYXCHANGE_PREP_G2 /**< Preparing to send handshake msg G2 */
} sup_auth_status_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
typedef struct wl_wsec_key {
- uint32 index; /* key index */
- uint32 len; /* key length */
- uint8 data[DOT11_MAX_KEY_SIZE]; /* key data */
+ uint32 index; /**< key index */
+ uint32 len; /**< key length */
+ uint8 data[DOT11_MAX_KEY_SIZE]; /**< key data */
uint32 pad_1[18];
- uint32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- uint32 flags; /* misc flags */
+ uint32 algo; /**< CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ uint32 flags; /**< misc flags */
uint32 pad_2[2];
int pad_3;
- int iv_initialized; /* has IV been initialized already? */
+ int iv_initialized; /**< has IV been initialized already? */
int pad_4;
/* Rx IV */
struct {
- uint32 hi; /* upper 32 bits of IV */
- uint16 lo; /* lower 16 bits of IV */
+ uint32 hi; /**< upper 32 bits of IV */
+ uint16 lo; /**< lower 16 bits of IV */
} rxiv;
uint32 pad_5[2];
- struct ether_addr ea; /* per station */
+ struct ether_addr ea; /**< per station */
} wl_wsec_key_t;
#define WSEC_MIN_PSK_LEN 8
/* receptacle for WLC_SET_WSEC_PMK parameter */
typedef struct {
- ushort key_len; /* octets in key material */
- ushort flags; /* key handling qualification */
- uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */
+ ushort key_len; /**< octets in key material */
+ ushort flags; /**< key handling qualification */
+ uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */
} wsec_pmk_t;
typedef struct _pmkid {
pmkid_cand_t pmkid_cand[1];
} pmkid_cand_list_t;
-#define WL_STA_ANT_MAX 4 /* max possible rx antennas */
+#define WL_STA_ANT_MAX 4 /**< max possible rx antennas */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
typedef struct wl_assoc_info {
uint32 req_len;
uint32 resp_len;
/* srom read/write struct passed through ioctl */
typedef struct {
- uint byteoff; /* byte offset */
- uint nbytes; /* number of bytes */
+ uint byteoff; /**< byte offset */
+ uint nbytes; /**< number of bytes */
uint16 buf[1];
} srom_rw_t;
#define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */
/* similar cis (srom or otp) struct [iovar: may not be aligned] */
typedef struct {
- uint16 source; /* cis source */
- uint16 flags; /* flags */
- uint32 byteoff; /* byte offset */
- uint32 nbytes; /* number of bytes */
+ uint16 source; /**< cis source */
+ uint16 flags; /**< flags */
+ uint32 byteoff; /**< byte offset */
+ uint32 nbytes; /**< number of bytes */
/* data follows here */
} cis_rw_t;
/* R_REG and W_REG struct passed through ioctl */
typedef struct {
- uint32 byteoff; /* byte offset of the field in d11regs_t */
- uint32 val; /* read/write value of the field */
- uint32 size; /* sizeof the field */
- uint band; /* band (optional) */
+ uint32 byteoff; /**< byte offset of the field in d11regs_t */
+ uint32 val; /**< read/write value of the field */
+ uint32 size; /**< sizeof the field */
+ uint band; /**< band (optional) */
} rw_reg_t;
/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
/* PCL - Power Control Loop */
typedef struct {
- uint16 auto_ctrl; /* WL_ATTEN_XX */
- uint16 bb; /* Baseband attenuation */
- uint16 radio; /* Radio attenuation */
- uint16 txctl1; /* Radio TX_CTL1 value */
+ uint16 auto_ctrl; /**< WL_ATTEN_XX */
+ uint16 bb; /**< Baseband attenuation */
+ uint16 radio; /**< Radio attenuation */
+ uint16 txctl1; /**< Radio TX_CTL1 value */
} atten_t;
/* Per-AC retry parameters */
#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
-typedef struct wl_plc_nodelist {
- uint32 count; /* Number of nodes */
- struct _node {
- struct ether_addr ea; /* Node ether address */
- uint32 node_type; /* Node type */
- uint32 cost; /* PLC affinity */
- } node[1];
-} wl_plc_nodelist_t;
-
-typedef struct wl_plc_params {
- uint32 cmd; /* Command */
- uint8 plc_failover; /* PLC failover control/status */
- struct ether_addr node_ea; /* Node ether address */
- uint32 cost; /* Link cost or mac cost */
-} wl_plc_params_t;
-
/* Used to get specific link/ac parameters */
typedef struct {
int32 ac;
#define WL_PM_MUTE_TX_VER 1
typedef struct wl_pm_mute_tx {
- uint16 version; /* version */
- uint16 len; /* length */
- uint16 deadline; /* deadline timer (in milliseconds) */
- uint8 enable; /* set to 1 to enable mode; set to 0 to disable it */
+ uint16 version; /**< version */
+ uint16 len; /**< length */
+ uint16 deadline; /**< deadline timer (in milliseconds) */
+ uint8 enable; /**< set to 1 to enable mode; set to 0 to disable it */
} wl_pm_mute_tx_t;
typedef struct {
- uint16 ver; /* version of this struct */
- uint16 len; /* length in bytes of this structure */
- uint16 cap; /* sta's advertised capabilities */
- uint32 flags; /* flags defined below */
- uint32 idle; /* time since data pkt rx'd from sta */
- struct ether_addr ea; /* Station address */
- wl_rateset_t rateset; /* rateset in use */
- uint32 in; /* seconds elapsed since associated */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint16 cap; /**< sta's advertised capabilities */
+ uint32 flags; /**< flags defined below */
+ uint32 idle; /**< time since data pkt rx'd from sta */
+ struct ether_addr ea; /**< Station address */
+ wl_rateset_t rateset; /**< rateset in use */
+ uint32 in; /**< seconds elapsed since associated */
uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */
- uint32 tx_pkts; /* # of user packets transmitted (unicast) */
- uint32 tx_failures; /* # of user packets failed */
- uint32 rx_ucast_pkts; /* # of unicast packets received */
- uint32 rx_mcast_pkts; /* # of multicast packets received */
- uint32 tx_rate; /* Rate used by last tx frame */
- uint32 rx_rate; /* Rate of last successful rx frame */
- uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
- uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */
- uint32 tx_tot_pkts; /* # of user tx pkts (ucast + mcast) */
- uint32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */
- uint32 tx_mcast_pkts; /* # of mcast pkts txed */
- uint64 tx_tot_bytes; /* data bytes txed (ucast + mcast) */
- uint64 rx_tot_bytes; /* data bytes recvd (ucast + mcast) */
- uint64 tx_ucast_bytes; /* data bytes txed (ucast) */
- uint64 tx_mcast_bytes; /* # data bytes txed (mcast) */
- uint64 rx_ucast_bytes; /* data bytes recvd (ucast) */
- uint64 rx_mcast_bytes; /* data bytes recvd (mcast) */
+ uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
+ uint32 tx_failures; /**< # of user packets failed */
+ uint32 rx_ucast_pkts; /**< # of unicast packets received */
+ uint32 rx_mcast_pkts; /**< # of multicast packets received */
+ uint32 tx_rate; /**< Rate used by last tx frame */
+ uint32 rx_rate; /**< Rate of last successful rx frame */
+ uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
+ uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
+ uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna
* of data frames
*/
- int8 nf[WL_STA_ANT_MAX]; /* per antenna noise floor */
- uint16 aid; /* association ID */
- uint16 ht_capabilities; /* advertised ht caps */
- uint16 vht_flags; /* converted vht flags */
- uint32 tx_pkts_retried; /* # of frames where a retry was
+ int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
+ uint16 aid; /**< association ID */
+ uint16 ht_capabilities; /**< advertised ht caps */
+ uint16 vht_flags; /**< converted vht flags */
+ uint32 tx_pkts_retried; /**< # of frames where a retry was
* necessary
*/
uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry
* Separated for host requested frames and WLAN locally generated frames.
* Include unicast frame only where the retries/failures can be counted.
*/
- uint32 tx_pkts_total; /* # user frames sent successfully */
- uint32 tx_pkts_retries; /* # user frames retries */
- uint32 tx_pkts_fw_total; /* # FW generated sent successfully */
- uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */
- uint32 tx_pkts_fw_retry_exhausted; /* # FW generated where a retry
+ uint32 tx_pkts_total; /**< # user frames sent successfully */
+ uint32 tx_pkts_retries; /**< # user frames retries */
+ uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
* was exhausted
*/
- uint32 rx_pkts_retried; /* # rx with retry bit set */
- uint32 tx_rate_fallback; /* lowest fallback TX rate */
+ uint32 rx_pkts_retried; /**< # rx with retry bit set */
+ uint32 tx_rate_fallback; /**< lowest fallback TX rate */
} sta_info_t;
#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts)
#define WL_STA_VER 4
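/*
 * Illustrative sketch (not part of the driver): sanity-checking a sta_info_t
 * buffer before reading the newer fields. The ver/len checks are an assumed
 * host-side guard against firmware that only fills the WL_OLD_STAINFO_SIZE
 * prefix; the function name and return convention are illustrative only.
 */
static int
sta_info_has_new_fields(const sta_info_t *sta, uint buflen)
{
	if (buflen < WL_OLD_STAINFO_SIZE || sta->len > buflen)
		return -1;	/* truncated or inconsistent buffer */
	if (sta->ver > WL_STA_VER)
		return -1;	/* newer layout than this header knows about */
	return sta->len >= sizeof(sta_info_t);	/* 1: full layout, 0: legacy prefix only */
}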
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-#define WLC_NUMRATES 16 /* max # of rates in a rateset */
+typedef struct {
+ uint32 auto_en;
+ uint32 active_ant;
+ uint32 rxcount;
+ int32 avg_snr_per_ant0;
+ int32 avg_snr_per_ant1;
+ int32 avg_snr_per_ant2;
+ uint32 swap_ge_rxcount0;
+ uint32 swap_ge_rxcount1;
+ uint32 swap_ge_snrthresh0;
+ uint32 swap_ge_snrthresh1;
+ uint32 swap_txfail0;
+ uint32 swap_txfail1;
+ uint32 swap_timer0;
+ uint32 swap_timer1;
+ uint32 swap_alivecheck0;
+ uint32 swap_alivecheck1;
+ uint32 rxcount_per_ant0;
+ uint32 rxcount_per_ant1;
+ uint32 acc_rxcount;
+ uint32 acc_rxcount_per_ant0;
+ uint32 acc_rxcount_per_ant1;
+ uint32 tx_auto_en;
+ uint32 tx_active_ant;
+ uint32 rx_policy;
+ uint32 tx_policy;
+ uint32 cell_policy;
+} wlc_swdiv_stats_t;
+
+#define WLC_NUMRATES 16 /**< max # of rates in a rateset */
typedef struct wlc_rateset {
- uint32 count; /* number of rates in rates[] */
- uint8 rates[WLC_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
- uint8 htphy_membership; /* HT PHY Membership */
- uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
- uint16 vht_mcsmap; /* supported vht mcs nss bit map */
+ uint32 count; /**< number of rates in rates[] */
+ uint8 rates[WLC_NUMRATES]; /**< rates in 500kbps units w/hi bit set if basic */
+ uint8 htphy_membership; /**< HT PHY Membership */
+ uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
+ uint16 vht_mcsmap; /**< supported vht mcs nss bit map */
+ uint16 vht_mcsmap_prop; /**< supported prop vht mcs nss bit map */
} wlc_rateset_t;
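/*
 * Illustrative sketch (not part of the driver, assumes host-side <stdio.h>):
 * decoding wlc_rateset_t.rates[]. Each entry is a rate in 500 kbps units with
 * the high bit set when the rate is basic, so 0x82 means "1 Mbps, basic".
 * The 0x7f/0x80 masks follow directly from that encoding; the function
 * itself is an assumption, not an existing driver routine.
 */
static void
dump_rateset(const wlc_rateset_t *rs)
{
	uint32 i;

	for (i = 0; i < rs->count && i < WLC_NUMRATES; i++) {
		uint8 r = rs->rates[i];
		uint kbps = (r & 0x7f) * 500;	/* strip basic-rate bit, scale to kbps */
		int basic = (r & 0x80) != 0;	/* high bit => basic rate */

		printf("rate[%u] = %u.%u Mbps%s\n", i,
		       kbps / 1000, (kbps % 1000) / 100, basic ? " (basic)" : "");
	}
}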
/* Used to get specific STA parameters */
/* For ioctls that take a list of MAC addresses */
typedef struct maclist {
- uint count; /* number of MAC addresses */
- struct ether_addr ea[1]; /* variable length array of MAC addresses */
+ uint count; /**< number of MAC addresses */
+ struct ether_addr ea[1]; /**< variable length array of MAC addresses */
} maclist_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* get pkt count struct passed through ioctl */
typedef struct get_pktcnt {
uint rx_good_pkt;
/* Get MAC specific rate histogram command */
typedef struct {
- struct ether_addr ea; /* MAC Address */
- uint8 ac_cat; /* Access Category */
- uint8 num_pkts; /* Number of packet entries to be averaged */
-} wl_mac_ratehisto_cmd_t; /* MAC Specific Rate Histogram command */
+ struct ether_addr ea; /**< MAC Address */
+ uint8 ac_cat; /**< Access Category */
+ uint8 num_pkts; /**< Number of packet entries to be averaged */
+} wl_mac_ratehisto_cmd_t; /**< MAC Specific Rate Histogram command */
/* Get MAC rate histogram response */
typedef struct {
- uint32 rate[DOT11_RATE_MAX + 1]; /* Rates */
- uint32 mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX]; /* MCS counts */
- uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /* VHT counts */
- uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */
-} wl_mac_ratehisto_res_t; /* MAC Specific Rate Histogram Response */
-
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+ uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */
+ uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */
+ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /**< VHT counts */
+ uint32 tsf_timer[2][2]; /**< Start and End time for 8bytes value */
+ uint32 prop11n_mcs[WLC_11N_LAST_PROP_MCS - WLC_11N_FIRST_PROP_MCS + 1]; /* MCS counts */
+} wl_mac_ratehisto_res_t; /**< MAC Specific Rate Histogram Response */
/* Linux network driver ioctl encoding */
typedef struct wl_ioctl {
- uint cmd; /* common ioctl definition */
- void *buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- uint8 set; /* 1=set IOCTL; 0=query IOCTL */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
+ uint cmd; /**< common ioctl definition */
+ void *buf; /**< pointer to user buffer */
+ uint len; /**< length of user buffer */
+ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */
+ uint used; /**< bytes read or written (optional) */
+ uint needed; /**< bytes needed (optional) */
} wl_ioctl_t;
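/*
 * Illustrative host-side sketch (not part of the driver, assumes <string.h>,
 * <sys/ioctl.h>, <net/if.h> and <linux/sockios.h>): issuing a query ioctl
 * with wl_ioctl_t. It assumes the usual wl-utility convention of passing the
 * structure through SIOCDEVPRIVATE in ifr_data on an AF_INET/SOCK_DGRAM
 * socket; the function name and reduced error handling are illustrative.
 */
static int
wl_query(int s, const char *ifname, uint cmd, void *buf, uint len)
{
	struct ifreq ifr;
	wl_ioctl_t ioc;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = cmd;	/* common ioctl definition, e.g. a WLC_GET_* code */
	ioc.buf = buf;	/* user buffer the driver fills */
	ioc.len = len;	/* length of that buffer */
	ioc.set = 0;	/* 0 = query IOCTL, 1 = set IOCTL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (char *)&ioc;

	return ioctl(s, SIOCDEVPRIVATE, &ifr);
}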
#ifdef CONFIG_COMPAT
typedef struct compat_wl_ioctl {
- uint cmd; /* common ioctl definition */
- uint32 buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- uint8 set; /* 1=set IOCTL; 0=query IOCTL */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
+ uint cmd; /**< common ioctl definition */
+ uint32 buf; /**< pointer to user buffer */
+ uint len; /**< length of user buffer */
+ uint8 set; /**< 1=set IOCTL; 0=query IOCTL */
+ uint used; /**< bytes read or written (optional) */
+ uint needed; /**< bytes needed (optional) */
} compat_wl_ioctl_t;
#endif /* CONFIG_COMPAT */
#define WL_NUM_RATES_VHT 10
#define WL_NUM_RATES_MCS32 1
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/*
* Structure for passing hardware and software
* revision info up from the driver.
*/
typedef struct wlc_rev_info {
- uint vendorid; /* PCI vendor id */
- uint deviceid; /* device id of chip */
- uint radiorev; /* radio revision */
- uint chiprev; /* chip revision */
- uint corerev; /* core revision */
- uint boardid; /* board identifier (usu. PCI sub-device id) */
- uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */
- uint boardrev; /* board revision */
- uint driverrev; /* driver version */
- uint ucoderev; /* microcode version */
- uint bus; /* bus type */
- uint chipnum; /* chip number */
- uint phytype; /* phy type */
- uint phyrev; /* phy revision */
- uint anarev; /* anacore rev */
- uint chippkg; /* chip package info */
- uint nvramrev; /* nvram revision number */
+ uint vendorid; /**< PCI vendor id */
+ uint deviceid; /**< device id of chip */
+ uint radiorev; /**< radio revision */
+ uint chiprev; /**< chip revision */
+ uint corerev; /**< core revision */
+ uint boardid; /**< board identifier (usu. PCI sub-device id) */
+ uint boardvendor; /**< board vendor (usu. PCI sub-vendor id) */
+ uint boardrev; /**< board revision */
+ uint driverrev; /**< driver version */
+ uint ucoderev; /**< microcode version */
+ uint bus; /**< bus type */
+ uint chipnum; /**< chip number */
+ uint phytype; /**< phy type */
+ uint phyrev; /**< phy revision */
+ uint anarev; /**< anacore rev */
+ uint chippkg; /**< chip package info */
+ uint nvramrev; /**< nvram revision number */
} wlc_rev_info_t;
#define WL_REV_INFO_LEGACY_LENGTH 48
#endif
-#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
-#define WL_PHY_PAVAR_VER 1 /* pavars version */
-#define WL_PHY_PAVARS2_NUM 3 /* a1, b0, b1 */
+#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */
+
+
+#define WL_PHY_PAVAR_VER 1 /**< pavars version */
+#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */
typedef struct wl_pavars2 {
- uint16 ver; /* version of this struct */
- uint16 len; /* len of this structure */
- uint16 inuse; /* driver return 1 for a1,b0,b1 in current band range */
- uint16 phy_type; /* phy type */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< len of this structure */
+ uint16 inuse; /**< driver return 1 for a1,b0,b1 in current band range */
+ uint16 phy_type; /**< phy type */
uint16 bandrange;
uint16 chain;
- uint16 inpa[WL_PHY_PAVARS2_NUM]; /* phy pavars for one band range */
+ uint16 inpa[WL_PHY_PAVARS2_NUM]; /**< phy pavars for one band range */
} wl_pavars2_t;
typedef struct wl_po {
- uint16 phy_type; /* Phy type */
+ uint16 phy_type; /**< Phy type */
uint16 band;
uint16 cckpo;
uint32 ofdmpo;
uint16 mcspo[8];
} wl_po_t;
-#define WL_NUM_RPCALVARS 5 /* number of rpcal vars */
+#define WL_NUM_RPCALVARS 5 /**< number of rpcal vars */
typedef struct wl_rpcal {
uint16 value;
int exit_aci_thresh; /* Trigger level to exit ACI mode */
int usec_spin; /* microsecs to delay between rssi samples */
int glitch_delay; /* interval between ACI scans when glitch count is consistently high */
- uint16 nphy_adcpwr_enter_thresh; /* ADC power to enter ACI mitigation mode */
- uint16 nphy_adcpwr_exit_thresh; /* ADC power to exit ACI mitigation mode */
- uint16 nphy_repeat_ctr; /* Number of tries per channel to compute power */
- uint16 nphy_num_samples; /* Number of samples to compute power on one channel */
- uint16 nphy_undetect_window_sz; /* num of undetects to exit ACI Mitigation mode */
- uint16 nphy_b_energy_lo_aci; /* low ACI power energy threshold for bphy */
- uint16 nphy_b_energy_md_aci; /* mid ACI power energy threshold for bphy */
- uint16 nphy_b_energy_hi_aci; /* high ACI power energy threshold for bphy */
+ uint16 nphy_adcpwr_enter_thresh; /**< ADC power to enter ACI mitigation mode */
+ uint16 nphy_adcpwr_exit_thresh; /**< ADC power to exit ACI mitigation mode */
+ uint16 nphy_repeat_ctr; /**< Number of tries per channel to compute power */
+ uint16 nphy_num_samples; /**< Number of samples to compute power on one channel */
+ uint16 nphy_undetect_window_sz; /**< num of undetects to exit ACI Mitigation mode */
+ uint16 nphy_b_energy_lo_aci; /**< low ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_md_aci; /**< mid ACI power energy threshold for bphy */
+ uint16 nphy_b_energy_hi_aci; /**< high ACI power energy threshold for bphy */
uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */
uint16 nphy_noise_noassoc_glitch_th_dn;
uint16 nphy_noise_assoc_glitch_th_up;
uint16 nphy_noise_crsidx_decr;
} wl_aci_args_t;
-#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */
-#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */
+#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */
+#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */
typedef struct wl_samplecollect_args {
/* version 0 fields */
uint8 coll_us;
uint8 gpio_sel;
uint8 downsamp;
uint8 be_deaf;
- uint8 agc; /* loop from init gain and going down */
- uint8 filter; /* override high pass corners to lowest */
+ uint8 agc; /**< loop from init gain and going down */
+ uint8 filter; /**< override high pass corners to lowest */
/* add'l version 2 fields */
uint8 trigger_state;
uint8 module_sel1;
uint32 gpioCapMask;
} wl_samplecollect_args_t;
-#define WL_SAMPLEDATA_T_VERSION 1 /* version of wl_samplecollect_args_t struct */
+#define WL_SAMPLEDATA_T_VERSION 1 /**< version of wl_samplecollect_args_t struct */
/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
typedef struct wl_sampledata {
- uint16 version; /* structure version */
- uint16 size; /* size of structure */
- uint16 tag; /* Header/Data */
- uint16 length; /* data length */
- uint32 flag; /* bit def */
+ uint16 version; /**< structure version */
+ uint16 size; /**< size of structure */
+ uint16 tag; /**< Header/Data */
+ uint16 length; /**< data length */
+ uint32 flag; /**< bit def */
} wl_sampledata_t;
/* WL_OTA START */
/* OTA Test Status */
enum {
- WL_OTA_TEST_IDLE = 0, /* Default Idle state */
- WL_OTA_TEST_ACTIVE = 1, /* Test Running */
- WL_OTA_TEST_SUCCESS = 2, /* Successfully Finished Test */
- WL_OTA_TEST_FAIL = 3 /* Test Failed in the Middle */
+ WL_OTA_TEST_IDLE = 0, /**< Default Idle state */
+ WL_OTA_TEST_ACTIVE = 1, /**< Test Running */
+ WL_OTA_TEST_SUCCESS = 2, /**< Successfully Finished Test */
+ WL_OTA_TEST_FAIL = 3 /**< Test Failed in the Middle */
};
/* OTA SYNC Status */
enum {
- WL_OTA_SYNC_IDLE = 0, /* Idle state */
- WL_OTA_SYNC_ACTIVE = 1, /* Waiting for Sync */
- WL_OTA_SYNC_FAIL = 2 /* Sync pkt not recieved */
+ WL_OTA_SYNC_IDLE = 0, /**< Idle state */
+ WL_OTA_SYNC_ACTIVE = 1, /**< Waiting for Sync */
+ WL_OTA_SYNC_FAIL = 2 /**< Sync pkt not received */
};
/* Various error states dut can get stuck during test */
enum {
- WL_OTA_SKIP_TEST_CAL_FAIL = 1, /* Phy calibration failed */
- WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /* Sync Packet not recieved */
- WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /* Cmd flow file download failed */
- WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /* No test found in Flow file */
- WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /* WL UP failed */
- WL_OTA_SKIP_TEST_UNKNOWN_CALL /* Unintentional scheduling on ota test */
+ WL_OTA_SKIP_TEST_CAL_FAIL = 1, /**< Phy calibration failed */
+ WL_OTA_SKIP_TEST_SYNCH_FAIL = 2, /**< Sync Packet not received */
+ WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /**< Cmd flow file download failed */
+ WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /**< No test found in Flow file */
+ WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /**< WL UP failed */
+ WL_OTA_SKIP_TEST_UNKNOWN_CALL /**< Unintentional scheduling on ota test */
};
/* Differentiator for ota_tx and ota_rx */
enum {
- WL_OTA_TEST_TX = 0, /* ota_tx */
- WL_OTA_TEST_RX = 1, /* ota_rx */
+ WL_OTA_TEST_TX = 0, /**< ota_tx */
+ WL_OTA_TEST_RX = 1, /**< ota_rx */
};
/* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */
enum {
- WL_OTA_TEST_BW_20_IN_40MHZ = 0, /* 20 in 40 operation */
- WL_OTA_TEST_BW_20MHZ = 1, /* 20 Mhz operation */
- WL_OTA_TEST_BW_40MHZ = 2 /* full 40Mhz operation */
+ WL_OTA_TEST_BW_20_IN_40MHZ = 0, /**< 20 in 40 operation */
+ WL_OTA_TEST_BW_20MHZ = 1, /**< 20 Mhz operation */
+ WL_OTA_TEST_BW_40MHZ = 2, /**< full 40Mhz operation */
+ WL_OTA_TEST_BW_80MHZ = 3 /* full 80Mhz operation */
};
+
+#define HT_MCS_INUSE 0x00000080 /* HT MCS in use, indicates b0-6 holds an mcs */
+#define VHT_MCS_INUSE 0x00000100 /* VHT MCS in use, indicates b0-6 holds an mcs */
+#define OTA_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OTA_STF_SISO 0
+#define OTA_STF_CDD 1
+#define OTA_STF_STBC 2
+#define OTA_STF_SDM 3
+
typedef struct ota_rate_info {
- uint8 rate_cnt; /* Total number of rates */
- uint8 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /* array of rates from 1mbps to 130mbps */
- /* for legacy rates : ratein mbps * 2 */
- /* for HT rates : mcs index */
+ uint8 rate_cnt; /**< Total number of rates */
+ uint16 rate_val_mbps[WL_OTA_TEST_MAX_NUM_RATE]; /**< array of rates from 1mbps to 130mbps */
+ /**< for legacy rates : rate in mbps * 2 */
+ /**< for HT rates : mcs index */
} ota_rate_info_t;
typedef struct ota_power_info {
- int8 pwr_ctrl_on; /* power control on/off */
- int8 start_pwr; /* starting power/index */
- int8 delta_pwr; /* delta power/index */
- int8 end_pwr; /* end power/index */
+ int8 pwr_ctrl_on; /**< power control on/off */
+ int8 start_pwr; /**< starting power/index */
+ int8 delta_pwr; /**< delta power/index */
+ int8 end_pwr; /**< end power/index */
} ota_power_info_t;
typedef struct ota_packetengine {
uint16 delay; /* Inter-packet delay */
- /* for ota_tx, delay is tx ifs in micro seconds */
+ /**< for ota_tx, delay is tx ifs in micro seconds */
/* for ota_rx, delay is wait time in milliseconds */
uint16 nframes; /* Number of frames */
uint16 length; /* Packet length */
/* Test info vector */
typedef struct wl_ota_test_args {
- uint8 cur_test; /* test phase */
- uint8 chan; /* channel */
- uint8 bw; /* bandwidth */
- uint8 control_band; /* control band */
- uint8 stf_mode; /* stf mode */
- ota_rate_info_t rt_info; /* Rate info */
- ota_packetengine_t pkteng; /* packeteng info */
- uint8 txant; /* tx antenna */
- uint8 rxant; /* rx antenna */
- ota_power_info_t pwr_info; /* power sweep info */
- uint8 wait_for_sync; /* wait for sync or not */
+ uint8 cur_test; /**< test phase */
+ uint8 chan; /**< channel */
+ uint8 bw; /**< bandwidth */
+ uint8 control_band; /**< control band */
+ uint8 stf_mode; /**< stf mode */
+ ota_rate_info_t rt_info; /**< Rate info */
+ ota_packetengine_t pkteng; /**< packeteng info */
+ uint8 txant; /**< tx antenna */
+ uint8 rxant; /**< rx antenna */
+ ota_power_info_t pwr_info; /**< power sweep info */
+ uint8 wait_for_sync; /**< wait for sync or not */
+ uint8 ldpc;
+ uint8 sgi;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
} wl_ota_test_args_t;
+#define WL_OTA_TESTVEC_T_VERSION 1 /* version of wl_ota_test_vector_t struct */
typedef struct wl_ota_test_vector {
- wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /* Test argument struct */
- uint16 test_cnt; /* Total no of test */
- uint8 file_dwnld_valid; /* File successfully downloaded */
- uint8 sync_timeout; /* sync packet timeout */
- int8 sync_fail_action; /* sync fail action */
- struct ether_addr sync_mac; /* macaddress for sync pkt */
- struct ether_addr tx_mac; /* macaddress for tx */
- struct ether_addr rx_mac; /* macaddress for rx */
- int8 loop_test; /* dbg feature to loop the test */
+ uint16 version;
+ wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /**< Test argument struct */
+ uint16 test_cnt; /**< Total no of test */
+ uint8 file_dwnld_valid; /**< File successfully downloaded */
+ uint8 sync_timeout; /**< sync packet timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< macaddress for tx */
+ struct ether_addr rx_mac; /**< macaddress for rx */
+ int8 loop_test; /**< dbg feature to loop the test */
+ uint16 test_rxcnt;
+ /* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
} wl_ota_test_vector_t;
/* struct copied back form dongle to host to query the status */
typedef struct wl_ota_test_status {
- int16 cur_test_cnt; /* test phase */
- int8 skip_test_reason; /* skip test reasoin */
- wl_ota_test_args_t test_arg; /* cur test arg details */
- uint16 test_cnt; /* total no of test downloaded */
- uint8 file_dwnld_valid; /* file successfully downloaded ? */
- uint8 sync_timeout; /* sync timeout */
- int8 sync_fail_action; /* sync fail action */
- struct ether_addr sync_mac; /* macaddress for sync pkt */
- struct ether_addr tx_mac; /* tx mac address */
- struct ether_addr rx_mac; /* rx mac address */
- uint8 test_stage; /* check the test status */
- int8 loop_test; /* Debug feature to puts test enfine in a loop */
- uint8 sync_status; /* sync status */
+ int16 cur_test_cnt; /**< test phase */
+ int8 skip_test_reason; /**< skip test reason */
+ wl_ota_test_args_t test_arg; /**< cur test arg details */
+ uint16 test_cnt; /**< total no of test downloaded */
+ uint8 file_dwnld_valid; /**< file successfully downloaded ? */
+ uint8 sync_timeout; /**< sync timeout */
+ int8 sync_fail_action; /**< sync fail action */
+ struct ether_addr sync_mac; /**< macaddress for sync pkt */
+ struct ether_addr tx_mac; /**< tx mac address */
+ struct ether_addr rx_mac; /**< rx mac address */
+ uint8 test_stage; /**< check the test status */
+ int8 loop_test; /**< Debug feature that puts the test engine in a loop */
+ uint8 sync_status; /**< sync status */
} wl_ota_test_status_t;
+typedef struct wl_ota_rx_rssi {
+ uint16 pktcnt; /* Pkt count used for this rx test */
+ chanspec_t chanspec; /* Channel info on which the packets are received */
+ int16 rssi; /* Average RSSI of the first 50% packets received */
+} wl_ota_rx_rssi_t;
+
+#define WL_OTARSSI_T_VERSION 1 /* version of wl_ota_test_rssi_t struct */
+#define WL_OTA_TEST_RSSI_FIXED_SIZE OFFSETOF(wl_ota_test_rssi_t, rx_rssi)
+
+typedef struct wl_ota_test_rssi {
+ uint8 version;
+ uint8 testcnt; /* total measured RSSI values, valid on output only */
+ wl_ota_rx_rssi_t rx_rssi[1]; /* Variable length array of wl_ota_rx_rssi_t */
+} wl_ota_test_rssi_t;
/* WL_OTA END */
/* wl_radar_args_t */
typedef struct {
- int npulses; /* required number of pulses at n * t_int */
- int ncontig; /* required number of pulses at t_int */
- int min_pw; /* minimum pulse width (20 MHz clocks) */
- int max_pw; /* maximum pulse width (20 MHz clocks) */
- uint16 thresh0; /* Radar detection, thresh 0 */
- uint16 thresh1; /* Radar detection, thresh 1 */
- uint16 blank; /* Radar detection, blank control */
- uint16 fmdemodcfg; /* Radar detection, fmdemod config */
+ int npulses; /**< required number of pulses at n * t_int */
+ int ncontig; /**< required number of pulses at t_int */
+ int min_pw; /**< minimum pulse width (20 MHz clocks) */
+ int max_pw; /**< maximum pulse width (20 MHz clocks) */
+ uint16 thresh0; /**< Radar detection, thresh 0 */
+ uint16 thresh1; /**< Radar detection, thresh 1 */
+ uint16 blank; /**< Radar detection, blank control */
+ uint16 fmdemodcfg; /**< Radar detection, fmdemod config */
int npulses_lp; /* Radar detection, minimum long pulses */
int min_pw_lp; /* Minimum pulsewidth for long pulses */
int max_pw_lp; /* Maximum pulsewidth for long pulses */
int max_span_lp; /* Maximum deltat for long pulses */
int min_deltat; /* Minimum spacing between pulses */
int max_deltat; /* Maximum spacing between pulses */
- uint16 autocorr; /* Radar detection, autocorr on or off */
- uint16 st_level_time; /* Radar detection, start_timing level */
+ uint16 autocorr; /**< Radar detection, autocorr on or off */
+ uint16 st_level_time; /**< Radar detection, start_timing level */
uint16 t2_min; /* minimum clocks needed to remain in state 2 */
uint32 version; /* version */
- uint32 fra_pulse_err; /* sample error margin for detecting French radar pulsed */
+ uint32 fra_pulse_err; /**< sample error margin for detecting French radar pulses */
int npulses_fra; /* Radar detection, minimum French pulses set */
int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */
int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */
- uint16 percal_mask; /* defines which period cal is masked from radar detection */
- int quant; /* quantization resolution to pulse positions */
- uint32 min_burst_intv_lp; /* minimum burst to burst interval for bin3 radar */
- uint32 max_burst_intv_lp; /* maximum burst to burst interval for bin3 radar */
- int nskip_rst_lp; /* number of skipped pulses before resetting lp buffer */
- int max_pw_tol; /* maximum tollerance allowed in detected pulse width for radar detection */
+ uint16 percal_mask; /**< defines which period cal is masked from radar detection */
+ int quant; /**< quantization resolution to pulse positions */
+ uint32 min_burst_intv_lp; /**< minimum burst to burst interval for bin3 radar */
+ uint32 max_burst_intv_lp; /**< maximum burst to burst interval for bin3 radar */
+ int nskip_rst_lp; /**< number of skipped pulses before resetting lp buffer */
+ int max_pw_tol; /**< maximum tolerance allowed in detected pulse width for radar detection */
uint16 feature_mask; /* 16-bit mask to specify enabled features */
} wl_radar_args_t;
/* RSSI per antenna */
typedef struct {
- uint32 version; /* version field */
- uint32 count; /* number of valid antenna rssi */
- int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */
+ uint32 version; /**< version field */
+ uint32 count; /**< number of valid antenna rssi */
+ int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
} wl_rssi_ant_t;
/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
typedef struct {
- uint state; /* noted by WL_DFS_CACSTATE_XX. */
- uint duration; /* time spent in ms in state. */
+ uint state; /**< noted by WL_DFS_CACSTATE_XX. */
+ uint duration; /**< time spent in ms in state. */
/* as dfs enters ISM state, it removes the operational channel from quiet channel
* list and notes the channel in channel_cleared. set to 0 if no channel is cleared
*/
uint16 pad;
} wl_dfs_status_t;
+typedef struct {
+ uint state; /* noted by WL_DFS_CACSTATE_XX */
+ uint duration; /* time spent in ms in state */
+ chanspec_t chanspec; /* chanspec of this core */
+ chanspec_t chanspec_last_cleared; /* chanspec last cleared for operation by scanning */
+ uint16 sub_type; /* currently just the index of the core or the respective PLL */
+ uint16 pad;
+} wl_dfs_sub_status_t;
+
+#define WL_DFS_STATUS_ALL_VERSION (1)
+typedef struct {
+ uint16 version; /* version field; current max version 1 */
+ uint16 num_sub_status;
+ wl_dfs_sub_status_t dfs_sub_status[1]; /* struct array of length num_sub_status */
+} wl_dfs_status_all_t;
+
+#define WL_DFS_AP_MOVE_VERSION (1)
+typedef struct wl_dfs_ap_move_status {
+ int8 version; /* version field; current max version 1 */
+ int8 move_status; /* DFS move status */
+ chanspec_t chanspec; /* New AP Chanspec */
+ wl_dfs_status_all_t scan_status; /* status; see dfs_status_all for wl_dfs_status_all_t */
+} wl_dfs_ap_move_status_t;
+
+
/* data structure used in 'radar_status' wl interface, which is use to query radar det status */
typedef struct {
bool detected;
#define NUM_PWRCTRL_RATES 12
typedef struct {
- uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */
- uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */
- uint8 txpwr_local_max; /* local max according to the AP */
- uint8 txpwr_local_constraint; /* local constraint according to the AP */
- uint8 txpwr_chan_reg_max; /* Regulatory max for this channel */
- uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 Ghz */
- uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
- uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */
- uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */
- uint8 txpwr_bphy_ofdm_max; /* Max OFDM power for this band (SROM) */
- uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */
- int8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */
- uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+ uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /**< User set target */
+ uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /**< reg and local power limit */
+ uint8 txpwr_local_max; /**< local max according to the AP */
+ uint8 txpwr_local_constraint; /**< local constraint according to the AP */
+ uint8 txpwr_chan_reg_max; /**< Regulatory max for this channel */
+ uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /**< Latest target for 2.4 and 5 Ghz */
+ uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */
+ uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /**< On G phy, OFDM power offset */
+ uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /**< Max CCK power for this band (SROM) */
+ uint8 txpwr_bphy_ofdm_max; /**< Max OFDM power for this band (SROM) */
+ uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /**< Max power for A band (SROM) */
+ int8 txpwr_antgain[2]; /**< Ant gain for each band - from SROM */
+ uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */
} tx_power_legacy_t;
#define WL_TX_POWER_RATES_LEGACY 45
uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */
} tx_power_legacy2_t;
-/* TX Power index defines */
-#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK
-#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM
-#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM
-#define WLC_NUM_RATES_MCS_2_STREAM WL_NUM_RATES_MCS_1STREAM
-#define WLC_NUM_RATES_MCS32 WL_NUM_RATES_MCS32
-#define WL_TX_POWER_CCK_NUM WL_NUM_RATES_CCK
-#define WL_TX_POWER_OFDM_NUM WL_NUM_RATES_OFDM
-#define WL_TX_POWER_MCS_1_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
-#define WL_TX_POWER_MCS_2_STREAM_NUM WL_NUM_RATES_MCS_1STREAM
-#define WL_TX_POWER_MCS_32_NUM WL_NUM_RATES_MCS32
-
#define WL_NUM_2x2_ELEMENTS 4
#define WL_NUM_3x3_ELEMENTS 6
+#define WL_NUM_4x4_ELEMENTS 10
typedef struct {
- uint16 ver; /* version of this struct */
- uint16 len; /* length in bytes of this structure */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
uint32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- uint32 buflen; /* ppr buffer length */
- uint8 pprbuf[1]; /* Latest target power buffer */
+ chanspec_t chanspec; /**< txpwr report for this channel */
+ chanspec_t local_chanspec; /**< channel on which we are associated */
+ uint32 buflen; /**< ppr buffer length */
+ uint8 pprbuf[1]; /**< Latest target power buffer */
} wl_txppr_t;
#define WL_TXPPR_VERSION 1
} chanspec_txpwr_max_t;
typedef struct wl_chanspec_txpwr_max {
- uint16 ver; /* version of this struct */
- uint16 len; /* length in bytes of this structure */
- uint32 count; /* number of elements of (chanspec, txpwr_max) pair */
- chanspec_txpwr_max_t txpwr[1]; /* array of (chanspec, max_txpwr) pair */
+ uint16 ver; /**< version of this struct */
+ uint16 len; /**< length in bytes of this structure */
+ uint32 count; /**< number of elements of (chanspec, txpwr_max) pair */
+ chanspec_txpwr_max_t txpwr[1]; /**< array of (chanspec, max_txpwr) pair */
} wl_chanspec_txpwr_max_t;
#define WL_CHANSPEC_TXPWR_MAX_VER 1
#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t))
typedef struct tx_inst_power {
- uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
- uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+ uint8 txpwr_est_Pout[2]; /**< Latest estimate for 2.4 and 5 Ghz */
+ uint8 txpwr_est_Pout_gofdm; /**< Pwr estimate for 2.4 OFDM */
} tx_inst_power_t;
#define WL_NUM_TXCHAIN_MAX 4
typedef struct wl_txchain_pwr_offsets {
- int8 offset[WL_NUM_TXCHAIN_MAX]; /* quarter dBm signed offset for each chain */
+ int8 offset[WL_NUM_TXCHAIN_MAX]; /**< quarter dBm signed offset for each chain */
} wl_txchain_pwr_offsets_t;
/* maximum channels returned by the get valid channels iovar */
#define WL_NUMCHANNELS 64
struct tsinfo_arg {
uint8 octets[3];
};
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define RATE_CCK_1MBPS 0
#define RATE_CCK_2MBPS 1
#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
typedef struct wl_bsstrans_rssi {
- int8 rssi_2g; /* RSSI in dbm for 2.4 G */
- int8 rssi_5g; /* RSSI in dbm for 5G, unused for cck */
+ int8 rssi_2g; /**< RSSI in dbm for 2.4 G */
+ int8 rssi_5g; /**< RSSI in dbm for 5G, unused for cck */
} wl_bsstrans_rssi_t;
-#define RSSI_RATE_MAP_MAX_STREAMS 4 /* max streams supported */
+#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */
/* RSSI to rate mapping, all 20Mhz, no SGI */
typedef struct wl_bsstrans_rssi_rate_map {
uint16 scans_allowed;
} wl_bsstrans_roamthrottle_t;
-#define NFIFO 6 /* # tx/rx fifopairs */
+#define NFIFO 6 /**< # tx/rx fifopairs */
#define NREINITREASONCOUNT 8
#define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0)
-#define WL_CNT_T_VERSION 10 /* current version of wl_cnt_t struct */
+#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */
+#define WL_CNT_VERSION_6 6
+#define WL_CNT_VERSION_11 11
+
+#define WLC_WITH_XTLV_CNT
+
+/*
+ * tlv IDs uniquely identifies counter component
+ * packed into wl_cmd_t container
+ */
+enum wl_cnt_xtlv_id {
+ WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */
+ WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */
+ WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */
+ WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */
+ WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */
+};
+
+/* The number of variables in wl macstat cnt struct.
+ * (wl_cnt_ge40mcst_v1_t, wl_cnt_lt40mcst_v1_t, wl_cnt_v_le10_mcst_t)
+ */
+#define WL_CNT_MCST_VAR_NUM 64
+/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */
+#define WL_CNT_MCST_STRUCT_SZ ((uint)sizeof(uint32) * WL_CNT_MCST_VAR_NUM)
+#define INVALID_CNT_VAL (uint32)(-1)
+#define WL_CNT_MCXST_STRUCT_SZ ((uint)sizeof(wl_cnt_ge64mcxst_v1_t))
+
+#define WL_XTLV_CNTBUF_MAX_SIZE ((uint)(OFFSETOF(wl_cnt_info_t, data)) + \
+ (uint)BCM_XTLV_HDR_SIZE + (uint)sizeof(wl_cnt_wlc_t) + \
+ (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCST_STRUCT_SZ + \
+ (uint)BCM_XTLV_HDR_SIZE + WL_CNT_MCXST_STRUCT_SZ)
+
+#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint)sizeof(wl_cnt_ver_11_t))
+
+/* Top structure of counters IOVar buffer */
typedef struct {
- uint16 version; /* see definition of WL_CNT_T_VERSION */
- uint16 length; /* length of entire structure */
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 datalen; /**< length of data including all paddings. */
+ uint8 data [1]; /**< variable length payload:
+ * 1 or more bcm_xtlv_t type of tuples.
+ * each tuple is padded to multiple of 4 bytes.
+ * 'datalen' field of this structure includes all paddings.
+ */
+} wl_cnt_info_t;
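/*
 * Illustrative sketch (not part of the driver, assumes host-side <stdio.h>):
 * walking the XTLV tuples carried in a counters IOVar response. It assumes
 * the common 2-byte-id / 2-byte-length little-endian XTLV header
 * (BCM_XTLV_HDR_SIZE == 4) and the padding rule stated in the wl_cnt_info_t
 * comment above (each tuple padded to a 4-byte boundary); the function name
 * is illustrative. An id such as WL_CNT_XTLV_WLC marks the wl_cnt_wlc_t
 * payload defined below.
 */
static void
walk_cnt_xtlvs(const wl_cnt_info_t *ci)
{
	const uint8 *p = ci->data;
	const uint8 *end = ci->data + ci->datalen;

	while (p + 4 <= end) {
		uint16 id  = (uint16)(p[0] | (p[1] << 8));	/* little-endian tuple id */
		uint16 len = (uint16)(p[2] | (p[3] << 8));	/* little-endian payload length */

		printf("xtlv id 0x%x len %u\n", id, len);
		p += 4 + ((len + 3) & ~3u);	/* header + payload padded to 4 bytes */
	}
}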
+/* wlc layer counters */
+typedef struct {
/* transmit stat counters */
- uint32 txframe; /* tx data frames */
- uint32 txbyte; /* tx data bytes */
- uint32 txretrans; /* tx mac retransmits */
- uint32 txerror; /* tx data errors (derived: sum of others) */
- uint32 txctl; /* tx management frames */
- uint32 txprshort; /* tx short preamble frames */
- uint32 txserr; /* tx status errors */
- uint32 txnobuf; /* tx out of buffers errors */
- uint32 txnoassoc; /* tx discard because we're not associated */
- uint32 txrunt; /* tx runt frames */
- uint32 txchit; /* tx header cache hit (fastpath) */
- uint32 txcmiss; /* tx header cache miss (slowpath) */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txerror; /**< tx data errors (derived: sum of others) */
+ uint32 txctl; /**< tx management frames */
+ uint32 txprshort; /**< tx short preamble frames */
+ uint32 txserr; /**< tx status errors */
+ uint32 txnobuf; /**< tx out of buffers errors */
+ uint32 txnoassoc; /**< tx discard because we're not associated */
+ uint32 txrunt; /**< tx runt frames */
+ uint32 txchit; /**< tx header cache hit (fastpath) */
+ uint32 txcmiss; /**< tx header cache miss (slowpath) */
/* transmit chip error counters */
- uint32 txuflo; /* tx fifo underflows */
- uint32 txphyerr; /* tx phy errors (indicated in tx status) */
+ uint32 txuflo; /**< tx fifo underflows */
+ uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
uint32 txphycrs;
/* receive stat counters */
- uint32 rxframe; /* rx data frames */
- uint32 rxbyte; /* rx data bytes */
- uint32 rxerror; /* rx data errors (derived: sum of others) */
- uint32 rxctl; /* rx management frames */
- uint32 rxnobuf; /* rx out of buffers errors */
- uint32 rxnondata; /* rx non data frames in the data channel errors */
- uint32 rxbadds; /* rx bad DS errors */
- uint32 rxbadcm; /* rx bad control or management frames */
- uint32 rxfragerr; /* rx fragmentation errors */
- uint32 rxrunt; /* rx runt frames */
- uint32 rxgiant; /* rx giant frames */
- uint32 rxnoscb; /* rx no scb error */
- uint32 rxbadproto; /* rx invalid frames */
- uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
- uint32 rxbadda; /* rx frames tossed for invalid da */
- uint32 rxfilter; /* rx frames filtered out */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+ uint32 rxerror; /**< rx data errors (derived: sum of others) */
+ uint32 rxctl; /**< rx management frames */
+ uint32 rxnobuf; /**< rx out of buffers errors */
+ uint32 rxnondata; /**< rx non data frames in the data channel errors */
+ uint32 rxbadds; /**< rx bad DS errors */
+ uint32 rxbadcm; /**< rx bad control or management frames */
+ uint32 rxfragerr; /**< rx fragmentation errors */
+ uint32 rxrunt; /**< rx runt frames */
+ uint32 rxgiant; /**< rx giant frames */
+ uint32 rxnoscb; /**< rx no scb error */
+ uint32 rxbadproto; /**< rx invalid frames */
+ uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+ uint32 rxbadda; /**< rx frames tossed for invalid da */
+ uint32 rxfilter; /**< rx frames filtered out */
/* receive chip error counters */
- uint32 rxoflo; /* rx fifo overflow errors */
- uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
+ uint32 rxoflo; /**< rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
- uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
- uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
- uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
+ uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
/* misc counters */
- uint32 dmade; /* tx/rx dma descriptor errors */
- uint32 dmada; /* tx/rx dma data errors */
- uint32 dmape; /* tx/rx dma descriptor protocol errors */
- uint32 reset; /* reset count */
- uint32 tbtt; /* cnts the TBTT int's */
+ uint32 dmade; /**< tx/rx dma descriptor errors */
+ uint32 dmada; /**< tx/rx dma data errors */
+ uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+ uint32 reset; /**< reset count */
+ uint32 tbtt; /**< cnts the TBTT int's */
uint32 txdmawar;
- uint32 pkt_callback_reg_fail; /* callbacks register failure */
+ uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
+ uint32 txfrag; /**< dot11TransmittedFragmentCount */
+ uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /**< dot11FailedCount */
+ uint32 txretry; /**< dot11RetryCount */
+ uint32 txretrie; /**< dot11MultipleRetryCount */
+ uint32 rxdup; /**< dot11FrameduplicateCount */
+ uint32 txrts; /**< dot11RTSSuccessCount */
+ uint32 txnocts; /**< dot11RTSFailureCount */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 rxfrag; /**< dot11ReceivedFragmentCount */
+ uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /**< dot11FCSErrorCount */
+ uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /**< TKIPReplays */
+ uint32 ccmpfmterr; /**< CCMPFormatErrors */
+ uint32 ccmpreplay; /**< CCMPReplays */
+ uint32 ccmpundec; /**< CCMPDecryptErrors */
+ uint32 fourwayfail; /**< FourWayHandshakeFailures */
+ uint32 wepundec; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess; /**< DecryptSuccessCount */
+ uint32 tkipicverr; /**< TKIPICVErrorCount */
+ uint32 wepexcluded; /**< dot11WEPExcludedCount */
+
+ uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /**< Count PSM watchdogs */
+ uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */
+
+ /* MBSS counters, AP only */
+ uint32 prq_entries_handled; /**< PRQ entries read in */
+ uint32 prq_undirected_entries; /**< which were bcast bss & ssid */
+ uint32 prq_bad_entries; /**< which could not be translated to info */
+ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */
+ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
+ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */
+ /* per-rate receive stat counters */
+ uint32 rx1mbps; /* packets rx at 1Mbps */
+ uint32 rx2mbps; /* packets rx at 2Mbps */
+ uint32 rx5mbps5; /* packets rx at 5.5Mbps */
+ uint32 rx6mbps; /* packets rx at 6Mbps */
+ uint32 rx9mbps; /* packets rx at 9Mbps */
+ uint32 rx11mbps; /* packets rx at 11Mbps */
+ uint32 rx12mbps; /* packets rx at 12Mbps */
+ uint32 rx18mbps; /* packets rx at 18Mbps */
+ uint32 rx24mbps; /* packets rx at 24Mbps */
+ uint32 rx36mbps; /* packets rx at 36Mbps */
+ uint32 rx48mbps; /* packets rx at 48Mbps */
+ uint32 rx54mbps; /* packets rx at 54Mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx162mbps; /* packets rx at 162mbps */
+ uint32 rx216mbps; /* packets rx at 216 mbps */
+ uint32 rx270mbps; /* packets rx at 270 mbps */
+ uint32 rx324mbps; /* packets rx at 324 mbps */
+ uint32 rx378mbps; /* packets rx at 378 mbps */
+ uint32 rx432mbps; /* packets rx at 432 mbps */
+ uint32 rx486mbps; /* packets rx at 486 mbps */
+ uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ uint32 rfdisable; /**< count of radio disables */
+
+ uint32 txexptime; /**< Tx frames suppressed due to timer expiration */
+
+ uint32 txmpdu_sgi; /**< count for sgi transmit */
+ uint32 rxmpdu_sgi; /**< count for sgi received */
+ uint32 txmpdu_stbc; /**< count for stbc transmit */
+ uint32 rxmpdu_stbc; /**< count for stbc received */
+
+ uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */
+
+ /* WPA2 counters (see rxundec for DecryptFailureCount) */
+ uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /**< TKIPReplays */
+ uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /**< CCMPReplays */
+ uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /**< DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */
+
+ uint32 dma_hang; /**< count for dma hang */
+ uint32 reinit; /**< count for reinit */
+
+ uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */
+
+ uint32 cso_passthrough; /* hw cso required but passthrough */
+ uint32 cso_normal; /**< hw cso hdr for normal process */
+ uint32 chained; /**< number of frames chained */
+ uint32 chainedsz1; /**< number of chain size 1 frames */
+ uint32 unchained; /**< number of frames not chained */
+ uint32 maxchainsz; /**< max chain size so far */
+ uint32 currchainsz; /**< current chain size */
+ uint32 pciereset; /**< Secondary Bus Reset issued by driver */
+ uint32 cfgrestore; /**< configspace restore by driver */
+ uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
+ uint32 rxrtry;
+
+ uint32 rxmpdu_mu; /* Number of MU MPDUs received */
+
+ /* detailed control/management frames */
+ uint32 txbar; /**< Number of TX BAR */
+ uint32 rxbar; /**< Number of RX BAR */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+ uint32 rxpspoll; /**< Number of RX PS-poll */
+ uint32 txnull; /**< Number of TX NULL_DATA */
+ uint32 rxnull; /**< Number of RX NULL_DATA */
+ uint32 txqosnull; /**< Number of TX NULL_QoSDATA */
+ uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */
+ uint32 txassocreq; /**< Number of TX ASSOC request */
+ uint32 rxassocreq; /**< Number of RX ASSOC request */
+ uint32 txreassocreq; /**< Number of TX REASSOC request */
+ uint32 rxreassocreq; /**< Number of RX REASSOC request */
+ uint32 txdisassoc; /**< Number of TX DISASSOC */
+ uint32 rxdisassoc; /**< Number of RX DISASSOC */
+ uint32 txassocrsp; /**< Number of TX ASSOC response */
+ uint32 rxassocrsp; /**< Number of RX ASSOC response */
+ uint32 txreassocrsp; /**< Number of TX REASSOC response */
+ uint32 rxreassocrsp; /**< Number of RX REASSOC response */
+ uint32 txauth; /**< Number of TX AUTH */
+ uint32 rxauth; /**< Number of RX AUTH */
+ uint32 txdeauth; /**< Number of TX DEAUTH */
+ uint32 rxdeauth; /**< Number of RX DEAUTH */
+ uint32 txprobereq; /**< Number of TX probe request */
+ uint32 rxprobereq; /**< Number of RX probe request */
+ uint32 txprobersp; /**< Number of TX probe response */
+ uint32 rxprobersp; /**< Number of RX probe response */
+ uint32 txaction; /**< Number of TX action frame */
+ uint32 rxaction; /**< Number of RX action frame */
+} wl_cnt_wlc_t;
+
+/* MACXSTAT counters for ucodex (corerev >= 64) */
+typedef struct {
+ uint32 macxsusp;
+ uint32 m2vmsg;
+ uint32 v2mmsg;
+ uint32 mboxout;
+ uint32 musnd;
+ uint32 sfb2v;
+} wl_cnt_ge64mcxst_v1_t;
+
+/* MACSTAT counters for ucode (corerev >= 40) */
+typedef struct {
/* MAC counters: 32-bit version of d11.h's macstat_t */
- uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
* Control Management (includes retransmissions)
*/
- uint32 txrtsfrm; /* number of RTS sent out by the MAC */
- uint32 txctsfrm; /* number of CTS sent out by the MAC */
- uint32 txackfrm; /* number of ACK frames sent out */
- uint32 txdnlfrm; /* Not used */
- uint32 txbcnfrm; /* beacons transmitted */
- uint32 txfunfl[6]; /* per-fifo tx underflows */
- uint32 rxtoolate; /* receive too late */
- uint32 txfbw; /* transmit at fallback bw (dynamic bw) */
- uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmissions generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txampdu; /**< number of AMPDUs transmitted */
+ uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
* or BCN)
*/
- uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
* driver enqueued frames
*/
- uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
- uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
- uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
+	uint32	pktengrxducast;	/**< unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast;	/**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+	uint32	rxdtucastobss;	/**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxnodelim;	/**< number of times no valid delimiter was detected by the ampdu parser */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
+ uint32 rxhlovfl; /**< number of length / header fifo overflows */
+	uint32	missbcn_dbg;	/**< number of beacons missed (expected but not received) */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_ge40mcst_v1_t;
+
+/* MACSTAT counters for ucode (corerev < 40) */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txampdu; /**< number of AMPDUs transmitted */
+ uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
+ uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+ uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+ uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+	uint32	rxdtucastobss;	/**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+	uint32	rxnodelim;	/**< number of times no valid delimiter was detected by the ampdu parser */
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 dbgoff46;
+ uint32 dbgoff47;
+ uint32 dbgoff48; /**< Used for counting txstatus queue overflow (corerev <= 4) */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
+ uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
+ uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 phywatch;
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_lt40mcst_v1_t;
+
+/* MACSTAT counters for "wl counter" version <= 10 */
+typedef struct {
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 PAD0; /**< number of MPDUs transmitted */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+	uint32	pktengrxducast;	/**< unicast frames rxed by the pkteng code */
+	uint32	pktengrxdmcast;	/**< multicast frames rxed by the pkteng code */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
+ * (i.e. passing parity check)
+ */
+	uint32	rxdfrmucastmbss; /**< number of received DATA frames with good FCS and matching RA */
+	uint32	rxmfrmucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
+ * (unlikely to see these)
+ */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
+ uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
+ * other BSS (WDS FRAME)
+ */
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
+ * expecting a response
+ */
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 PAD1;
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /**< number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
+ * the PRQ fifo
+ */
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
+ * not get ACK
+ */
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
+ * fifo because a probe response could not be sent out within
+ * the time limit defined in M_PRS_MAXTIME
+ */
+ uint32 rxnack; /**< obsolete */
+ uint32 frmscons; /**< obsolete */
+ uint32 txnack; /**< obsolete */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
+} wl_cnt_v_le10_mcst_t;
+
+typedef struct {
+ uint16 version; /**< see definition of WL_CNT_T_VERSION */
+ uint16 length; /**< length of entire structure */
+
+ /* transmit stat counters */
+ uint32 txframe; /**< tx data frames */
+ uint32 txbyte; /**< tx data bytes */
+ uint32 txretrans; /**< tx mac retransmits */
+ uint32 txerror; /**< tx data errors (derived: sum of others) */
+ uint32 txctl; /**< tx management frames */
+ uint32 txprshort; /**< tx short preamble frames */
+ uint32 txserr; /**< tx status errors */
+ uint32 txnobuf; /**< tx out of buffers errors */
+ uint32 txnoassoc; /**< tx discard because we're not associated */
+ uint32 txrunt; /**< tx runt frames */
+ uint32 txchit; /**< tx header cache hit (fastpath) */
+ uint32 txcmiss; /**< tx header cache miss (slowpath) */
+
+ /* transmit chip error counters */
+ uint32 txuflo; /**< tx fifo underflows */
+ uint32 txphyerr; /**< tx phy errors (indicated in tx status) */
+ uint32 txphycrs;
+
+ /* receive stat counters */
+ uint32 rxframe; /**< rx data frames */
+ uint32 rxbyte; /**< rx data bytes */
+ uint32 rxerror; /**< rx data errors (derived: sum of others) */
+ uint32 rxctl; /**< rx management frames */
+ uint32 rxnobuf; /**< rx out of buffers errors */
+ uint32 rxnondata; /**< rx non data frames in the data channel errors */
+ uint32 rxbadds; /**< rx bad DS errors */
+ uint32 rxbadcm; /**< rx bad control or management frames */
+ uint32 rxfragerr; /**< rx fragmentation errors */
+ uint32 rxrunt; /**< rx runt frames */
+ uint32 rxgiant; /**< rx giant frames */
+ uint32 rxnoscb; /**< rx no scb error */
+ uint32 rxbadproto; /**< rx invalid frames */
+ uint32 rxbadsrcmac; /**< rx frames with Invalid Src Mac */
+ uint32 rxbadda; /**< rx frames tossed for invalid da */
+ uint32 rxfilter; /**< rx frames filtered out */
+
+ /* receive chip error counters */
+ uint32 rxoflo; /**< rx fifo overflow errors */
+ uint32 rxuflo[NFIFO]; /**< rx dma descriptor underflow errors */
+
+ uint32 d11cnt_txrts_off; /**< d11cnt txrts value when reset d11cnt */
+ uint32 d11cnt_rxcrc_off; /**< d11cnt rxcrc value when reset d11cnt */
+ uint32 d11cnt_txnocts_off; /**< d11cnt txnocts value when reset d11cnt */
+
+ /* misc counters */
+ uint32 dmade; /**< tx/rx dma descriptor errors */
+ uint32 dmada; /**< tx/rx dma data errors */
+ uint32 dmape; /**< tx/rx dma descriptor protocol errors */
+ uint32 reset; /**< reset count */
+ uint32 tbtt; /**< cnts the TBTT int's */
+ uint32 txdmawar;
+ uint32 pkt_callback_reg_fail; /**< callbacks register failure */
+
+ /* MAC counters: 32-bit version of d11.h's macstat_t */
+ uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
+ * Control Management (includes retransmissions)
+ */
+ uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
+ uint32 txctsfrm; /**< number of CTS sent out by the MAC */
+ uint32 txackfrm; /**< number of ACK frames sent out */
+ uint32 txdnlfrm; /**< Not used */
+ uint32 txbcnfrm; /**< beacons transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 rxtoolate; /**< receive too late */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
+ * or BCN)
+ */
+ uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
+ * driver enqueued frames
+ */
+ uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
+ uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
* data/control/management
*/
- uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
- uint32 rxbadplcp; /* parity check of the PLCP header failed */
- uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
- uint32 rxstrt; /* Number of received frames with a good PLCP
+ uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
+ uint32 rxbadplcp; /**< parity check of the PLCP header failed */
+ uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
+ uint32 rxstrt; /**< Number of received frames with a good PLCP
* (i.e. passing parity check)
*/
uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
- uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
- uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
- uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
- uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
- uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
- uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
- uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
- uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
- uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
+ uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
+ uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
+ uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
* (unlikely to see these)
*/
- uint32 rxbeaconmbss; /* beacons received from member of BSS */
+ uint32 rxbeaconmbss; /**< beacons received from member of BSS */
uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
* other BSS (WDS FRAME)
*/
- uint32 rxbeaconobss; /* beacons received from other BSS */
- uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
+ uint32 rxbeaconobss; /**< beacons received from other BSS */
+ uint32 rxrsptmout; /**< Number of response timeouts for transmitted frames
* expecting a response
*/
- uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
- uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
- uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
- uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
- uint32 pmqovfl; /* Number of PMQ overflows */
- uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
+ uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
+ uint32 rxf0ovfl; /**< Number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
+ uint32 pmqovfl; /**< Number of PMQ overflows */
+ uint32 rxcgprqfrm; /**< Number of received Probe requests that made it into
* the PRQ fifo
*/
- uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */
- uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
+	uint32	rxcgprsqovfl;	/**< Rx Probe Request Queue overflow in the AP */
+ uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
* not get ACK
*/
- uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
- uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
+ uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
+ uint32 prs_timeout; /**< Number of probe requests that were dropped from the PRQ
* fifo because a probe response could not be sent out within
* the time limit defined in M_PRS_MAXTIME
*/
- uint32 rxnack; /* obsolete */
- uint32 frmscons; /* obsolete */
- uint32 txnack; /* obsolete */
- uint32 rxback; /* blockack rxcnt */
- uint32 txback; /* blockack txcnt */
+ uint32 rxnack; /**< obsolete */
+ uint32 frmscons; /**< obsolete */
+ uint32 txnack; /**< obsolete */
+ uint32 rxback; /**< blockack rxcnt */
+ uint32 txback; /**< blockack txcnt */
/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
- uint32 txfrag; /* dot11TransmittedFragmentCount */
- uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
- uint32 txfail; /* dot11FailedCount */
- uint32 txretry; /* dot11RetryCount */
- uint32 txretrie; /* dot11MultipleRetryCount */
- uint32 rxdup; /* dot11FrameduplicateCount */
- uint32 txrts; /* dot11RTSSuccessCount */
- uint32 txnocts; /* dot11RTSFailureCount */
- uint32 txnoack; /* dot11ACKFailureCount */
- uint32 rxfrag; /* dot11ReceivedFragmentCount */
- uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
- uint32 rxcrc; /* dot11FCSErrorCount */
- uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
- uint32 rxundec; /* dot11WEPUndecryptableCount */
+ uint32 txfrag; /**< dot11TransmittedFragmentCount */
+ uint32 txmulti; /**< dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /**< dot11FailedCount */
+ uint32 txretry; /**< dot11RetryCount */
+ uint32 txretrie; /**< dot11MultipleRetryCount */
+ uint32 rxdup; /**< dot11FrameduplicateCount */
+ uint32 txrts; /**< dot11RTSSuccessCount */
+ uint32 txnocts; /**< dot11RTSFailureCount */
+ uint32 txnoack; /**< dot11ACKFailureCount */
+ uint32 rxfrag; /**< dot11ReceivedFragmentCount */
+ uint32 rxmulti; /**< dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /**< dot11FCSErrorCount */
+ uint32 txfrmsnt; /**< dot11TransmittedFrameCount (bogus MIB?) */
+ uint32 rxundec; /**< dot11WEPUndecryptableCount */
/* WPA2 counters (see rxundec for DecryptFailureCount) */
- uint32 tkipmicfaill; /* TKIPLocalMICFailures */
- uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
- uint32 tkipreplay; /* TKIPReplays */
- uint32 ccmpfmterr; /* CCMPFormatErrors */
- uint32 ccmpreplay; /* CCMPReplays */
- uint32 ccmpundec; /* CCMPDecryptErrors */
- uint32 fourwayfail; /* FourWayHandshakeFailures */
- uint32 wepundec; /* dot11WEPUndecryptableCount */
- uint32 wepicverr; /* dot11WEPICVErrorCount */
- uint32 decsuccess; /* DecryptSuccessCount */
- uint32 tkipicverr; /* TKIPICVErrorCount */
- uint32 wepexcluded; /* dot11WEPExcludedCount */
-
- uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
- uint32 psmwds; /* Count PSM watchdogs */
- uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
+ uint32 tkipmicfaill; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay; /**< TKIPReplays */
+ uint32 ccmpfmterr; /**< CCMPFormatErrors */
+ uint32 ccmpreplay; /**< CCMPReplays */
+ uint32 ccmpundec; /**< CCMPDecryptErrors */
+ uint32 fourwayfail; /**< FourWayHandshakeFailures */
+ uint32 wepundec; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess; /**< DecryptSuccessCount */
+ uint32 tkipicverr; /**< TKIPICVErrorCount */
+ uint32 wepexcluded; /**< dot11WEPExcludedCount */
+
+ uint32 txchanrej; /**< Tx frames suppressed due to channel rejection */
+ uint32 psmwds; /**< Count PSM watchdogs */
+ uint32 phywatchdog; /**< Count Phy watchdogs (triggered by ucode) */
/* MBSS counters, AP only */
- uint32 prq_entries_handled; /* PRQ entries read in */
- uint32 prq_undirected_entries; /* which were bcast bss & ssid */
- uint32 prq_bad_entries; /* which could not be translated to info */
- uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
- uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
+ uint32 prq_entries_handled; /**< PRQ entries read in */
+ uint32 prq_undirected_entries; /**< which were bcast bss & ssid */
+ uint32 prq_bad_entries; /**< which could not be translated to info */
+ uint32 atim_suppress_count; /**< TX suppressions on ATIM fifo */
+ uint32 bcn_template_not_ready; /**< Template marked in use on send bcn ... */
uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
- uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
+ uint32 late_tbtt_dpc; /**< TBTT DPC did not happen in time */
/* per-rate receive stat counters */
uint32 rx1mbps; /* packets rx at 1Mbps */
uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
- uint32 rfdisable; /* count of radio disables */
- uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+ uint32 rfdisable; /**< count of radio disables */
+ uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
uint32 bphy_badplcp;
- uint32 txexptime; /* Tx frames suppressed due to timer expiration */
+ uint32 txexptime; /**< Tx frames suppressed due to timer expiration */
- uint32 txmpdu_sgi; /* count for sgi transmit */
- uint32 rxmpdu_sgi; /* count for sgi received */
- uint32 txmpdu_stbc; /* count for stbc transmit */
- uint32 rxmpdu_stbc; /* count for stbc received */
+ uint32 txmpdu_sgi; /**< count for sgi transmit */
+ uint32 rxmpdu_sgi; /**< count for sgi received */
+ uint32 txmpdu_stbc; /**< count for stbc transmit */
+ uint32 rxmpdu_stbc; /**< count for stbc received */
- uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
+ uint32 rxundec_mcst; /**< dot11WEPUndecryptableCount */
/* WPA2 counters (see rxundec for DecryptFailureCount) */
- uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
- uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
- uint32 tkipreplay_mcst; /* TKIPReplays */
- uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
- uint32 ccmpreplay_mcst; /* CCMPReplays */
- uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
- uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
- uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
- uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
- uint32 decsuccess_mcst; /* DecryptSuccessCount */
- uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
- uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
-
- uint32 dma_hang; /* count for dma hang */
- uint32 reinit; /* count for reinit */
-
- uint32 pstatxucast; /* count of ucast frames xmitted on all psta assoc */
- uint32 pstatxnoassoc; /* count of txnoassoc frames xmitted on all psta assoc */
- uint32 pstarxucast; /* count of ucast frames received on all psta assoc */
- uint32 pstarxbcmc; /* count of bcmc frames received on all psta */
- uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */
+ uint32 tkipmicfaill_mcst; /**< TKIPLocalMICFailures */
+ uint32 tkipcntrmsr_mcst; /**< TKIPCounterMeasuresInvoked */
+ uint32 tkipreplay_mcst; /**< TKIPReplays */
+ uint32 ccmpfmterr_mcst; /**< CCMPFormatErrors */
+ uint32 ccmpreplay_mcst; /**< CCMPReplays */
+ uint32 ccmpundec_mcst; /**< CCMPDecryptErrors */
+ uint32 fourwayfail_mcst; /**< FourWayHandshakeFailures */
+ uint32 wepundec_mcst; /**< dot11WEPUndecryptableCount */
+ uint32 wepicverr_mcst; /**< dot11WEPICVErrorCount */
+ uint32 decsuccess_mcst; /**< DecryptSuccessCount */
+ uint32 tkipicverr_mcst; /**< TKIPICVErrorCount */
+ uint32 wepexcluded_mcst; /**< dot11WEPExcludedCount */
+
+ uint32 dma_hang; /**< count for dma hang */
+ uint32 reinit; /**< count for reinit */
+
+ uint32 pstatxucast; /**< count of ucast frames xmitted on all psta assoc */
+ uint32 pstatxnoassoc; /**< count of txnoassoc frames xmitted on all psta assoc */
+ uint32 pstarxucast; /**< count of ucast frames received on all psta assoc */
+ uint32 pstarxbcmc; /**< count of bcmc frames received on all psta */
+ uint32 pstatxbcmc; /**< count of bcmc frames transmitted on all psta */
uint32 cso_passthrough; /* hw cso required but passthrough */
- uint32 cso_normal; /* hw cso hdr for normal process */
- uint32 chained; /* number of frames chained */
- uint32 chainedsz1; /* number of chain size 1 frames */
- uint32 unchained; /* number of frames not chained */
- uint32 maxchainsz; /* max chain size so far */
- uint32 currchainsz; /* current chain size */
- uint32 rxdrop20s; /* drop secondary cnt */
- uint32 pciereset; /* Secondary Bus Reset issued by driver */
- uint32 cfgrestore; /* configspace restore by driver */
+ uint32 cso_normal; /**< hw cso hdr for normal process */
+ uint32 chained; /**< number of frames chained */
+ uint32 chainedsz1; /**< number of chain size 1 frames */
+ uint32 unchained; /**< number of frames not chained */
+ uint32 maxchainsz; /**< max chain size so far */
+ uint32 currchainsz; /**< current chain size */
+ uint32 rxdrop20s; /**< drop secondary cnt */
+ uint32 pciereset; /**< Secondary Bus Reset issued by driver */
+ uint32 cfgrestore; /**< configspace restore by driver */
uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
-} wl_cnt_t;
+ uint32 rxrtry; /**< num of received packets with retry bit on */
+ uint32 txmpdu; /**< macstat cnt only valid in ver 11. number of MPDUs txed. */
+ uint32 rxnodelim; /**< macstat cnt only valid in ver 11.
+ * number of occasions that no valid delimiter is detected
+ * by ampdu parser.
+ */
+	uint32	rxmpdu_mu;	/**< Number of MU MPDUs received */
+
+ /* detailed control/management frames */
+ uint32 txbar; /**< Number of TX BAR */
+ uint32 rxbar; /**< Number of RX BAR */
+ uint32 txpspoll; /**< Number of TX PS-poll */
+ uint32 rxpspoll; /**< Number of RX PS-poll */
+ uint32 txnull; /**< Number of TX NULL_DATA */
+ uint32 rxnull; /**< Number of RX NULL_DATA */
+ uint32 txqosnull; /**< Number of TX NULL_QoSDATA */
+ uint32 rxqosnull; /**< Number of RX NULL_QoSDATA */
+ uint32 txassocreq; /**< Number of TX ASSOC request */
+ uint32 rxassocreq; /**< Number of RX ASSOC request */
+ uint32 txreassocreq; /**< Number of TX REASSOC request */
+ uint32 rxreassocreq; /**< Number of RX REASSOC request */
+ uint32 txdisassoc; /**< Number of TX DISASSOC */
+ uint32 rxdisassoc; /**< Number of RX DISASSOC */
+ uint32 txassocrsp; /**< Number of TX ASSOC response */
+ uint32 rxassocrsp; /**< Number of RX ASSOC response */
+ uint32 txreassocrsp; /**< Number of TX REASSOC response */
+ uint32 rxreassocrsp; /**< Number of RX REASSOC response */
+ uint32 txauth; /**< Number of TX AUTH */
+ uint32 rxauth; /**< Number of RX AUTH */
+ uint32 txdeauth; /**< Number of TX DEAUTH */
+ uint32 rxdeauth; /**< Number of RX DEAUTH */
+ uint32 txprobereq; /**< Number of TX probe request */
+ uint32 rxprobereq; /**< Number of RX probe request */
+ uint32 txprobersp; /**< Number of TX probe response */
+ uint32 rxprobersp; /**< Number of RX probe response */
+ uint32 txaction; /**< Number of TX action frame */
+ uint32 rxaction; /**< Number of RX action frame */
+
+} wl_cnt_ver_11_t;
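/* Editor-added illustrative sketch, not part of the driver patch: a host-side
 * consumer would typically validate the leading version/length pair before
 * interpreting an iovar buffer as one of the counter layouts above. The
 * version value 11 for wl_cnt_ver_11_t and the helper name are assumptions.
 */
#if 0	/* illustrative example only */
static const wl_cnt_ver_11_t *
example_cast_cnt_v11(const void *buf, uint32 buf_len)
{
	const wl_cnt_ver_11_t *cnt = (const wl_cnt_ver_11_t *)buf;

	if (buf_len < sizeof(*cnt) || cnt->version != 11 || cnt->length > buf_len)
		return NULL;	/* not a layout this consumer understands */
	return cnt;
}
#endif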
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
typedef struct {
uint16 version; /* see definition of WL_CNT_T_VERSION */
uint16 length; /* length of entire structure */
uint32 rxmpdu_stbc; /* count for stbc received */
uint32 rxdrop20s; /* drop secondary cnt */
-
-} wl_cnt_ver_six_t;
+} wl_cnt_ver_6_t;
#define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t struct */
} wl_delta_stats_t;
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-/* structure to store per-rate rx statistics */
-typedef struct wl_scb_rx_rate_stats {
- uint32 rx1mbps[2]; /* packets rx at 1Mbps */
- uint32 rx2mbps[2]; /* packets rx at 2Mbps */
- uint32 rx5mbps5[2]; /* packets rx at 5.5Mbps */
- uint32 rx6mbps[2]; /* packets rx at 6Mbps */
- uint32 rx9mbps[2]; /* packets rx at 9Mbps */
- uint32 rx11mbps[2]; /* packets rx at 11Mbps */
- uint32 rx12mbps[2]; /* packets rx at 12Mbps */
- uint32 rx18mbps[2]; /* packets rx at 18Mbps */
- uint32 rx24mbps[2]; /* packets rx at 24Mbps */
- uint32 rx36mbps[2]; /* packets rx at 36Mbps */
- uint32 rx48mbps[2]; /* packets rx at 48Mbps */
- uint32 rx54mbps[2]; /* packets rx at 54Mbps */
-} wl_scb_rx_rate_stats_t;
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
typedef struct {
uint32 packets;
uint32 bytes;
} wl_wme_cnt_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
struct wl_msglevel2 {
uint32 low;
uint32 high;
uint32 lifetime; /* Packet lifetime value in ms */
} wl_lifetime_t;
+
/* Channel Switch Announcement param */
typedef struct wl_chan_switch {
uint8 mode; /* value 0 or 1 */
#define PFN_PARTIAL_SCAN_BIT 0
#define PFN_PARTIAL_SCAN_MASK 1
+#define PFN_SWC_RSSI_WINDOW_MAX 8
+#define PFN_SWC_MAX_NUM_APS 16
+#define PFN_HOTLIST_MAX_NUM_APS 64
/* PFN network info structure */
typedef struct wl_pfn_subnet_info {
wl_pfn_net_info_t netinfo[1];
} wl_pfn_scanresults_t;
+typedef struct wl_pfn_significant_net {
+ uint16 flags;
+ uint16 channel;
+ struct ether_addr BSSID;
+ int8 rssi[PFN_SWC_RSSI_WINDOW_MAX];
+} wl_pfn_significant_net_t;
+
+
+typedef struct wl_pfn_swc_results {
+ uint32 version;
+ uint32 pkt_count;
+ uint32 total_count;
+ wl_pfn_significant_net_t list[1];
+} wl_pfn_swc_results_t;
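/* Editor-added illustrative sketch, not part of the driver patch: walking the
 * variable-length list[] carried in wl_pfn_swc_results_t. Reading pkt_count as
 * the number of entries present in this buffer, and the "latest non-zero RSSI"
 * heuristic, are assumptions of this sketch.
 */
#if 0	/* illustrative example only */
static void
example_walk_swc_results(const wl_pfn_swc_results_t *res)
{
	uint32 i, j;

	for (i = 0; i < res->pkt_count; i++) {
		const wl_pfn_significant_net_t *net = &res->list[i];
		int8 last_rssi = net->rssi[0];

		/* rssi[] keeps up to PFN_SWC_RSSI_WINDOW_MAX samples per BSSID */
		for (j = 1; j < PFN_SWC_RSSI_WINDOW_MAX; j++) {
			if (net->rssi[j] != 0)
				last_rssi = net->rssi[j];
		}
		(void)last_rssi;	/* hand (BSSID, channel, RSSI history) to the host */
	}
}
#endif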
+
/* used to report exactly one scan result */
/* plus reports detailed scan info in bss_info */
typedef struct wl_pfn_scanresult {
/* Bit4: suppress_lost, Bit3: suppress_found */
uint16 flags;
} wl_pfn_bssid_t;
+
+typedef struct wl_pfn_significant_bssid {
+ struct ether_addr macaddr;
+ int8 rssi_low_threshold;
+ int8 rssi_high_threshold;
+} wl_pfn_significant_bssid_t;
#define WL_PFN_SUPPRESSFOUND_MASK 0x08
#define WL_PFN_SUPPRESSLOST_MASK 0x10
#define WL_PFN_RSSI_MASK 0xff00
uint16 channel_list[WL_NUMCHANNELS];
uint32 flags;
} wl_pfn_cfg_t;
+
+#define CH_BUCKET_REPORT_REGULAR 0
+#define CH_BUCKET_REPORT_FULL_RESULT 2
+#define CH_BUCKET_GSCAN 4
+
+
+typedef struct wl_pfn_gscan_channel_bucket {
+ uint16 bucket_end_index;
+ uint8 bucket_freq_multiple;
+ uint8 report_flag;
+} wl_pfn_gscan_channel_bucket_t;
+
+#define GSCAN_SEND_ALL_RESULTS_MASK (1 << 0)
+#define GSCAN_CFG_FLAGS_ONLY_MASK (1 << 7)
+
+typedef struct wl_pfn_gscan_cfg {
+ /* BIT0 1 = send probes/beacons to HOST
+ * BIT2 Reserved
+ * Add any future flags here
+ * BIT7 1 = no other useful cfg sent
+ */
+ uint8 flags;
+ /* Buffer filled threshold in % to generate an event */
+ uint8 buffer_threshold;
+	/* Number of BSSIDs that must report a "change" (RSSI threshold
+	 * crossed or AP lost) before an event is generated
+	 */
+ uint8 swc_nbssid_threshold;
+ /* Max=8 (for now) Size of rssi cache buffer */
+ uint8 swc_rssi_window_size;
+ uint16 count_of_channel_buckets;
+ uint16 lost_ap_window;
+ wl_pfn_gscan_channel_bucket_t channel_bucket[1];
+} wl_pfn_gscan_cfg_t;
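/* Editor-added illustrative sketch, not part of the driver patch: filling a
 * single-bucket wl_pfn_gscan_cfg_t in a caller-provided buffer. Only the
 * types and masks defined above are used; the helper name and the chosen
 * threshold values are hypothetical.
 */
#if 0	/* illustrative example only */
static int
example_fill_gscan_cfg(void *buf, uint32 buf_len)
{
	wl_pfn_gscan_cfg_t *cfg = (wl_pfn_gscan_cfg_t *)buf;

	/* the struct already embeds channel_bucket[1], so one bucket fits as-is */
	if (buf_len < sizeof(wl_pfn_gscan_cfg_t))
		return -1;
	cfg->flags = GSCAN_SEND_ALL_RESULTS_MASK;	/* BIT0: forward results to host */
	cfg->buffer_threshold = 80;			/* event at 80% buffer fill */
	cfg->swc_nbssid_threshold = 0;
	cfg->swc_rssi_window_size = PFN_SWC_RSSI_WINDOW_MAX;
	cfg->lost_ap_window = 0;
	cfg->count_of_channel_buckets = 1;
	cfg->channel_bucket[0].bucket_end_index = 0;
	cfg->channel_bucket[0].bucket_freq_multiple = 1;	/* scan every base period */
	cfg->channel_bucket[0].report_flag = CH_BUCKET_REPORT_REGULAR;
	return 0;
}
#endif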
+
+
#define WL_PFN_REPORT_ALLNET 0
#define WL_PFN_REPORT_SSIDNET 1
#define WL_PFN_REPORT_BSSIDNET 2
-
#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */
wl_pfn_t pfn[1];
} wl_pfn_list_t;
+#define WL_PFN_MAC_OUI_ONLY_MASK 1
+#define WL_PFN_SET_MAC_UNASSOC_MASK 2
+/* To configure pfn_macaddr */
+typedef struct wl_pfn_macaddr_cfg {
+ uint8 version;
+ uint8 flags;
+ struct ether_addr macaddr;
+} wl_pfn_macaddr_cfg_t;
+#define WL_PFN_MACADDR_CFG_VER 1
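/* Editor-added illustrative sketch, not part of the driver patch: filling
 * wl_pfn_macaddr_cfg_t for a randomized PNO MAC. The reading of the two mask
 * bits (keep only the OUI of the supplied address; apply it while
 * unassociated) is inferred from their names and is an assumption here.
 */
#if 0	/* illustrative example only */
static void
example_fill_pfn_macaddr(wl_pfn_macaddr_cfg_t *cfg, const struct ether_addr *base)
{
	cfg->version = WL_PFN_MACADDR_CFG_VER;
	cfg->flags = WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK;
	cfg->macaddr = *base;	/* only the OUI portion is expected to be honored */
}
#endif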
typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
wlc_ssid_t ssid;
uint32 cipher_type;
#define MSCAN_MAX 90
#endif
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+/*
+ * WLFCTS definition
+ */
+typedef struct wl_txstatus_additional_info {
+ uint32 rspec;
+ uint32 enq_ts;
+ uint32 last_ts;
+ uint32 entry_ts;
+ uint16 seq;
+ uint8 rts_cnt;
+ uint8 tx_cnt;
+} wl_txstatus_additional_info_t;
/* Service discovery */
typedef struct {
typedef struct {
uint16 period; /* extended listen period */
uint16 interval; /* extended listen interval */
+ uint16 count; /* count to repeat */
+ uint16 pad; /* pad for 32bit align */
} wl_p2po_listen_t;
/* GAS state machine tunable parameters. Structure field values of 0 means use the default. */
struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */
} wl_anqpo_ignore_bssid_list_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
struct toe_ol_stats_t {
/* Num of tx packets that don't need to be checksummed */
* Dongle pattern matching filter.
*/
-/* Packet filter operation mode */
-/* True: 1; False: 0 */
-#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1
-/* Enable and disable pkt_filter as a whole */
-#define PKT_FILTER_MODE_DISABLE 2
-/* Cache first matched rx pkt(be queried by host later) */
-#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4
-/* If pkt_filter is enabled and no filter is set, don't forward anything */
-#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-/* Ports only filter mode */
-#define PKT_FILTER_MODE_PORTS_ONLY 16
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \
/* Secured WOWL packet was encrypted, need decrypted before check filter match */
typedef struct wl_pkt_decrypter {
- uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
- void* dec_ctx;
+ uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+ void* dec_ctx;
} wl_pkt_decrypter_t;
/* Pattern matching filter. Specifies an offset within received packets to
* that indicates which bits within the pattern should be matched.
*/
typedef struct wl_pkt_filter_pattern {
-// terence 20150525: fix pkt filter error -14 in 64bit OS
-// union {
- uint32 offset; /* Offset within received packet to start pattern matching.
+ uint32 offset; /* Offset within received packet to start pattern matching.
* Offset '0' is the first byte of the ethernet header.
*/
-// wl_pkt_decrypter_t* decrypt_ctx; /* Decrypt context */
-// };
uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
- * at offset 0. Pattern immediately follows mask.
+					 * at offset 0. Pattern immediately follows the mask. For a
+					 * secured pattern, the decrypter pointer is placed at the
+					 * beginning, and the mask and pattern are shifted accordingly.
*/
} wl_pkt_filter_pattern_t;
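/* Editor-added illustrative sketch, not part of the driver patch: laying out
 * the variable-length mask_and_pattern[] area for the plain (unsecured) case
 * described above -- the mask occupies the first size_bytes, the pattern the
 * next size_bytes. The helper name and return convention are hypothetical.
 */
#if 0	/* illustrative example only */
static uint32
example_fill_filter_pattern(wl_pkt_filter_pattern_t *pf, uint32 offset,
	const uint8 *mask, const uint8 *pattern, uint32 size_bytes)
{
	pf->offset = offset;		/* byte offset from the start of the Ethernet header */
	pf->size_bytes = size_bytes;	/* mask and pattern are each this long */
	memcpy(&pf->mask_and_pattern[0], mask, size_bytes);
	memcpy(&pf->mask_and_pattern[size_bytes], pattern, size_bytes);
	/* total bytes consumed by this record (mask_and_pattern[1] already counted once) */
	return sizeof(wl_pkt_filter_pattern_t) - 1 + (2 * size_bytes);
}
#endif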
#define WL_PKT_FILTER_PORTS_VERSION 0
#define WL_PKT_FILTER_PORTS_MAX 128
-#define RSN_KCK_LENGTH 16
-#define RSN_KEK_LENGTH 16
#define RSN_REPLAY_LEN 8
typedef struct _gtkrefresh {
uchar KCK[RSN_KCK_LENGTH];
*/
} wl_rssi_event_t;
+/* CCA based channel quality event configuration */
+#define WL_CHAN_QUAL_CCA 0
+#define WL_CHAN_QUAL_NF 1
+#define WL_CHAN_QUAL_NF_LTE 2
+#define WL_CHAN_QUAL_TOTAL 3
+
+#define MAX_CHAN_QUAL_LEVELS 8
+
+typedef struct wl_chan_qual_metric {
+ uint8 id; /* metric ID */
+	uint8 num_levels;	/* Number of entries in the htol[]/ltoh[] arrays below */
+ uint16 flags;
+ int16 htol[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: hi-to-lo */
+ int16 ltoh[MAX_CHAN_QUAL_LEVELS]; /* threshold level array: lo-to-hi */
+} wl_chan_qual_metric_t;
+
+typedef struct wl_chan_qual_event {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint16 flags;
+ uint16 num_metrics;
+ wl_chan_qual_metric_t metric[WL_CHAN_QUAL_TOTAL]; /* metric array */
+} wl_chan_qual_event_t;
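/* Editor-added illustrative sketch, not part of the driver patch: configuring
 * one CCA-based channel quality metric with two hysteresis levels, using the
 * hi-to-lo (htol) and lo-to-hi (ltoh) crossing thresholds described above.
 * The threshold values and helper name are hypothetical.
 */
#if 0	/* illustrative example only */
static void
example_fill_chan_qual(wl_chan_qual_event_t *ev)
{
	memset(ev, 0, sizeof(*ev));
	ev->rate_limit_msec = 1000;		/* at most one event per second */
	ev->num_metrics = 1;
	ev->metric[0].id = WL_CHAN_QUAL_CCA;	/* CCA-based metric */
	ev->metric[0].num_levels = 2;
	ev->metric[0].htol[0] = 20;		/* falling (hi-to-lo) thresholds */
	ev->metric[0].htol[1] = 50;
	ev->metric[0].ltoh[0] = 30;		/* rising (lo-to-hi) thresholds */
	ev->metric[0].ltoh[1] = 60;
}
#endif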
+
typedef struct wl_action_obss_coex_req {
uint8 info;
uint8 num;
#define PKTQ_LOG_AUTO (1 << 31)
#define PKTQ_LOG_DEF_PREC (1 << 30)
+
+#define LEGACY1_WL_PFN_MACADDR_CFG_VER 0
+
+#define WL_PFN_MAC_OUI_ONLY_MASK 1
+#define WL_PFN_SET_MAC_UNASSOC_MASK 2
+#define WL_PFN_RESTRICT_LA_MAC_MASK 4
+#define WL_PFN_MACADDR_FLAG_MASK 0x7
+
+
/*
* SCB_BS_DATA iovar definitions start.
*/
} wl_ioctl_overlay_t;
#endif /* DONGLEOVERLAYS */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-/* 11k Neighbor Report element */
+/* 11k Neighbor Report element (unversioned, deprecated) */
typedef struct nbr_element {
uint8 id;
uint8 len;
uint8 pad;
} nbr_element_t;
+#define WL_RRM_NBR_RPT_VER 1
+/* 11k Neighbor Report element */
+typedef struct nbr_rpt_elem {
+ uint8 version;
+ uint8 id;
+ uint8 len;
+ uint8 pad;
+ struct ether_addr bssid;
+ uint8 pad_1[2];
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+ uint8 pad_2;
+ wlc_ssid_t ssid;
+ uint8 bss_trans_preference;
+ uint8 pad_3[3];
+} nbr_rpt_elem_t;
typedef enum event_msgs_ext_command {
EVENTMSGS_NONE = 0,
#define MAX_ROAMOFFL_BSSID_NUM 100
typedef BWL_PRE_PACKED_STRUCT struct roamoffl_bssid_list {
- int cnt;
+ int32 cnt;
struct ether_addr bssid[1];
} BWL_POST_PACKED_STRUCT roamoffl_bssid_list_t;
#define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */
#define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */
#define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */
-#define WL_PWRSTATS_TYPE_PM_AWAKE 3 /* struct wl_pwr_pm_awake_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE1 3 /* struct wl_pwr_pm_awake_stats_v1 */
#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
#define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /* struct wl_pwr_pm_awake_stats_v2 */
/* Bits for wake reasons */
#define WLC_PMD_WAKE_SET 0x1
uint32 reason; /* reason(s) for staying awake */
} BWL_POST_PACKED_STRUCT wlc_pm_debug_t;
+/* WL_PWRSTATS_TYPE_PM_AWAKE1 structures (for 6.25 firmware) */
+#define WLC_STA_AWAKE_STATES_MAX_V1 30
+#define WLC_PMD_EVENT_MAX_V1 32
+/* Data sent as part of pwrstats IOVAR (and EXCESS_PM_WAKE event) */
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v1 {
+ uint32 curr_time; /* ms */
+ uint32 hw_macc; /* HW maccontrol */
+ uint32 sw_macc; /* SW maccontrol */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, msecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /* Most recent TSF drift from beacon */
+ int32 min_drift; /* Min TSF drift from beacon in magnitude */
+ int32 max_drift; /* Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /* Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+ uint8 pmwake_idx; /* for stepping through pm_state */
+ wlc_pm_debug_t pm_state[WLC_STA_AWAKE_STATES_MAX_V1]; /* timestamped wake bits */
+ uint32 pmd_event_wake_dur[WLC_PMD_EVENT_MAX_V1]; /* cumulative usecs per wake reason */
+ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
+} BWL_POST_PACKED_STRUCT pm_awake_data_v1_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v1 {
+	uint16 type;	     /* WL_PWRSTATS_TYPE_PM_AWAKE1 */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ pm_awake_data_v1_t awake_data;
+ uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v1_t;
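/* Editor-added illustrative sketch, not part of the driver patch: reading the
 * wake history out of pm_awake_data_v1_t. Treating pmwake_idx as the next
 * write position of a circular pm_state[] buffer (oldest entry immediately
 * after it) is an assumption of this sketch.
 */
#if 0	/* illustrative example only */
static void
example_dump_wake_history(const pm_awake_data_v1_t *pm)
{
	uint32 i;

	for (i = 0; i < WLC_STA_AWAKE_STATES_MAX_V1; i++) {
		uint32 idx = (pm->pmwake_idx + i) % WLC_STA_AWAKE_STATES_MAX_V1;
		uint32 reason = pm->pm_state[idx].reason;	/* WLC_PMD_* wake bits */

		(void)reason;	/* entries come out oldest to newest */
	}
}
#endif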
+
+/* WL_PWRSTATS_TYPE_PM_AWAKE2 structures */
/* Data sent as part of pwrstats IOVAR */
-typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data {
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data_v2 {
uint32 curr_time; /* ms */
uint32 hw_macc; /* HW maccontrol */
uint32 sw_macc; /* SW maccontrol */
- uint32 pm_dur; /* Total sleep time in PM, usecs */
- uint32 mpc_dur; /* Total sleep time in MPC, usecs */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, msecs */
/* int32 drifts = remote - local; +ve drift => local-clk slow */
int32 last_drift; /* Most recent TSF drift from beacon */
uint8 pad[3];
uint32 frts_time; /* Cumulative ms spent in frts since driver load */
uint32 frts_end_cnt; /* No of times frts ended since driver load */
-} BWL_POST_PACKED_STRUCT pm_awake_data_t;
+} BWL_POST_PACKED_STRUCT pm_awake_data_v2_t;
-typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats {
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats_v2 {
	uint16 type;	     /* WL_PWRSTATS_TYPE_PM_AWAKE2 */
uint16 len; /* Up to 4K-1, top 4 bits are reserved */
- pm_awake_data_t awake_data;
-} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_t;
+ pm_awake_data_v2_t awake_data;
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_v2_t;
/* Original bus structure is for HSIC */
typedef BWL_PRE_PACKED_STRUCT struct bus_metrics {
uint32 l1_2_usecs; /* L1_2ss duration in usecs */
uint32 l2_cnt; /* L2 entry count */
uint32 l2_usecs; /* L2 duration in usecs */
+ uint32 timestamp; /* Timestamp on when stats are collected */
+ uint32 num_h2d_doorbell; /* # of doorbell interrupts - h2d */
+ uint32 num_d2h_doorbell; /* # of doorbell interrupts - d2h */
+ uint32 num_submissions; /* # of submissions */
+ uint32 num_completions; /* # of completions */
+ uint32 num_rxcmplt; /* # of rx completions */
+	uint32 num_rxcmplt_drbl;	/* # of drbl interrupts for rx complt. */
+	uint32 num_txstatus;	/* # of tx completions */
+	uint32 num_txstatus_drbl;	/* # of drbl interrupts for tx complt. */
+ uint32 ltr_active_ct; /* # of times chip went to LTR ACTIVE */
+ uint32 ltr_active_dur; /* # of msecs chip was in LTR ACTIVE */
+ uint32 ltr_sleep_ct; /* # of times chip went to LTR SLEEP */
+ uint32 ltr_sleep_dur; /* # of msecs chip was in LTR SLEEP */
+ uint32 deepsleep_count; /* # of times chip went to deepsleep */
+ uint32 deepsleep_dur; /* # of msecs chip was in deepsleep */
} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t;
/* Bus interface info for PCIE */
uint8 id;
} BWL_POST_PACKED_STRUCT;
+/* Return values */
+#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */
+#define ND_REQ_SINK 0x2 /* Input packet should be discarded */
+#define ND_FORCE_FORWARD	0x3	/* For the dongle to forward req to HOST */
+
+/* Neighbor Solicitation Response Offload IOVAR param */
+typedef BWL_PRE_PACKED_STRUCT struct nd_param {
+ struct ipv6_addr host_ip[2];
+ struct ipv6_addr solicit_ip;
+ struct ipv6_addr remote_ip;
+ uint8 host_mac[ETHER_ADDR_LEN];
+ uint32 offload_id;
+} BWL_POST_PACKED_STRUCT nd_param_t;
typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh {
uint32 pfn_alert_thresh; /* time in ms */
#define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */
#define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */
#define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */
+#define WL_PMALERT_PS_ALLOWED_HIST 4 /* struct wl_pmalert_ps_allowed_history, variable */
+#define WL_PMALERT_EXT_UCODE_DBG 5 /* struct wl_pmalert_ext_ucode_dbg_t, variable */
+#define WL_PMALERT_EPM_START_EVENT_DUR 6 /* struct wl_pmalert_event_dur_t, variable */
typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed {
uint16 type; /* WL_PMALERT_FIXED */
uint16 len; /* Up to 4K-1, top 4 bits are reserved */
uint32 prev_stats_time; /* msecs */
uint32 curr_time; /* ms */
- uint32 prev_pm_dur; /* usecs */
- uint32 pm_dur; /* Total sleep time in PM, usecs */
- uint32 prev_mpc_dur; /* usecs */
- uint32 mpc_dur; /* Total sleep time in MPC, usecs */
+ uint32 prev_pm_dur; /* msecs */
+ uint32 pm_dur; /* Total sleep time in PM, msecs */
+ uint32 prev_mpc_dur; /* msecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, msecs */
uint32 hw_macc; /* HW maccontrol */
uint32 sw_macc; /* SW maccontrol */
uint32 avg_drift; /* Avg TSF drift from beacon */
uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
- uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_time; /* Cumulative ms spent in data frts since driver load */
uint32 frts_end_cnt; /* No of times frts ended since driver load */
+ uint32 prev_frts_dur; /* Data frts duration at start of pm-period */
+ uint32 cal_dur; /* Cumulative ms spent in calibration */
+ uint32 prev_cal_dur; /* cal duration at start of pm-period */
} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t;
typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate {
uint32 phydebug[20];
} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Structures and constants used for "vndr_ie" IOVar interface */
#define VNDR_IE_CMD_LEN 4 /* length of the set command string:
} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
#define MAX_IBSS_ROUTE_TBL_ENTRY 64
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define TXPWR_TARGET_VERSION 0
typedef BWL_PRE_PACKED_STRUCT struct {
} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
#define AIBSS_TXFAIL_CONFIG_VER_0 0
+#define AIBSS_TXFAIL_CONFIG_VER_1 1
+#define AIBSS_TXFAIL_CONFIG_CUR_VER AIBSS_TXFAIL_CONFIG_VER_1
/* structure used to configure aibss tx fail event */
typedef BWL_PRE_PACKED_STRUCT struct {
uint16 len;
uint32 bcn_timeout; /* dur in seconds to receive 1 bcn */
uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */
+	uint32 max_atim_failure;	/* no. of consecutive ATIM failures */
} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
/* no strict structure packing */
#include <packed_section_end.h>
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Global ASSERT Logging */
#define ASSERTLOG_CUR_VER 0x0100
#define MAX_ASSRTSTR_LEN 64
#define LOGRRC_FIX_LEN 8
#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
-#ifdef BCMWAPI_WAI
-#define IV_LEN 16
- struct wapi_sta_msg_t
- {
- uint16 msg_type;
- uint16 datalen;
- uint8 vap_mac[6];
- uint8 reserve_data1[2];
- uint8 sta_mac[6];
- uint8 reserve_data2[2];
- uint8 gsn[IV_LEN];
- uint8 wie[256];
- };
-#endif /* BCMWAPI_WAI */
/* chanim acs record */
typedef struct {
int8 bgnoise;
uint32 glitch_cnt;
uint8 ccastats;
+ uint8 chan_idle;
uint timestamp;
} chanim_acs_record_t;
uint32 glitchcnt; /* normalized as per second count */
uint32 badplcp; /* normalized as per second count */
uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */
- int8 bgnoise; /* background noise level (in dBm) */
- chanspec_t chanspec;
- uint32 timestamp;
+ int8 bgnoise; /* background noise level (in dBm) */
+ chanspec_t chanspec; /* ctrl chanspec of the interface */
+ uint32 timestamp; /* time stamp at which the stats are collected */
uint32 bphy_glitchcnt; /* normalized as per second count */
uint32 bphy_badplcp; /* normalized as per second count */
uint8 chan_idle; /* normalized as 0~255 */
#define WLC_TXCAL_CORE_MAX 2 /* max number of txcore supports for txcal */
#define MAX_NUM_TXCAL_MEAS 128
-
+#define MAX_NUM_PWR_STEP 40
+#define TXCAL_ROUNDING_FIX 1
typedef struct wl_txcal_meas {
+#ifdef TXCAL_ROUNDING_FIX
+ uint16 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+#else
uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+#endif /* TXCAL_ROUNDING_FIX */
int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
uint8 valid_cnt;
} wl_txcal_meas_t;
typedef struct wl_txcal_power_tssi {
uint8 set_core;
uint8 channel;
+ int16 tempsense[WLC_TXCAL_CORE_MAX];
int16 pwr_start[WLC_TXCAL_CORE_MAX];
+ uint8 pwr_start_idx[WLC_TXCAL_CORE_MAX];
uint8 num_entries[WLC_TXCAL_CORE_MAX];
- uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+ uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_PWR_STEP];
bool gen_tbl;
} wl_txcal_power_tssi_t;
#define WL_PROXD_FLAG_SEQ_EN 0x80
#define WL_PROXD_RANDOM_WAKEUP 0x8000
+#define WL_PROXD_MAXREPORT 8
typedef struct wl_proxd_iovar {
	uint16 method;		/* Proximity Detection method */
/* add more params required for other methods can be added here */
} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t;
+typedef struct wl_proxd_seq_config
+{
+ int16 N_tx_log2;
+ int16 N_rx_log2;
+ int16 N_tx_scale;
+ int16 N_rx_scale;
+ int16 w_len;
+ int16 w_offset;
+} wl_proxd_seq_config_t;
+
+
typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
uint32 Ki; /* h/w delay K factor for initiator */
uint32 Kt; /* h/w delay K factor for target */
uint8 hw_adj; /* enable hw assisted timestamp adjustment */
uint8 seq_en; /* enable ranging sequence */
uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */
+ int16 N_log2_2g; /* simple threshold crossing for 2g channel */
+ int16 N_scale_2g; /* simple threshold crossing for 2g channel */
+ wl_proxd_seq_config_t seq_5g20;
} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
typedef struct wl_proxd_params_iovar {
#define PROXD_COLLECT_QUERY_DATA 3
#define PROXD_COLLECT_QUERY_DEBUG 4
#define PROXD_COLLECT_REMOTE_REQUEST 5
+#define PROXD_COLLECT_DONE 6
typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
uint32 method; /* method */
} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+#ifdef WL_NAN
/* ********************** NAN wl interface struct types and defs ******************** */
#define WL_NAN_IOCTL_VERSION 0x1
+#define NAN_IOC_BUFSZ 256 /**< some sufficient ioc buff size for our module */
+#define NAN_IOC_BUFSZ_EXT 1024 /* some sufficient ioc buff size for dump commands */
/* wl_nan_sub_cmd may also be used in dhd */
typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t;
uint16 version; /* interface command or event version */
uint16 id; /* nan ioctl cmd ID */
uint16 len; /* total length of all tlv records in data[] */
+	uint16 pad;		/* pad for 32-bit alignment */
uint8 data [1]; /* var len payload of bcm_xtlv_t type */
} BWL_POST_PACKED_STRUCT wl_nan_ioc_t;
uint32 cnt_svc_disc_tx; /* TX svc disc frame count */
uint32 cnt_svc_disc_rx; /* RX svc disc frame count */
struct ether_addr cid;
+ uint32 chspec_5g;
} wl_nan_status_t;
+typedef struct wl_nan_count {
+ uint32 cnt_bcn_tx; /* TX disc/sync beacon count */
+ uint32 cnt_bcn_rx; /* RX disc/sync beacon count */
+ uint32 cnt_svc_disc_tx; /* TX svc disc frame count */
+ uint32 cnt_svc_disc_rx; /* RX svc disc frame count */
+} wl_nan_count_t;
+
/* various params and ctl switches for the nan_debug instance */
typedef struct nan_debug_params {
	uint8 enabled; /* runtime debugging enabled */
uint16 status;
} nan_debug_params_t;
+/* time slot */
+#define NAN_MAX_TIMESLOT 32
+typedef struct nan_timeslot {
+ uint32 abitmap; /* available bitmap */
+ uint32 chanlist[NAN_MAX_TIMESLOT];
+} nan_timeslot_t;
/* nan passive scan params */
#define NAN_SCAN_MAX_CHCNT 8
-typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params {
+typedef struct nan_scan_params {
uint16 scan_time;
uint16 home_time;
uint16 ms_intvl; /* interval between merge scan */
uint16 ms_dur; /* duration of merge scan */
uint16 chspec_num;
+ uint8 pad[2];
chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /* act. used 3, 5 rfu */
-} BWL_POST_PACKED_STRUCT nan_scan_params_t;
+} nan_scan_params_t;
enum wl_nan_role {
WL_NAN_ROLE_AUTO = 0,
WL_NAN_CMD_LEAVE = 4,
WL_NAN_CMD_MERGE = 5,
WL_NAN_CMD_STATUS = 6,
+ WL_NAN_CMD_TSRESERVE = 7,
+ WL_NAN_CMD_TSSCHEDULE = 8,
+ WL_NAN_CMD_TSRELEASE = 9,
+ WL_NAN_CMD_OUI = 10,
+
+ WL_NAN_CMD_COUNT = 15,
+ WL_NAN_CMD_CLEARCOUNT = 16,
+
/* discovery engine commands */
WL_NAN_CMD_PUBLISH = 20,
WL_NAN_CMD_SUBSCRIBE = 21,
WL_NAN_CMD_SCAN_RESULTS = 48,
WL_NAN_CMD_EVENT_MASK = 49,
WL_NAN_CMD_EVENT_CHECK = 50,
+ WL_NAN_CMD_DUMP = 51,
+ WL_NAN_CMD_CLEAR = 52,
+ WL_NAN_CMD_RSSI = 53,
WL_NAN_CMD_DEBUG = 60,
WL_NAN_CMD_TEST1 = 61,
WL_NAN_CMD_TEST2 = 62,
- WL_NAN_CMD_TEST3 = 63
+ WL_NAN_CMD_TEST3 = 63,
+ WL_NAN_CMD_DISC_RESULTS = 64
};
/*
/* 0x02 ~ 0xFF: reserved. In case to use with the same data format as NAN attribute TLV */
/* 0x100 ~ : private TLV ID defined just for NAN command */
/* common types */
- WL_NAN_XTLV_BUFFER = 0x101, /* generic type, function depends on cmd context */
WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */
WL_NAN_XTLV_REASON = 0x103,
- WL_NAN_XTLV_ENABLE = 0x104,
+ WL_NAN_XTLV_ENABLED = 0x104,
/* explicit types, primarily for discovery engine iovars */
WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */
WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */
WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */
WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */
WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */
+ WL_NAN_XTLV_SR_FILTER = 0x129, /* Service Response Filter */
+ WL_NAN_XTLV_FOLLOWUP = 0x130, /* Service Info for Follow-Up SDF */
+ WL_NAN_XTLV_PEER_INSTANCE_ID = 0x131, /* Used to parse remote instance Id */
/* explicit types, primarily for NAN MAC iovars */
WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */
 WL_NAN_XTLV_BCN_INTERVAL = 0x141, /* beacon interval, both sync and discovery bcns? */
WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */
WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */
WL_NAN_XTLV_EVENT_MASK = 0x156,
- WL_NAN_XTLV_MERGE = 0x157
+ WL_NAN_XTLV_MASTER_RANK = 0x158,
+ WL_NAN_XTLV_WARM_UP_TIME = 0x159,
+ WL_NAN_XTLV_PM_OPTION = 0x15a,
+ WL_NAN_XTLV_OUI = 0x15b, /* NAN OUI */
+ WL_NAN_XTLV_MAC_COUNT = 0x15c, /* xtlv payload is nan_count_t */
+ /* nan timeslot management */
+ WL_NAN_XTLV_TSRESERVE = 0x160,
+ WL_NAN_XTLV_TSRELEASE = 0x161,
+ WL_NAN_XTLV_IDLE_DW_TIMEOUT = 0x162,
+ WL_NAN_XTLV_IDLE_DW_LEN = 0x163,
+ WL_NAN_XTLV_RND_FACTOR = 0x164,
+ WL_NAN_XTLV_SVC_DISC_TXTIME = 0x165, /* svc disc frame tx time in DW */
+ WL_NAN_XTLV_OPERATING_BAND = 0x166,
+ WL_NAN_XTLV_STOP_BCN_TX = 0x167,
+ WL_NAN_XTLV_CONCUR_SCAN = 0x168,
+ WL_NAN_XTLV_DUMP_CLR_TYPE = 0x175, /* wl nan dump/clear subtype */
+ WL_NAN_XTLV_PEER_RSSI = 0x176, /* xtlv payload for wl nan dump rssi */
+ WL_NAN_XTLV_MAC_CHANSPEC_1 = 0x17A, /* to get chanspec[1] */
+ WL_NAN_XTLV_DISC_RESULTS = 0x17B, /* get disc results */
+ WL_NAN_XTLV_MAC_STATS = 0x17C /* xtlv payload for wl nan dump stats */
};
/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */
/* The service hash (service id) is exactly this many bytes. */
#define WL_NAN_SVC_HASH_LEN 6
+/* Number of hash functions per bloom filter */
+#define WL_NAN_HASHES_PER_BLOOM 4
+
/* Instance ID type (unique identifier) */
typedef uint8 wl_nan_instance_id_t;
-/* Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */
+/* max no. of recent disc results retained */
+#define WL_NAN_MAX_DISC_RESULTS 3
+
+/** Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */
typedef struct wl_nan_disc_params_s {
/* Periodicity of unsolicited/query transmissions, in DWs */
uint32 period;
uint32 flags;
/* Publish or subscribe service id, i.e. hash of the service name */
uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+ /* pad to make 4 byte alignment, can be used for something else in the future */
+ uint8 pad;
/* Publish or subscribe id */
wl_nan_instance_id_t instance_id;
} wl_nan_disc_params_t;
+/* recent discovery results */
+typedef struct wl_nan_disc_result_s
+{
+ wl_nan_instance_id_t instance_id; /* instance id of pub/sub req */
+ wl_nan_instance_id_t peer_instance_id; /* peer instance id of pub/sub req/resp */
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* hash of the service name */
+ struct ether_addr peer_mac; /* peer mac address */
+} wl_nan_disc_result_t;
+
+/* list of recent discovery results */
+typedef struct wl_nan_disc_results_s
+{
+ wl_nan_disc_result_t disc_result[WL_NAN_MAX_DISC_RESULTS];
+} wl_nan_disc_results_list_t;
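A short sketch of consuming the fixed-size discovery results list above (the function name is illustrative):

static void example_walk_disc_results(const wl_nan_disc_results_list_t *list)
{
	int i;

	for (i = 0; i < WL_NAN_MAX_DISC_RESULTS; i++) {
		const wl_nan_disc_result_t *r = &list->disc_result[i];
		/* r->instance_id and r->peer_instance_id identify the local and remote
		 * pub/sub instances; r->svc_hash and r->peer_mac identify the service
		 * and the peer.
		 */
		(void)r;
	}
}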
+
/*
 * discovery interface event structures *
*/
uint8 count; /* number of peers in the list */
wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */
} wl_nan_ranging_event_data_t;
+enum {
+ WL_NAN_RSSI_DATA = 1,
+ WL_NAN_STATS_DATA = 2,
+/*
+ * ***** ADD before this line ****
+ */
+ WL_NAN_INVALID
+};
+
+typedef struct wl_nan_stats {
+ /* general */
+ uint32 cnt_dw; /* DW slots */
+ uint32 cnt_disc_bcn_sch; /* disc beacon slots */
+ uint32 cnt_amr_exp; /* count of ambtt expiries resetting roles */
+ uint32 cnt_bcn_upd; /* count of beacon template updates */
+ uint32 cnt_bcn_tx; /* count of sync & disc bcn tx */
+ uint32 cnt_bcn_rx; /* count of sync & disc bcn rx */
+ uint32 cnt_sync_bcn_tx; /* count of sync bcn tx within DW */
+ uint32 cnt_disc_bcn_tx; /* count of disc bcn tx */
+ uint32 cnt_sdftx_bcmc; /* count of bcast/mcast sdf tx */
+ uint32 cnt_sdftx_uc; /* count of unicast sdf tx */
+ uint32 cnt_sdftx_fail; /* count of unicast sdf tx fails */
+ uint32 cnt_sdf_rx; /* count of sdf rx */
+ /* NAN roles */
+ uint32 cnt_am; /* anchor master */
+ uint32 cnt_master; /* master */
+ uint32 cnt_nms; /* non master sync */
+ uint32 cnt_nmns; /* non master non sync */
+ /* TX */
+ uint32 cnt_err_txtime; /* error in txtime */
+ uint32 cnt_err_unsch_tx; /* tx while not in DW/ disc bcn slot */
+ uint32 cnt_err_bcn_tx; /* beacon tx error */
+ uint32 cnt_sync_bcn_tx_miss; /* no. of times the time delta between 2 consecutive
+ * sync beacons exceeds the dw interval
+ */
+ /* SCANS */
+ uint32 cnt_mrg_scan; /* count of merge scans completed */
+ uint32 cnt_err_ms_rej; /* number of merge scans rejected */
+ uint32 cnt_scan_results; /* no. of nan beacons scanned */
+ uint32 cnt_join_scan_rej; /* no. of join scans rejected */
+ uint32 cnt_nan_scan_abort; /* no. of nan scans aborted */
+ /* enable/disable */
+ uint32 cnt_nan_enab; /* no. of times nan feature got enabled */
+ uint32 cnt_nan_disab; /* no. of times nan feature got disabled */
+} wl_nan_stats_t;
+
+#define WL_NAN_MAC_MAX_NAN_PEERS 6
+#define WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER 10
+
+typedef struct wl_nan_nbr_rssi {
+ uint8 rx_chan; /* channel number on which bcn rcvd */
+ int rssi_raw; /* received rssi value */
+ int rssi_avg; /* normalized rssi value */
+} wl_nan_peer_rssi_t;
+
+typedef struct wl_nan_peer_rssi_entry {
+ struct ether_addr mac; /* peer mac address */
+ uint8 flags; /* TODO:rssi data order: latest first, oldest first etc */
+ uint8 rssi_cnt; /* rssi data sample present */
+ wl_nan_peer_rssi_t rssi[WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER]; /* RSSI data frm peer */
+} wl_nan_peer_rssi_entry_t;
+
+#define WL_NAN_PEER_RSSI 0x1
+#define WL_NAN_PEER_RSSI_LIST 0x2
+
+typedef struct wl_nan_nbr_rssi_data {
+ uint8 flags; /* this is a list or single rssi data */
+ uint8 peer_cnt; /* number of peers */
+ uint16 pad; /* padding */
+ wl_nan_peer_rssi_entry_t peers[1]; /* peers data list */
+} wl_nan_peer_rssi_data_t;
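The report above is variable-length: peer_cnt entries follow the header, each carrying up to WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER samples. A sketch of walking it (function name illustrative):

static void example_walk_peer_rssi(const wl_nan_peer_rssi_data_t *data)
{
	uint8 i, j;

	for (i = 0; i < data->peer_cnt; i++) {
		const wl_nan_peer_rssi_entry_t *e = &data->peers[i];
		for (j = 0; j < e->rssi_cnt && j < WL_NAN_MAC_MAX_RSSI_DATA_PER_PEER; j++) {
			/* e->rssi[j].rssi_raw / e->rssi[j].rssi_avg were sampled on
			 * channel e->rssi[j].rx_chan for peer e->mac
			 */
		}
	}
}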
/* ********************* end of NAN section ******************************** */
+#endif /* WL_NAN */
#define RSSI_THRESHOLD_SIZE 16
typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias {
int32 version; /* version */
int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */
- int32 peak_offset; /* peak offset */
+ int32 peak_offset; /* peak offset */
int32 bias; /* rssi bias */
int32 gd_delta; /* GD - GD_ADJ */
int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */
wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */
} wl_wsec_info_t;
+/*
+ * scan MAC definitions
+ */
+
+/* common iovar struct */
+typedef struct wl_scanmac {
+ uint16 subcmd_id; /* subcommand id */
+ uint16 len; /* total length of data[] */
+ uint8 data[1]; /* subcommand data */
+} wl_scanmac_t;
+
+/* subcommand ids */
+#define WL_SCANMAC_SUBCMD_ENABLE 0
+#define WL_SCANMAC_SUBCMD_BSSCFG 1 /* only GET supported */
+#define WL_SCANMAC_SUBCMD_CONFIG 2
+
+/* scanmac enable data struct */
+typedef struct wl_scanmac_enable {
+ uint8 enable; /* 1 - enable, 0 - disable */
+ uint8 pad[3]; /* 4-byte struct alignment */
+} wl_scanmac_enable_t;
+
+/* scanmac bsscfg data struct */
+typedef struct wl_scanmac_bsscfg {
+ uint32 bsscfg; /* bsscfg index */
+} wl_scanmac_bsscfg_t;
+
+/* scanmac config data struct */
+typedef struct wl_scanmac_config {
+ struct ether_addr mac; /* 6 bytes of MAC address or MAC prefix (i.e. OUI) */
+ struct ether_addr random_mask; /* randomized bits on each scan */
+ uint16 scan_bitmap; /* scans to use this MAC address */
+ uint8 pad[2]; /* 4-byte struct alignment */
+} wl_scanmac_config_t;
+
+/* scan bitmap */
+#define WL_SCANMAC_SCAN_UNASSOC (0x01 << 0) /* unassociated scans */
+#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /* associated roam scans */
+#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /* associated PNO scans */
+#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /* associated host scans */
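The subcommands above are carried inside wl_scanmac_t (subcmd_id plus the matching data struct). A sketch of a WL_SCANMAC_SUBCMD_CONFIG payload that keeps a fixed OUI and randomizes the lower three octets on unassociated scans; the OUI value and the function name are illustrative only:

static void example_scanmac_config(void)
{
	wl_scanmac_config_t cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.mac.octet[0] = 0x00;	/* fixed OUI (example value) */
	cfg.mac.octet[1] = 0x90;
	cfg.mac.octet[2] = 0x4c;
	cfg.random_mask.octet[3] = 0xff;	/* bits set here are re-randomized per scan */
	cfg.random_mask.octet[4] = 0xff;
	cfg.random_mask.octet[5] = 0xff;
	cfg.scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
	/* cfg would then be wrapped in a wl_scanmac_t with subcmd_id = WL_SCANMAC_SUBCMD_CONFIG */
}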
+
/* no default structure packing */
#include <packed_section_end.h>
#endif /* NET_DETECT */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
+/* (unversioned, deprecated) */
typedef struct bcnreq {
uint8 bcn_mode;
int dur;
uint16 reps;
} bcnreq_t;
+#define WL_RRM_BCN_REQ_VER 1
+typedef struct bcn_req {
+ uint8 version;
+ uint8 bcn_mode;
+ uint8 pad_1[2];
+ int32 dur;
+ int32 channel;
+ struct ether_addr da;
+ uint16 random_int;
+ wlc_ssid_t ssid;
+ uint16 reps;
+ uint8 req_elements;
+ uint8 pad_2;
+ chanspec_list_t chspec_list;
+} bcn_req_t;
+
typedef struct rrmreq {
struct ether_addr da;
uint8 reg;
} statreq_t;
#define WL_RRM_RPT_VER 0
-#define WL_RRM_RPT_MAX_PAYLOAD 64
+#define WL_RRM_RPT_MAX_PAYLOAD 256
#define WL_RRM_RPT_MIN_PAYLOAD 7
#define WL_RRM_RPT_FALG_ERR 0
-#define WL_RRM_RPT_FALG_OK 1
+#define WL_RRM_RPT_FALG_GRP_ID_PROPR (1 << 0)
+#define WL_RRM_RPT_FALG_GRP_ID_0 (1 << 1)
typedef struct {
uint16 ver; /* version */
struct ether_addr addr; /* STA MAC addr */
} wl_bssload_static_t;
+/* IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
+typedef enum wlc_ap_iov_operation {
+ WLC_AP_IOV_OP_DELETE = -1,
+ WLC_AP_IOV_OP_DISABLE = 0,
+ WLC_AP_IOV_OP_ENABLE = 1,
+ WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE = 2,
+ WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE = 3,
+ WLC_AP_IOV_OP_MOVE = 4
+} wlc_ap_iov_oper_t;
+
/* LTE coex info */
/* Analogue of HCI Set MWS Signaling cmd */
typedef struct {
#define WLC_VERSION_MAJOR 3
#define WLC_VERSION_MINOR 0
+/* begin proxd definitions */
+#include <packed_section_start.h>
+
+#define WL_PROXD_API_VERSION 0x0300 /* version 3.0 */
+
+/* Minimum supported API version */
+#define WL_PROXD_API_MIN_VERSION 0x0300
+
+/* proximity detection methods */
+enum {
+ WL_PROXD_METHOD_NONE = 0,
+ WL_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */
+ WL_PROXD_METHOD_TOF = 2,
+ WL_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */
+ WL_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */
+ WL_PROXD_METHOD_MAX
+};
+typedef int16 wl_proxd_method_t;
+
+/* global and method configuration flags */
+enum {
+ WL_PROXD_FLAG_NONE = 0x00000000,
+ WL_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests */
+ WL_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */
+ WL_PROXD_FLAG_TX_LCI = 0x00000004, /* transmit location, if available */
+ WL_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic loc, if available */
+ WL_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* respond to requests w/o host action */
+ WL_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue requests w/o host action */
+ WL_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /* publish availability */
+ WL_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */
+ WL_PROXD_FLAG_ALL = 0xffffffff
+};
+typedef uint32 wl_proxd_flags_t;
+
+#define WL_PROXD_FLAGS_AVAIL (WL_PROXD_FLAG_AVAIL_PUBLISH | \
+ WL_PROXD_FLAG_AVAIL_SCHEDULE)
+
+/* session flags */
+enum {
+ WL_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */
+ WL_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */
+ WL_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */
+ WL_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */
+ WL_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ rx_auto_burst */
+ WL_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */
+ WL_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */
+ WL_PROXD_SESSION_FLAG_TOF_COMPAT = 0x00000040, /* TOF compatibility - TBD */
+ WL_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */
+ WL_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags above */
+ WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */
+ WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */
+ WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. FTM1 - cap or rx */
+ WL_PROXD_SESSION_FLAG_REPORT_FAILURE= 0x00002000, /* report failure to target */
+ WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */
+ WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, /* No channel switching */
+ WL_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* neutral mode */
+ WL_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */
+ WL_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override from target */
+ WL_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */
+ WL_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */
+ WL_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */
+ WL_PROXD_SESSION_FLAG_COLLECT = 0x80000000, /* debug - collect */
+ WL_PROXD_SESSION_FLAG_ALL = 0xffffffff
+};
+typedef uint32 wl_proxd_session_flags_t;
+
+/* time units - mc supports up to 0.1ns resolution */
+enum {
+ WL_PROXD_TMU_TU = 0, /* 1024us */
+ WL_PROXD_TMU_SEC = 1,
+ WL_PROXD_TMU_MILLI_SEC = 2,
+ WL_PROXD_TMU_MICRO_SEC = 3,
+ WL_PROXD_TMU_NANO_SEC = 4,
+ WL_PROXD_TMU_PICO_SEC = 5
+};
+typedef int16 wl_proxd_tmu_t;
+
+/* time interval e.g. 10ns */
+typedef struct wl_proxd_intvl {
+ uint32 intvl;
+ wl_proxd_tmu_t tmu;
+ uint8 pad[2];
+} wl_proxd_intvl_t;
+
+/* commands that can apply to proxd, method or a session */
+enum {
+ WL_PROXD_CMD_NONE = 0,
+ WL_PROXD_CMD_GET_VERSION = 1,
+ WL_PROXD_CMD_ENABLE = 2,
+ WL_PROXD_CMD_DISABLE = 3,
+ WL_PROXD_CMD_CONFIG = 4,
+ WL_PROXD_CMD_START_SESSION = 5,
+ WL_PROXD_CMD_BURST_REQUEST = 6,
+ WL_PROXD_CMD_STOP_SESSION = 7,
+ WL_PROXD_CMD_DELETE_SESSION = 8,
+ WL_PROXD_CMD_GET_RESULT = 9,
+ WL_PROXD_CMD_GET_INFO = 10,
+ WL_PROXD_CMD_GET_STATUS = 11,
+ WL_PROXD_CMD_GET_SESSIONS = 12,
+ WL_PROXD_CMD_GET_COUNTERS = 13,
+ WL_PROXD_CMD_CLEAR_COUNTERS = 14,
+ WL_PROXD_CMD_COLLECT = 15,
+ WL_PROXD_CMD_TUNE = 16,
+ WL_PROXD_CMD_DUMP = 17,
+ WL_PROXD_CMD_START_RANGING = 18,
+ WL_PROXD_CMD_STOP_RANGING = 19,
+ WL_PROXD_CMD_GET_RANGING_INFO = 20,
+ WL_PROXD_CMD_IS_TLV_SUPPORTED = 21,
+
+ WL_PROXD_CMD_MAX
+};
+typedef int16 wl_proxd_cmd_t;
+
+/* session ids:
+ * id 0 is reserved
+ * ids 1..0x7fff - allocated by host/app
+ * 0x8000-0xffff - allocated by firmware, used for auto/rx
+ */
+enum {
+ WL_PROXD_SESSION_ID_GLOBAL = 0
+};
+
+#define WL_PROXD_SID_HOST_MAX 0x7fff
+#define WL_PROXD_SID_HOST_ALLOC(_sid) ((_sid) > 0 && (_sid) <= WL_PROXD_SID_HOST_MAX)
+
+/* maximum number sessions that can be allocated, may be less if tunable */
+#define WL_PROXD_MAX_SESSIONS 16
+
+typedef uint16 wl_proxd_session_id_t;
+
+/* status - TBD BCME_ vs proxd status - range reserved for BCME_ */
+enum {
+ WL_PROXD_E_POLICY = -1045,
+ WL_PROXD_E_INCOMPLETE = -1044,
+ WL_PROXD_E_OVERRIDDEN = -1043,
+ WL_PROXD_E_ASAP_FAILED = -1042,
+ WL_PROXD_E_NOTSTARTED = -1041,
+ WL_PROXD_E_INVALIDAVB = -1040,
+ WL_PROXD_E_INCAPABLE = -1039,
+ WL_PROXD_E_MISMATCH = -1038,
+ WL_PROXD_E_DUP_SESSION = -1037,
+ WL_PROXD_E_REMOTE_FAIL = -1036,
+ WL_PROXD_E_REMOTE_INCAPABLE = -1035,
+ WL_PROXD_E_SCHED_FAIL = -1034,
+ WL_PROXD_E_PROTO = -1033,
+ WL_PROXD_E_EXPIRED = -1032,
+ WL_PROXD_E_TIMEOUT = -1031,
+ WL_PROXD_E_NOACK = -1030,
+ WL_PROXD_E_DEFERRED = -1029,
+ WL_PROXD_E_INVALID_SID = -1028,
+ WL_PROXD_E_REMOTE_CANCEL = -1027,
+ WL_PROXD_E_CANCELED = -1026, /* local */
+ WL_PROXD_E_INVALID_SESSION = -1025,
+ WL_PROXD_E_BAD_STATE = -1024,
+ WL_PROXD_E_ERROR = -1,
+ WL_PROXD_E_OK = 0
+};
+typedef int32 wl_proxd_status_t;
+
+/* session states */
+enum {
+ WL_PROXD_SESSION_STATE_NONE = 0,
+ WL_PROXD_SESSION_STATE_CREATED = 1,
+ WL_PROXD_SESSION_STATE_CONFIGURED = 2,
+ WL_PROXD_SESSION_STATE_STARTED = 3,
+ WL_PROXD_SESSION_STATE_DELAY = 4,
+ WL_PROXD_SESSION_STATE_USER_WAIT = 5,
+ WL_PROXD_SESSION_STATE_SCHED_WAIT = 6,
+ WL_PROXD_SESSION_STATE_BURST = 7,
+ WL_PROXD_SESSION_STATE_STOPPING = 8,
+ WL_PROXD_SESSION_STATE_ENDED = 9,
+ WL_PROXD_SESSION_STATE_DESTROYING = -1
+};
+typedef int16 wl_proxd_session_state_t;
+
+/* RTT sample flags */
+enum {
+ WL_PROXD_RTT_SAMPLE_NONE = 0x00,
+ WL_PROXD_RTT_SAMPLE_DISCARD = 0x01
+};
+typedef uint8 wl_proxd_rtt_sample_flags_t;
+
+typedef struct wl_proxd_rtt_sample {
+ uint8 id; /* id for the sample - non-zero */
+ wl_proxd_rtt_sample_flags_t flags;
+ int16 rssi;
+ wl_proxd_intvl_t rtt; /* round trip time */
+ uint32 ratespec;
+} wl_proxd_rtt_sample_t;
+
+/* result flags */
+enum {
+ WL_PRXOD_RESULT_FLAG_NONE = 0x0000,
+ WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /* NLOS - if available */
+ WL_PROXD_RESULT_FLAG_LOS = 0x0002, /* LOS - if available */
+ WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */
+ WL_PROXD_RESULT_FLAG_ALL = 0xffff
+};
+typedef int16 wl_proxd_result_flags_t;
+
+/* rtt measurement result */
+typedef struct wl_proxd_rtt_result {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state; /* current state */
+ union {
+ wl_proxd_intvl_t retry_after; /* hint for errors */
+ wl_proxd_intvl_t burst_duration; /* burst duration */
+ } u;
+ wl_proxd_rtt_sample_t avg_rtt;
+ uint32 avg_dist; /* 1/256m units */
+ uint16 sd_rtt; /* RTT standard deviation */
+ uint8 num_valid_rtt; /* valid rtt cnt */
+ uint8 num_ftm; /* actual num of ftm cnt */
+ uint16 burst_num; /* in a session */
+ uint16 num_rtt; /* 0 if no detail */
+ wl_proxd_rtt_sample_t rtt[1]; /* variable */
+} wl_proxd_rtt_result_t;
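avg_dist above is reported in 1/256 m units, and rtt[] carries num_rtt detail samples when WL_PROXD_SESSION_FLAG_RTT_DETAIL was requested. A sketch of interpreting a result (function name illustrative):

static void example_handle_rtt_result(const wl_proxd_rtt_result_t *res)
{
	uint32 dist_cm = (res->avg_dist * 100) / 256;	/* 1/256 m units -> centimeters */
	uint16 i;

	for (i = 0; i < res->num_rtt; i++) {
		const wl_proxd_rtt_sample_t *s = &res->rtt[i];
		if (s->flags & WL_PROXD_RTT_SAMPLE_DISCARD)
			continue;
		/* s->rtt.intvl is expressed in units of s->rtt.tmu,
		 * e.g. WL_PROXD_TMU_PICO_SEC
		 */
	}
	(void)dist_cm;
}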
+
+/* aoa measurement result */
+typedef struct wl_proxd_aoa_result {
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state;
+ uint16 burst_num;
+ uint8 pad[2];
+ /* wl_proxd_aoa_sample_t sample_avg; TBD */
+} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t;
+
+/* global stats */
+typedef struct wl_proxd_counters {
+ uint32 tx; /* tx frame count */
+ uint32 rx; /* rx frame count */
+ uint32 burst; /* total number of burst */
+ uint32 sessions; /* total number of sessions */
+ uint32 max_sessions; /* max concurrency */
+ uint32 sched_fail; /* scheduling failures */
+ uint32 timeouts; /* timeouts */
+ uint32 protoerr; /* protocol errors */
+ uint32 noack; /* tx w/o ack */
+ uint32 txfail; /* any tx failure */
+ uint32 lci_req_tx; /* tx LCI requests */
+ uint32 lci_req_rx; /* rx LCI requests */
+ uint32 lci_rep_tx; /* tx LCI reports */
+ uint32 lci_rep_rx; /* rx LCI reports */
+ uint32 civic_req_tx; /* tx civic requests */
+ uint32 civic_req_rx; /* rx civic requests */
+ uint32 civic_rep_tx; /* tx civic reports */
+ uint32 civic_rep_rx; /* rx civic reports */
+ uint32 rctx; /* ranging contexts created */
+ uint32 rctx_done; /* count of ranging done */
+ uint32 publish_err; /* availability publishing errors */
+ uint32 on_chan; /* count of scheduler onchan */
+ uint32 off_chan; /* count of scheduler offchan */
+} wl_proxd_counters_t;
+
+typedef struct wl_proxd_counters wl_proxd_session_counters_t;
+
+enum {
+ WL_PROXD_CAP_NONE = 0x0000,
+ WL_PROXD_CAP_ALL = 0xffff
+};
+typedef int16 wl_proxd_caps_t;
+
+/* method capabilities */
+enum {
+ WL_PROXD_FTM_CAP_NONE = 0x0000,
+ WL_PROXD_FTM_CAP_FTM1 = 0x0001
+};
+typedef uint16 wl_proxd_ftm_caps_t;
+
+typedef struct BWL_PRE_PACKED_STRUCT wl_proxd_tlv_id_list {
+ uint16 num_ids;
+ uint16 ids[1];
+} BWL_POST_PACKED_STRUCT wl_proxd_tlv_id_list_t;
+
+typedef struct wl_proxd_session_id_list {
+ uint16 num_ids;
+ wl_proxd_session_id_t ids[1];
+} wl_proxd_session_id_list_t;
+
+/* tlvs returned for get_info on ftm method
+ * configuration:
+ * proxd flags
+ * event mask
+ * debug mask
+ * session defaults (session tlvs)
+ * status tlv - not supported for ftm method
+ * info tlv
+ */
+typedef struct wl_proxd_ftm_info {
+ wl_proxd_ftm_caps_t caps;
+ uint16 max_sessions;
+ uint16 num_sessions;
+ uint16 rx_max_burst;
+} wl_proxd_ftm_info_t;
+
+/* tlvs returned for get_info on session
+ * session config (tlvs)
+ * session info tlv
+ */
+typedef struct wl_proxd_ftm_session_info {
+ uint16 sid;
+ uint8 bss_index;
+ uint8 pad;
+ struct ether_addr bssid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+} wl_proxd_ftm_session_info_t;
+
+typedef struct wl_proxd_ftm_session_status {
+ uint16 sid;
+ wl_proxd_session_state_t state;
+ wl_proxd_status_t status;
+ uint16 burst_num;
+} wl_proxd_ftm_session_status_t;
+
+/* rrm range request */
+typedef struct wl_proxd_range_req {
+ uint16 num_repeat;
+ uint16 init_delay_range; /* in TUs */
+ uint8 pad;
+ uint8 num_nbr; /* number of (possible) neighbors */
+ nbr_element_t nbr[1];
+} wl_proxd_range_req_t;
+
+#define WL_PROXD_LCI_LAT_OFF 0
+#define WL_PROXD_LCI_LONG_OFF 5
+#define WL_PROXD_LCI_ALT_OFF 10
+
+#define WL_PROXD_LCI_GET_LAT(_lci, _lat, _lat_err) { \
+ unsigned _off = WL_PROXD_LCI_LAT_OFF; \
+ _lat_err = (_lci)->data[(_off)] & 0x3f; \
+ _lat = (_lci)->data[(_off)+1]; \
+ _lat |= (_lci)->data[(_off)+2] << 8; \
+ _lat |= (_lci)->data[(_off)+3] << 16; \
+ _lat |= (_lci)->data[(_off)+4] << 24; \
+ _lat <<= 2; \
+ _lat |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_LONG(_lci, _lcilong, _long_err) { \
+ unsigned _off = WL_PROXD_LCI_LONG_OFF; \
+ _long_err = (_lci)->data[(_off)] & 0x3f; \
+ _lcilong = (_lci)->data[(_off)+1]; \
+ _lcilong |= (_lci)->data[(_off)+2] << 8; \
+ _lcilong |= (_lci)->data[(_off)+3] << 16; \
+ _lcilong |= (_lci)->data[(_off)+4] << 24; \
+ _lcilong <<= 2; \
+ _lcilong |= (_lci)->data[(_off)] >> 6; \
+}
+
+#define WL_PROXD_LCI_GET_ALT(_lci, _alt_type, _alt, _alt_err) { \
+ unsigned _off = WL_PROXD_LCI_ALT_OFF; \
+ _alt_type = (_lci)->data[_off] & 0x0f; \
+ _alt_err = (_lci)->data[(_off)] >> 4; \
+ _alt_err |= ((_lci)->data[(_off)+1] & 0x03) << 4; \
+ _alt = (_lci)->data[(_off)+2]; \
+ _alt |= (_lci)->data[(_off)+3] << 8; \
+ _alt |= (_lci)->data[(_off)+4] << 16; \
+ _alt <<= 6; \
+ _alt |= (_lci)->data[(_off) + 1] >> 2; \
+}
+
+#define WL_PROXD_LCI_VERSION(_lci) ((_lci)->data[15] >> 6)
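The three macros above unpack the packed latitude/longitude/altitude fields of an LCI subelement; they only require that _lci point to an object exposing the 16-byte LCI field as data[]. A sketch under that assumption (the container type and function name here are hypothetical):

struct lci_field_example { uint8 data[16]; };	/* hypothetical container for this example */

static void example_decode_lci(const struct lci_field_example *lci)
{
	int32 lat = 0, lat_err = 0;
	int32 alt = 0, alt_type = 0, alt_err = 0;

	WL_PROXD_LCI_GET_LAT(lci, lat, lat_err);
	WL_PROXD_LCI_GET_ALT(lci, alt_type, alt, alt_err);
	/* lat and alt now hold the raw encoded fields; *_err hold their uncertainty */
	(void)lat; (void)lat_err; (void)alt; (void)alt_type; (void)alt_err;
}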
+
+/* availability. advertising mechanism bss specific */
+/* availability flags */
+enum {
+ WL_PROXD_AVAIL_NONE = 0,
+ WL_PROXD_AVAIL_NAN_PUBLISHED = 0x0001,
+ WL_PROXD_AVAIL_SCHEDULED = 0x0002 /* scheduled by proxd */
+};
+typedef int16 wl_proxd_avail_flags_t;
+
+/* time reference */
+enum {
+ WL_PROXD_TREF_NONE = 0,
+ WL_PROXD_TREF_DEV_TSF = 1,
+ WL_PROXD_TREF_NAN_DW = 2,
+ WL_PROXD_TREF_TBTT = 3,
+ WL_PROXD_TREF_MAX /* last entry */
+};
+typedef int16 wl_proxd_time_ref_t;
+
+/* proxd channel-time slot */
+typedef struct {
+ wl_proxd_intvl_t start; /* from ref */
+ wl_proxd_intvl_t duration; /* from start */
+ uint32 chanspec;
+} wl_proxd_time_slot_t;
+
+typedef struct wl_proxd_avail24 {
+ wl_proxd_avail_flags_t flags; /* for query only */
+ wl_proxd_time_ref_t time_ref;
+ uint16 max_slots; /* for query only */
+ uint16 num_slots;
+ wl_proxd_time_slot_t slots[1]; /* ROM compat - not used */
+ wl_proxd_intvl_t repeat;
+ wl_proxd_time_slot_t ts0[1];
+} wl_proxd_avail24_t;
+#define WL_PROXD_AVAIL24_TIMESLOT(_avail24, _i) (&(_avail24)->ts0[(_i)])
+#define WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) OFFSETOF(wl_proxd_avail24_t, ts0)
+#define WL_PROXD_AVAIL24_TIMESLOTS(_avail24) WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)
+#define WL_PROXD_AVAIL24_SIZE(_avail24, _num_slots) (\
+ WL_PROXD_AVAIL24_TIMESLOT_OFFSET(_avail24) + \
+ (_num_slots) * sizeof(*WL_PROXD_AVAIL24_TIMESLOT(_avail24, 0)))
+
+typedef struct wl_proxd_avail {
+ wl_proxd_avail_flags_t flags; /* for query only */
+ wl_proxd_time_ref_t time_ref;
+ uint16 max_slots; /* for query only */
+ uint16 num_slots;
+ wl_proxd_intvl_t repeat;
+ wl_proxd_time_slot_t slots[1];
+} wl_proxd_avail_t;
+#define WL_PROXD_AVAIL_TIMESLOT(_avail, _i) (&(_avail)->slots[(_i)])
+#define WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) OFFSETOF(wl_proxd_avail_t, slots)
+
+#define WL_PROXD_AVAIL_TIMESLOTS(_avail) WL_PROXD_AVAIL_TIMESLOT(_avail, 0)
+#define WL_PROXD_AVAIL_SIZE(_avail, _num_slots) (\
+ WL_PROXD_AVAIL_TIMESLOT_OFFSET(_avail) + \
+ (_num_slots) * sizeof(*WL_PROXD_AVAIL_TIMESLOT(_avail, 0)))
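Both availability variants end in a variable-length slot array; the *_SIZE macros give the byte length to allocate for a given slot count. A small sketch (allocation itself is left to the caller):

static void example_avail_size(void)
{
	wl_proxd_avail_t *avail = NULL;
	uint16 num_slots = 4;
	uint buf_len = WL_PROXD_AVAIL_SIZE(avail, num_slots);	/* header + 4 slots */

	/* after allocating buf_len bytes and pointing avail at them:
	 * avail->num_slots = num_slots;
	 * WL_PROXD_AVAIL_TIMESLOT(avail, i)->chanspec = ...;  for i in [0, num_slots)
	 */
	(void)buf_len;
}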
+
+/* collect support TBD */
+
+/* debugging */
+enum {
+ WL_PROXD_DEBUG_NONE = 0x00000000,
+ WL_PROXD_DEBUG_LOG = 0x00000001,
+ WL_PROXD_DEBUG_IOV = 0x00000002,
+ WL_PROXD_DEBUG_EVENT = 0x00000004,
+ WL_PROXD_DEBUG_SESSION = 0x00000008,
+ WL_PROXD_DEBUG_PROTO = 0x00000010,
+ WL_PROXD_DEBUG_SCHED = 0x00000020,
+ WL_PROXD_DEBUG_RANGING = 0x00000040,
+ WL_PROXD_DEBUG_ALL = 0xffffffff
+};
+typedef uint32 wl_proxd_debug_mask_t;
+
+/* tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
+enum {
+ WL_PROXD_TLV_ID_NONE = 0,
+ WL_PROXD_TLV_ID_METHOD = 1,
+ WL_PROXD_TLV_ID_FLAGS = 2,
+ WL_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */
+ WL_PROXD_TLV_ID_TX_POWER = 4,
+ WL_PROXD_TLV_ID_RATESPEC = 5,
+ WL_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */
+ WL_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */
+ WL_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */
+ WL_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */
+ WL_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */
+ WL_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */
+ WL_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */
+ WL_PROXD_TLV_ID_BSSID = 13,
+ WL_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional, non-standalone only */
+ WL_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */
+ WL_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */
+ WL_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */
+ WL_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */
+ WL_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */
+ WL_PROXD_TLV_ID_LCI_REQ = 20,
+ WL_PROXD_TLV_ID_LCI = 21,
+ WL_PROXD_TLV_ID_CIVIC_REQ = 22,
+ WL_PROXD_TLV_ID_CIVIC = 23,
+ WL_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */
+ WL_PROXD_TLV_ID_SESSION_FLAGS = 25,
+ WL_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */
+ WL_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */
+ WL_PROXD_TLV_ID_RANGING_INFO = 28, /* ranging info */
+ WL_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */
+ WL_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */
+ WL_PROXD_TLV_ID_NAN_MAP_ID = 31,
+ WL_PROXD_TLV_ID_DEV_ADDR = 32,
+ WL_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */
+ WL_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */
+ WL_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */
+
+ /* output - 512 + x */
+ WL_PROXD_TLV_ID_STATUS = 512,
+ WL_PROXD_TLV_ID_COUNTERS = 513,
+ WL_PROXD_TLV_ID_INFO = 514,
+ WL_PROXD_TLV_ID_RTT_RESULT = 515,
+ WL_PROXD_TLV_ID_AOA_RESULT = 516,
+ WL_PROXD_TLV_ID_SESSION_INFO = 517,
+ WL_PROXD_TLV_ID_SESSION_STATUS = 518,
+ WL_PROXD_TLV_ID_SESSION_ID_LIST = 519,
+
+ /* debug tlvs can be added starting 1024 */
+ WL_PROXD_TLV_ID_DEBUG_MASK = 1024,
+ WL_PROXD_TLV_ID_COLLECT = 1025, /* output only */
+ WL_PROXD_TLV_ID_STRBUF = 1026,
+
+ WL_PROXD_TLV_ID_MAX
+};
+
+typedef struct wl_proxd_tlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} wl_proxd_tlv_t;
+
+/* proxd iovar - applies to proxd, method or session */
+typedef struct wl_proxd_iov {
+ uint16 version;
+ uint16 len;
+ wl_proxd_cmd_t cmd;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+ uint8 pad[2];
+ wl_proxd_tlv_t tlvs[1]; /* variable */
+} wl_proxd_iov_t;
+
+#define WL_PROXD_IOV_HDR_SIZE OFFSETOF(wl_proxd_iov_t, tlvs)
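A minimal sketch of framing the proxd iovar: fill the fixed header and append wl_proxd_tlv_t records after it. The local buffer, the htod16() byte-order helper and the iovar call itself are assumptions taken from the surrounding driver code:

static void example_fill_proxd_iov(void)
{
	uint8 buf[64];	/* large enough for a header-only request */
	wl_proxd_iov_t *iov = (wl_proxd_iov_t *)buf;
	uint16 iov_len = WL_PROXD_IOV_HDR_SIZE;	/* no TLVs in this example */

	memset(buf, 0, sizeof(buf));
	iov->version = htod16(WL_PROXD_API_VERSION);
	iov->len = htod16(iov_len);
	iov->cmd = htod16(WL_PROXD_CMD_GET_VERSION);
	iov->method = htod16(WL_PROXD_METHOD_FTM);
	iov->sid = htod16(WL_PROXD_SESSION_ID_GLOBAL);
	/* TLVs, if any, would be packed starting at iov->tlvs[] and added to iov->len */
}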
+
+/* The following event definitions may move to bcmevent.h, but sharing proxd types
+ * across needs more invasive changes unrelated to proxd
+ */
+enum {
+ WL_PROXD_EVENT_NONE = 0, /* not an event, reserved */
+ WL_PROXD_EVENT_SESSION_CREATE = 1,
+ WL_PROXD_EVENT_SESSION_START = 2,
+ WL_PROXD_EVENT_FTM_REQ = 3,
+ WL_PROXD_EVENT_BURST_START = 4,
+ WL_PROXD_EVENT_BURST_END = 5,
+ WL_PROXD_EVENT_SESSION_END = 6,
+ WL_PROXD_EVENT_SESSION_RESTART = 7,
+ WL_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled - e.g. partial TSF */
+ WL_PROXD_EVENT_SESSION_DESTROY = 9,
+ WL_PROXD_EVENT_RANGE_REQ = 10,
+ WL_PROXD_EVENT_FTM_FRAME = 11,
+ WL_PROXD_EVENT_DELAY = 12,
+ WL_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */
+ WL_PROXD_EVENT_RANGING = 14,
+ WL_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */
+ WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */
+
+ WL_PROXD_EVENT_MAX
+};
+typedef int16 wl_proxd_event_type_t;
+
+/* proxd event mask - up to 32 events for now */
+typedef uint32 wl_proxd_event_mask_t;
+
+#define WL_PROXD_EVENT_MASK_ALL 0xfffffffe
+#define WL_PROXD_EVENT_MASK_EVENT(_event_type) (1 << (_event_type))
+#define WL_PROXD_EVENT_ENABLED(_mask, _event_type) (\
+ ((_mask) & WL_PROXD_EVENT_MASK_EVENT(_event_type)) != 0)
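The helpers above map an event type to a mask bit. A short sketch of subscribing to only the burst-end and session-end events:

static void example_proxd_event_mask(void)
{
	wl_proxd_event_mask_t mask = 0;

	mask |= WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_BURST_END);
	mask |= WL_PROXD_EVENT_MASK_EVENT(WL_PROXD_EVENT_SESSION_END);

	if (WL_PROXD_EVENT_ENABLED(mask, WL_PROXD_EVENT_BURST_END)) {
		/* burst-end events are enabled in this mask */
	}
}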
+
+/* proxd event - applies to proxd, method or session */
+typedef struct wl_proxd_event {
+ uint16 version;
+ uint16 len;
+ wl_proxd_event_type_t type;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+ uint8 pad[2];
+ wl_proxd_tlv_t tlvs[1]; /* variable */
+} wl_proxd_event_t;
+
+enum {
+ WL_PROXD_RANGING_STATE_NONE = 0,
+ WL_PROXD_RANGING_STATE_NOTSTARTED = 1,
+ WL_PROXD_RANGING_STATE_INPROGRESS = 2,
+ WL_PROXD_RANGING_STATE_DONE = 3
+};
+typedef int16 wl_proxd_ranging_state_t;
+
+/* proxd ranging flags */
+enum {
+ WL_PROXD_RANGING_FLAG_NONE = 0x0000, /* no flags */
+ WL_PROXD_RANGING_FLAG_DEL_SESSIONS_ON_STOP = 0x0001,
+ WL_PROXD_RANGING_FLAG_ALL = 0xffff
+};
+typedef uint16 wl_proxd_ranging_flags_t;
+
+struct wl_proxd_ranging_info {
+ wl_proxd_status_t status;
+ wl_proxd_ranging_state_t state;
+ wl_proxd_ranging_flags_t flags;
+ uint16 num_sids;
+ uint16 num_done;
+};
+typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t;
+#include <packed_section_end.h>
+/* end proxd definitions */
/* require strict packing */
#include <packed_section_start.h>
/* Multiple roaming profile suport */
#define WL_MAX_ROAM_PROF_BRACKETS 4
-#define WL_MAX_ROAM_PROF_VER 0
+#define WL_MAX_ROAM_PROF_VER 1
#define WL_ROAM_PROF_NONE (0 << 0)
#define WL_ROAM_PROF_LAZY (1 << 0)
#define WL_ROAM_PROF_SYNC_DTIM (1 << 6)
#define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */
+#define WL_FACTOR_TABLE_MAX_LIMIT 5
+
typedef struct wl_roam_prof {
int8 roam_flags; /* bit flags */
int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */
uint16 init_scan_period;
uint16 backoff_multiplier;
uint16 max_scan_period;
+ uint8 channel_usage;
+ uint8 cu_avg_calc_dur;
} wl_roam_prof_t;
typedef struct wl_roam_prof_band {
/* no default structure packing */
#include <packed_section_end.h>
+#define TBOW_MAX_SSID_LEN 32
+#define TBOW_MAX_PASSPHRASE_LEN 63
+
+#define WL_TBOW_SETUPINFO_T_VERSION 1 /* version of tbow_setup_netinfo_t */
+typedef struct tbow_setup_netinfo {
+ uint32 version;
+ uint8 opmode;
+ uint8 pad;
+ uint8 macaddr[ETHER_ADDR_LEN];
+ uint32 ssid_len;
+ uint8 ssid[TBOW_MAX_SSID_LEN];
+ uint8 passphrase_len;
+ uint8 passphrase[TBOW_MAX_PASSPHRASE_LEN];
+ chanspec_t chanspec;
+} tbow_setup_netinfo_t;
+
+typedef enum tbow_ho_opmode {
+ TBOW_HO_MODE_START_GO = 0,
+ TBOW_HO_MODE_START_STA,
+ TBOW_HO_MODE_START_GC,
+ TBOW_HO_MODE_TEST_GO,
+ TBOW_HO_MODE_STOP_GO = 0x10,
+ TBOW_HO_MODE_STOP_STA,
+ TBOW_HO_MODE_STOP_GC,
+ TBOW_HO_MODE_TEARDOWN
+} tbow_ho_opmode_t;
+
+/* Beacon trim feature statistics */
+/* Configuration params */
+#define M_BCNTRIM_N (0) /* Enable/Disable Beacon Trim */
+#define M_BCNTRIM_TIMEND (1) /* Waiting time for TIM IE to end */
+#define M_BCNTRIM_TSFTLRN (2) /* TSF tolerance value (usecs) */
+/* PSM internal use */
+#define M_BCNTRIM_PREVBCNLEN (3) /* Beacon length excluding the TIM IE */
+#define M_BCNTRIM_N_COUNTER (4) /* PSM's local beacon trim counter */
+#define M_BCNTRIM_STATE (5) /* PSM's Beacon trim status register */
+#define M_BCNTRIM_TIMLEN (6) /* TIM IE Length */
+#define M_BCNTRIM_BMPCTL (7) /* Bitmap control word */
+#define M_BCNTRIM_TSF_L (8) /* Lower TSF word */
+#define M_BCNTRIM_TSF_ML (9) /* Lower middle TSF word */
+#define M_BCNTRIM_RSSI (10) /* Partial beacon RSSI */
+#define M_BCNTRIM_CHANNEL (11) /* Partial beacon channel */
+/* Trimming Counters */
+#define M_BCNTRIM_SBCNRXED (12) /* Self-BSSID beacon received */
+#define M_BCNTRIM_CANTRIM (13) /* Num of beacons which can be trimmed */
+#define M_BCNTRIM_TRIMMED (14) /* # beacons which were trimmed */
+#define M_BCNTRIM_BCNLENCNG (15) /* # beacons trimmed due to length change */
+#define M_BCNTRIM_TSFADJ (16) /* # beacons not trimmed due to large TSF delta */
+#define M_BCNTRIM_TIMNOTFOUND (17) /* # beacons not trimmed due to TIM missing */
+#define M_RXTSFTMRVAL_WD0 (18)
+#define M_RXTSFTMRVAL_WD1 (19)
+#define M_RXTSFTMRVAL_WD2 (20)
+#define M_RXTSFTMRVAL_WD3 (21)
+#define BCNTRIM_STATS_NUMPARAMS (22) /* 16 bit words */
+
+#define TXPWRCAP_MAX_NUM_CORES 8
+#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2)
+
+typedef struct wl_txpwrcap_tbl {
+ uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
+ /* Stores values for valid antennas */
+ int8 pwrcap_cell_on[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+ int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
+} wl_txpwrcap_tbl_t;
+
+/* -------------- dynamic BTCOEX --------------- */
+/* require strict packing */
+#include <packed_section_start.h>
+
+#define DCTL_TROWS 2 /* currently practical number of rows */
+#define DCTL_TROWS_MAX 4 /* 2 extra rows RFU */
+/* DYNCTL profile flags */
+#define DCTL_FLAGS_DYNCTL (1 << 0) /* 1 - enabled, 0 - legacy only */
+#define DCTL_FLAGS_DESENSE (1 << 1) /* auto desense is enabled */
+#define DCTL_FLAGS_MSWITCH (1 << 2) /* mode switching is enabled */
+/* for now AGG on/off is handled separately */
+#define DCTL_FLAGS_TX_AGG_OFF (1 << 3) /* TBD: allow TX agg Off */
+#define DCTL_FLAGS_RX_AGG_OFF (1 << 4) /* TBD: allow RX agg Off */
+/* used for dry run testing only */
+#define DCTL_FLAGS_DRYRUN (1 << 7) /* Enables dynctl dry run mode */
+#define IS_DYNCTL_ON(prof) ((prof->flags & DCTL_FLAGS_DYNCTL) != 0)
+#define IS_DESENSE_ON(prof) ((prof->flags & DCTL_FLAGS_DESENSE) != 0)
+#define IS_MSWITCH_ON(prof) ((prof->flags & DCTL_FLAGS_MSWITCH) != 0)
+/* desense level currently in use */
+#define DESENSE_OFF 0
+#define DFLT_DESENSE_MID 12
+#define DFLT_DESENSE_HIGH 2
+
+/*
+ * dynctl data points (a set of btpwr & wlrssi thresholds)
+ * for mode & desense switching
+ */
+typedef struct btc_thr_data {
+ int8 mode; /* used by desense sw */
+ int8 bt_pwr; /* BT tx power threshold */
+ int8 bt_rssi; /* BT rssi threshold */
+ /* wl rssi range when mode or desense change may be needed */
+ int8 wl_rssi_high;
+ int8 wl_rssi_low;
+} btc_thr_data_t;
+
+/* dynctl profile data structure */
+#define DCTL_PROFILE_VER 0x01
+typedef BWL_PRE_PACKED_STRUCT struct dctl_prof {
+ uint8 version; /* dynctl profile version */
+ /* dynctl profile flags bit:0 - dynctl On, bit:1 dsns On, bit:2 mode sw On, */
+ uint8 flags; /* bit[6:3] reserved, bit7 - Dryrun (sim) - On */
+ /* wl desense levels to apply */
+ uint8 dflt_dsns_level;
+ uint8 low_dsns_level;
+ uint8 mid_dsns_level;
+ uint8 high_dsns_level;
+ /* mode switching hysteresis in dBm */
+ int8 msw_btrssi_hyster;
+ /* default btcoex mode */
+ uint8 default_btc_mode;
+ /* num of active rows in mode switching table */
+ uint8 msw_rows;
+ /* num of rows in desense table */
+ uint8 dsns_rows;
+ /* dynctl mode switching data table */
+ btc_thr_data_t msw_data[DCTL_TROWS_MAX];
+ /* dynctl desense switching data table */
+ btc_thr_data_t dsns_data[DCTL_TROWS_MAX];
+} BWL_POST_PACKED_STRUCT dctl_prof_t;
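A sketch of a minimal dynctl profile with desense and mode switching enabled and a single mode-switch row; the threshold values are purely illustrative, not recommended settings:

static void example_fill_dynctl_profile(void)
{
	dctl_prof_t prof;

	memset(&prof, 0, sizeof(prof));
	prof.version = DCTL_PROFILE_VER;
	prof.flags = DCTL_FLAGS_DYNCTL | DCTL_FLAGS_DESENSE | DCTL_FLAGS_MSWITCH;
	prof.dflt_dsns_level = DESENSE_OFF;
	prof.mid_dsns_level = DFLT_DESENSE_MID;
	prof.high_dsns_level = DFLT_DESENSE_HIGH;
	prof.msw_rows = 1;
	prof.msw_data[0].bt_pwr = -10;		/* dBm, illustrative */
	prof.msw_data[0].wl_rssi_high = -60;	/* dBm */
	prof.msw_data[0].wl_rssi_low = -75;	/* dBm */
}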
+
+/* dynctl status info */
+typedef BWL_PRE_PACKED_STRUCT struct dynctl_status {
+ bool sim_on; /* true if simulation is On */
+ uint16 bt_pwr_shm; /* BT per/task power as read from ucode */
+ int8 bt_pwr; /* BT pwr extracted & converted to dBm */
+ int8 bt_rssi; /* BT rssi in dBm */
+ int8 wl_rssi; /* last wl rssi reading used by btcoex */
+ uint8 dsns_level; /* current desense level */
+ uint8 btc_mode; /* current btcoex mode */
+ /* add more status items if needed, pad to a 4-byte boundary if needed */
+} BWL_POST_PACKED_STRUCT dynctl_status_t;
+
+/* dynctl simulation (dryrun data) */
+typedef BWL_PRE_PACKED_STRUCT struct dynctl_sim {
+ bool sim_on; /* simulation mode on/off */
+ int8 btpwr; /* simulated BT power in dBm */
+ int8 btrssi; /* simulated BT rssi in dBm */
+ int8 wlrssi; /* simulated WL rssi in dBm */
+} BWL_POST_PACKED_STRUCT dynctl_sim_t;
+/* no default structure packing */
+#include <packed_section_end.h>
+
+/* PTK key maintained per SCB */
+#define RSN_TEMP_ENCR_KEY_LEN 16
+typedef struct wpa_ptk {
+ uint8 kck[RSN_KCK_LENGTH]; /* EAPOL-Key Key Confirmation Key (KCK) */
+ uint8 kek[RSN_KEK_LENGTH]; /* EAPOL-Key Key Encryption Key (KEK) */
+ uint8 tk1[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 1 (TK1) */
+ uint8 tk2[RSN_TEMP_ENCR_KEY_LEN]; /* Temporal Key 2 (TK2) */
+} wpa_ptk_t;
+
+/* GTK key maintained per SCB */
+typedef struct wpa_gtk {
+ uint32 idx;
+ uint32 key_len;
+ uint8 key[DOT11_MAX_KEY_SIZE];
+} wpa_gtk_t;
+
+/* FBT Auth Response Data structure */
+typedef struct wlc_fbt_auth_resp {
+ uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */
+ uint8 pad[2];
+ uint8 pmk_r1_name[WPA2_PMKID_LEN];
+ wpa_ptk_t ptk; /* pairwise key */
+ wpa_gtk_t gtk; /* group key */
+ uint32 ie_len;
+ uint8 status; /* Status of parsing FBT authentication
+ Request in application
+ */
+ uint8 ies[1]; /* IEs contains MDIE, RSNIE,
+ FBTIE (ANonce, SNonce,R0KH-ID, R1KH-ID)
+ */
+} wlc_fbt_auth_resp_t;
+
+/* FBT Action Response frame */
+typedef struct wlc_fbt_action_resp {
+ uint16 version; /* structure version */
+ uint16 length; /* length of structure */
+ uint8 macaddr[ETHER_ADDR_LEN]; /* station mac address */
+ uint8 data_len; /* len of ie from Category */
+ uint8 data[1]; /* data contains category, action, sta address, target ap,
+ status code,fbt response frame body
+ */
+} wlc_fbt_action_resp_t;
+
+#define MACDBG_PMAC_ADDR_INPUT_MAXNUM 16
+#define MACDBG_PMAC_OBJ_TYPE_LEN 8
+
+typedef struct _wl_macdbg_pmac_param_t {
+ char type[MACDBG_PMAC_OBJ_TYPE_LEN];
+ uint8 step;
+ uint8 num;
+ uint32 bitmap;
+ bool addr_raw;
+ uint8 addr_num;
+ uint16 addr[MACDBG_PMAC_ADDR_INPUT_MAXNUM];
+} wl_macdbg_pmac_param_t;
+
+/* IOVAR 'svmp_mem' parameter. Used to read/clear svmp memory */
+typedef struct svmp_mem {
+ uint32 addr; /* offset to read svmp memory from vasip base address */
+ uint16 len; /* length in count of uint16's */
+ uint16 val; /* set the range of addr/len with a value */
+} svmp_mem_t;
+
+#define WL_NAN_BAND_STR_SIZE 5 /* sizeof ("auto") */
+
+/* Definitions of different NAN Bands */
+enum { /* NAN band selection */
+ NAN_BAND_AUTO,
+ NAN_BAND_B,
+ NAN_BAND_A,
+ NAN_BAND_INVALID = 0xFF
+};
+
+#if defined(WL_LINKSTAT)
+typedef struct {
+ uint32 preamble;
+ uint32 nss;
+ uint32 bw;
+ uint32 rateMcsIdx;
+ uint32 reserved;
+ uint32 bitrate;
+} wifi_rate;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+ uint32 tx_mpdu;
+ uint32 rx_mpdu;
+ uint32 mpdu_lost;
+ uint32 retries;
+ uint32 retries_short;
+ uint32 retries_long;
+ wifi_rate rate;
+} wifi_rate_stat_t;
+
+typedef int32 wifi_radio;
+
+typedef struct {
+ uint16 version;
+ uint16 length;
+ wifi_radio radio;
+ uint32 on_time;
+ uint32 tx_time;
+ uint32 rx_time;
+ uint32 on_time_scan;
+ uint32 on_time_nbd;
+ uint32 on_time_gscan;
+ uint32 on_time_roam_scan;
+ uint32 on_time_pno_scan;
+ uint32 on_time_hs20;
+ uint32 num_channels;
+ uint8 channels[1];
+} wifi_radio_stat;
+#endif /* WL_LINKSTAT */
+
+#ifdef WL11ULB
+/* ULB Mode configured via "ulb_mode" IOVAR */
+enum {
+ ULB_MODE_DISABLED = 0,
+ ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
+ ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
+ /* Add all other enums before this */
+ MAX_SUPP_ULB_MODES
+};
+
+/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only.
+ * Values of this enumeration are also used to specify 'Current Operational Bandwidth'
+ * and 'Primary Operational Bandwidth' sub-fields in 'ULB Operations' field (used in
+ * 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute)
+ */
+typedef enum {
+ ULB_BW_DISABLED = 0,
+ ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
+ ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
+ ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
+ /* Add all other enums before this */
+ MAX_SUPP_ULB_BW
+} ulb_bw_type_t;
+#endif /* WL11ULB */
+
+#if defined(WLRCC)
+#define MAX_ROAM_CHANNEL 20
+
+typedef struct {
+ int n;
+ chanspec_t channels[MAX_ROAM_CHANNEL];
+} wl_roam_channel_list_t;
+#endif
+
+
+/*
+ * Neighbor Discovery Offload: enable NDO feature
+ * Called by ipv6 event handler when interface comes up
+ * Set RA rate limit interval value(%)
+ */
+typedef struct nd_ra_ol_limits {
+ uint16 version; /* version of the iovar buffer */
+ uint16 type; /* type of data provided */
+ uint16 length; /* length of the entire structure */
+ uint16 pad1; /* pad union to 4 byte boundary */
+ union {
+ struct {
+ uint16 min_time; /* seconds, min time for RA offload hold */
+ uint16 lifetime_percent;
+ /* percent, lifetime percentage for offload hold time */
+ } lifetime_relative;
+ struct {
+ uint16 hold_time; /* seconds, RA offload hold time */
+ uint16 pad2; /* unused */
+ } fixed;
+ } limits;
+} nd_ra_ol_limits_t;
+
+#define ND_RA_OL_LIMITS_VER 1
+
+/* nd_ra_ol_limits sub-types */
+#define ND_RA_OL_LIMITS_REL_TYPE 0 /* relative, percent of RA lifetime */
+#define ND_RA_OL_LIMITS_FIXED_TYPE 1 /* fixed time */
+
+/* buffer lengths for the different nd_ra_ol_limits types */
+#define ND_RA_OL_LIMITS_REL_TYPE_LEN 12
+#define ND_RA_OL_LIMITS_FIXED_TYPE_LEN 10
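A sketch of filling the relative (percentage-of-lifetime) variant of the RA offload limits; the numbers are illustrative only:

static void example_fill_ra_limits(void)
{
	nd_ra_ol_limits_t limits;

	memset(&limits, 0, sizeof(limits));
	limits.version = ND_RA_OL_LIMITS_VER;
	limits.type = ND_RA_OL_LIMITS_REL_TYPE;
	limits.length = ND_RA_OL_LIMITS_REL_TYPE_LEN;
	limits.limits.lifetime_relative.min_time = 30;		/* seconds */
	limits.limits.lifetime_relative.lifetime_percent = 50;	/* % of the RA lifetime */
}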
+
+#define ND_RA_OL_SET "SET"
+#define ND_RA_OL_GET "GET"
+#define ND_PARAM_SIZE 50
+#define ND_VALUE_SIZE 5
+#define ND_PARAMS_DELIMETER " "
+#define ND_PARAM_VALUE_DELLIMETER '='
+#define ND_LIMIT_STR_FMT ("%50s %50s")
+
+#define ND_RA_TYPE "TYPE"
+#define ND_RA_MIN_TIME "MIN"
+#define ND_RA_PER "PER"
+#define ND_RA_HOLD "HOLD"
+
+/*
+ * Temperature Throttling control mode
+ */
+typedef struct wl_temp_control {
+ bool enable;
+ uint16 control_bit;
+} wl_temp_control_t;
+
#endif /* _wlioctl_h_ */
--- /dev/null
+/*
+ * Custom OID/ioctl related helper functions.
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlioctl_utils.h 555740 2015-05-11 10:16:23Z $
+ */
+
+#ifndef _wlioctl_utils_h_
+#define _wlioctl_utils_h_
+
+#include <wlioctl.h>
+
+#ifndef BCMDRIVER
+#define CCA_THRESH_MILLI 14
+#define CCA_THRESH_INTERFERE 6
+
+extern cca_congest_channel_req_t * cca_per_chan_summary(cca_congest_channel_req_t *input,
+ cca_congest_channel_req_t *avg, bool percent);
+
+extern int cca_analyze(cca_congest_channel_req_t *input[], int num_chans,
+ uint flags, chanspec_t *answer);
+#endif /* BCMDRIVER */
+
+extern int wl_cntbuf_to_xtlv_format(void *ctx, void *cntbuf,
+ int buflen, uint32 corerev);
+
+/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */
+#define GET_WLCCNT_FROM_CNTBUF(cntbuf) \
+ bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \
+ ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \
+ NULL, BCM_XTLV_OPTION_ALIGN32)
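A sketch of the intended flow: convert a counters iovar buffer to the XTLV layout, then locate the wlc-layer counters tuple with the macro above. cntbuf, buflen and corerev come from the caller; passing NULL as the ctx argument and using wl_cnt_wlc_t (the wlc counters struct from wlioctl.h) are assumptions here:

static void example_read_wlc_counters(void *cntbuf, int buflen, uint32 corerev)
{
	if (wl_cntbuf_to_xtlv_format(NULL, cntbuf, buflen, corerev) == BCME_OK) {
		const wl_cnt_wlc_t *wlc_cnt =
			(const wl_cnt_wlc_t *)GET_WLCCNT_FROM_CNTBUF(cntbuf);
		if (wlc_cnt != NULL) {
			/* e.g. wlc_cnt->txframe, wlc_cnt->rxframe, ... */
		}
	}
}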
+
+#endif /* _wlioctl_utils_h_ */
/*
* Linux OS Independent Layer
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: linux_osl.c 503131 2014-09-17 12:16:08Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: linux_osl.c 602478 2015-11-26 04:46:12Z $
*/
#define LINUX_PORT
#include <linuxver.h>
#include <bcmdefs.h>
-#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+
+#if !defined(STBLINUX)
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
-#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+#endif /* STBLINUX */
#include <linux/random.h>
#include <pcicfg.h>
-
#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
-#ifdef BCM47XX_ACP_WAR
-#include <linux/spinlock.h>
-extern spinlock_t l2x0_reg_lock;
-#endif
-#if defined(BCMPCIE)
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_FLOWRING)
-#include <bcmpcie.h>
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_FLOWRING */
-#endif /* BCMPCIE */
+#ifdef BCM_OBJECT_TRACE
+#include <bcmutils.h>
+#endif /* BCM_OBJECT_TRACE */
#define PCI_CFG_RETRY 10
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
#define DUMPBUFSZ 1024
+/* dependency check */
+#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only"
+#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
+
#ifdef CONFIG_DHD_USE_STATIC_BUF
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define PREALLOC_FREE_MAGIC 0xFEDC
+#define PREALLOC_USED_MAGIC 0xFCDE
+#else
#define DHD_SKB_HDRSIZE 336
#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+#endif /* DHD_USE_STATIC_CTRLBUF */
#define STATIC_BUF_MAX_NUM 16
#define STATIC_BUF_SIZE (PAGE_SIZE*2)
static bcm_static_buf_t *bcm_static_buf = 0;
-#define STATIC_PKT_MAX_NUM 8
-#if defined(ENHANCED_STATIC_BUF)
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_4PAGE_NUM 0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#elif defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM 1
#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM 0
-#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
-#endif /* ENHANCED_STATIC_BUF */
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_1PAGE_NUM 0
+#define STATIC_PKT_2PAGE_NUM 64
+#else
+#define STATIC_PKT_1PAGE_NUM 8
+#define STATIC_PKT_2PAGE_NUM 8
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_PKT_1_2PAGE_NUM \
+ ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
+#define STATIC_PKT_MAX_NUM \
+ ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
typedef struct bcm_static_pkt {
- struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
- struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+ unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
+ spinlock_t osl_pkt_lock;
+ uint32 last_allocated_index;
+#else
+ struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
#ifdef ENHANCED_STATIC_BUF
struct sk_buff *skb_16k;
-#endif
+#endif /* ENHANCED_STATIC_BUF */
struct semaphore osl_pkt_sem;
- unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM];
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ unsigned char pkt_use[STATIC_PKT_MAX_NUM];
} bcm_static_pkt_t;
static bcm_static_pkt_t *bcm_static_skb = 0;
-#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
-#define STATIC_BUF_FLOWRING_SIZE ((PAGE_SIZE)*(7))
-#define STATIC_BUF_FLOWRING_NUM 42
-#define RINGID_TO_FLOWID(idx) ((idx) + (BCMPCIE_H2D_COMMON_MSGRINGS) \
- - (BCMPCIE_H2D_TXFLOWRINGID))
-typedef struct bcm_static_flowring_buf {
- spinlock_t flowring_lock;
- void *buf_ptr[STATIC_BUF_FLOWRING_NUM];
- unsigned char buf_use[STATIC_BUF_FLOWRING_NUM];
-} bcm_static_flowring_buf_t;
-
-bcm_static_flowring_buf_t *bcm_static_flowring = 0;
-#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
-
void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
struct osl_info {
osl_pubinfo_t pub;
+ uint32 flags; /* If specific cases to be handled in the OSL */
#ifdef CTFPOOL
ctfpool_t *ctfpool;
#endif /* CTFPOOL */
struct list_head ctrace_list;
int ctrace_num;
#endif /* BCMDBG_CTRACE */
- uint32 flags; /* If specific cases to be handled in the OSL */
#ifdef BCM_SECURE_DMA
struct cma_dev *cma;
struct sec_mem_elem *sec_list_512;
} sec_cma_coherent[SEC_CMA_COHERENT_MAX];
#endif /* BCM_SECURE_DMA */
-
};
#ifdef BCM_SECURE_DMA
phys_addr_t g_contig_delta_va_pa;
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */
+#ifdef BCM_OBJECT_TRACE
+/* don't clear the first 4 byte that is the pkt sn */
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ ASSERT(OSL_PKTTAG_SZ == 32); \
+ *(uint32 *)(&s->cb[4]) = 0; \
+ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+#else
#define OSL_PKTTAG_CLEAR(p) \
do { \
struct sk_buff *s = (struct sk_buff *)(p); \
*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
+#endif /* BCM_OBJECT_TRACE */
/* PCMCIA attribute space access macros */
/* Global ASSERT type flag */
-uint32 g_assert_type = 0;
+uint32 g_assert_type = 1;
module_param(g_assert_type, int, 0);
static int16 linuxbcmerrormap[] =
-EIO, /* BCME_MICERR */
-ERANGE, /* BCME_REPLAY */
-EINVAL, /* BCME_IE_NOTFOUND */
+ -EINVAL, /* BCME_DATA_NOTFOUND */
 /* When a new error code is added to bcmutils.h, add os
* specific error translation here as well
*/
/* check if BCME_LAST changed since the last time this function was updated */
-#if BCME_LAST != -52
+#if BCME_LAST != -53
#error "You need to add a OS error translation in the linuxbcmerrormap \
for new error code defined in bcmutils.h"
#endif
};
-
-#if 1
-static inline void *
-pci_alloc_consistent1(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline dma_addr_t
-pci_map_single1(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_free_consistent1(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline void
-pci_unmap_single1(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-#endif
+uint lmtest = FALSE;
/* translate bcmerrors into linux errors */
int
}
atomic_add(1, &osh->cmn->refcount);
+ bcm_object_trace_init();
+
/* Check that error map has the right number of entries in it */
ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
osl_sec_dma_setup_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);
-#ifdef BCM47XX_CA9
- osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
- phys_to_page((u32)osh->contig_base_alloc),
- CMA_DMA_DESC_MEMBLOCK, TRUE, TRUE);
-#else
osh->contig_base_alloc_coherent_va = osl_sec_dma_ioremap(osh,
phys_to_page((u32)osh->contig_base_alloc),
CMA_DMA_DESC_MEMBLOCK, FALSE, TRUE);
-#endif /* BCM47XX_CA9 */
osh->contig_base_alloc_coherent = osh->contig_base_alloc;
osl_sec_dma_init_consistent(osh);
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
- if (!bcm_static_buf && adapter) {
- if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
- 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
- printk("can not alloc static buf!\n");
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- return -ENOMEM;
- }
- else
- printk("alloc static buf at %p!\n", bcm_static_buf);
-
-
- sema_init(&bcm_static_buf->static_sem, 1);
+ if (!bcm_static_buf && adapter) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+ 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ } else {
+ printk("alloc static buf at %p!\n", bcm_static_buf);
+ }
- bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
- }
+ sema_init(&bcm_static_buf->static_sem, 1);
-#ifdef BCMSDIO
- if (!bcm_static_skb && adapter) {
- int i;
- void *skb_buff_ptr = 0;
- bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
- skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
- if (!skb_buff_ptr) {
- printk("cannot alloc static buf!\n");
- bcm_static_buf = NULL;
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- return -ENOMEM;
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
}
- bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
- (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
- for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
- bcm_static_skb->pkt_use[i] = 0;
+#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
+ if (!bcm_static_skb && adapter) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+ if (!skb_buff_ptr) {
+ printk("cannot alloc static buf!\n");
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
- sema_init(&bcm_static_skb->osl_pkt_sem, 1);
- }
-#endif /* BCMSDIO */
-#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
- if (!bcm_static_flowring && adapter) {
- int i;
- void *flowring_ptr = 0;
- bcm_static_flowring =
- (bcm_static_flowring_buf_t *)((char *)bcm_static_buf + 4096);
- flowring_ptr = wifi_platform_prealloc(adapter, 10, 0);
- if (!flowring_ptr) {
- printk("%s: flowring_ptr is NULL\n", __FUNCTION__);
- bcm_static_buf = NULL;
- bcm_static_skb = NULL;
- bcm_static_flowring = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- return -ENOMEM;
- }
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+ (STATIC_PKT_MAX_NUM));
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
- bcopy(flowring_ptr, bcm_static_flowring->buf_ptr,
- sizeof(void *) * STATIC_BUF_FLOWRING_NUM);
- for (i = 0; i < STATIC_BUF_FLOWRING_NUM; i++) {
- bcm_static_flowring->buf_use[i] = 0;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_init(&bcm_static_skb->osl_pkt_lock);
+ bcm_static_skb->last_allocated_index = 0;
+#else
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+#endif /* DHD_USE_STATIC_CTRLBUF */
}
-
- spin_lock_init(&bcm_static_flowring->flowring_lock);
- }
-#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
+#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
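/*
 * osl_static_mem_init() above carves one region returned by wifi_platform_prealloc()
 * into a bookkeeping header (holding the semaphore) and a payload area that buf_ptr
 * points at, a fixed STATIC_BUF_SIZE bytes in. A minimal userspace sketch of that
 * carving follows; the ex_* names and sizes are assumptions for illustration only
 * and are not the driver's actual values.
 */
#include <stdio.h>
#include <stdlib.h>

#define EX_STATIC_BUF_SIZE   2048		/* assumed header size, stands in for STATIC_BUF_SIZE */
#define EX_STATIC_BUF_TOTAL  (16 * 1024)	/* assumed payload size */

struct ex_static_buf {
	/* bookkeeping lives in the first EX_STATIC_BUF_SIZE bytes of the region */
	unsigned char *buf_ptr;			/* payload area behind the header */
};

int main(void)
{
	/* stand-in for wifi_platform_prealloc(): one contiguous region */
	void *region = malloc(EX_STATIC_BUF_SIZE + EX_STATIC_BUF_TOTAL);
	struct ex_static_buf *ctrl;

	if (!region)
		return 1;
	ctrl = (struct ex_static_buf *)region;
	/* payload starts right after the fixed-size header, as buf_ptr does above */
	ctrl->buf_ptr = (unsigned char *)ctrl + EX_STATIC_BUF_SIZE;
	printf("header %p, payload %p\n", (void *)ctrl, (void *)ctrl->buf_ptr);
	free(region);
	return 0;
}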
{
if (osh == NULL)
return;
+
#ifdef BCM_SECURE_DMA
osl_sec_dma_free_contig_mem(osh, CMA_MEMBLOCK, CONT_ARMREGION);
osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_MEMBLOCK);
#endif /* BCM_SECURE_DMA */
+
+ bcm_object_trace_deinit();
+
ASSERT(osh->magic == OS_HANDLE_MAGIC);
atomic_sub(1, &osh->cmn->refcount);
if (atomic_read(&osh->cmn->refcount) == 0) {
bcm_static_skb = 0;
}
#endif /* BCMSDIO */
-#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
- if (bcm_static_flowring) {
- bcm_static_flowring = 0;
- }
-#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
return 0;
}
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
flags |= GFP_ATOMIC;
#endif
+#ifdef DHD_USE_ATOMIC_PKTGET
+ flags = GFP_ATOMIC;
+#endif /* DHD_USE_ATOMIC_PKTGET */
skb = __dev_alloc_skb(len, flags);
#else
skb = dev_alloc_skb(len);
bcm_static_skb = 0;
}
#endif /* BCMSDIO */
-#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
- if (bcm_static_flowring) {
- bcm_static_flowring = 0;
- }
-#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
bb = b;
#ifdef BCMDBG_CTRACE
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktget(osl_t *osh, uint len, int line, const char *caller)
+#else
osl_pktget(osl_t *osh, uint len)
+#endif /* BCM_OBJECT_TRACE */
#endif /* BCMDBG_CTRACE */
{
struct sk_buff *skb;
+ uchar num = 0;
+ if (lmtest != FALSE) {
+ get_random_bytes(&num, sizeof(uchar));
+ if ((num + 1) <= (256 * lmtest / 100))
+ return NULL;
+ }
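/*
 * The lmtest check above drops allocations with a probability of roughly lmtest
 * percent: one random byte in [0, 255] is compared against 256 * lmtest / 100.
 * A standalone sketch of the same check; rand() stands in for get_random_bytes()
 * and the lmtest value of 10 is only an assumed example.
 */
#include <stdio.h>
#include <stdlib.h>

/* Return nonzero when the allocation should be failed, with ~lmtest percent probability. */
static int ex_lmtest_should_fail(unsigned int lmtest)
{
	unsigned char num = (unsigned char)(rand() & 0xff);	/* stand-in for get_random_bytes() */
	return (num + 1) <= (int)(256 * lmtest / 100);
}

int main(void)
{
	unsigned int lmtest = 10;	/* assumed: fail roughly 10% of allocations */
	int fails = 0, i;

	for (i = 0; i < 100000; i++)
		fails += ex_lmtest_should_fail(lmtest);
	printf("simulated failures: %d / 100000\n", fails);
	return 0;
}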
#ifdef CTFPOOL
/* Allocate from local pool */
ADD_CTRACE(osh, skb, file, line);
#endif
atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
}
return ((void*) skb);
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
+#ifdef BCM_OBJECT_TRACE
+osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
+#else
osl_pktfree(osl_t *osh, void *p, bool send)
+#endif /* BCM_OBJECT_TRACE */
{
struct sk_buff *skb, *nskb;
if (osh == NULL)
PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
+ if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
+ printk("%s: pkt %p is from static pool\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+
+ if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
+ printk("%s: pkt %p is from static pool and not in used\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
+
/* perversion: we use skb->next to chain multi-skb packets */
while (skb) {
nskb = skb->next;
#endif
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
#ifdef CTFPOOL
if (PKTISFAST(osh, skb)) {
if (atomic_read(&skb->users) == 1)
{
int i = 0;
struct sk_buff *skb;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
if (!bcm_static_skb)
return osl_pktget(osh, len);
return osl_pktget(osh, len);
}
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+
+ if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+ uint32 index;
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
+ bcm_static_skb->last_allocated_index++;
+ if (bcm_static_skb->skb_8k[index] &&
+ bcm_static_skb->pkt_use[index] == 0) {
+ break;
+ }
+ }
+
+ if ((i != STATIC_PKT_2PAGE_NUM) &&
+ (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
+ bcm_static_skb->pkt_use[index] = 1;
+ skb = bcm_static_skb->skb_8k[index];
+ skb->data = skb->head;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, NET_SKB_PAD);
+#else
+ skb->tail = skb->data + NET_SKB_PAD;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->data += NET_SKB_PAD;
+ skb->cloned = 0;
+ skb->priority = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+ skb->mac_len = PREALLOC_USED_MAGIC;
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ return skb;
+ }
+ }
+
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ printk("%s: all static pkt in use!\n", __FUNCTION__);
+ return NULL;
+#else
down(&bcm_static_skb->osl_pkt_sem);
if (len <= DHD_SKB_1PAGE_BUFSIZE) {
for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
- if (bcm_static_skb->pkt_use[i] == 0)
+ if (bcm_static_skb->skb_4k[i] &&
+ bcm_static_skb->pkt_use[i] == 0) {
break;
+ }
}
if (i != STATIC_PKT_MAX_NUM) {
}
if (len <= DHD_SKB_2PAGE_BUFSIZE) {
- for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
- if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM]
- == 0)
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
+ bcm_static_skb->pkt_use[i] == 0) {
break;
+ }
}
- if (i != STATIC_PKT_MAX_NUM) {
- bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
- skb = bcm_static_skb->skb_8k[i];
+ if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
+ bcm_static_skb->pkt_use[i] = 1;
+ skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb_set_tail_pointer(skb, len);
#else
}
#if defined(ENHANCED_STATIC_BUF)
- if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
- bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
+ if (bcm_static_skb->skb_16k &&
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
up(&bcm_static_skb->osl_pkt_sem);
printk("%s: all static pkt in use!\n", __FUNCTION__);
return osl_pktget(osh, len);
+#endif /* DHD_USE_STATIC_CTRLBUF */
}
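/*
 * The DHD_USE_STATIC_CTRLBUF branch above scans the 8 KB pool round-robin from
 * last_allocated_index, so allocations rotate through the slots instead of always
 * reusing slot 0. A standalone sketch of that index scan; the pool size is assumed
 * (STATIC_PKT_2PAGE_NUM in the driver) and the skb NULL check is omitted here.
 */
#include <stdio.h>

#define EX_POOL_NUM 8			/* assumed pool size */

static int ex_pkt_use[EX_POOL_NUM];
static unsigned int ex_last_allocated_index;

/* Return a free slot, scanning round-robin from the last starting point, or -1. */
static int ex_pool_get(void)
{
	unsigned int index;
	int i;

	for (i = 0; i < EX_POOL_NUM; i++) {
		index = ex_last_allocated_index % EX_POOL_NUM;
		ex_last_allocated_index++;	/* advance even on success, as the driver does */
		if (ex_pkt_use[index] == 0) {
			ex_pkt_use[index] = 1;
			return (int)index;
		}
	}
	return -1;	/* all slots in use */
}

int main(void)
{
	printf("first slot: %d\n", ex_pool_get());
	printf("second slot: %d\n", ex_pool_get());
	return 0;
}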
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
int i;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb = (struct sk_buff *)p;
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+ if (!p) {
+ return;
+ }
+
if (!bcm_static_skb) {
osl_pktfree(osh, p, send);
return;
}
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i]) {
+ if (bcm_static_skb->pkt_use[i] == 0) {
+ printk("%s: static pkt idx %d(%p) is double free\n",
+ __FUNCTION__, i, p);
+ } else {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
+
+ if (skb->mac_len != PREALLOC_USED_MAGIC) {
+ printk("%s: static pkt idx %d(%p) is not in used\n",
+ __FUNCTION__, i, p);
+ }
+
+ skb->mac_len = PREALLOC_FREE_MAGIC;
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
+#else
down(&bcm_static_skb->osl_pkt_sem);
- for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
if (p == bcm_static_skb->skb_4k[i]) {
bcm_static_skb->pkt_use[i] = 0;
up(&bcm_static_skb->osl_pkt_sem);
}
}
- for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
- if (p == bcm_static_skb->skb_8k[i]) {
- bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
+ bcm_static_skb->pkt_use[i] = 0;
up(&bcm_static_skb->osl_pkt_sem);
return;
}
}
#ifdef ENHANCED_STATIC_BUF
if (p == bcm_static_skb->skb_16k) {
- bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0;
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
up(&bcm_static_skb->osl_pkt_sem);
return;
}
#endif
up(&bcm_static_skb->osl_pkt_sem);
osl_pktfree(osh, p, send);
+#endif /* DHD_USE_STATIC_CTRLBUF */
}
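/*
 * osl_pktfree() and osl_pktfree_static() above reuse the otherwise idle skb->mac_len
 * field as a state tag: PREALLOC_USED_MAGIC while a static packet is handed out and
 * PREALLOC_FREE_MAGIC once it has been returned, so stray frees and double frees can
 * be reported. A minimal standalone sketch of that pattern; the magic values and
 * struct here are assumptions, not the driver's definitions.
 */
#include <stdio.h>

#define EX_USED_MAGIC 0x3fe0	/* assumed stand-in for PREALLOC_USED_MAGIC */
#define EX_FREE_MAGIC 0x3fe1	/* assumed stand-in for PREALLOC_FREE_MAGIC */

struct ex_pkt {
	unsigned short state;	/* plays the role of skb->mac_len in the driver */
};

static void ex_pkt_get(struct ex_pkt *p)
{
	p->state = EX_USED_MAGIC;	/* mark as handed out */
}

static int ex_pkt_put(struct ex_pkt *p)
{
	if (p->state == EX_FREE_MAGIC) {
		printf("double free detected\n");
		return -1;
	}
	if (p->state != EX_USED_MAGIC)
		printf("packet was never marked as in use\n");
	p->state = EX_FREE_MAGIC;	/* mark as back in the pool */
	return 0;
}

int main(void)
{
	struct ex_pkt pkt = { 0 };

	ex_pkt_get(&pkt);
	ex_pkt_put(&pkt);
	ex_pkt_put(&pkt);	/* reports the double free */
	return 0;
}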
-
-#if defined(BCMPCIE) && defined(DHD_USE_STATIC_FLOWRING)
-void*
-osl_dma_alloc_consistent_static(osl_t *osh, uint size, uint16 align_bits,
- uint *alloced, dmaaddr_t *pap, uint16 idx)
-{
- void *va = NULL;
- uint16 align = (1 << align_bits);
- uint16 flow_id = RINGID_TO_FLOWID(idx);
- unsigned long flags;
-
- ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
- if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
- size += align;
-
- if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
- printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
- return osl_dma_alloc_consistent(osh, size, align_bits,
- alloced, pap);
- }
-
- if (!bcm_static_flowring) {
- printk("%s: bcm_static_flowring is not initialized\n",
- __FUNCTION__);
- return osl_dma_alloc_consistent(osh, size, align_bits,
- alloced, pap);
- }
-
- if (size > STATIC_BUF_FLOWRING_SIZE) {
- printk("%s: attempt to allocate huge packet, size=%d\n",
- __FUNCTION__, size);
- return osl_dma_alloc_consistent(osh, size, align_bits,
- alloced, pap);
- }
-
- *alloced = size;
-
- spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
- if (bcm_static_flowring->buf_use[flow_id]) {
- printk("%s: flowring %d is already alloced\n",
- __FUNCTION__, flow_id);
- spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
- return NULL;
- }
-
- va = bcm_static_flowring->buf_ptr[flow_id];
- if (va) {
- *pap = (ulong)__virt_to_phys((ulong)va);
- bcm_static_flowring->buf_use[flow_id] = 1;
- }
- spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
-
- return va;
-}
-
-void
-osl_dma_free_consistent_static(osl_t *osh, void *va, uint size,
- dmaaddr_t pa, uint16 idx)
-{
- uint16 flow_id = RINGID_TO_FLOWID(idx);
- unsigned long flags;
-
- ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
- if ((flow_id < 0) || (flow_id >= STATIC_BUF_FLOWRING_NUM)) {
- printk("%s: flow_id %d is wrong\n", __FUNCTION__, flow_id);
- return osl_dma_free_consistent(osh, va, size, pa);
- }
-
- if (!bcm_static_flowring) {
- printk("%s: bcm_static_flowring is not initialized\n",
- __FUNCTION__);
- return osl_dma_free_consistent(osh, va, size, pa);
- }
-
- spin_lock_irqsave(&bcm_static_flowring->flowring_lock, flags);
- if (bcm_static_flowring->buf_use[flow_id]) {
- bcm_static_flowring->buf_use[flow_id] = 0;
- } else {
- printk("%s: flowring %d is already freed\n",
- __FUNCTION__, flow_id);
- }
- spin_unlock_irqrestore(&bcm_static_flowring->flowring_lock, flags);
-}
-#endif /* BCMPCIE && DHD_USE_STATIC_FLOWRING */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
uint32
*alloced = size;
#ifndef BCM_SECURE_DMA
-#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
if (va)
*pap = (ulong)__virt_to_phys((ulong)va);
{
dma_addr_t pap_lin;
struct pci_dev *hwdev = osh->pdev;
-#ifdef PCIE_TX_DEFERRAL
- va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_KERNEL);
+ gfp_t flags;
+#ifdef DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL
+ flags = GFP_ATOMIC;
+#else
+ flags = GFP_KERNEL;
+#endif /* DHD_ALLOC_COHERENT_MEM_FROM_ATOMIC_POOL */
+ va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, flags);
+#ifdef BCMDMA64OSL
+ PHYSADDRLOSET(*pap, pap_lin & 0xffffffff);
+ PHYSADDRHISET(*pap, (pap_lin >> 32) & 0xffffffff);
#else
- va = dma_alloc_coherent(&hwdev->dev, size, &pap_lin, GFP_ATOMIC);
-#endif
*pap = (dmaaddr_t)pap_lin;
+#endif /* BCMDMA64OSL */
}
-#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
va = osl_sec_dma_alloc_consistent(osh, size, align_bits, pap);
#endif /* BCM_SECURE_DMA */
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
-#ifndef BCM_SECURE_DMA
-#if !defined(BCM47XX_CA9) || !defined(__ARM_ARCH_7A__)
- struct pci_dev *hwdev = osh->pdev;
-#endif
+#ifdef BCMDMA64OSL
+ dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+#ifndef BCM_SECURE_DMA
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
kfree(va);
#else
- dma_free_coherent(&hwdev->dev, size, va, (dma_addr_t)pa);
-#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
+ pci_free_consistent(osh->pdev, size, va, paddr);
+#else
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+#endif /* BCMDMA64OSL */
+#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
#else
osl_sec_dma_free_consistent(osh, va, size, pa);
#endif /* BCM_SECURE_DMA */
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
int dir;
-#ifdef BCM47XX_ACP_WAR
- uint pa;
-#endif
+#ifdef BCMDMA64OSL
+ dmaaddr_t ret;
+ dma_addr_t map_addr;
+#endif /* BCMDMA64OSL */
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
- if (dmah != NULL) {
- int32 nsegs, i, totsegs = 0, totlen = 0;
- struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
-#ifdef BCM47XX_ACP_WAR
- struct scatterlist *s;
-#endif
- struct sk_buff *skb;
- for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
- sg = &_sg[totsegs];
- if (skb_is_nonlinear(skb)) {
- nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
- ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
-#ifdef BCM47XX_ACP_WAR
- for_each_sg(sg, s, nsegs, i) {
- if (sg_phys(s) >= ACP_WIN_LIMIT) {
- dma_map_page(&((struct pci_dev *)osh->pdev)->dev,
- sg_page(s), s->offset, s->length, dir);
- }
- }
-#else
- pci_map_sg(osh->pdev, sg, nsegs, dir);
-#endif
- } else {
- nsegs = 1;
- ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
- sg->page_link = 0;
- sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
-#ifdef BCM47XX_ACP_WAR
- if (virt_to_phys(PKTDATA(osh, skb)) >= ACP_WIN_LIMIT)
-#endif
- pci_map_single1(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
- }
- totsegs += nsegs;
- totlen += PKTLEN(osh, skb);
- }
- dmah->nsegs = totsegs;
- dmah->origsize = totlen;
- for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
- dmah->segs[i].addr = sg_phys(sg);
- dmah->segs[i].length = sg->length;
- }
- return dmah->segs[0].addr;
- }
-#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
-#ifdef BCM47XX_ACP_WAR
- pa = virt_to_phys(va);
- if (pa < ACP_WIN_LIMIT)
- return (pa);
-#endif
- return (pci_map_single1(osh->pdev, va, size, dir));
+
+
+#ifdef BCMDMA64OSL
+ map_addr = pci_map_single(osh->pdev, va, size, dir);
+ PHYSADDRLOSET(ret, map_addr & 0xffffffff);
+ PHYSADDRHISET(ret, (map_addr >> 32) & 0xffffffff);
+ return ret;
+#else
+ return (pci_map_single(osh->pdev, va, size, dir));
+#endif /* BCMDMA64OSL */
}
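/*
 * With BCMDMA64OSL the dmaaddr_t is carried as two 32-bit halves:
 * PHYSADDRLOSET/PHYSADDRHISET above store the mapped dma_addr_t as low and high
 * words, and PHYSADDRTOULONG reassembles it before unmapping or freeing. A
 * standalone sketch of that split and recombination; the struct and value are
 * illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

/* dmaaddr_t carried as two 32-bit halves, as the BCMDMA64OSL build does */
struct ex_dmaaddr {
	uint32_t loaddr;
	uint32_t hiaddr;
};

static struct ex_dmaaddr ex_split(uint64_t dma_addr)
{
	struct ex_dmaaddr a;

	a.loaddr = (uint32_t)(dma_addr & 0xffffffffu);		/* PHYSADDRLOSET */
	a.hiaddr = (uint32_t)((dma_addr >> 32) & 0xffffffffu);	/* PHYSADDRHISET */
	return a;
}

static uint64_t ex_join(struct ex_dmaaddr a)
{
	/* PHYSADDRTOULONG: rebuild the 64-bit bus address */
	return ((uint64_t)a.hiaddr << 32) | a.loaddr;
}

int main(void)
{
	uint64_t dma_addr = 0x1234abcd5678ef00ull;	/* assumed example bus address */
	struct ex_dmaaddr a = ex_split(dma_addr);

	printf("lo=0x%08x hi=0x%08x rejoined=0x%llx\n",
	       a.loaddr, a.hiaddr, (unsigned long long)ex_join(a));
	return 0;
}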
void BCMFASTPATH
-osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction)
{
int dir;
+#ifdef BCMDMA64OSL
+ dma_addr_t paddr;
+#endif /* BCMDMA64OSL */
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-#ifdef BCM47XX_ACP_WAR
- if (pa < ACP_WIN_LIMIT)
- return;
-#endif
+
+
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
- pci_unmap_single1(osh->pdev, (uint32)pa, size, dir);
+#ifdef BCMDMA64OSL
+ PHYSADDRTOULONG(pa, paddr);
+ pci_unmap_single(osh->pdev, paddr, size, dir);
+#else
+ pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+#endif /* BCMDMA64OSL */
+}
+
+/* OSL function for CPU relax */
+inline void BCMFASTPATH
+osl_cpu_relax(void)
+{
+ cpu_relax();
}
-#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING) || \
+ defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8890))
+#include <asm/dma-mapping.h>
+
+/*
+ * Note that the ring is guaranteed to be cache line aligned, but the
+ * messages inside it are not. __dma_inv_range in arch/arm64/mm/cache.S
+ * invalidates only when the request is cache line aligned; otherwise it
+ * cleans and invalidates, so it is safer to invalidate whole lines here.
+ *
+ * Also, recent kernel versions invoke the cache maintenance operations in
+ * arch/arm64/mm/dma-mapping.c (__swiotlb_sync_single_for_device) only if
+ * is_device_dma_coherent() returns 0. Without the BSP source we assume
+ * that is the case, since we pass NULL for the dev pointer.
+ */
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
+ /*
+	 * using unsigned long for address arithmetic is OK; on Linux it is
+	 * 4 bytes on 32-bit and 8 bytes on 64-bit
+ */
+ unsigned long end_cache_line_start;
+ unsigned long end_addr;
+ unsigned long next_cache_line_start;
+
+ end_addr = (unsigned long)va + size;
+
+	/* Start address beyond the cache line we plan to operate on */
+ end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1));
+ next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES;
+
+ /* Align the start address to cache line boundary */
+ va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1));
+
+ /* Ensure that size is also aligned and extends partial line to full */
+ size = next_cache_line_start - (unsigned long)va;
+
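/*
 * The prologue above rounds va down to a cache-line boundary and stretches size
 * out to whole lines, so that __dma_inv_range never sees a partial line. A
 * standalone sketch of just that arithmetic, assuming a 64-byte line (the real
 * L1_CACHE_BYTES comes from the kernel headers).
 */
#include <stdio.h>

#define EX_L1_CACHE_BYTES 64UL	/* assumed cache line size */

/* Round (va, size) outwards so both ends fall on cache-line boundaries. */
static void ex_align_range(unsigned long *va, unsigned long *size)
{
	unsigned long end_addr = *va + *size;
	unsigned long end_cache_line_start = end_addr & ~(EX_L1_CACHE_BYTES - 1);
	unsigned long next_cache_line_start = end_cache_line_start + EX_L1_CACHE_BYTES;

	*va &= ~(EX_L1_CACHE_BYTES - 1);	/* align start down */
	*size = next_cache_line_start - *va;	/* extend partial lines to full */
}

int main(void)
{
	unsigned long va = 0x1010, size = 10;	/* deliberately unaligned example */

	ex_align_range(&va, &size);
	printf("aligned va=0x%lx size=%lu\n", va, size);	/* prints 0x1000, 64 */
	return 0;
}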
#ifndef BCM_SECURE_DMA
-#ifdef BCM47XX_ACP_WAR
- if (virt_to_phys(va) < ACP_WIN_LIMIT)
- return;
-#endif
+
+#ifdef CONFIG_ARM64
+ /*
+ * virt_to_dma is not present in arm64/include/dma-mapping.h
+	 * so we have to convert the va to a pa first and then derive the
+	 * dma address from it.
+ */
+ {
+ phys_addr_t pa;
+ dma_addr_t dma_addr;
+ pa = virt_to_phys(va);
+ dma_addr = phys_to_dma(NULL, pa);
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, dma_addr, size, DMA_TX);
+ }
+#else
if (size > 0)
dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+#endif /* !CONFIG_ARM64 */
#else
phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
if (size > 0)
inline void BCMFASTPATH
osl_cache_inv(void *va, uint size)
{
+ /*
+	 * using unsigned long for address arithmetic is OK; on Linux it is
+	 * 4 bytes on 32-bit and 8 bytes on 64-bit
+ */
+ unsigned long end_cache_line_start;
+ unsigned long end_addr;
+ unsigned long next_cache_line_start;
+
+ end_addr = (unsigned long)va + size;
+
+	/* Start address beyond the cache line we plan to operate on */
+ end_cache_line_start = (end_addr & ~(L1_CACHE_BYTES - 1));
+ next_cache_line_start = end_cache_line_start + L1_CACHE_BYTES;
+
+ /* Align the start address to cache line boundary */
+ va = (void *)((unsigned long)va & ~(L1_CACHE_BYTES - 1));
+
+ /* Ensure that size is also aligned and extends partial line to full */
+ size = next_cache_line_start - (unsigned long)va;
+
#ifndef BCM_SECURE_DMA
-#ifdef BCM47XX_ACP_WAR
- if (virt_to_phys(va) < ACP_WIN_LIMIT)
- return;
-#endif
+
+#ifdef CONFIG_ARM64
+ /*
+ * virt_to_dma is not present in arm64/include/dma-mapping.h
+	 * so we have to convert the va to a pa first and then derive the
+	 * dma address from it.
+ */
+ {
+ phys_addr_t pa;
+ dma_addr_t dma_addr;
+ pa = virt_to_phys(va);
+ dma_addr = phys_to_dma(NULL, pa);
+ dma_sync_single_for_cpu(OSH_NULL, dma_addr, size, DMA_RX);
+ }
+#else
dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
+#endif /* !CONFIG_ARM64 */
#else
phys_addr_t orig_pa = (phys_addr_t)(va - g_contig_delta_va_pa);
dma_sync_single_for_cpu(OSH_NULL, orig_pa, size, DMA_RX);
inline void osl_prefetch(const void *ptr)
{
- /* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */
- __asm__ __volatile__(
- "pld\t%0"
- :
- : "o" (*(char *)ptr)
- : "cc");
+ /* PLD instruction is not applicable in ARM 64. We don't care for now */
+#ifndef CONFIG_ARM64
+ __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
+#endif
}
int osl_arch_is_coherent(void)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
return 0;
-#else
- return arch_is_coherent();
-#endif
}
+
+
+inline int osl_acp_war_enab(void)
+{
+ return 0;
+}
+
#endif
#if defined(BCMASSERT_LOG)
#ifdef BCMASSERT_LOG
snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
exp, basename, line);
- printk("%s", tempbuf);
#endif /* BCMASSERT_LOG */
+#if defined(BCMASSERT_LOG)
+ switch (g_assert_type) {
+ case 0:
+ panic("%s", tempbuf);
+ break;
+ case 1:
+ printk("%s", tempbuf);
+ break;
+ case 2:
+ printk("%s", tempbuf);
+ BUG();
+ break;
+ default:
+ break;
+ }
+#endif
+
}
#endif
#ifdef BCMDBG_CTRACE
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
+#else
osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCM_OBJECT_TRACE */
#endif /* BCMDBG_CTRACE */
{
void * p;
/* Increment the packet counter */
atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
#ifdef BCMDBG_CTRACE
ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
#endif
/* Linux Kernel: File Operations: end */
-#ifdef BCM47XX_ACP_WAR
-inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
-{
- uint32 flags;
- int pci_access = 0;
-
- if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
- pci_access = 1;
-
- if (pci_access)
- spin_lock_irqsave(&l2x0_reg_lock, flags);
- switch (size) {
- case sizeof(uint8):
- *(uint8*)v = readb((volatile uint8*)(addr));
- break;
- case sizeof(uint16):
- *(uint16*)v = readw((volatile uint16*)(addr));
- break;
- case sizeof(uint32):
- *(uint32*)v = readl((volatile uint32*)(addr));
- break;
- case sizeof(uint64):
- *(uint64*)v = *((volatile uint64*)(addr));
- break;
- }
- if (pci_access)
- spin_unlock_irqrestore(&l2x0_reg_lock, flags);
-}
-#endif /* BCM47XX_ACP_WAR */
/* APIs to set/get specific quirks in OSL layer */
void
{
return (osh->flags & mask);
}
+
#ifdef BCM_SECURE_DMA
static void
vunmap(contig_base_va);
}
-static void
-osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
-{
- if (sec_list_base)
- kfree(sec_list_base);
-}
-
static void
osl_sec_dma_init_elem_mem_block(osl_t *osh, size_t mbsize, int max, sec_mem_elem_t **list)
{
}
+static void
+osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
+{
+ if (sec_list_base)
+ kfree(sec_list_base);
+}
+
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_alloc_mem_elem(osl_t *osh, void *va, uint size, int direction,
struct sec_cma_info *ptr_cma_info, uint offset)
printf("%s free failed size=%d \n", __FUNCTION__, sec_mem_elem->size);
}
-
static sec_mem_elem_t * BCMFASTPATH
osl_sec_dma_find_rem_elem(osl_t *osh, struct sec_cma_info *ptr_cma_info, dma_addr_t dma_handle)
{
(direction == DMA_TX ? DMA_TO_DEVICE:DMA_FROM_DEVICE));
return dma_handle;
-
}
void BCMFASTPATH
}
#endif /* BCM_SECURE_DMA */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
+#include <linux/kallsyms.h>
+#include <net/sock.h>
+void
+osl_pkt_orphan_partial(struct sk_buff *skb)
+{
+ uint32 fraction;
+ static void *p_tcp_wfree = NULL;
+
+ if (!skb->destructor || skb->destructor == sock_wfree)
+ return;
+
+ if (unlikely(!p_tcp_wfree)) {
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, (unsigned long)skb->destructor);
+ sym[9] = 0;
+ if (!strcmp(sym, "tcp_wfree"))
+ p_tcp_wfree = skb->destructor;
+ else
+ return;
+ }
+
+ if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
+ return;
+
+	/* subtract a portion of the skb truesize from the socket's
+	 * sk_wmem_alloc so that more skbs can be allocated for this
+	 * socket, giving a better cushion to meet WiFi device requirements
+ */
+ fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
+ skb->truesize -= fraction;
+ atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
+}
+#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
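/*
 * osl_pkt_orphan_partial() above releases (TSQ_MULTIPLIER - 1)/TSQ_MULTIPLIER of the
 * skb truesize from the socket's in-flight accounting, leaving more headroom for skbs
 * queued toward the WiFi device. A worked standalone sketch of just that fraction;
 * the multiplier and truesize values are assumptions for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TSQ_MULTIPLIER 8	/* assumed; the build defines TSQ_MULTIPLIER elsewhere */

int main(void)
{
	uint32_t truesize = 2240;	/* assumed skb->truesize for a data skb */
	uint32_t fraction = truesize * (EX_TSQ_MULTIPLIER - 1) / EX_TSQ_MULTIPLIER;

	/* 7/8 of the accounted size is released back to the socket budget */
	printf("truesize=%u, released=%u, still charged=%u\n",
	       truesize, fraction, truesize - fraction);
	return 0;
}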
* Contains PCIe related functions that are shared between different driver models (e.g. firmware
* builds, DHD builds, BMAC builds), in order to avoid code duplication.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: pcie_core.c 444841 2013-12-21 04:32:29Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: pcie_core.c 591285 2015-10-07 11:56:29Z $
*/
#include <bcm_cfg.h>
W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
W_REG(osh, &sbpcieregs->configdata, lsc);
- /* Write configuration registers back to the shadow registers
- * cause shadow registers are cleared out after watchdog reset.
- */
- for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
- W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
- val = R_REG(osh, &sbpcieregs->configdata);
- W_REG(osh, &sbpcieregs->configdata, val);
+ if (sih->buscorerev <= 13) {
+ /* Write configuration registers back to the shadow registers
+ * cause shadow registers are cleared out after watchdog reset.
+ */
+ for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+ W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+ val = R_REG(osh, &sbpcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configdata, val);
+ }
}
si_setcoreidx(sih, origidx);
}
+
+/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled
+ * by the L12 state from MAC to save power by putting the
+ * SerDes analog in IDDQ mode
+ */
+void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+ sbpcieregs_t *pcie = NULL;
+ uint crwlpciegen2_117_disable = 0;
+ uint32 origidx = si_coreidx(sih);
+
+ crwlpciegen2_117_disable = PCIE_PipeIddqDisable0 | PCIE_PipeIddqDisable1;
+ /* Switch to PCIE2 core */
+ pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+ BCM_REFERENCE(pcie);
+ ASSERT(pcie != NULL);
+
+ OR_REG(osh, &sbpcieregs->control,
+ crwlpciegen2_117_disable);
+
+ si_setcoreidx(sih, origidx);
+}
#endif /* BCMDRIVER */
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: sbutils.c 514727 2014-11-12 03:02:48Z $
*/
#include <bcm_cfg.h>
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: siutils.c 497460 2014-08-19 15:14:13Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils.c 552034 2015-04-24 19:00:35Z $
*/
#include <bcm_cfg.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
+#ifdef BCMPCIEDEV
+#include <pciedev.h>
+#endif /* BCMPCIEDEV */
#include <pcicfg.h>
#include <sbpcmcia.h>
+#include <sbsysmem.h>
#include <sbsocram.h>
#ifdef BCMSDIO
#include <bcmsdh.h>
#endif /* BCMLTECOEX */
-
/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;
uint i;
uint pciidx, pcieidx, pcirev, pcierev;
+ /* first, enable backplane timeouts */
+ if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
+ ai_enable_backplane_timeouts(&sii->pub);
+
cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
ASSERT((uintptr)cc);
/* now look at the chipstatus register to figure the package */
/* for SDIO but downloaded on PCIE dev */
if (cid == PCIE2_CORE_ID) {
- if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) ||
- ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) &&
+ if (BCM43602_CHIP(sii->pub.chip) ||
+ (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) ||
+ (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) ||
+ ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID ||
+ CHIPID(sii->pub.chip) == BCM43454_CHIP_ID) &&
CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
pcieidx = i;
pcierev = crev;
*origidx = i;
}
+
#if defined(PCIE_FULL_DONGLE)
- pci = FALSE;
-#endif
+ if (pcie) {
+ if (pcie_gen2)
+ sii->pub.buscoretype = PCIE2_CORE_ID;
+ else
+ sii->pub.buscoretype = PCIE_CORE_ID;
+ sii->pub.buscorerev = pcierev;
+ sii->pub.buscoreidx = pcieidx;
+ }
+ BCM_REFERENCE(pci);
+ BCM_REFERENCE(pcirev);
+ BCM_REFERENCE(pciidx);
+#else
if (pci) {
sii->pub.buscoretype = PCI_CORE_ID;
sii->pub.buscorerev = pcirev;
sii->pub.buscorerev = pcierev;
sii->pub.buscoreidx = pcieidx;
}
+#endif /* defined(PCIE_FULL_DONGLE) */
SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
sii->pub.buscorerev));
ASSERT(sii->chipnew == 0);
switch (sih->chip) {
- case BCM43570_CHIP_ID:
+ case BCM43567_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM43570_CHIP_ID; /* chip class */
+ break;
case BCM4358_CHIP_ID:
+ case BCM43566_CHIP_ID:
sii->chipnew = sih->chip; /* save it */
sii->pub.chip = BCM43569_CHIP_ID; /* chip class */
break;
sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
break;
default:
- ASSERT(0);
break;
}
}
sii->curmap = regs;
sii->sdh = sdh;
sii->osh = osh;
+ sii->second_bar0win = ~0x0;
/* check to see if we are a si core mimic'ing a pci core */
SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
return NULL;
}
-#ifdef COSTOMER_HW4
-#ifdef CONFIG_MACH_UNIVERSAL5433
- /* old revision check */
- if (!check_rev()) {
- /* abnormal link status */
- if (!check_pcie_link_status()) {
- printk("%s : PCIE LINK is abnormal status\n", __FUNCTION__);
- return NULL;
- }
- }
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
-#endif
w = R_REG(osh, &cc->chipid);
if ((w & 0xfffff) == 148277) w -= 65532;
sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
-#if defined(HW_OOB)
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
dhd_conf_set_hw_oob_intr(sdh, sih->chip);
#endif
- if ((sih->chip == BCM4358_CHIP_ID) ||
- (sih->chip == BCM43570_CHIP_ID) ||
- (sih->chip == BCM4358_CHIP_ID)) {
- si_chipid_fixup(sih);
- }
+ si_chipid_fixup(sih);
- if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
+	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (CHIPREV(sih->chiprev) == 0) &&
(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
sih->chippkg = BCM4329_182PIN_PKG_ID;
}
gpiopulldown |= 0x20500;
}
+
W_REG(osh, &cc->gpiopullup, gpiopullup);
W_REG(osh, &cc->gpiopulldown, gpiopulldown);
si_setcoreidx(sih, origidx);
return (ai_wrap_reg(sih, offset, mask, val));
return 0;
}
+/* si_backplane_access() gives the host access to the full backplane address space for PCIE FD.
+ * It uses the secondary bar-0 window, which lies at an offset of 16K from the primary bar-0,
+ * and supports reads/writes of 1/2/4 bytes. It can be used to read/write
+ * 1. core regs
+ * 2. wrapper regs
+ * 3. memory
+ * 4. the BT area
+ * Pass the full 32-bit backplane address: bits [31:12] are programmed into the window as the
+ * "region" and bits [11:0] are used as the register offset.
+ * For example, to read 4 bytes from reg 0x200 of the d11 core:
+ *	si_backplane_access(sih, 0x18001200, 4, &val, TRUE)
+ */
+static int si_backplane_addr_sane(uint addr, uint size)
+{
+ int bcmerror = BCME_OK;
+ /* For 2 byte access, address has to be 2 byte aligned */
+ if (size == 2) {
+ if (addr & 0x1) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+ /* For 4 byte access, address has to be 4 byte aligned */
+ if (size == 4) {
+ if (addr & 0x3) {
+ bcmerror = BCME_ERROR;
+ }
+ }
+
+ return bcmerror;
+}
+uint
+si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
+{
+ uint32 *r = NULL;
+ uint32 region = 0;
+ si_info_t *sii = SI_INFO(sih);
+
+ /* Valid only for pcie bus */
+ if (BUSTYPE(sih->bustype) != PCI_BUS) {
+ SI_ERROR(("Valid only for pcie bus \n"));
+ return BCME_ERROR;
+ }
+
+	/* Split addr into region and address offset */
+ region = (addr & (0xFFFFF << 12));
+ addr = addr & 0xFFF;
+
+ /* check for address and size sanity */
+ if (si_backplane_addr_sane(addr, size) != BCME_OK)
+ return BCME_ERROR;
+
+ /* Update window if required */
+ if (sii->second_bar0win != region) {
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, 4, region);
+ sii->second_bar0win = region;
+ }
+
+	/* Compute the effective address
+	 * sii->curmap : bar-0 virtual address
+	 * PCI_SECOND_BAR0_OFFSET : secondary bar-0 offset
+	 * addr : register offset within the window
+	 */
+ r = (uint32 *)((char *)sii->curmap + PCI_SECOND_BAR0_OFFSET + addr);
+
+ SI_VMSG(("si curmap %p region %x regaddr %x effective addr %p READ %d\n",
+ (char*)sii->curmap, region, addr, r, read));
+
+ switch (size) {
+ case sizeof(uint8) :
+ if (read)
+ *val = R_REG(sii->osh, (uint8*)r);
+ else
+ W_REG(sii->osh, (uint8*)r, *val);
+ break;
+ case sizeof(uint16) :
+ if (read)
+ *val = R_REG(sii->osh, (uint16*)r);
+ else
+ W_REG(sii->osh, (uint16*)r, *val);
+ break;
+ case sizeof(uint32) :
+ if (read)
+ *val = R_REG(sii->osh, (uint32*)r);
+ else
+ W_REG(sii->osh, (uint32*)r, *val);
+ break;
+
+ default :
+ SI_ERROR(("Invalid size %d \n", size));
+ return (BCME_ERROR);
+ break;
+ }
+
+ return (BCME_OK);
+}
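/*
 * si_backplane_access() above splits a full 32-bit backplane address into a
 * 4 KB-aligned region ([31:12], programmed into PCIE2_BAR0_CORE2_WIN) and a 12-bit
 * offset, after a size/alignment sanity check. A standalone sketch of that split,
 * reusing the d11 example from the comment above.
 */
#include <stdio.h>
#include <stdint.h>

/* Reject 2/4-byte accesses that are not naturally aligned, as the driver does. */
static int ex_addr_sane(uint32_t addr, unsigned int size)
{
	if (size == 2 && (addr & 0x1))
		return -1;
	if (size == 4 && (addr & 0x3))
		return -1;
	return 0;
}

int main(void)
{
	uint32_t addr = 0x18001000 + 0x200;		/* d11 core reg 0x200 */
	uint32_t region = addr & (0xFFFFFu << 12);	/* goes into the second bar-0 window */
	uint32_t offset = addr & 0xFFF;			/* offset within the 4 KB window */

	if (ex_addr_sane(offset, 4) != 0)
		return 1;
	printf("region=0x%08x offset=0x%03x\n",
	       (unsigned)region, (unsigned)offset);	/* 0x18001000, 0x200 */
	return 0;
}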
uint
si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
}
}
+/*
+ * Divide the clock by the divisor with protection for
+ * a zero divisor.
+ */
+static uint32
+divide_clock(uint32 clock, uint32 div)
+{
+ return div ? clock / div : 0;
+}
+
+
/** calculate the speed the SI would run at given a set of clockcontrol values */
uint32
si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
switch (mc) {
case CC_MC_BYPASS: return (clock);
- case CC_MC_M1: return (clock / m1);
- case CC_MC_M1M2: return (clock / (m1 * m2));
- case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3));
- case CC_MC_M1M3: return (clock / (m1 * m3));
+ case CC_MC_M1: return divide_clock(clock, m1);
+ case CC_MC_M1M2: return divide_clock(clock, m1 * m2);
+ case CC_MC_M1M2M3: return divide_clock(clock, m1 * m2 * m3);
+ case CC_MC_M1M3: return divide_clock(clock, m1 * m3);
default: return (0);
}
} else {
return (clock);
}
+ return 0;
}
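/*
 * divide_clock() above guards the m-divider products against a zero divisor so a
 * malformed clockcontrol word cannot fault the rate calculation. A standalone
 * sketch of the CC_MC_M1M2-style case; the clock and divider values are assumed.
 */
#include <stdio.h>
#include <stdint.h>

/* Divide with protection for a zero divisor, as divide_clock() does. */
static uint32_t ex_divide_clock(uint32_t clock, uint32_t div)
{
	return div ? clock / div : 0;
}

int main(void)
{
	uint32_t clock = 80000000;	/* assumed 80 MHz reference */
	uint32_t m1 = 4, m2 = 2;	/* assumed divider fields */

	printf("M1M2 rate: %u Hz\n", ex_divide_clock(clock, m1 * m2));	/* 10 MHz */
	printf("zero divisor: %u Hz\n", ex_divide_clock(clock, 0));	/* safely 0 */
	return 0;
}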
/**
switch (CHIPID(sih->chip)) {
- case BCM43602_CHIP_ID:
+ CASE_BCM43602_CHIP:
hosti = CHIP_HOSTIF_PCIEMODE;
break;
break;
case BCM4345_CHIP_ID:
+ case BCM43454_CHIP_ID:
if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
hosti = CHIP_HOSTIF_USBMODE;
else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
return (si_corereg(sih, SI_CC_IDX, offs, mask, val));
}
-void *
-si_gpio_handler_register(si_t *sih, uint32 event,
- bool level, gpio_handler_t cb, void *arg)
+uint32
+si_gpio_int_enable(si_t *sih, bool enable)
{
- si_info_t *sii = SI_INFO(sih);
- gpioh_item_t *gi;
-
- ASSERT(event);
- ASSERT(cb != NULL);
+ uint offs;
if (sih->ccrev < 11)
- return NULL;
+ return 0xffffffff;
- if ((gi = MALLOC(sii->osh, sizeof(gpioh_item_t))) == NULL)
- return NULL;
+ offs = OFFSETOF(chipcregs_t, intmask);
+ return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+}
- bzero(gi, sizeof(gpioh_item_t));
- gi->event = event;
- gi->handler = cb;
- gi->arg = arg;
- gi->level = level;
+/** Return the size of the specified SYSMEM bank */
+static uint
+sysmem_banksize(si_info_t *sii, sysmemregs_t *regs, uint8 idx, uint8 mem_type)
+{
+ uint banksize, bankinfo;
+ uint bankidx = idx | (mem_type << SYSMEM_BANKIDX_MEMTYPE_SHIFT);
- gi->next = sii->gpioh_head;
- sii->gpioh_head = gi;
+ ASSERT(mem_type <= SYSMEM_MEMTYPE_DEVRAM);
- return (void *)(gi);
+	W_REG(sii->osh, &regs->bankidx, bankidx);
+	bankinfo = R_REG(sii->osh, &regs->bankinfo);
+ banksize = SYSMEM_BANKINFO_SZBASE * ((bankinfo & SYSMEM_BANKINFO_SZMASK) + 1);
+ return banksize;
}
-void
-si_gpio_handler_unregister(si_t *sih, void *gpioh)
+/** Return the RAM size of the SYSMEM core */
+uint32
+si_sysmem_size(si_t *sih)
{
si_info_t *sii = SI_INFO(sih);
- gpioh_item_t *p, *n;
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
- if (sih->ccrev < 11)
- return;
+ sysmemregs_t *regs;
+ bool wasup;
+ uint32 coreinfo;
+ uint memsize = 0;
+ uint8 i;
+ uint nb;
- ASSERT(sii->gpioh_head != NULL);
- if ((void*)sii->gpioh_head == gpioh) {
- sii->gpioh_head = sii->gpioh_head->next;
- MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
- return;
- } else {
- p = sii->gpioh_head;
- n = p->next;
- while (n) {
- if ((void*)n == gpioh) {
- p->next = n->next;
- MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
- return;
- }
- p = n;
- n = n->next;
- }
- }
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
- ASSERT(0); /* Not found in list */
-}
+ /* Switch to SYSMEM core */
+ if (!(regs = si_setcore(sih, SYSMEM_CORE_ID, 0)))
+ goto done;
-void
-si_gpio_handler_process(si_t *sih)
-{
- si_info_t *sii = SI_INFO(sih);
- gpioh_item_t *h;
- uint32 level = si_gpioin(sih);
- uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
- uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
- uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
-
- for (h = sii->gpioh_head; h != NULL; h = h->next) {
- if (h->handler) {
- uint32 status = (h->level ? level : edge) & h->event;
- uint32 polarity = (h->level ? levelp : edgep) & h->event;
-
- /* polarity bitval is opposite of status bitval */
- if ((h->level && (status ^ polarity)) || (!h->level && status))
- h->handler(status, h->arg);
- }
- }
+ /* Get info for determining size */
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+	coreinfo = R_REG(sii->osh, &regs->coreinfo);
- si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
-}
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ for (i = 0; i < nb; i++)
+ memsize += sysmem_banksize(sii, regs, i, SYSMEM_MEMTYPE_RAM);
-uint32
-si_gpio_int_enable(si_t *sih, bool enable)
-{
- uint offs;
+ si_setcoreidx(sih, origidx);
- if (sih->ccrev < 11)
- return 0xffffffff;
+done:
+ INTR_RESTORE(sii, intr_val);
- offs = OFFSETOF(chipcregs_t, intmask);
- return (si_corereg(sih, SI_CC_IDX, offs, CI_GPIO, (enable ? CI_GPIO : 0)));
+ return memsize;
}
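/*
 * si_sysmem_size() above reads coreinfo for the bank count and sums per-bank
 * sizes, where sysmem_banksize() decodes each bankinfo word as
 * (size field + 1) units of SYSMEM_BANKINFO_SZBASE bytes. A standalone sketch
 * with assumed field mask and unit size; these are not the driver's real
 * SYSMEM_BANKINFO_* values.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_SZBASE 8192u		/* assumed bytes per bankinfo size unit */
#define EX_SZMASK 0x7fu		/* assumed size field mask in bankinfo */

static unsigned int ex_banksize(uint32_t bankinfo)
{
	/* size field is encoded as (units - 1), as sysmem_banksize() decodes it */
	return EX_SZBASE * ((bankinfo & EX_SZMASK) + 1);
}

int main(void)
{
	/* assumed bankinfo words for a three-bank core */
	uint32_t bankinfo[] = { 0x0f, 0x0f, 0x07 };
	unsigned int memsize = 0;
	unsigned int i;

	for (i = 0; i < sizeof(bankinfo) / sizeof(bankinfo[0]); i++)
		memsize += ex_banksize(bankinfo[i]);
	printf("total sysmem: %u bytes\n", memsize);	/* 2 * 128 KB + 64 KB = 327680 */
	return 0;
}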
-
/** Return the size of the specified SOCRAM bank */
static uint
socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
val = R_REG(sii->osh, &cc->chipcontrol);
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
W_REG(sii->osh, &cc->chipcontrol, val);
si_setcoreidx(sih, origidx);
}
uint origidx = si_coreidx(sih);
uint32 val;
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return -1;
+ }
val = R_REG(sii->osh, &cc->chipcontrol);
si_setcoreidx(sih, origidx);
return val;
uint origidx = si_coreidx(sih);
uint32 val;
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
val = R_REG(sii->osh, &cc->chipcontrol);
if (on) {
W_REG(sii->osh, &cc->chipcontrol, val);
} else {
/* Ext PA Controls for 4331 12x12 Package */
- if (sih->chiprev > 0) {
+ if (CHIPREV(sih->chiprev) > 0) {
W_REG(sii->osh, &cc->chipcontrol, val |
(CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
} else {
uint origidx = si_coreidx(sih);
uint32 val;
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
val = R_REG(sii->osh, &cc->chipcontrol);
if (on) {
si_setcoreidx(sih, origidx);
}
+void
+si_clk_srom4365(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 val;
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+ val = R_REG(sii->osh, &cc->clkdiv2);
+ W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4));
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_d11rsdb_core1_alt_reg_clk_dis(si_t *sih)
+{
+#if defined(WLRSDB) && !defined(WLRSDB_DISABLED)
+ ai_d11rsdb_core1_alt_reg_clk_dis(sih);
+#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */
+}
+
+void
+si_d11rsdb_core1_alt_reg_clk_en(si_t *sih)
+{
+#if defined(WLRSDB) && !defined(WLRSDB_DISABLED)
+ ai_d11rsdb_core1_alt_reg_clk_en(sih);
+#endif /* defined(WLRSDB) && !defined(WLRSDB_DISABLED) */
+}
+
void
si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
{
sii = SI_INFO(sih);
origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
val = R_REG(sii->osh, &cc->chipcontrol);
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
/* EPA Fix */
W_REG(sii->osh, &cc->gpiocontrol,
{
}
+void
+si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag)
+{
+}
+
/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
void
si_pmu_synth_pwrsw_4313_war(si_t *sih)
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
W_REG(sii->osh, &cc->gpiocontrol,
R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
/* BT fix */
W_REG(sii->osh, &cc->chipcontrol,
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
- cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
!(sih->chipst & CST4324_SFLASH_MASK));
case BCM4335_CHIP_ID:
case BCM4345_CHIP_ID:
+ case BCM43454_CHIP_ID:
return ((sih->chipst & CST4335_SPROM_MASK) &&
!(sih->chipst & CST4335_SFLASH_MASK));
case BCM4349_CHIP_GRPID:
case BCM43570_CHIP_ID:
case BCM4358_CHIP_ID:
return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
- case BCM43602_CHIP_ID:
+ CASE_BCM43602_CHIP:
return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
case BCM43131_CHIP_ID:
case BCM43217_CHIP_ID:
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
osl_t *osh = si_osh(sih);
+ int ret = BCME_OK;
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT((uintptr)cc);
/* get chipcommon rev */
- if (si_corerev(sih) < 32)
- return BCME_UNSUPPORTED;
-
- W_REG(osh, &cc->sromcontrol, value);
+ if (si_corerev(sih) >= 32) {
+ /* SpromCtrl is only accessible if CoreCapabilities.SpromSupported and
+ * SpromPresent is 1.
+ */
+ if ((R_REG(osh, &cc->capabilities) & CC_CAP_SROM) != 0 &&
+ (R_REG(osh, &cc->sromcontrol) & SRC_PRESENT)) {
+ W_REG(osh, &cc->sromcontrol, value);
+ } else {
+ ret = BCME_NODEVICE;
+ }
+ } else {
+ ret = BCME_UNSUPPORTED;
+ }
/* return to the original core */
si_setcoreidx(sih, origidx);
- return BCME_OK;
+ return ret;
}
uint
static void
si_watchdog_reset(si_t *sih)
{
- si_info_t *sii = SI_INFO(sih);
uint32 i;
/* issue a watchdog reset */
si_pcie_prep_D3(si_t *sih, bool enter_D3)
{
}
+
+
+
+void
+si_pll_sr_reinit(si_t *sih)
+{
+}
+
+void
+si_pll_closeloop(si_t *sih)
+{
+#if defined(SAVERESTORE)
+ uint32 data;
+
+ /* disable PLL open loop operation */
+ switch (CHIPID(sih->chip)) {
+#ifdef SAVERESTORE
+ case BCM43430_CHIP_ID:
+ if (SR_ENAB() && sr_isenab(sih)) {
+ /* read back the pll openloop state */
+ data = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8, 0, 0);
+ /* current mode is openloop (possible POR) */
+ if ((data & PMU1_PLLCTL8_OPENLOOP_MASK) != 0) {
+ si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL8,
+ PMU1_PLLCTL8_OPENLOOP_MASK, 0);
+ si_pmu_pllupd(sih);
+ }
+ }
+ break;
+#endif /* SAVERESTORE */
+ default:
+ /* any unsupported chip bail */
+ return;
+ }
+#endif
+}
/*
* Include file private to the SOC Interconnect support files.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: siutils_priv.h 474902 2014-05-02 18:31:33Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: siutils_priv.h 520760 2014-12-15 00:54:16Z $
*/
#ifndef _siutils_priv_h_
typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*si_intrsenabled_t)(void *intr_arg);
-typedef struct gpioh_item {
- void *arg;
- bool level;
- gpio_handler_t handler;
- uint32 event;
- struct gpioh_item *next;
-} gpioh_item_t;
-
#define SI_GPIO_MAX 16
void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+ void *wrappers2[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba2[SI_MAXCORES]; /* address of controlling wrapper */
+
uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
uint32 cib[SI_MAXCORES]; /* erom cib entry for each core */
} si_cores_info_t;
void *pch; /* PCI/E core handle */
- gpioh_item_t *gpioh_head; /* GPIO event handlers list */
-
bool memseg; /* flag to toggle MEM_SEG register */
char *vars;
void *cores_info;
gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */
uint chipnew; /* new chip number */
+ uint second_bar0win; /* Backplane region */
+ uint num_br; /* # discovered bridges */
+ uint32 br_wrapba[SI_MAXBR]; /* address of bridge controlling wrapper */
+ uint32 xtalfreq;
} si_info_t;
/* Force fast clock for 4360b0 */
#define PCI_FORCEHT(si) \
- (((PCIE_GEN1(si)) && (si->pub.chip == BCM4311_CHIP_ID) && ((si->pub.chiprev <= 1))) || \
- ((PCI(si) || PCIE_GEN1(si)) && (si->pub.chip == BCM4321_CHIP_ID)) || \
- (PCIE_GEN1(si) && (si->pub.chip == BCM4716_CHIP_ID)) || \
- (PCIE_GEN1(si) && (si->pub.chip == BCM4748_CHIP_ID)))
+ (((PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4311_CHIP_ID) && \
+ ((CHIPREV(si->pub.chiprev) <= 1))) || \
+ ((PCI(si) || PCIE_GEN1(si)) && (CHIPID(si->pub.chip) == BCM4321_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4716_CHIP_ID)) || \
+ (PCIE_GEN1(si) && (CHIPID(si->pub.chip) == BCM4748_CHIP_ID)))
/* GPIO Based LED powersave defines */
#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern bool ai_iscoreup(si_t *sih);
extern void *ai_setcoreidx(si_t *sih, uint coreidx);
+extern void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx);
extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
uint32 resetbits, void *p, void *s);
+extern void ai_d11rsdb_core1_alt_reg_clk_en(si_t *sih);
+extern void ai_d11rsdb_core1_alt_reg_clk_dis(si_t *sih);
+
extern void ai_core_disable(si_t *sih, uint32 bits);
extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
aidmp_t *pmacai, aidmp_t *smacai);
extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+extern void ai_enable_backplane_timeouts(si_t *sih);
+extern void ai_clear_backplane_to(si_t *sih);
#if defined(BCMDBG_PHYDUMP)
extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
*
* Description: Universal AMP API
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: uamp_api.h 467328 2014-04-03 01:23:40Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: uamp_api.h 514727 2014-11-12 03:02:48Z $
*
*/
/*
* Linux cfg80211 driver - Android related functions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_android.c 505064 2014-09-26 09:40:28Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_android.c 608788 2015-12-29 10:59:33Z $
*/
#include <linux/module.h>
#ifdef WL_NAN
#include <wl_cfgnan.h>
#endif /* WL_NAN */
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
#ifndef WL_CFG80211
#define htod32(i) i
#define CMD_SCAN_PASSIVE "SCAN-PASSIVE"
#define CMD_RSSI "RSSI"
#define CMD_LINKSPEED "LINKSPEED"
-#ifdef PKT_FILTER_SUPPORT
#define CMD_RXFILTER_START "RXFILTER-START"
#define CMD_RXFILTER_STOP "RXFILTER-STOP"
#define CMD_RXFILTER_ADD "RXFILTER-ADD"
#define CMD_RXFILTER_REMOVE "RXFILTER-REMOVE"
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#define CMD_PKT_FILTER_MODE "PKT_FILTER_MODE"
-#define CMD_PKT_FILTER_PORTS "PKT_FILTER_PORTS"
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-#endif /* PKT_FILTER_SUPPORT */
#define CMD_BTCOEXSCAN_START "BTCOEXSCAN-START"
#define CMD_BTCOEXSCAN_STOP "BTCOEXSCAN-STOP"
#define CMD_BTCOEXMODE "BTCOEXMODE"
#define CMD_P2P_GET_NOA "P2P_GET_NOA"
#endif /* WL_ENABLE_P2P_IF */
#define CMD_P2P_SD_OFFLOAD "P2P_SD_"
+#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_"
#define CMD_P2P_SET_PS "P2P_SET_PS"
+#define CMD_P2P_ECSA "P2P_ECSA"
#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
#define CMD_SETROAMMODE "SETROAMMODE"
#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA"
#define CMD_MIRACAST "MIRACAST"
+#ifdef WL_NAN
#define CMD_NAN "NAN_"
+#endif /* WL_NAN */
+#define CMD_COUNTRY_DELIMITER "/"
+#ifdef WL11ULB
+#define CMD_ULB_MODE "ULB_MODE"
+#define CMD_ULB_BW "ULB_BW"
+#endif /* WL11ULB */
#define CMD_GET_CHANNEL "GET_CHANNEL"
#define CMD_SET_ROAM "SET_ROAM_TRIGGER"
#define CMD_GET_ROAM "GET_ROAM_TRIGGER"
#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#define CMD_SETMIRACAST "SETMIRACAST"
-#define CMD_ASSOCRESPIE "ASSOCRESPIE"
-#define CMD_RXRATESTATS "RXRATESTATS"
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
+#define CMD_80211_MODE "MODE" /* 802.11 mode a/b/g/n/ac */
+#define CMD_CHANSPEC "CHANSPEC"
+#define CMD_DATARATE "DATARATE"
+#define CMD_ASSOC_CLIENTS "ASSOCLIST"
+#define CMD_SET_CSA "SETCSA"
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL"
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED"
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG"
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+#ifdef TEST_TX_POWER_CONTROL
+#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER"
+#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER"
+#endif /* TEST_TX_POWER_CONTROL */
+#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING"
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#define CMD_KEEP_ALIVE "KEEPALIVE"
-/* CCX Private Commands */
-#ifdef BCMCCX
-#define CMD_GETCCKM_RN "get cckm_rn"
-#define CMD_SETCCKM_KRK "set cckm_krk"
-#define CMD_GET_ASSOC_RES_IES "get assoc_res_ies"
-#endif
#ifdef PNO_SUPPORT
#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
#define CMD_WLS_BATCHING "WLS_BATCHING"
#endif /* PNO_SUPPORT */
-#define CMD_OKC_SET_PMK "SET_PMK"
-#define CMD_OKC_ENABLE "OKC_ENABLE"
-
#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER"
-#ifdef WLFBT
-#define CMD_GET_FTKEY "GET_FTKEY"
-#endif
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC"
+#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC"
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+
+#define CMD_CHANGE_RL "CHANGE_RL"
+#define CMD_RESTORE_RL "RESTORE_RL"
+
+#define CMD_SET_RMC_ENABLE "SETRMCENABLE"
+#define CMD_SET_RMC_TXRATE "SETRMCTXRATE"
+#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD"
+#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD"
+#define CMD_SET_RMC_LEADER "SETRMCLEADER"
+#define CMD_SET_RMC_EVENT "SETRMCEVENT"
+
+#define CMD_SET_SCSCAN "SETSINGLEANT"
+#define CMD_GET_SCSCAN "GETSINGLEANT"
+
+/* FCC_PWR_LIMIT_2G */
+#define CUSTOMER_HW4_ENABLE 0
+#define CUSTOMER_HW4_DISABLE -1
+#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1)
+
+#ifdef WLTDLS
+#define CMD_TDLS_RESET "TDLS_RESET"
+#endif /* WLTDLS */
+
+#ifdef IPV6_NDO_SUPPORT
+#define CMD_NDRA_LIMIT "NDRA_LIMIT"
+#endif /* IPV6_NDO_SUPPORT */
+
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
-#ifdef WLAIBSS
-#define CMD_SETIBSSTXFAILEVENT "SETIBSSTXFAILEVENT"
-#define CMD_GET_IBSS_PEER_INFO "GETIBSSPEERINFO"
-#define CMD_GET_IBSS_PEER_INFO_ALL "GETIBSSPEERINFOALL"
-#define CMD_SETIBSSROUTETABLE "SETIBSSROUTETABLE"
-#define CMD_SETIBSSAMPDU "SETIBSSAMPDU"
-#define CMD_SETIBSSANTENNAMODE "SETIBSSANTENNAMODE"
-#endif /* WLAIBSS */
#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD"
-#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST"
-#define CMD_GET_LINK_STATUS "GETLINKSTATUS"
+#define CMD_ROAM_OFFLOAD_APLIST "SETROAMOFFLAPLIST"
+#define CMD_INTERFACE_CREATE "INTERFACE_CREATE"
+#define CMD_INTERFACE_DELETE "INTERFACE_DELETE"
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define CMD_GET_BSS_INFO "GETBSSINFO"
+#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO"
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
#ifdef P2PRESP_WFDIE_SRC
#define CMD_P2P_SET_WFDIE_RESP "P2P_SET_WFDIE_RESP"
#define CMD_P2P_GET_WFDIE_RESP "P2P_GET_WFDIE_RESP"
#endif /* P2PRESP_WFDIE_SRC */
-/* related with CMD_GET_LINK_STATUS */
-#define WL_ANDROID_LINK_VHT 0x01
-#define WL_ANDROID_LINK_MIMO 0x02
-#define WL_ANDROID_LINK_AP_VHT_SUPPORT 0x04
-#define WL_ANDROID_LINK_AP_MIMO_SUPPORT 0x08
+#define CMD_DFS_AP_MOVE "DFS_AP_MOVE"
+#define CMD_WBTEXT_ENABLE "WBTEXT_ENABLE"
+#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
+
+#ifdef WLWFDS
+#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH"
+#define CMD_DEL_WFDS_HASH "DEL_WFDS_HASH"
+#endif /* WLWFDS */
+
+#ifdef SET_RPS_CPUS
+#define CMD_RPSMODE "RPSMODE"
+#endif /* SET_RPS_CPUS */
+
+#ifdef BT_WIFI_HANDOVER
+#define CMD_TBOW_TEARDOWN "TBOW_TEARDOWN"
+#endif /* BT_WIFI_HANDOVER */
+
+#define CMD_MURX_BFE_CAP "MURX_BFE_CAP"
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
static u8 miracast_cur_mode;
#endif
+#ifdef DHD_LOG_DUMP
+#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP"
+extern void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_TRACE_WAKE_LOCK
+extern void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
+#endif /* DHD_TRACE_WAKE_LOCK */
+
struct io_cfg {
s8 *iovar;
s32 param;
(JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
#endif /* BCMFW_ROAM_ENABLE */
-#ifdef WL_GENL
-static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
-static int wl_genl_init(void);
-static int wl_genl_deinit(void);
-
-extern struct net init_net;
-/* attribute policy: defines which attribute has which type (e.g int, char * etc)
- * possible values defined in net/netlink.h
- */
-static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = {
- [BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING },
- [BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
-};
-
-#define WL_GENL_VER 1
-/* family definition */
-static struct genl_family wl_genl_family = {
- .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
- .hdrsize = 0,
- .name = "bcm-genl", /* Netlink I/F for Android */
- .version = WL_GENL_VER, /* Version Number */
- .maxattr = BCM_GENL_ATTR_MAX,
-};
-
-/* commands: mapping between the command enumeration and the actual function */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
-struct genl_ops wl_genl_ops[] = {
- {
- .cmd = BCM_GENL_CMD_MSG,
- .flags = 0,
- .policy = wl_genl_policy,
- .doit = wl_genl_handle_msg,
- .dumpit = NULL,
- },
-};
-#else
-struct genl_ops wl_genl_ops = {
- .cmd = BCM_GENL_CMD_MSG,
- .flags = 0,
- .policy = wl_genl_policy,
- .doit = wl_genl_handle_msg,
- .dumpit = NULL,
-
-};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
-static struct genl_multicast_group wl_genl_mcast[] = {
- { .name = "bcm-genl-mcast", },
-};
-#else
-static struct genl_multicast_group wl_genl_mcast = {
- .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
- .name = "bcm-genl-mcast",
-};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-#endif /* WL_GENL */
/**
* Extern function declarations (TODO: move them to dhd_linux.h)
{ return 0; }
int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
{ return 0; }
+int wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{ return 0; }
#endif /* WL_CFG80211 */
/**
* Local (static) function definitions
*/
+
+#ifdef WLWFDS
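+/* Add or delete a WFDS hash entry: takes the wl_p2p_wfds_hash_t structure that follows
+ * the ADD_WFDS_HASH/DEL_WFDS_HASH keyword and passes it to the corresponding
+ * "p2p_add_wfds_hash" / "p2p_del_wfds_hash" iovar.
+ */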
+static int wl_android_set_wfds_hash(
+ struct net_device *dev, char *command, int total_len, bool enable)
+{
+ int error = 0;
+ wl_p2p_wfds_hash_t *wfds_hash = NULL;
+ char *smbuf = NULL;
+ smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+
+ if (smbuf == NULL) {
+		ANDROID_ERROR(("%s: failed to allocate memory %d bytes\n",
+ __FUNCTION__, WLC_IOCTL_MAXLEN));
+ return -ENOMEM;
+ }
+
+ if (enable) {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_ADD_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_add_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+ else {
+ wfds_hash = (wl_p2p_wfds_hash_t *)(command + strlen(CMD_DEL_WFDS_HASH) + 1);
+ error = wldev_iovar_setbuf(dev, "p2p_del_wfds_hash", wfds_hash,
+ sizeof(wl_p2p_wfds_hash_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ }
+
+ if (error) {
+ ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
+ }
+
+ if (smbuf)
+ kfree(smbuf);
+ return error;
+}
+#endif /* WLWFDS */
+
static int wl_android_get_link_speed(struct net_device *net, char *command, int total_len)
{
int link_speed;
static int wl_android_get_rssi(struct net_device *net, char *command, int total_len)
{
wlc_ssid_t ssid = {0};
- int rssi;
int bytes_written = 0;
- int error;
+ int error = 0;
+ scb_val_t scbval;
+ char *delim = NULL;
+
+ delim = strchr(command, ' ');
+	/* In AP/GO mode the rssi command is
+	 * "driver rssi <sta_mac_addr>";
+	 * in STA/GC mode it is
+	 * "driver rssi"
+	 */
+ if (delim) {
+ /* Ap/GO mode
+ * driver rssi <sta_mac_addr>
+ */
+ ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
+		/* skip the space after the delimiter */
+ delim++;
+ if (!(bcm_ether_atoe((delim), &scbval.ea)))
+ {
+ ANDROID_ERROR(("%s:address err\n", __FUNCTION__));
+ return -1;
+ }
+ scbval.val = htod32(0);
+ ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
+ }
+ else {
+ /* STA/GC mode */
+ memset(&scbval, 0, sizeof(scb_val_t));
+ }
- error = wldev_get_rssi(net, &rssi);
+ error = wldev_get_rssi(net, &scbval);
if (error)
return -1;
#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(net, rssi);
+ scbval.val = wl_update_rssi_offset(net, scbval.val);
#endif
error = wldev_get_ssid(net, &ssid);
memcpy(command, ssid.SSID, ssid.SSID_len);
bytes_written = ssid.SSID_len;
}
- bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", rssi);
- ANDROID_INFO(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
+ bytes_written += snprintf(&command[bytes_written], total_len, " rssi %d", scbval.val);
+ ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
return bytes_written;
}
suspend_flag = *(command + strlen(CMD_SETSUSPENDOPT) + 1) - '0';
- if (suspend_flag != 0)
+ if (suspend_flag != 0) {
suspend_flag = 1;
+ }
ret_now = net_os_set_suspend_disable(dev, suspend_flag);
if (ret_now != suspend_flag) {
- if (!(ret = net_os_set_suspend(dev, ret_now, 1)))
+ if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
ANDROID_INFO(("%s: Suspend Flag %d -> %d\n",
__FUNCTION__, ret_now, suspend_flag));
- else
+ } else {
ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+ }
}
+
return ret;
}
return ret;
}
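+/* Report the current 802.11 PHY mode (a/b/g/n/ac) in response to the "MODE" private command */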
+int wl_android_get_80211_mode(struct net_device *dev, char *command, int total_len)
+{
+ uint8 mode[4];
+ int error = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_mode(dev, mode);
+ if (error)
+ return -1;
+
+ ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
+ bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
+ ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ return bytes_written;
+
+}
+
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
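+/* Query the current chanspec and report it as "CHANSPEC channel <ch> band <2G|5G> bw <MHz>",
+ * converting a 40/80 MHz chanspec to its control-channel number first.
+ */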
+int wl_android_get_chanspec(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+	int chsp = 0;
+ uint16 band = 0;
+ uint16 bw = 0;
+ uint16 channel = 0;
+ u32 sb = 0;
+ chanspec_t chanspec;
+
+ /* command is
+ * driver chanspec
+ */
+ error = wldev_iovar_getint(dev, "chanspec", &chsp);
+ if (error)
+ return -1;
+
+ chanspec = wl_chspec_driver_to_host(chsp);
+ ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
+
+ channel = chanspec & WL_CHANSPEC_CHAN_MASK;
+ band = chanspec & WL_CHANSPEC_BAND_MASK;
+ bw = chanspec & WL_CHANSPEC_BW_MASK;
+
+ ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw));
+
+ if (bw == WL_CHANSPEC_BW_80)
+ bw = WL_CH_BANDWIDTH_80MHZ;
+ else if (bw == WL_CHANSPEC_BW_40)
+ bw = WL_CH_BANDWIDTH_40MHZ;
+ else if (bw == WL_CHANSPEC_BW_20)
+ bw = WL_CH_BANDWIDTH_20MHZ;
+ else
+ bw = WL_CH_BANDWIDTH_20MHZ;
+
+ if (bw == WL_CH_BANDWIDTH_40MHZ) {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ else if (bw == WL_CH_BANDWIDTH_80MHZ) {
+ sb = chanspec & WL_CHANSPEC_CTL_SB_MASK;
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
+ }
+ bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
+ channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
+
+ ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
+ return bytes_written;
+
+}
+
+/* Returns the current datarate; datarates reported by firmware are in units of 500 kbps */
+int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int datarate = 0;
+ int bytes_written = 0;
+
+ error = wldev_get_datarate(dev, &datarate);
+ if (error)
+ return -1;
+
+ ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
+ return bytes_written;
+}
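+/* List the MAC addresses of the stations currently associated to the SoftAP (WLC_GET_ASSOCLIST) */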
+int wl_android_get_assoclist(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ uint i;
+ char mac_buf[MAX_NUM_OF_ASSOCLIST *
+ sizeof(struct ether_addr) + sizeof(uint)] = {0};
+ struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+
+ ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
+
+ error = wldev_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), false);
+ if (error)
+ return -1;
+
+ assoc_maclist->count = dtoh32(assoc_maclist->count);
+ bytes_written = snprintf(command, total_len, "%s listcount: %d Stations:",
+ CMD_ASSOC_CLIENTS, assoc_maclist->count);
+
+ for (i = 0; i < assoc_maclist->count; i++) {
+ bytes_written += snprintf(command + bytes_written, total_len, " " MACDBG,
+ MAC2STRDBG(assoc_maclist->ea[i].octet));
+ }
+ return bytes_written;
+
+}
+extern chanspec_t
+wl_chspec_host_to_driver(chanspec_t chanspec);
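+/* Trigger a channel switch announcement: parse "<mode> <count> <channel>" from the SETCSA
+ * command, reject radar/passive 5 GHz channels, and issue the "csa" iovar.
+ */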
+static int wl_android_set_csa(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_chan_switch_t csa_arg;
+ u32 chnsp = 0;
+ int err = 0;
+
+ ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command));
+
+ command = (command + strlen(CMD_SET_CSA));
+	/* Order is mode, count, channel */
+ if (!*++command) {
+ ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__));
+ return -1;
+ }
+ csa_arg.mode = bcm_atoi(command);
+
+ if (csa_arg.mode != 0 && csa_arg.mode != 1) {
+ ANDROID_ERROR(("Invalid mode\n"));
+ return -1;
+ }
+
+ if (!*++command) {
+ ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__));
+ return -1;
+ }
+ command++;
+ csa_arg.count = bcm_atoi(command);
+
+ csa_arg.reg = 0;
+ csa_arg.chspec = 0;
+ command += 2;
+ if (!*command) {
+ ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__));
+ return -1;
+ }
+
+ chnsp = wf_chspec_aton(command);
+ if (chnsp == 0) {
+ ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
+ return -1;
+ }
+ chnsp = wl_chspec_host_to_driver(chnsp);
+ csa_arg.chspec = chnsp;
+
+ if (chnsp & WL_CHANSPEC_BAND_5G) {
+ u32 chanspec = chnsp;
+ err = wldev_iovar_getint(dev, "per_chan_info", &chanspec);
+ if (!err) {
+ if ((chanspec & WL_CHAN_RADAR) || (chanspec & WL_CHAN_PASSIVE)) {
+ ANDROID_ERROR(("Channel is radar sensitive\n"));
+ return -1;
+ }
+ if (chanspec == 0) {
+ ANDROID_ERROR(("Invalid hw channel\n"));
+ return -1;
+ }
+ } else {
+ ANDROID_ERROR(("does not support per_chan_info\n"));
+ return -1;
+ }
+		ANDROID_INFO(("channel is not radar sensitive\n"));
+ }
+ error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+ smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error));
+ return -1;
+ }
+ return 0;
+}
static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
{
uint band;
return bytes_written;
}
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+#ifdef FCC_PWR_LIMIT_2G
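+/* Enable or disable the firmware "fccpwrlimit2g" setting (SET_FCC_CHANNEL) and report its state */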
+int
+wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int enable = 0;
+
+ sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable);
+
+ if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) {
+ ANDROID_ERROR(("%s: Invalid data\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ CUSTOMER_HW4_EN_CONVERT(enable);
+
+ ANDROID_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable));
+ error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable);
+ if (error) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error));
+ return BCME_ERROR;
+ }
+
+ return error;
+}
+
+int
+wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int enable = 0;
+ int bytes_written = 0;
+
+ error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable);
+ if (error) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error));
+ return BCME_ERROR;
+ }
+ ANDROID_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable));
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable);
+
+ return bytes_written;
+}
+#endif /* FCC_PWR_LIMIT_2G */
+
+#ifdef IPV6_NDO_SUPPORT
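+/* Configure or query the ND Router Advertisement offload rate limits ("nd_ra_limit_intv" iovar):
+ * the SET form accepts type/min_time/percent/hold_time parameters, the GET form reports the
+ * currently programmed values.
+ */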
+int
+wl_android_nd_ra_limit(struct net_device *dev, char *command, int total_len)
+{
+ int err = 0;
+ int bytes_written = 0;
+ uint tokens;
+ char *pos, *token, *delim;
+ char smbuf[WLC_IOCTL_SMLEN];
+ char param[ND_PARAM_SIZE+1], value[ND_VALUE_SIZE+1];
+ uint16 type = 0xff, min = 0, per = 0, hold = 0;
+ nd_ra_ol_limits_t ra_ol_limit;
+
+ WL_TRACE(("command=%s, len=%d\n", command, total_len));
+ pos = command + strlen(CMD_NDRA_LIMIT) + 1;
+ memset(&ra_ol_limit, 0, sizeof(nd_ra_ol_limits_t));
+
+ if (!strncmp(pos, ND_RA_OL_SET, strlen(ND_RA_OL_SET))) {
+ WL_TRACE(("SET NDRA_LIMIT\n"));
+ pos += strlen(ND_RA_OL_SET) + 1;
+ while ((token = strsep(&pos, ND_PARAMS_DELIMETER)) != NULL) {
+ memset(param, 0, sizeof(param));
+ memset(value, 0, sizeof(value));
+
+ delim = strchr(token, ND_PARAM_VALUE_DELLIMETER);
+ if (delim != NULL)
+ *delim = ' ';
+
+ tokens = sscanf(token, ND_LIMIT_STR_FMT, param, value);
+ if (!strncmp(param, ND_RA_TYPE, strlen(ND_RA_TYPE))) {
+ type = simple_strtol(value, NULL, 0);
+ } else if (!strncmp(param, ND_RA_MIN_TIME, strlen(ND_RA_MIN_TIME))) {
+ min = simple_strtol(value, NULL, 0);
+ } else if (!strncmp(param, ND_RA_PER, strlen(ND_RA_PER))) {
+ per = simple_strtol(value, NULL, 0);
+ if (per > 100) {
+ ANDROID_ERROR(("Invalid PERCENT %d\n", per));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ } else if (!strncmp(param, ND_RA_HOLD, strlen(ND_RA_HOLD))) {
+ hold = simple_strtol(value, NULL, 0);
+ }
+ }
+
+ ra_ol_limit.version = htod32(ND_RA_OL_LIMITS_VER);
+ ra_ol_limit.type = htod32(type);
+ if (type == ND_RA_OL_LIMITS_REL_TYPE) {
+ if ((min == 0) || (per == 0)) {
+ ANDROID_ERROR(("Invalid min_time %d, percent %d\n", min, per));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_REL_TYPE_LEN);
+ ra_ol_limit.limits.lifetime_relative.min_time = htod32(min);
+ ra_ol_limit.limits.lifetime_relative.lifetime_percent = htod32(per);
+ } else if (type == ND_RA_OL_LIMITS_FIXED_TYPE) {
+ if (hold == 0) {
+ ANDROID_ERROR(("Invalid hold_time %d\n", hold));
+ err = BCME_BADARG;
+ goto exit;
+ }
+ ra_ol_limit.length = htod32(ND_RA_OL_LIMITS_FIXED_TYPE_LEN);
+ ra_ol_limit.limits.fixed.hold_time = htod32(hold);
+ } else {
+ ANDROID_ERROR(("unknown TYPE %d\n", type));
+ err = BCME_BADARG;
+ goto exit;
+ }
+
+ err = wldev_iovar_setbuf(dev, "nd_ra_limit_intv", &ra_ol_limit,
+ sizeof(nd_ra_ol_limits_t), smbuf, sizeof(smbuf), NULL);
+ if (err) {
+ ANDROID_ERROR(("Failed to set nd_ra_limit_intv, error = %d\n", err));
+ goto exit;
+ }
+
+ WL_TRACE(("TYPE %d, MIN %d, PER %d, HOLD %d\n", type, min, per, hold));
+ } else if (!strncmp(pos, ND_RA_OL_GET, strlen(ND_RA_OL_GET))) {
+ WL_TRACE(("GET NDRA_LIMIT\n"));
+ err = wldev_iovar_getbuf(dev, "nd_ra_limit_intv", NULL, 0,
+ smbuf, sizeof(smbuf), NULL);
+ if (err) {
+ ANDROID_ERROR(("Failed to get nd_ra_limit_intv, error = %d\n", err));
+ goto exit;
+ }
+
+ memcpy(&ra_ol_limit, (uint8 *)smbuf, sizeof(nd_ra_ol_limits_t));
+ type = ra_ol_limit.type;
+ if (ra_ol_limit.version != ND_RA_OL_LIMITS_VER) {
+ ANDROID_ERROR(("Invalid Version %d\n", ra_ol_limit.version));
+ err = BCME_VERSION;
+ goto exit;
+ }
+
+ if (ra_ol_limit.type == ND_RA_OL_LIMITS_REL_TYPE) {
+ min = ra_ol_limit.limits.lifetime_relative.min_time;
+ per = ra_ol_limit.limits.lifetime_relative.lifetime_percent;
+ ANDROID_ERROR(("TYPE %d, MIN %d, PER %d\n", type, min, per));
+ bytes_written = snprintf(command, total_len,
+ "%s GET TYPE %d, MIN %d, PER %d", CMD_NDRA_LIMIT, type, min, per);
+ } else if (ra_ol_limit.type == ND_RA_OL_LIMITS_FIXED_TYPE) {
+ hold = ra_ol_limit.limits.fixed.hold_time;
+ ANDROID_ERROR(("TYPE %d, HOLD %d\n", type, hold));
+ bytes_written = snprintf(command, total_len,
+ "%s GET TYPE %d, HOLD %d", CMD_NDRA_LIMIT, type, hold);
+ } else {
+ ANDROID_ERROR(("unknown TYPE %d\n", type));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+ return bytes_written;
+ } else {
+ ANDROID_ERROR(("unknown command\n"));
+ err = BCME_ERROR;
+ goto exit;
+ }
+
+exit:
+ return err;
+}
+#endif /* IPV6_NDO_SUPPORT */
+#ifdef WLTDLS
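+/* Reset TDLS by disabling and then re-enabling it in the firmware */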
+int wl_android_tdls_reset(struct net_device *dev)
+{
+ int ret = 0;
+ ret = dhd_tdls_enable(dev, false, false, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("Disable tdls failed. %d\n", ret));
+ return ret;
+ }
+ ret = dhd_tdls_enable(dev, true, true, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("enable tdls failed. %d\n", ret));
+ return ret;
+ }
+ return 0;
+}
+#endif /* WLTDLS */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
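+/* WBTEXT_ENABLE: with no argument, report whether the WBTEXT (BSS transition) policy is
+ * enabled; with an argument, set "wnm_bsstrans_resp" accordingly.
+ */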
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0, argc = 0;
+ int data, bytes_written;
+
+ argc = sscanf(command+sizeof("WBTEXT_ENABLE"), "%d", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data);
+ if (error) {
+			ANDROID_ERROR(("%s: Failed to get wbtext, error = %d\n",
+ __FUNCTION__, error));
+ }
+ bytes_written = snprintf(command, total_len, "WBTEXT %s\n",
+ (data == WL_BSSTRANS_POLICY_PRODUCT)? "ENABLED" : "DISABLED");
+ return bytes_written;
+ } else {
+ if (data)
+ data = WL_BSSTRANS_POLICY_PRODUCT;
+
+ error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n",
+ __FUNCTION__, error));
+ }
+ }
+ return error;
+}
#ifdef PNO_SUPPORT
#define PNO_PARAM_SIZE 50
ANDROID_ERROR(("failed to configure batch scan\n"));
} else {
memset(command, 0, total_len);
- err = sprintf(command, "%d", err);
+ err = snprintf(command, total_len, "%d", err);
}
} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
err = dhd_dev_pno_get_for_batch(dev, command, total_len);
ANDROID_ERROR(("failed to stop batching scan\n"));
} else {
memset(command, 0, total_len);
- err = sprintf(command, "OK");
+ err = snprintf(command, total_len, "OK");
}
} else {
ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__));
#ifndef WL_SCHED_SCAN
static int wl_android_set_pno_setup(struct net_device *dev, char *command, int total_len)
{
- wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
int res = -1;
int nssid = 0;
cmd_tlv_t *cmd_tlv_temp;
return bytes_written;
}
-#ifdef BCMCCX
-static int wl_android_get_cckm_rn(struct net_device *dev, char *command)
-{
- int error, rn;
-
- ANDROID_TRACE(("%s:wl_android_get_cckm_rn\n", dev->name));
-
- error = wldev_iovar_getint(dev, "cckm_rn", &rn);
- if (unlikely(error)) {
- ANDROID_ERROR(("wl_android_get_cckm_rn error (%d)\n", error));
- return -1;
- }
- memcpy(command, &rn, sizeof(int));
-
- return sizeof(int);
-}
-
-static int wl_android_set_cckm_krk(struct net_device *dev, char *command)
-{
- int error;
- unsigned char key[16];
- static char iovar_buf[WLC_IOCTL_MEDLEN];
-
- ANDROID_TRACE(("%s: wl_iw_set_cckm_krk\n", dev->name));
-
- memset(iovar_buf, 0, sizeof(iovar_buf));
- memcpy(key, command+strlen("set cckm_krk")+1, 16);
-
- error = wldev_iovar_setbuf(dev, "cckm_krk", key, sizeof(key),
- iovar_buf, WLC_IOCTL_MEDLEN, NULL);
- if (unlikely(error))
- {
- ANDROID_ERROR((" cckm_krk set error (%d)\n", error));
- return -1;
- }
- return 0;
-}
-
-static int wl_android_get_assoc_res_ies(struct net_device *dev, char *command)
-{
- int error;
- u8 buf[WL_ASSOC_INFO_MAX];
- wl_assoc_info_t assoc_info;
- u32 resp_ies_len = 0;
- int bytes_written = 0;
-
- ANDROID_TRACE(("%s: wl_iw_get_assoc_res_ies\n", dev->name));
-
- error = wldev_iovar_getbuf(dev, "assoc_info", NULL, 0, buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR(("could not get assoc info (%d)\n", error));
- return -1;
- }
-
- memcpy(&assoc_info, buf, sizeof(wl_assoc_info_t));
- assoc_info.req_len = htod32(assoc_info.req_len);
- assoc_info.resp_len = htod32(assoc_info.resp_len);
- assoc_info.flags = htod32(assoc_info.flags);
-
- if (assoc_info.resp_len) {
- resp_ies_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
- }
-
- /* first 4 bytes are ie len */
- memcpy(command, &resp_ies_len, sizeof(u32));
- bytes_written = sizeof(u32);
-
- /* get the association resp IE's if there are any */
- if (resp_ies_len) {
- error = wldev_iovar_getbuf(dev, "assoc_resp_ies", NULL, 0,
- buf, WL_ASSOC_INFO_MAX, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR(("could not get assoc resp_ies (%d)\n", error));
- return -1;
- }
-
- memcpy(command+sizeof(u32), buf, resp_ies_len);
- bytes_written += resp_ies_len;
- }
- return bytes_written;
-}
-
-#endif /* BCMCCX */
int
wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
int macmode = MACLIST_MODE_DISABLED;
struct maclist *list;
char eabuf[ETHER_ADDR_STR_LEN];
+ char *token;
/* string should look like below (macmode/macnum/maclist) */
/* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */
/* get the MAC filter mode */
- macmode = bcm_atoi(strsep((char**)&str, " "));
+ token = strsep((char**)&str, " ");
+ if (!token) {
+ return -1;
+ }
+ macmode = bcm_atoi(token);
if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
return -1;
}
- macnum = bcm_atoi(strsep((char**)&str, " "));
+ token = strsep((char**)&str, " ");
+ if (!token) {
+ return -1;
+ }
+ macnum = bcm_atoi(token);
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n",
__FUNCTION__, macnum));
int wl_android_wifi_on(struct net_device *dev)
{
int ret = 0;
-#ifdef CONFIG_MACH_UNIVERSAL5433
- int retry;
- /* Do not retry old revision Helsinki Prime */
- if (!check_rev()) {
- retry = 1;
- } else {
- retry = POWERUP_MAX_RETRY;
- }
-#else
int retry = POWERUP_MAX_RETRY;
-#endif /* CONFIG_MACH_UNIVERSAL5433 */
if (!dev) {
ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
#ifdef BCMPCIE
ret = dhd_net_bus_devreset(dev, FALSE);
#endif /* BCMPCIE */
- if (ret == 0)
+ if (ret == 0) {
break;
+ }
ANDROID_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
retry));
#ifdef BCMPCIE
#endif
}
-int wl_android_wifi_off(struct net_device *dev)
+int wl_android_wifi_off(struct net_device *dev, bool on_failure)
{
int ret = 0;
printf("%s in 1\n", __FUNCTION__);
dhd_net_if_lock(dev);
- printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
- if (g_wifi_on) {
+ printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure);
+ if (g_wifi_on || on_failure) {
#if defined(BCMSDIO) || defined(BCMPCIE)
ret = dhd_net_bus_devreset(dev, TRUE);
#ifdef BCMSDIO
wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
{
wl_cnt_t* cnt = NULL;
+#ifndef DISABLE_IF_COUNTERS
+ wl_if_stats_t* if_stats = NULL;
+#endif /* DISABLE_IF_COUNTERS */
+
int link_speed = 0;
struct connection_stats *output;
unsigned int bufsize = 0;
- int bytes_written = 0;
+ int bytes_written = -1;
int ret = 0;
ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
bufsize = total_len;
if (bufsize < sizeof(struct connection_stats)) {
- ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%u\n",
+ ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
__FUNCTION__, bufsize,
sizeof(struct connection_stats)));
goto error;
}
- if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) {
- ANDROID_ERROR(("kmalloc failed\n"));
- return -1;
+ output = (struct connection_stats *)command;
+
+#ifndef DISABLE_IF_COUNTERS
+ if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) {
+ ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
+ goto error;
}
- memset(cnt, 0, sizeof(*cnt));
+ memset(if_stats, 0, sizeof(*if_stats));
- ret = wldev_iovar_getbuf(dev, "counters", NULL, 0, (char *)cnt, sizeof(wl_cnt_t), NULL);
+ ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
if (ret) {
- ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n",
+ ANDROID_ERROR(("%s: if_counters not supported ret=%d\n",
__FUNCTION__, ret));
+
+ /* In case if_stats IOVAR is not supported, get information from counters. */
+#endif /* DISABLE_IF_COUNTERS */
+ if ((cnt = kmalloc(sizeof(*cnt), GFP_KERNEL)) == NULL) {
+ ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
+ goto error;
+ }
+ memset(cnt, 0, sizeof(*cnt));
+
+ ret = wldev_iovar_getbuf(dev, "counters", NULL, 0,
+ (char *)cnt, sizeof(wl_cnt_t), NULL);
+ if (ret) {
+ ANDROID_ERROR(("%s: wldev_iovar_getbuf() failed, ret=%d\n",
+ __FUNCTION__, ret));
+ goto error;
+ }
+
+ if (dtoh16(cnt->version) > WL_CNT_T_VERSION) {
+ ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n",
+ __FUNCTION__, WL_CNT_T_VERSION, cnt->version));
+ goto error;
+ }
+
+ output->txframe = dtoh32(cnt->txframe);
+ output->txbyte = dtoh32(cnt->txbyte);
+ output->txerror = dtoh32(cnt->txerror);
+ output->rxframe = dtoh32(cnt->rxframe);
+ output->rxbyte = dtoh32(cnt->rxbyte);
+ output->txfail = dtoh32(cnt->txfail);
+ output->txretry = dtoh32(cnt->txretry);
+ output->txretrie = dtoh32(cnt->txretrie);
+ output->txrts = dtoh32(cnt->txrts);
+ output->txnocts = dtoh32(cnt->txnocts);
+ output->txexptime = dtoh32(cnt->txexptime);
+#ifndef DISABLE_IF_COUNTERS
+ } else {
+ /* Populate from if_stats. */
+ if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
+ ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n",
+ __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version));
+ goto error;
+ }
+
+ output->txframe = (uint32)dtoh64(if_stats->txframe);
+ output->txbyte = (uint32)dtoh64(if_stats->txbyte);
+ output->txerror = (uint32)dtoh64(if_stats->txerror);
+ output->rxframe = (uint32)dtoh64(if_stats->rxframe);
+ output->rxbyte = (uint32)dtoh64(if_stats->rxbyte);
+ output->txfail = (uint32)dtoh64(if_stats->txfail);
+ output->txretry = (uint32)dtoh64(if_stats->txretry);
+ output->txretrie = (uint32)dtoh64(if_stats->txretrie);
+ /* Unavailable */
+ output->txrts = 0;
+ output->txnocts = 0;
+ output->txexptime = 0;
+ }
+#endif /* DISABLE_IF_COUNTERS */
+
+ /* link_speed is in kbps */
+ ret = wldev_get_link_speed(dev, &link_speed);
+ if (ret || link_speed < 0) {
+ ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
+ __FUNCTION__, ret, link_speed));
goto error;
}
- if (dtoh16(cnt->version) > WL_CNT_T_VERSION) {
- ANDROID_ERROR(("%s: incorrect version of wl_cnt_t, expected=%u got=%u\n",
- __FUNCTION__, WL_CNT_T_VERSION, cnt->version));
- goto error;
+ output->txrate = link_speed;
+
+ /* Channel idle ratio. */
+ if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
+ output->chan_idle = 0;
+	}
+
+ bytes_written = sizeof(struct connection_stats);
+
+error:
+#ifndef DISABLE_IF_COUNTERS
+ if (if_stats) {
+ kfree(if_stats);
+ }
+#endif /* DISABLE_IF_COUNTERS */
+ if (cnt) {
+ kfree(cnt);
+ }
+
+ return bytes_written;
+}
+#endif /* CONNECTION_STATISTICS */
+
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+/* SoftAP feature */
+#define APCS_BAND_2G_LEGACY1 20
+#define APCS_BAND_2G_LEGACY2 0
+#define APCS_BAND_AUTO "band=auto"
+#define APCS_BAND_2G "band=2g"
+#define APCS_BAND_5G "band=5g"
+#define APCS_MAX_2G_CHANNELS 11
+#define APCS_MAX_RETRY 10
+#define APCS_DEFAULT_2G_CH 1
+#define APCS_DEFAULT_5G_CH 149
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
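+/* SoftAP auto channel selection: start WLC_START_CHANNEL_SEL for the requested band
+ * (auto/2g/5g), poll WLC_GET_CHANNEL_SEL up to APCS_MAX_RETRY times, and fall back to the
+ * default channel for that band if selection fails.
+ */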
+static int
+wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
+ char* command, int total_len)
+{
+ int channel = 0;
+ int chosen = 0;
+ int retry = 0;
+ int ret = 0;
+ int spect = 0;
+ u8 *reqbuf = NULL;
+ uint32 band = WLC_BAND_2G;
+ uint32 buf_size;
+
+ if (cmd_str) {
+ ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
+ if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+ band = WLC_BAND_AUTO;
+ } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ band = WLC_BAND_5G;
+ } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ band = WLC_BAND_2G;
+ } else {
+ /*
+ * For backward compatibility: Some platforms used to issue argument 20 or 0
+ * to enforce the 2G channel selection
+ */
+ channel = bcm_atoi(cmd_str);
+ if ((channel == APCS_BAND_2G_LEGACY1) ||
+ (channel == APCS_BAND_2G_LEGACY2)) {
+ band = WLC_BAND_2G;
+ } else {
+ ANDROID_ERROR(("Invalid argument\n"));
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* If no argument is provided, default to 2G */
+		ANDROID_ERROR(("No argument given, default to 2.4G scan\n"));
+ band = WLC_BAND_2G;
+ }
+ ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
+
+ if ((ret = wldev_ioctl(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect), false)) < 0) {
+ ANDROID_ERROR(("ACS: error getting the spect\n"));
+ goto done;
+ }
+
+ if (spect > 0) {
+		/* If the STA is connected, return the STA channel; otherwise ACS can be issued:
+ * set spect to 0 and proceed with ACS
+ */
+ channel = wl_cfg80211_get_sta_channel();
+ if (channel) {
+ channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH;
+ goto done2;
+ }
+
+		if ((ret = wl_cfg80211_set_spect(dev, 0)) < 0) {
+ ANDROID_ERROR(("ACS: error while setting spect\n"));
+ goto done;
+ }
+ }
+
+ reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+ if (reqbuf == NULL) {
+ ANDROID_ERROR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+
+ if (band == WLC_BAND_AUTO) {
+ ANDROID_INFO(("ACS full channel scan \n"));
+ reqbuf[0] = htod32(0);
+ } else if (band == WLC_BAND_5G) {
+ ANDROID_INFO(("ACS 5G band scan \n"));
+ if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+			ANDROID_ERROR(("ACS 5g chanspec retrieval failed! \n"));
+ goto done;
+ }
+ } else if (band == WLC_BAND_2G) {
+ /*
+ * If channel argument is not provided/ argument 20 is provided,
+ * Restrict channel to 2GHz, 20MHz BW, No SB
+ */
+ ANDROID_INFO(("ACS 2G band scan \n"));
+ if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
+			ANDROID_ERROR(("ACS 2g chanspec retrieval failed! \n"));
+ goto done;
+ }
+ } else {
+ ANDROID_ERROR(("ACS: No band chosen\n"));
+ goto done2;
+ }
+
+ buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE;
+ ret = wldev_ioctl(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
+ buf_size, true);
+ if (ret < 0) {
+ ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret));
+ channel = 0;
+ goto done;
+ }
+
+ /* Wait for auto channel selection, max 3000 ms */
+ if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) {
+ OSL_SLEEP(500);
+ } else {
+ /*
+		 * A full channel scan takes at least 1.2 s
+		 * even with parallel scan; max wait time: 3500 ms
+ */
+ OSL_SLEEP(1000);
+ }
+
+ retry = APCS_MAX_RETRY;
+ while (retry--) {
+ ret = wldev_ioctl(dev, WLC_GET_CHANNEL_SEL, &chosen,
+ sizeof(chosen), false);
+ if (ret < 0) {
+ chosen = 0;
+ } else {
+ chosen = dtoh32(chosen);
+ }
+
+ if (chosen) {
+ int chosen_band;
+ int apcs_band;
+#ifdef D11AC_IOTYPES
+ if (wl_cfg80211_get_ioctl_version() == 1) {
+ channel = LCHSPEC_CHANNEL((chanspec_t)chosen);
+ } else {
+ channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+ }
+#else
+ channel = CHSPEC_CHANNEL((chanspec_t)chosen);
+#endif /* D11AC_IOTYPES */
+ apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
+ chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
+ if (apcs_band == chosen_band) {
+ ANDROID_ERROR(("selected channel = %d\n", channel));
+ break;
+ }
+ }
+ ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x\n",
+ (APCS_MAX_RETRY - retry), ret, chosen));
+ OSL_SLEEP(250);
+ }
+
+done:
+ if ((retry == 0) || (ret < 0)) {
+ /* On failure, fallback to a default channel */
+		if (band == WLC_BAND_5G) {
+ channel = APCS_DEFAULT_5G_CH;
+ } else {
+ channel = APCS_DEFAULT_2G_CH;
+ }
+ ANDROID_ERROR(("ACS failed. Fall back to default channel (%d) \n", channel));
+ }
+done2:
+ if (spect > 0) {
+		if ((ret = wl_cfg80211_set_spect(dev, spect)) < 0) {
+ ANDROID_ERROR(("ACS: error while setting spect\n"));
+ }
+ }
+
+ if (reqbuf) {
+ kfree(reqbuf);
+ }
+
+ if (channel) {
+ snprintf(command, 4, "%d", channel);
+ ANDROID_INFO(("command result is %s \n", command));
+ return strlen(command);
+ } else {
+ return ret;
+ }
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+
+#ifdef SUPPORT_SET_LPC
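+/* Toggle the "lpc" iovar: bring the interface down, set lpc, then bring it back up */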
+static int
+wl_android_set_lpc(struct net_device *dev, const char* string_num)
+{
+ int lpc_enabled, ret;
+ s32 val = 1;
+
+ lpc_enabled = bcm_atoi(string_num);
+ ANDROID_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled));
+
+ ret = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true);
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_DOWN error %d\n", ret));
+
+ wldev_iovar_setint(dev, "lpc", lpc_enabled);
+
+ ret = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true);
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_UP error %d\n", ret));
+
+ return 1;
+}
+#endif /* SUPPORT_SET_LPC */
+
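+/* Adjust the short/long retry limits: change=true uses the reduced values (SRL 4 / LRL 2),
+ * change=false restores the defaults (SRL 7 / LRL 4).
+ */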
+static int
+wl_android_ch_res_rl(struct net_device *dev, bool change)
+{
+ int error = 0;
+ s32 srl = 7;
+ s32 lrl = 4;
+ printk("%s enter\n", __FUNCTION__);
+ if (change) {
+ srl = 4;
+ lrl = 2;
+ }
+ error = wldev_ioctl(dev, WLC_SET_SRL, &srl, sizeof(s32), true);
+ if (error) {
+ ANDROID_ERROR(("Failed to set SRL, error = %d\n", error));
+ }
+ error = wldev_ioctl(dev, WLC_SET_LRL, &lrl, sizeof(s32), true);
+ if (error) {
+ ANDROID_ERROR(("Failed to set LRL, error = %d\n", error));
+ }
+ return error;
+}
+
+
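+/* RMC helpers: enable/disable RMC via the "rmc_ackreq" iovar and select the RMC leader
+ * (or auto leader selection) via the "rmc_ar" iovar.
+ */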
+static int
+wl_android_rmc_enable(struct net_device *net, int rmc_enable)
+{
+ int err;
+
+ err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable);
+ return err;
+}
+
+static int
+wl_android_rmc_set_leader(struct net_device *dev, const char* straddr)
+{
+ int error = BCME_OK;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_rmc_entry_t rmc_entry;
+ ANDROID_INFO(("%s: Set new RMC leader %s\n", __FUNCTION__, straddr));
+
+ memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) {
+ if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) {
+ ANDROID_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__));
+ memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ } else {
+ ANDROID_ERROR(("%s: No valid mac address provided\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t),
+ smbuf, sizeof(smbuf), NULL);
+
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("%s: Unable to set RMC leader, error = %d\n",
+ __FUNCTION__, error));
+ }
+
+ return error;
+}
+
+static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len)
+{
+ int err = 0;
+ int pid = 0;
+
+ if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) {
+ ANDROID_ERROR(("Failed to get Parameter from : %s\n", command));
+ return -1;
+ }
+
+	/* set pid; when the event occurs, a notification is sent through netlink */
+ wl_cfg80211_set_rmc_pid(pid);
+
+ ANDROID_TRACE(("RMC pid=%d\n", pid));
+
+ return err;
+}
+
+int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int mode = 0;
+
+ error = wldev_iovar_getint(dev, "scan_ps", &mode);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to get single core scan Mode, error = %d\n",
+ __FUNCTION__, error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode);
+
+ return bytes_written;
+}
+
+int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
}
- /* link_speed is in kbps */
- ret = wldev_get_link_speed(dev, &link_speed);
- if (ret || link_speed < 0) {
- ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
- __FUNCTION__, ret, link_speed));
- goto error;
+ error = wldev_iovar_setint(dev, "scan_ps", mode);
+ if (error) {
+ ANDROID_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n",
+ __FUNCTION__, mode, error));
+ return -1;
}
- output = (struct connection_stats *)command;
- output->txframe = dtoh32(cnt->txframe);
- output->txbyte = dtoh32(cnt->txbyte);
- output->txerror = dtoh32(cnt->txerror);
- output->rxframe = dtoh32(cnt->rxframe);
- output->rxbyte = dtoh32(cnt->rxbyte);
- output->txfail = dtoh32(cnt->txfail);
- output->txretry = dtoh32(cnt->txretry);
- output->txretrie = dtoh32(cnt->txretrie);
- output->txrts = dtoh32(cnt->txrts);
- output->txnocts = dtoh32(cnt->txnocts);
- output->txexptime = dtoh32(cnt->txexptime);
- output->txrate = link_speed;
+ return error;
+}
+#ifdef TEST_TX_POWER_CONTROL
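+/* Test hooks for TX power: set a fixed power in dBm (-1 selects automatic) or read it back */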
+static int
+wl_android_set_tx_power(struct net_device *dev, const char* string_num)
+{
+ int err = 0;
+ s32 dbm;
+ enum nl80211_tx_power_setting type;
- /* Channel idle ratio. */
- if (wl_chanim_stats(dev, &(output->chan_idle)) < 0) {
- output->chan_idle = 0;
- };
+ dbm = bcm_atoi(string_num);
- kfree(cnt);
+ if (dbm < -1) {
+ ANDROID_ERROR(("%s: dbm is negative...\n", __FUNCTION__));
+ return -EINVAL;
+ }
- bytes_written = sizeof(struct connection_stats);
- return bytes_written;
+ if (dbm == -1)
+ type = NL80211_TX_POWER_AUTOMATIC;
+ else
+ type = NL80211_TX_POWER_FIXED;
-error:
- if (cnt) {
- kfree(cnt);
+ err = wl_set_tx_power(dev, type, dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
}
- return -1;
+
+ return 1;
}
-#endif /* CONNECTION_STATISTICS */
static int
-wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
+wl_android_get_tx_power(struct net_device *dev, char *command, int total_len)
{
- uchar pmk[33];
- int error = 0;
- char smbuf[WLC_IOCTL_SMLEN];
-#ifdef OKC_DEBUG
- int i = 0;
-#endif
+ int err;
+ int bytes_written;
+ s32 dbm = 0;
- bzero(pmk, sizeof(pmk));
- memcpy((char *)pmk, command + strlen("SET_PMK "), 32);
- error = wldev_iovar_setbuf(dev, "okc_info_pmk", pmk, 32, smbuf, sizeof(smbuf), NULL);
- if (error) {
- ANDROID_ERROR(("Failed to set PMK for OKC, error = %d\n", error));
+ err = wl_get_tx_power(dev, &dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
}
-#ifdef OKC_DEBUG
- ANDROID_ERROR(("PMK is "));
- for (i = 0; i < 32; i++)
- ANDROID_ERROR(("%02X ", pmk[i]));
- ANDROID_ERROR(("\n"));
-#endif
- return error;
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_TEST_GET_TX_POWER, dbm);
+
+ ANDROID_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm));
+
+ return bytes_written;
}
+#endif /* TEST_TX_POWER_CONTROL */
static int
-wl_android_okc_enable(struct net_device *dev, char *command, int total_len)
+wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num)
{
- int error = 0;
- char okc_enable = 0;
+ int err = 0;
+ int setval = 0;
+ s32 mode = bcm_atoi(string_num);
- okc_enable = command[strlen(CMD_OKC_ENABLE) + 1] - '0';
- error = wldev_iovar_setint(dev, "okc_enable", okc_enable);
- if (error) {
- ANDROID_ERROR(("Failed to %s OKC, error = %d\n",
- okc_enable ? "enable" : "disable", error));
+	/* Per the Samsung-specific requirement, '0' means activate the SAR limit
+	 * and '-1' means return to the normal state (deactivate the SAR limit)
+ */
+ if (mode == 0) {
+ ANDROID_INFO(("%s: SAR limit control activated\n", __FUNCTION__));
+ setval = 1;
+ } else if (mode == -1) {
+ ANDROID_INFO(("%s: SAR limit control deactivated\n", __FUNCTION__));
+ setval = 0;
+ } else {
+ return -EINVAL;
}
- wldev_iovar_setint(dev, "ccx_enable", 0);
-
- return error;
+ err = wldev_iovar_setint(dev, "sar_enable", setval);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
+ }
+ return 1;
}
-
-
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
{
kfree(config);
}
}
+#ifdef WL11ULB
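+/* ULB private commands: ULB_MODE sets the ULB mode, ULB_BW sets the bandwidth for a given
+ * interface (an optional interface name follows the bw argument).
+ */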
+static int
+wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len)
+{
+ int mode = 0;
+
+ ANDROID_INFO(("set ulb mode (%s) \n", command));
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
+ }
+ return wl_cfg80211_set_ulb_mode(dev, mode);
+}
+static int
+wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len)
+{
+ int bw = 0;
+ u8 *pos;
+ char *ifname = NULL;
+ ANDROID_INFO(("set ulb bw (%s) \n", command));
+
+ /*
+ * For sta/ap: IFNAME=<ifname> DRIVER ULB_BW <bw> ifname
+ * For p2p: IFNAME=wlan0 DRIVER ULB_BW <bw> p2p-dev-wlan0
+ */
+ if (total_len < strlen(CMD_ULB_BW) + 2)
+ return -EINVAL;
+
+ pos = command + strlen(CMD_ULB_BW) + 1;
+ bw = bcm_atoi(pos);
+
+ if ((strlen(pos) >= 5)) {
+ ifname = pos + 2;
+ }
+ ANDROID_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw));
+ return wl_cfg80211_set_ulb_bw(dev, bw, ifname);
+}
+#endif /* WL11ULB */
static int
wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
{
ANDROID_INFO(("%s: enter miracast mode %d\n", __FUNCTION__, mode));
- if (miracast_cur_mode == mode)
+ if (miracast_cur_mode == mode) {
return 0;
+ }
wl_android_iolist_resume(dev, &miracast_resume_list);
miracast_cur_mode = MIRACAST_MODE_OFF;
ANDROID_ERROR(("%s: Connected station's beacon interval: "
"%d and set mchan_algo to %d \n",
__FUNCTION__, val, config.param));
- }
- else {
+ } else {
config.param = MIRACAST_MCHAN_ALGO;
}
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
- if (ret)
+ if (ret) {
goto resume;
+ }
/* setting mchan_bw to platform specific value */
config.iovar = "mchan_bw";
config.param = MIRACAST_MCHAN_BW;
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
- if (ret)
+ if (ret) {
goto resume;
+ }
/* setting apmdu to platform specific value */
config.iovar = "ampdu_mpdu";
config.param = MIRACAST_AMPDU_SIZE;
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
- if (ret)
+ if (ret) {
goto resume;
+ }
/* FALLTROUGH */
/* Source mode shares most configurations with sink mode.
* Fall through here to avoid code duplication
config.iovar = "roam_off";
config.param = 1;
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
- if (ret)
+ if (ret) {
goto resume;
+ }
+
/* tunr off pm */
- val = 0;
- config.iovar = NULL;
- config.ioctl = WLC_GET_PM;
- config.arg = &val;
- config.len = sizeof(int);
- ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
- if (ret)
+ ret = wldev_ioctl(dev, WLC_GET_PM, &val, sizeof(val), false);
+ if (ret) {
goto resume;
+ }
+ if (val != PM_OFF) {
+ val = PM_OFF;
+ config.iovar = NULL;
+ config.ioctl = WLC_GET_PM;
+ config.arg = &val;
+ config.len = sizeof(int);
+ ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
+ if (ret) {
+ goto resume;
+ }
+ }
break;
case MIRACAST_MODE_OFF:
default:
return ret;
}
-#ifdef WLAIBSS
-static int wl_android_set_ibss_txfail_event(struct net_device *dev, char *command, int total_len)
-{
- int err = 0;
- int retry = 0;
- int pid = 0;
- aibss_txfail_config_t txfail_config = {0, 0, 0, 0};
- char smbuf[WLC_IOCTL_SMLEN];
-
- if (sscanf(command, CMD_SETIBSSTXFAILEVENT " %d %d", &retry, &pid) <= 0) {
- ANDROID_ERROR(("Failed to get Parameter from : %s\n", command));
- return -1;
- }
-
- /* set pid, and if the event was happened, let's send a notification through netlink */
- wl_cfg80211_set_txfail_pid(pid);
-
- /* If retry value is 0, it disables the functionality for TX Fail. */
- if (retry > 0) {
- txfail_config.max_tx_retry = retry;
- txfail_config.bcn_timeout = 0; /* 0 : disable tx fail from beacon */
- }
- txfail_config.version = AIBSS_TXFAIL_CONFIG_VER_0;
- txfail_config.len = sizeof(txfail_config);
-
- err = wldev_iovar_setbuf(dev, "aibss_txfail_config", (void *) &txfail_config,
- sizeof(aibss_txfail_config_t), smbuf, WLC_IOCTL_SMLEN, NULL);
- ANDROID_TRACE(("retry=%d, pid=%d, err=%d\n", retry, pid, err));
-
- return ((err == 0)?total_len:err);
-}
-
-static int wl_android_get_ibss_peer_info(struct net_device *dev, char *command,
- int total_len, bool bAll)
-{
- int error;
- int bytes_written = 0;
- void *buf = NULL;
- bss_peer_list_info_t peer_list_info;
- bss_peer_info_t *peer_info;
- int i;
- bool found = false;
- struct ether_addr mac_ea;
-
- ANDROID_TRACE(("get ibss peer info(%s)\n", bAll?"true":"false"));
-
- if (!bAll) {
- if (sscanf (command, "GETIBSSPEERINFO %02x:%02x:%02x:%02x:%02x:%02x",
- (unsigned int *)&mac_ea.octet[0], (unsigned int *)&mac_ea.octet[1],
- (unsigned int *)&mac_ea.octet[2], (unsigned int *)&mac_ea.octet[3],
- (unsigned int *)&mac_ea.octet[4], (unsigned int *)&mac_ea.octet[5]) != 6) {
- ANDROID_TRACE(("invalid MAC address\n"));
- return -1;
- }
- }
-
- if ((buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL)) == NULL) {
- ANDROID_ERROR(("kmalloc failed\n"));
- return -1;
- }
-
- error = wldev_iovar_getbuf(dev, "bss_peer_info", NULL, 0, buf, WLC_IOCTL_MAXLEN, NULL);
- if (unlikely(error)) {
- ANDROID_ERROR(("could not get ibss peer info (%d)\n", error));
- kfree(buf);
- return -1;
- }
-
- memcpy(&peer_list_info, buf, sizeof(peer_list_info));
- peer_list_info.version = htod16(peer_list_info.version);
- peer_list_info.bss_peer_info_len = htod16(peer_list_info.bss_peer_info_len);
- peer_list_info.count = htod32(peer_list_info.count);
-
- ANDROID_TRACE(("ver:%d, len:%d, count:%d\n", peer_list_info.version,
- peer_list_info.bss_peer_info_len, peer_list_info.count));
-
- if (peer_list_info.count > 0) {
- if (bAll)
- bytes_written += sprintf(&command[bytes_written], "%u ",
- peer_list_info.count);
-
- peer_info = (bss_peer_info_t *) ((void *)buf + BSS_PEER_LIST_INFO_FIXED_LEN);
-
-
- for (i = 0; i < peer_list_info.count; i++) {
-
- ANDROID_TRACE(("index:%d rssi:%d, tx:%u, rx:%u\n", i, peer_info->rssi,
- peer_info->tx_rate, peer_info->rx_rate));
-
- if (!bAll &&
- memcmp(&mac_ea, &peer_info->ea, sizeof(struct ether_addr)) == 0) {
- found = true;
- }
-
- if (bAll || found) {
- bytes_written += sprintf(&command[bytes_written], MACF,
- ETHER_TO_MACF(peer_info->ea));
- bytes_written += sprintf(&command[bytes_written], " %u %d ",
- peer_info->tx_rate/1000, peer_info->rssi);
- }
-
- if (found)
- break;
-
- peer_info = (bss_peer_info_t *)((void *)peer_info+sizeof(bss_peer_info_t));
- }
- }
- else {
- ANDROID_ERROR(("could not get ibss peer info : no item\n"));
- }
- bytes_written += sprintf(&command[bytes_written], "%s", "\0");
-
- ANDROID_TRACE(("command(%u):%s\n", total_len, command));
- ANDROID_TRACE(("bytes_written:%d\n", bytes_written));
-
- kfree(buf);
- return bytes_written;
-}
-
-int wl_android_set_ibss_routetable(struct net_device *dev, char *command, int total_len)
-{
-
- char *pcmd = command;
- char *str = NULL;
-
- ibss_route_tbl_t *route_tbl = NULL;
- char *ioctl_buf = NULL;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- s32 err = BCME_OK;
- uint32 route_tbl_len;
- uint32 entries;
- char *endptr;
- uint32 i = 0;
- struct ipv4_addr dipaddr;
- struct ether_addr ea;
-
- route_tbl_len = sizeof(ibss_route_tbl_t) +
- (MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t);
- route_tbl = (ibss_route_tbl_t *)kzalloc(route_tbl_len, kflags);
- if (!route_tbl) {
- ANDROID_ERROR(("Route TBL alloc failed\n"));
- return -ENOMEM;
- }
- ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
- if (!ioctl_buf) {
- ANDROID_ERROR(("ioctl memory alloc failed\n"));
- if (route_tbl) {
- kfree(route_tbl);
- }
- return -ENOMEM;
- }
- memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);
-
- /* drop command */
- str = bcmstrtok(&pcmd, " ", NULL);
-
- /* get count */
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str) {
- ANDROID_ERROR(("Invalid number parameter %s\n", str));
- err = -EINVAL;
- goto exit;
- }
- entries = bcm_strtoul(str, &endptr, 0);
- if (*endptr != '\0') {
- ANDROID_ERROR(("Invalid number parameter %s\n", str));
- err = -EINVAL;
- goto exit;
- }
- ANDROID_INFO(("Routing table count:%d\n", entries));
- route_tbl->num_entry = entries;
-
- for (i = 0; i < entries; i++) {
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str || !bcm_atoipv4(str, &dipaddr)) {
- ANDROID_ERROR(("Invalid ip string %s\n", str));
- err = -EINVAL;
- goto exit;
- }
-
-
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str || !bcm_ether_atoe(str, &ea)) {
- ANDROID_ERROR(("Invalid ethernet string %s\n", str));
- err = -EINVAL;
- goto exit;
- }
- bcopy(&dipaddr, &route_tbl->route_entry[i].ipv4_addr, IPV4_ADDR_LEN);
- bcopy(&ea, &route_tbl->route_entry[i].nexthop, ETHER_ADDR_LEN);
- }
-
- route_tbl_len = sizeof(ibss_route_tbl_t) +
- ((!entries?0:(entries - 1)) * sizeof(ibss_route_entry_t));
- err = wldev_iovar_setbuf(dev, "ibss_route_tbl",
- route_tbl, route_tbl_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- if (err != BCME_OK) {
- ANDROID_ERROR(("Fail to set iovar %d\n", err));
- err = -EINVAL;
- }
-
-exit:
- if (route_tbl)
- kfree(route_tbl);
- if (ioctl_buf)
- kfree(ioctl_buf);
- return err;
-
-}
-
-int
-wl_android_set_ibss_ampdu(struct net_device *dev, char *command, int total_len)
-{
- char *pcmd = command;
- char *str = NULL, *endptr = NULL;
- struct ampdu_aggr aggr;
- char smbuf[WLC_IOCTL_SMLEN];
- int idx;
- int err = 0;
- int wme_AC2PRIO[AC_COUNT][2] = {
- {PRIO_8021D_VO, PRIO_8021D_NC}, /* AC_VO - 3 */
- {PRIO_8021D_CL, PRIO_8021D_VI}, /* AC_VI - 2 */
- {PRIO_8021D_BK, PRIO_8021D_NONE}, /* AC_BK - 1 */
- {PRIO_8021D_BE, PRIO_8021D_EE}}; /* AC_BE - 0 */
-
- ANDROID_TRACE(("set ibss ampdu:%s\n", command));
-
- memset(&aggr, 0, sizeof(aggr));
- /* Cofigure all priorities */
- aggr.conf_TID_bmap = NBITMASK(NUMPRIO);
-
- /* acquire parameters */
- /* drop command */
- str = bcmstrtok(&pcmd, " ", NULL);
-
- for (idx = 0; idx < AC_COUNT; idx++) {
- bool on;
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str) {
- ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
- return -EINVAL;
- }
- on = bcm_strtoul(str, &endptr, 0) ? TRUE : FALSE;
- if (*endptr != '\0') {
- ANDROID_ERROR(("Invalid number format %s\n", str));
- return -EINVAL;
- }
- if (on) {
- setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][0]);
- setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][1]);
- }
- }
-
- err = wldev_iovar_setbuf(dev, "ampdu_txaggr", (void *)&aggr,
- sizeof(aggr), smbuf, WLC_IOCTL_SMLEN, NULL);
-
- return ((err == 0) ? total_len : err);
-}
-
-int wl_android_set_ibss_antenna(struct net_device *dev, char *command, int total_len)
-{
- char *pcmd = command;
- char *str = NULL;
- int txchain, rxchain;
- int err = 0;
-
- ANDROID_TRACE(("set ibss antenna:%s\n", command));
-
- /* acquire parameters */
- /* drop command */
- str = bcmstrtok(&pcmd, " ", NULL);
-
- /* TX chain */
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str) {
- ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
- return -EINVAL;
- }
- txchain = bcm_atoi(str);
-
- /* RX chain */
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!str) {
- ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
- return -EINVAL;
- }
- rxchain = bcm_atoi(str);
-
- err = wldev_iovar_setint(dev, "txchain", txchain);
- if (err != 0)
- return err;
- err = wldev_iovar_setint(dev, "rxchain", rxchain);
- return ((err == 0)?total_len:err);
-}
-#endif /* WLAIBSS */
int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
{
return res;
}
-
static const char *
get_string_by_separator(char *result, int result_len, const char *src, char separator)
{
*result++ = *src++;
}
*result = 0;
- if (*src == separator)
+ if (*src == separator) {
++src;
+ }
return src;
}
+#ifdef WL_CFG80211
int
wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd)
{
roamoffl_bssid_list_t *bssid_list;
const char *str = cmd;
char *ioctl_buf;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp();
str = get_string_by_separator(sbuf, 32, str, ',');
cnt = bcm_atoi(sbuf);
cnt = MIN(cnt, MAX_ROAMOFFL_BSSID_NUM);
- size = sizeof(int) + sizeof(struct ether_addr) * cnt;
+
+ if ((cnt > 0) &&
+ (((dhdp->op_mode & DHD_FLAG_STA_MODE) && (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+ FALSE)) {
+		ANDROID_ERROR(("Can't set ROAMOFFL_BSSID when STA-SoftAP or WES is enabled\n"));
+ return -EINVAL;
+ }
+
+ size = sizeof(int32) + sizeof(struct ether_addr) * cnt;
ANDROID_ERROR(("ROAM OFFLOAD BSSID LIST %d BSSIDs, size %d\n", cnt, size));
bssid_list = kmalloc(size, GFP_KERNEL);
if (bssid_list == NULL) {
for (i = 0; i < cnt; i++) {
str = get_string_by_separator(sbuf, 32, str, ',');
- if (bcm_ether_atoe(sbuf, &bssid_list->bssid[i]) == 0) {
- ANDROID_ERROR(("%s: Invalid station MAC Address!!!\n", __FUNCTION__));
- kfree(bssid_list);
- kfree(ioctl_buf);
- return -1;
- }
+ bcm_ether_atoe(sbuf, &bssid_list->bssid[i]);
}
- bssid_list->cnt = cnt;
+ bssid_list->cnt = (int32)cnt;
err = wldev_iovar_setbuf(dev, "roamoffl_bssid_list",
- bssid_list, size, ioctl_buf, ioctl_buf_len, NULL);
+ bssid_list, size, ioctl_buf, ioctl_buf_len, NULL);
kfree(bssid_list);
kfree(ioctl_buf);
return err;
}
+#endif /* WL_CFG80211 */
#ifdef P2PRESP_WFDIE_SRC
static int wl_android_get_wfdie_resp(struct net_device *dev, char *command, int total_len)
}
#endif /* P2PRESP_WFDIE_SRC */
-static int wl_android_get_link_status(struct net_device *dev, char *command,
- int total_len)
-{
- int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map;
- uint32 rspec;
- uint encode, rate, txexp;
- struct wl_bss_info *bi;
- int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
- char buf[datalen];
-
- /* get BSS information */
- *(u32 *) buf = htod32(datalen);
- error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void *)buf, datalen, false);
- if (unlikely(error)) {
- ANDROID_ERROR(("Could not get bss info %d\n", error));
- return -1;
- }
-
- bi = (struct wl_bss_info *) (buf + sizeof(uint32));
-
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
- if (bi->BSSID.octet[i] > 0) {
- break;
- }
- }
-
- if (i == ETHER_ADDR_LEN) {
- ANDROID_TRACE(("No BSSID\n"));
- return -1;
- }
-
- /* check VHT capability at beacon */
- if (bi->vht_cap) {
- if (CHSPEC_IS5G(bi->chanspec)) {
- result |= WL_ANDROID_LINK_AP_VHT_SUPPORT;
- }
- }
-
- /* get a rspec (radio spectrum) rate */
- error = wldev_iovar_getint(dev, "nrate", &rspec);
- if (unlikely(error) || rspec == 0) {
- ANDROID_ERROR(("get link status error (%d)\n", error));
- return -1;
- }
-
- encode = (rspec & WL_RSPEC_ENCODING_MASK);
- rate = (rspec & WL_RSPEC_RATE_MASK);
- txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT;
-
- switch (encode) {
- case WL_RSPEC_ENCODE_HT:
- /* check Rx MCS Map for HT */
- for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
- int8 bitmap = 0xFF;
- if (i == MAX_STREAMS_SUPPORTED-1) {
- bitmap = 0x7F;
- }
- if (bi->basic_mcs[i] & bitmap) {
- nss++;
- }
- }
- break;
- case WL_RSPEC_ENCODE_VHT:
- /* check Rx MCS Map for VHT */
- for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
- mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
- if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
- nss++;
- }
- }
- break;
- }
+#ifdef BT_WIFI_HANDOVER
+static int
+wl_tbow_teardown(struct net_device *dev, char *command, int total_len)
+{
+ int err = BCME_OK;
+ char buf[WLC_IOCTL_SMLEN];
+ tbow_setup_netinfo_t netinfo;
+ memset(&netinfo, 0, sizeof(netinfo));
+ netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
- /* check MIMO capability with nss in beacon */
- if (nss > 1) {
- result |= WL_ANDROID_LINK_AP_MIMO_SUPPORT;
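+ /* Request a teardown of the BT-WiFi handover session through the
+  * "tbow_doho" iovar.
+  */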
+ err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
+ sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL);
+ if (err < 0) {
+ ANDROID_ERROR(("tbow_doho iovar error %d\n", err));
+ return err;
}
+ return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
+#ifdef SET_RPS_CPUS
+static int
+wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len)
+{
+ int error, enable;
- single_stream = (encode == WL_RSPEC_ENCODE_RATE) ||
- ((encode == WL_RSPEC_ENCODE_HT) && rate < 8) ||
- ((encode == WL_RSPEC_ENCODE_VHT) &&
- ((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1);
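+ /* Parse the single-digit enable flag that follows the command keyword */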
+ enable = command[strlen(CMD_RPSMODE) + 1] - '0';
+ error = dhd_rps_cpus_enable(dev, enable);
- if (txexp == 0) {
- if ((rspec & WL_RSPEC_STBC) && single_stream) {
- stf = OLD_NRATE_STF_STBC;
+#if defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE) && defined(WL_CFG80211)
+ if (!error) {
+ void *dhdp = wl_cfg80211_get_dhdp();
+ if (enable) {
+ ANDROID_TRACE(("%s : set ack suppress. TCPACK_SUP_HOLD.\n", __FUNCTION__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
} else {
- stf = (single_stream) ? OLD_NRATE_STF_SISO : OLD_NRATE_STF_SDM;
+ ANDROID_TRACE(("%s : clear ack suppress.\n", __FUNCTION__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
}
- } else if (txexp == 1 && single_stream) {
- stf = OLD_NRATE_STF_CDD;
}
+#endif /* DHDTCPACK_SUPPRESS && BCMPCIE && WL_CFG80211 */
- /* check 11ac (VHT) */
- if (encode == WL_RSPEC_ENCODE_VHT) {
- if (CHSPEC_IS5G(bi->chanspec)) {
- result |= WL_ANDROID_LINK_VHT;
- }
- }
+ return error;
+}
+#endif /* SET_RPS_CPUS */
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
+{
+ int ret = 0;
- /* check MIMO */
- if (result & WL_ANDROID_LINK_AP_MIMO_SUPPORT) {
- switch (stf) {
- case OLD_NRATE_STF_SISO:
- break;
- case OLD_NRATE_STF_CDD:
- case OLD_NRATE_STF_STBC:
- result |= WL_ANDROID_LINK_MIMO;
- break;
- case OLD_NRATE_STF_SDM:
- if (!single_stream) {
- result |= WL_ANDROID_LINK_MIMO;
- }
- break;
- }
- }
+ ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len));
- ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n",
- __FUNCTION__, result, stf, single_stream, nss));
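+ /* Dispatch on the sub-command: start or stop P2P listen offload */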
+ if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_stop(dev);
+ } else {
+ ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf));
+ ret = -EINVAL;
+ }
+ return ret;
+}
+#endif /* P2P_LISTEN_OFFLOADING */
- bytes_written = sprintf(command, "%s %d", CMD_GET_LINK_STATUS, result);
+#ifdef WL_CFG80211
+int
+wl_android_murx_bfe_cap(struct net_device *dev, int val)
+{
+ int err = BCME_OK;
+ int iface_count = wl_cfg80211_iface_count();
- return bytes_written;
+ if (iface_count > 1) {
+ ANDROID_ERROR(("%s: murx_bfe_cap change is not allowed when "
+ "there are multiple interfaces\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ /* Now there is only single interface */
+ err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+ if (err) {
+ ANDROID_ERROR(("%s: Failed to set murx_bfe_cap IOVAR to %d,"
+ "error %d\n", __FUNCTION__, val, err));
+ err = -EINVAL;
+ }
+ return err;
}
+#endif /* WL_CFG80211 */
int
wl_android_get_channel(
}
if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
- bytes_written = wl_android_wifi_off(net);
+ bytes_written = wl_android_wifi_off(net, FALSE);
}
+#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
- /* TBD: SCAN-ACTIVE */
+ wl_cfg80211_set_passive_scan(net, command);
}
else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
- /* TBD: SCAN-PASSIVE */
+ wl_cfg80211_set_passive_scan(net, command);
}
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
}
int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
}
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
- else if (strnicmp(command, CMD_PKT_FILTER_MODE, strlen(CMD_PKT_FILTER_MODE)) == 0) {
- dhd_set_packet_filter_mode(net, &command[strlen(CMD_PKT_FILTER_MODE) + 1]);
- } else if (strnicmp(command, CMD_PKT_FILTER_PORTS, strlen(CMD_PKT_FILTER_PORTS)) == 0) {
- bytes_written = dhd_set_packet_filter_ports(net,
- &command[strlen(CMD_PKT_FILTER_PORTS) + 1]);
- ret = bytes_written;
- }
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
#endif /* PKT_FILTER_SUPPORT */
else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
/* TBD: BTCOEXSCAN-START */
}
else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
- if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If roam_var is unsupported, fallback to the original method */
- ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar unsupported in the firmware\n"));
- } else {
- bytes_written = -1;
- goto exit;
- }
+ if (dhd_conf_get_band(dhd_get_pub(net)) != WLC_BAND_AUTO) {
+ printf("%s: Band is fixed in config.txt\n", __FUNCTION__);
+ goto exit;
}
- if ((band == WLC_BAND_AUTO) || (ret == BCME_UNSUPPORTED))
- bytes_written = wldev_set_band(net, band);
-#else
bytes_written = wldev_set_band(net, band);
-#endif /* WL_HOST_BAND_MGMT */
}
else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
#ifdef WL_CFG80211
/* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */
else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ /*
+ * Usage examples:
+ * DRIVER COUNTRY US
+ * DRIVER COUNTRY US/7
+ */
char *country_code = command + strlen(CMD_COUNTRY) + 1;
-#ifdef CUSTOMER_HW5
- /* Customer_hw5 want to keep connections */
- bytes_written = wldev_set_country(net, country_code, true, false);
-#else
- bytes_written = wldev_set_country(net, country_code, true, true);
-#endif
+ char *rev_info_delim = country_code + 2; /* 2 bytes of country code */
+ int revinfo = -1;
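+ /* If a revision suffix follows the two-letter country code (e.g. "US/7"),
+  * parse it; otherwise keep the default of -1.
+  */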
+ if ((rev_info_delim) &&
+ (strnicmp(rev_info_delim, CMD_COUNTRY_DELIMITER,
+ strlen(CMD_COUNTRY_DELIMITER)) == 0) &&
+ (rev_info_delim + 1)) {
+ revinfo = bcm_atoi(rev_info_delim + 1);
+ }
+ bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
+#ifdef FCC_PWR_LIMIT_2G
+ if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+ }
+#endif /* FCC_PWR_LIMIT_2G */
}
#endif /* WL_CFG80211 */
-
+ else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
+ bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
+ bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
+ bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
+ bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
+ bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
+ }
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef WLTDLS
+ else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
+ bytes_written = wl_android_tdls_reset(net);
+ }
+#endif /* WLTDLS */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#ifdef PNO_SUPPORT
else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
priv_cmd.total_len - skip);
}
-#ifdef WL_SDO
- else if (strnicmp(command, CMD_P2P_SD_OFFLOAD, strlen(CMD_P2P_SD_OFFLOAD)) == 0) {
- u8 *buf = command;
- u8 *cmd_id = NULL;
- int len;
-
- cmd_id = strsep((char **)&buf, " ");
- /* if buf == NULL, means no arg */
- if (buf == NULL)
- len = 0;
- else
- len = strlen(buf);
-
- bytes_written = wl_cfg80211_sd_offload(net, cmd_id, buf, len);
+#ifdef P2P_LISTEN_OFFLOADING
+ else if (strnicmp(command, CMD_P2P_LISTEN_OFFLOAD, strlen(CMD_P2P_LISTEN_OFFLOAD)) == 0) {
+ u8 *sub_command = strchr(command, ' ');
+ bytes_written = wl_cfg80211_p2plo_offload(net, command, sub_command,
+ sub_command ? strlen(sub_command) : 0);
}
-#endif /* WL_SDO */
+#endif /* P2P_LISTEN_OFFLOADING */
#ifdef WL_NAN
else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) {
bytes_written = wl_cfg80211_nan_cmd_handler(net, command,
bytes_written = wl_cfg80211_set_p2p_ps(net, command + skip,
priv_cmd.total_len - skip);
}
+ else if (strnicmp(command, CMD_P2P_ECSA, strlen(CMD_P2P_ECSA)) == 0) {
+ int skip = strlen(CMD_P2P_ECSA) + 1;
+ bytes_written = wl_cfg80211_set_p2p_ecsa(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SET_AP_WPS_P2P_IE,
strlen(CMD_SET_AP_WPS_P2P_IE)) == 0) {
bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
priv_cmd.total_len - skip, *(command + skip - 2) - '0');
}
-#ifdef WLFBT
- else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) {
- wl_cfg80211_get_fbt_key(command);
- bytes_written = FBT_KEYLEN;
- }
-#endif /* WLFBT */
#endif /* WL_CFG80211 */
- else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0)
- bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_OKC_ENABLE, strlen(CMD_OKC_ENABLE)) == 0)
- bytes_written = wl_android_okc_enable(net, command, priv_cmd.total_len);
-#ifdef BCMCCX
- else if (strnicmp(command, CMD_GETCCKM_RN, strlen(CMD_GETCCKM_RN)) == 0) {
- bytes_written = wl_android_get_cckm_rn(net, command);
- }
- else if (strnicmp(command, CMD_SETCCKM_KRK, strlen(CMD_SETCCKM_KRK)) == 0) {
- bytes_written = wl_android_set_cckm_krk(net, command);
- }
- else if (strnicmp(command, CMD_GET_ASSOC_RES_IES, strlen(CMD_GET_ASSOC_RES_IES)) == 0) {
- bytes_written = wl_android_get_assoc_res_ies(net, command);
- }
-#endif /* BCMCCX */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
strlen(CMD_GET_BEST_CHANNELS)) == 0) {
priv_cmd.total_len);
}
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+ else if (strnicmp(command, CMD_SET_HAPD_AUTO_CHANNEL,
+ strlen(CMD_SET_HAPD_AUTO_CHANNEL)) == 0) {
+ int skip = strlen(CMD_SET_HAPD_AUTO_CHANNEL) + 1;
+ bytes_written = wl_android_set_auto_channel(net, (const char*)command+skip, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+ else if (strnicmp(command, CMD_HAPD_LPC_ENABLED,
+ strlen(CMD_HAPD_LPC_ENABLED)) == 0) {
+ int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3;
+ wl_android_set_lpc(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+ else if (strnicmp(command, CMD_TEST_FORCE_HANG,
+ strlen(CMD_TEST_FORCE_HANG)) == 0) {
+ int skip = strlen(CMD_TEST_FORCE_HANG) + 1;
+ net_os_send_hang_message_reason(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+ else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, true);
+ else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, false);
+ else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) {
+ int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0';
+ bytes_written = wl_android_rmc_enable(net, rmc_enable);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) {
+ int rmc_txrate;
+ sscanf(command, "%*s %10d", &rmc_txrate);
+ bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) {
+ int actperiod;
+ sscanf(command, "%*s %10d", &actperiod);
+ bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) {
+ int acktimeout;
+ sscanf(command, "%*s %10d", &acktimeout);
+ acktimeout *= 1000;
+ bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) {
+ int skip = strlen(CMD_SET_RMC_LEADER) + 1;
+ bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_EVENT,
+ strlen(CMD_SET_RMC_EVENT)) == 0)
+ bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) {
+ bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) {
+ bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len);
+ }
+#ifdef TEST_TX_POWER_CONTROL
+ else if (strnicmp(command, CMD_TEST_SET_TX_POWER,
+ strlen(CMD_TEST_SET_TX_POWER)) == 0) {
+ int skip = strlen(CMD_TEST_SET_TX_POWER) + 1;
+ wl_android_set_tx_power(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_TEST_GET_TX_POWER,
+ strlen(CMD_TEST_GET_TX_POWER)) == 0) {
+ wl_android_get_tx_power(net, command, priv_cmd.total_len);
+ }
+#endif /* TEST_TX_POWER_CONTROL */
+ else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL,
+ strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) {
+ int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1;
+ wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
+ }
+#ifdef IPV6_NDO_SUPPORT
+ else if (strnicmp(command, CMD_NDRA_LIMIT, strlen(CMD_NDRA_LIMIT)) == 0) {
+ bytes_written = wl_android_nd_ra_limit(net, command, priv_cmd.total_len);
+ }
+#endif /* IPV6_NDO_SUPPORT */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
wl_android_set_mac_address_filter(net, (const char*)command+skip);
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
- else if (strnicmp(command, CMD_SETMIRACAST, strlen(CMD_SETMIRACAST)) == 0)
- bytes_written = wldev_miracast_tuning(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_ASSOCRESPIE, strlen(CMD_ASSOCRESPIE)) == 0)
- bytes_written = wldev_get_assoc_resp_ie(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_RXRATESTATS, strlen(CMD_RXRATESTATS)) == 0)
- bytes_written = wldev_get_rx_rate_stats(net, command, priv_cmd.total_len);
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+#ifdef WL11ULB
+ else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0)
+ bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0)
+ bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len);
+#endif /* WL11ULB */
else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
bytes_written = wl_android_set_ibss_beacon_ouidata(net,
command, priv_cmd.total_len);
#endif
-#ifdef WLAIBSS
- else if (strnicmp(command, CMD_SETIBSSTXFAILEVENT,
- strlen(CMD_SETIBSSTXFAILEVENT)) == 0)
- bytes_written = wl_android_set_ibss_txfail_event(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO_ALL,
- strlen(CMD_GET_IBSS_PEER_INFO_ALL)) == 0)
- bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
- TRUE);
- else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO,
- strlen(CMD_GET_IBSS_PEER_INFO)) == 0)
- bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
- FALSE);
- else if (strnicmp(command, CMD_SETIBSSROUTETABLE,
- strlen(CMD_SETIBSSROUTETABLE)) == 0)
- bytes_written = wl_android_set_ibss_routetable(net, command,
- priv_cmd.total_len);
- else if (strnicmp(command, CMD_SETIBSSAMPDU, strlen(CMD_SETIBSSAMPDU)) == 0)
- bytes_written = wl_android_set_ibss_ampdu(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_SETIBSSANTENNAMODE, strlen(CMD_SETIBSSANTENNAMODE)) == 0)
- bytes_written = wl_android_set_ibss_antenna(net, command, priv_cmd.total_len);
-#endif /* WLAIBSS */
else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
int skip = strlen(CMD_KEEP_ALIVE) + 1;
bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
command + strlen(CMD_ROAM_OFFLOAD_APLIST) + 1);
}
#endif
+#if defined(WL_VIRTUAL_APSTA)
+ else if (strnicmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) {
+ char *name = (command + strlen(CMD_INTERFACE_CREATE) +1);
+ ANDROID_INFO(("Creating %s interface\n", name));
+ bytes_written = wl_cfg80211_interface_create(net, name);
+ }
+ else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
+ char *name = (command + strlen(CMD_INTERFACE_DELETE) +1);
+ ANDROID_INFO(("Deleteing %s interface\n", name));
+ bytes_written = wl_cfg80211_interface_delete(net, name);
+ }
+#endif /* defined (WL_VIRTUAL_APSTA) */
#ifdef P2PRESP_WFDIE_SRC
else if (strnicmp(command, CMD_P2P_SET_WFDIE_RESP,
strlen(CMD_P2P_SET_WFDIE_RESP)) == 0) {
bytes_written = wl_android_get_wfdie_resp(net, command, priv_cmd.total_len);
}
#endif /* P2PRESP_WFDIE_SRC */
- else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
- bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_DFS_AP_MOVE, strlen(CMD_DFS_AP_MOVE)) == 0) {
+ char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
+ bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
}
-#ifdef CONNECTION_STATISTICS
- else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
- strlen(CMD_GET_CONNECTION_STATS)) == 0) {
- bytes_written = wl_android_get_connection_stats(net, command,
- priv_cmd.total_len);
+#endif /* WL_CFG80211 */
+ else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) {
+ bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG,
+ strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG,
+ strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_weight_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG,
+ strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_table_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG,
+ strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_delta_config(net, data,
+ command, priv_cmd.total_len);
+ }
+#endif /* WL_CFG80211 */
+#ifdef SET_RPS_CPUS
+ else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
+ bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len);
+ }
+#endif /* SET_RPS_CPUS */
+#ifdef WLWFDS
+ else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) {
+ bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1);
+ }
+ else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) {
+ bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0);
+ }
+#endif /* WLWFDS */
+#ifdef BT_WIFI_HANDOVER
+ else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) {
+ ret = wl_tbow_teardown(net, command, priv_cmd.total_len);
+ }
+#endif /* BT_WIFI_HANDOVER */
+#ifdef FCC_PWR_LIMIT_2G
+ else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_MURX_BFE_CAP,
+ strlen(CMD_MURX_BFE_CAP)) == 0) {
+ uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0';
+ bytes_written = wl_android_murx_bfe_cap(net, val);
}
#endif
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) {
+ bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO))
+ == 0) {
+ bytes_written = wl_cfg80211_get_connect_failed_status(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, TRUE);
+ } else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
+ }
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+#ifdef DHD_LOG_DUMP
+ else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP,
+ strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) {
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp();
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_stats_dump(dhdp);
+#endif /* DHD_TRACE_WAKE_LOCK */
+ dhd_schedule_log_dump(dhdp);
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+ }
+#endif /* DHD_LOG_DUMP */
else if(strnicmp(command, CMD_GET_CHANNEL, strlen(CMD_GET_CHANNEL)) == 0) {
bytes_written = wl_android_get_channel(net, command, priv_cmd.total_len);
}
ret = -EFAULT;
}
}
+#ifdef CONNECTION_STATISTICS
+ else if (strnicmp(command, CMD_GET_CONNECTION_STATS,
+ strlen(CMD_GET_CONNECTION_STATS)) == 0) {
+ bytes_written = wl_android_get_connection_stats(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* CONNECTION_STATISTICS */
else {
ret = bytes_written;
}
bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
}
-#ifdef WL_GENL
- wl_genl_init();
-#endif
wl_netlink_init();
return ret;
int ret = 0;
struct io_cfg *cur, *q;
-#ifdef WL_GENL
- wl_genl_deinit();
-#endif /* WL_GENL */
wl_netlink_deinit();
list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
g_wifi_on = FALSE;
}
-#ifdef WL_GENL
-/* Generic Netlink Initializaiton */
-static int wl_genl_init(void)
-{
- int ret;
-
- ANDROID_TRACE(("GEN Netlink Init\n\n"));
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- /* register new family */
- ret = genl_register_family(&wl_genl_family);
- if (ret != 0)
- goto failure;
-
- /* register functions (commands) of the new family */
- ret = genl_register_ops(&wl_genl_family, &wl_genl_ops);
- if (ret != 0) {
- ANDROID_ERROR(("register ops failed: %i\n", ret));
- genl_unregister_family(&wl_genl_family);
- goto failure;
- }
-
- ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
-#else
- ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast);
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
- if (ret != 0) {
- ANDROID_ERROR(("register mc_group failed: %i\n", ret));
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
-#endif
- genl_unregister_family(&wl_genl_family);
- goto failure;
- }
-
- return 0;
-
-failure:
- ANDROID_ERROR(("Registering Netlink failed!!\n"));
- return -1;
-}
-
-/* Generic netlink deinit */
-static int wl_genl_deinit(void)
-{
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
- ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
-#endif
- if (genl_unregister_family(&wl_genl_family) < 0)
- ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
-
- return 0;
-}
-
-s32 wl_event_to_bcm_event(u16 event_type)
-{
- u16 event = -1;
-
- switch (event_type) {
- case WLC_E_SERVICE_FOUND:
- event = BCM_E_SVC_FOUND;
- break;
- case WLC_E_P2PO_ADD_DEVICE:
- event = BCM_E_DEV_FOUND;
- break;
- case WLC_E_P2PO_DEL_DEVICE:
- event = BCM_E_DEV_LOST;
- break;
- /* Above events are supported from BCM Supp ver 47 Onwards */
-#ifdef BT_WIFI_HANDOVER
- case WLC_E_BT_WIFI_HANDOVER_REQ:
- event = BCM_E_DEV_BT_WIFI_HO_REQ;
- break;
-#endif /* BT_WIFI_HANDOVER */
-
- default:
- ANDROID_ERROR(("Event not supported\n"));
- }
-
- return event;
-}
-
-s32
-wl_genl_send_msg(
- struct net_device *ndev,
- u32 event_type,
- u8 *buf,
- u16 len,
- u8 *subhdr,
- u16 subhdr_len)
-{
- int ret = 0;
- struct sk_buff *skb = NULL;
- void *msg;
- u32 attr_type = 0;
- bcm_event_hdr_t *hdr = NULL;
- int mcast = 1; /* By default sent as mutlicast type */
- int pid = 0;
- u8 *ptr = NULL, *p = NULL;
- u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-
-
- ANDROID_TRACE(("Enter \n"));
-
- /* Decide between STRING event and Data event */
- if (event_type == 0)
- attr_type = BCM_GENL_ATTR_STRING;
- else
- attr_type = BCM_GENL_ATTR_MSG;
-
- skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
- if (skb == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
- if (msg == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
-
- if (attr_type == BCM_GENL_ATTR_STRING) {
- /* Add a BCM_GENL_MSG attribute. Since it is specified as a string.
- * make sure it is null terminated
- */
- if (subhdr || subhdr_len) {
- ANDROID_ERROR(("No sub hdr support for the ATTR STRING type \n"));
- ret = -EINVAL;
- goto out;
- }
-
- ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
- if (ret != 0) {
- ANDROID_ERROR(("nla_put_string failed\n"));
- goto out;
- }
- } else {
- /* ATTR_MSG */
-
- /* Create a single buffer for all */
- p = ptr = kzalloc(tot_len, kflags);
- if (!ptr) {
- ret = -ENOMEM;
- ANDROID_ERROR(("ENOMEM!!\n"));
- goto out;
- }
-
- /* Include the bcm event header */
- hdr = (bcm_event_hdr_t *)ptr;
- hdr->event_type = wl_event_to_bcm_event(event_type);
- hdr->len = len + subhdr_len;
- ptr += sizeof(bcm_event_hdr_t);
-
- /* Copy subhdr (if any) */
- if (subhdr && subhdr_len) {
- memcpy(ptr, subhdr, subhdr_len);
- ptr += subhdr_len;
- }
-
- /* Copy the data */
- if (buf && len) {
- memcpy(ptr, buf, len);
- }
-
- ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
- if (ret != 0) {
- ANDROID_ERROR(("nla_put_string failed\n"));
- goto out;
- }
- }
-
- if (mcast) {
- int err = 0;
- /* finalize the message */
- genlmsg_end(skb, msg);
- /* NETLINK_CB(skb).dst_group = 1; */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
-#else
- if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0)
-#endif
- ANDROID_ERROR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
- attr_type, err));
- else
- ANDROID_TRACE(("Multicast msg sent successfully. attr_type:%d len:%d \n",
- attr_type, tot_len));
- } else {
- NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */
-
- /* finalize the message */
- genlmsg_end(skb, msg);
-
- /* send the message back */
- if (genlmsg_unicast(&init_net, skb, pid) < 0)
- ANDROID_ERROR(("genlmsg_unicast failed\n"));
- }
-
-out:
- if (p)
- kfree(p);
- if (ret)
- nlmsg_free(skb);
-
- return ret;
-}
-
-static s32
-wl_genl_handle_msg(
- struct sk_buff *skb,
- struct genl_info *info)
-{
- struct nlattr *na;
- u8 *data = NULL;
-
- ANDROID_TRACE(("Enter \n"));
-
- if (info == NULL) {
- return -EINVAL;
- }
-
- na = info->attrs[BCM_GENL_ATTR_MSG];
- if (!na) {
- ANDROID_ERROR(("nlattribute NULL\n"));
- return -EINVAL;
- }
-
- data = (char *)nla_data(na);
- if (!data) {
- ANDROID_ERROR(("Invalid data\n"));
- return -EINVAL;
- } else {
- /* Handle the data */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || defined(WL_COMPAT_WIRELESS)
- ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__,
- info->snd_pid));
-#else
- ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__,
- info->snd_portid));
-#endif /* (LINUX_VERSION < VERSION(3, 7, 0) || WL_COMPAT_WIRELESS */
- }
-
- return 0;
-}
-#endif /* WL_GENL */
-
-
#if defined(RSSIAVG)
void
wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
int j, k=0;
int rssi, error=0;
+ scb_val_t scbval;
struct ether_addr bssid;
struct timeval now, timeout;
return 0;
}
if (error) {
- ANDROID_ERROR(("Could not get bssid (%d)\n", error));
+ ANDROID_ERROR(("%s: Could not get bssid (%d)\n", __FUNCTION__, error));
}
- error = wldev_get_rssi(net, &rssi);
+ memset(&scbval, 0, sizeof(scb_val_t));
+ error = wldev_get_rssi(net, &scbval);
+ rssi = scbval.val;
if (error) {
- ANDROID_ERROR(("Could not get rssi (%d)\n", error));
+ ANDROID_ERROR(("%s: Could not get rssi (%d)\n", __FUNCTION__, error));
return error;
}
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
ANDROID_ERROR(("%s: Memory alloc failure %d\n",
- __FUNCTION__, sizeof(wl_rssi_cache_t)));
+ __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
return 0;
}
- ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%d in the leaf\n",
+ ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
__FUNCTION__, k, &bssid, rssi));
leaf->next = NULL;
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
for (;node;) {
if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
- ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d, SSID \"%s\"\n",
+ ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
for (j=0; j<RSSIAVG_LEN-1; j++)
node->RSSI[j] = node->RSSI[j+1];
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
ANDROID_ERROR(("%s: Memory alloc failure %d\n",
- __FUNCTION__, sizeof(wl_rssi_cache_t)));
+ __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
return;
}
- ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%d, SSID \"%s\" in the leaf\n",
+ ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
leaf->next = NULL;
int
wl_update_rssi_offset(struct net_device *net, int rssi)
{
- uint chip, chiprev;
+#if defined(RSSIOFFSET_NEW)
+ int j;
+#endif
if (!g_wifi_on)
return rssi;
- chip = dhd_conf_get_chip(dhd_get_pub(net));
- chiprev = dhd_conf_get_chiprev(dhd_get_pub(net));
- if (chip == BCM4330_CHIP_ID && chiprev == BCM4330B2_CHIP_REV) {
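+ /* Apply a stepped offset derived from the raw RSSI (RSSIOFFSET_NEW) or a
+  * fixed RSSI_OFFSET, then clamp the result to RSSI_MAXVAL below.
+  */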
#if defined(RSSIOFFSET_NEW)
- int j;
- for (j=0; j<RSSI_OFFSET; j++) {
- if (rssi - (RSSI_OFFSET_MINVAL+RSSI_OFFSET_INTVAL*(j+1)) < 0)
- break;
- }
- rssi += j;
+ for (j=0; j<RSSI_OFFSET; j++) {
+ if (rssi - (RSSI_OFFSET_MINVAL+RSSI_OFFSET_INTVAL*(j+1)) < 0)
+ break;
+ }
+ rssi += j;
#else
- rssi += RSSI_OFFSET;
+ rssi += RSSI_OFFSET;
#endif
- }
return MIN(rssi, RSSI_MAXVAL);
}
#endif
#if defined(BSSCACHE)
-#define WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN 32
-
void
wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl)
{
tmp = 0;
prev->next = node->next;
}
- ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%d, SSID \"%s\"\n",
+ ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
__FUNCTION__, i, &node->results.bss_info->BSSID,
dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
kfree(node);
tmp = 0;
prev->next = node->next;
}
- ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%d, SSID \"%s\"\n",
+ ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
__FUNCTION__, i, &node->results.bss_info->BSSID,
dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
kfree(node);
}
}
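+/* Trace every entry in the BSS cache, using the averaged RSSI when RSSIAVG is
+ * enabled and the raw RSSI otherwise.
+ */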
+void dump_bss_cache(
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+ wl_bss_cache_t *node)
+{
+ int k = 0;
+ int16 rssi;
+
+ for (;node;) {
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID);
+#else
+ rssi = dtoh16(node->results.bss_info->RSSI);
+#endif
+ ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID));
+ k++;
+ node = node->next;
+ }
+}
+
void
-wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, wl_scan_results_t *ss_list)
+wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+ wl_scan_results_t *ss_list)
{
- wl_bss_cache_t *node, *prev, *leaf, *tmp, **bss_head;
+ wl_bss_cache_t *node, *prev, *leaf, **bss_head;
wl_bss_info_t *bi = NULL;
int i, k=0;
+#if defined(SORT_BSS_BY_RSSI)
+ int16 rssi, rssi_node;
+#endif
struct timeval now, timeout;
if (!ss_list->count)
for (;node;) {
if (!memcmp(&node->results.bss_info->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
- tmp = node;
- leaf = kmalloc(dtoh32(bi->length) + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
- if (!leaf) {
- ANDROID_ERROR(("%s: Memory alloc failure %d and keep old BSS info\n",
- __FUNCTION__, dtoh32(bi->length) + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
- break;
+ if (node == *bss_head)
+ *bss_head = node->next;
+ else {
+ prev->next = node->next;
}
-
- memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
- leaf->next = node->next;
- leaf->dirty = 0;
- leaf->tv = timeout;
- leaf->results.count = 1;
- leaf->results.version = ss_list->version;
- ANDROID_TRACE(("%s: Update %d with BSSID %pM, RSSI=%d, SSID \"%s\", length=%d\n",
- __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID, dtoh32(bi->length)));
- if (!prev)
- *bss_head = leaf;
- else
- prev->next = leaf;
- node = leaf;
- prev = node;
-
- kfree(tmp);
- k++;
break;
}
prev = node;
node = node->next;
}
- if (node)
- continue;
-
- leaf = kmalloc(dtoh32(bi->length) + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN, GFP_KERNEL);
+ leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL);
if (!leaf) {
ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__,
- dtoh32(bi->length) + WLC_IW_SS_CACHE_CTRL_FIELD_MAXLEN));
+ dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t)));
return;
}
- ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%d, SSID \"%s\" in the leaf\n",
+ if (node) {
+ kfree(node);
+ node = NULL;
+ ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
+ } else
+ ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
__FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
leaf->results.version = ss_list->version;
k++;
- if (!prev)
+ if (*bss_head == NULL)
*bss_head = leaf;
- else
- prev->next = leaf;
+ else {
+#if defined(SORT_BSS_BY_RSSI)
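+ /* Keep the cache sorted in descending RSSI order: insert the new entry
+  * before the first node with a weaker RSSI, or append it at the tail.
+  */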
+ node = *bss_head;
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID);
+#else
+ rssi = dtoh16(leaf->results.bss_info->RSSI);
+#endif
+ for (;node;) {
+#if defined(RSSIAVG)
+ rssi_node = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID);
+#else
+ rssi_node = dtoh16(node->results.bss_info->RSSI);
+#endif
+ if (rssi > rssi_node) {
+ leaf->next = node;
+ if (node == *bss_head)
+ *bss_head = leaf;
+ else
+ prev->next = leaf;
+ break;
+ }
+ prev = node;
+ node = node->next;
+ }
+ if (node == NULL)
+ prev->next = leaf;
+#else
+ leaf->next = *bss_head;
+ *bss_head = leaf;
+#endif
+ }
}
+ dump_bss_cache(
+#if defined(RSSIAVG)
+ rssi_cache_ctrl,
+#endif
+ *bss_head);
}
void
/*
* Linux cfg80211 driver - Android related functions
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_android.h 487838 2014-06-27 05:51:44Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_android.h 608194 2015-12-24 04:34:35Z $
*/
#ifndef _wl_android_
/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
* automatically
*/
-#if defined(WL_SDO) || defined(BT_WIFI_HANDOVER) || defined(WL_NAN)
+#if defined(BT_WIFI_HANDOVER) || defined(WL_NAN)
#define WL_GENL
#endif
-#ifdef WL_GENL
-#include <net/genetlink.h>
-#endif
/**
* Android platform dependent functions, feel free to add Android specific functions here
int wl_android_exit(void);
void wl_android_post_init(void);
int wl_android_wifi_on(struct net_device *dev);
-int wl_android_wifi_off(struct net_device *dev);
+int wl_android_wifi_off(struct net_device *dev, bool on_failure);
int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
-#ifdef WL_GENL
-typedef struct bcm_event_hdr {
- u16 event_type;
- u16 len;
-} bcm_event_hdr_t;
-
-/* attributes (variables): the index in this enum is used as a reference for the type,
- * userspace application has to indicate the corresponding type
- * the policy is used for security considerations
- */
-enum {
- BCM_GENL_ATTR_UNSPEC,
- BCM_GENL_ATTR_STRING,
- BCM_GENL_ATTR_MSG,
- __BCM_GENL_ATTR_MAX
-};
-#define BCM_GENL_ATTR_MAX (__BCM_GENL_ATTR_MAX - 1)
-
-/* commands: enumeration of all commands (functions),
- * used by userspace application to identify command to be ececuted
- */
-enum {
- BCM_GENL_CMD_UNSPEC,
- BCM_GENL_CMD_MSG,
- __BCM_GENL_CMD_MAX
-};
-#define BCM_GENL_CMD_MAX (__BCM_GENL_CMD_MAX - 1)
-
-/* Enum values used by the BCM supplicant to identify the events */
-enum {
- BCM_E_UNSPEC,
- BCM_E_SVC_FOUND,
- BCM_E_DEV_FOUND,
- BCM_E_DEV_LOST,
- BCM_E_DEV_BT_WIFI_HO_REQ,
- BCM_E_MAX
-};
-
-s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type,
- u8 *string, u16 len, u8 *hdr, u16 hdrlen);
-#endif /* WL_GENL */
s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size);
/* hostap mac mode */
/* max number of assoc list */
#define MAX_NUM_OF_ASSOCLIST 64
+/* Bandwidth */
+#define WL_CH_BANDWIDTH_20MHZ 20
+#define WL_CH_BANDWIDTH_40MHZ 40
+#define WL_CH_BANDWIDTH_80MHZ 80
/* max number of mac filter list
* restrict max number to 10 as maximum cmd string size is 255
*/
#define MAX_NUM_MAC_FILT 10
int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
+int wl_android_set_roam_offload_bssid_list(struct net_device *dev, const char *cmd);
/* terence:
* BSSCACHE: Cache bss list
* RSSAVG: Average RSSI of BSS list
* RSSIOFFSET: RSSI offset
+ * SORT_BSS_BY_RSSI: Sort BSS by RSSI
*/
//#define BSSCACHE
//#define RSSIAVG
//#define RSSIOFFSET
//#define RSSIOFFSET_NEW
+//#define SORT_BSS_BY_RSSI
#define RSSI_MAXVAL -2
#define RSSI_MINVAL -200
void wl_delete_dirty_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
void wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid);
void wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
-void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, wl_scan_results_t *ss_list);
+void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
+#endif
+ wl_scan_results_t *ss_list);
void wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
#endif
#endif /* _wl_android_ */
/*
* Linux cfg80211 driver
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfg80211.c 506036 2014-10-02 11:33:14Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg80211.c 610196 2016-01-06 11:20:45Z $
*/
/* */
#include <typedefs.h>
#include <linux/if_arp.h>
#include <asm/uaccess.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_linux.h>
-#include <dhdioctl.h>
-#include <wlioctl.h>
-#include <dhd_cfg80211.h>
-#ifdef PNO_SUPPORT
-#include <dhd_pno.h>
-#endif /* PNO_SUPPORT */
-
#include <proto/ethernet.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
#include <wl_android.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
+#include <dhd_bus.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+
+#if defined(WL_VENDOR_EXT_SUPPORT)
#include <wl_cfgvendor.h>
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
+
#ifdef WL_NAN
#include <wl_cfgnan.h>
#endif /* WL_NAN */
#include <dhd_wlfc.h>
#endif
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
+
#ifdef WL11U
#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
#error You should enable 'WL_ENABLE_P2P_IF' or 'WL_CFG80211_P2P_DEV_IF' \
#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
#endif /* WL11U */
-#ifdef BCMWAPI_WPI
-/* these items should evetually go into wireless.h of the linux system headfile dir */
-#ifndef IW_ENCODE_ALG_SM4
-#define IW_ENCODE_ALG_SM4 0x20
-#endif
-
-#ifndef IW_AUTH_WAPI_ENABLED
-#define IW_AUTH_WAPI_ENABLED 0x20
-#endif
-
-#ifndef IW_AUTH_WAPI_VERSION_1
-#define IW_AUTH_WAPI_VERSION_1 0x00000008
-#endif
-
-#ifndef IW_AUTH_CIPHER_SMS4
-#define IW_AUTH_CIPHER_SMS4 0x00000020
-#endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
-#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
-#endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
-#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
-#endif
-#endif /* BCMWAPI_WPI */
-#ifdef BCMWAPI_WPI
-#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#endif /* BCMWAPI_WPI */
static struct device *cfg80211_parent_dev = NULL;
/* g_bcm_cfg should be static. Do not change */
static struct bcm_cfg80211 *g_bcm_cfg = NULL;
+#ifdef CUSTOMER_HW4_DEBUG
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION;
+#else
u32 wl_dbg_level = WL_DBG_ERR;
+#endif /* CUSTOMER_HW4_DEBUG */
+#define MAX_WAIT_TIME 1500
#ifdef WLAIBSS_MCHAN
#define IBSS_IF_NAME "ibss%d"
#endif /* WLAIBSS_MCHAN */
#else
#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
#endif /* WL_CFG80211_SYNC_GON */
-#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \
- (e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE))
+#define DNGL_FUNC(func, parameters) func parameters
#define COEX_DHCP
#define WLAN_EID_SSID 0
#define CH_MIN_5G_CHANNEL 34
#define CH_MIN_2G_CHANNEL 1
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
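+/* Helper macros for list_first_entry()/container_of(); on GCC >= 4.6 with
+ * STRICT_GCC_WARNINGS they also suppress the -Wcast-qual warning these
+ * constructs trigger.
+ */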
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
-#ifdef WLAIBSS
-enum abiss_event_type {
- AIBSS_EVENT_TXFAIL
-};
-#endif
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
enum rmc_event_type {
RMC_EVENT_NONE,
* and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
* With respect to these flags, wpa_supplicant doesn't start p2p operations on 5GHz channels.
* All the chnages in world regulatory domain are to be done here.
+ *
+ * This definition requires disabling the missing-field-initializers warning,
+ * as the ieee80211_regdomain definition differs between plain Linux and Android.
*/
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+#endif
static const struct ieee80211_regdomain brcm_regdom = {
.n_reg_rules = 4,
.alpha2 = "99",
/* IEEE 802.11a, channel 100..165 */
REG_RULE(5470-10, 5850+10, 40, 6, 20, 0), }
};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
-/*
- * Possible interface combinations supported by driver
- *
- * ADHOC Mode - #ADHOC <= 1 on channels = 1
- * SoftAP Mode - #AP <= 1 on channels = 1
- * STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1
- * on channels = 2
- */
static const struct ieee80211_iface_limit common_if_limits[] = {
{
- .max = 1,
+ /*
+ * The driver can support up to 2 APs.
+ */
+ .max = 2,
.types = BIT(NL80211_IFTYPE_AP),
},
{
common_iface_combinations[] = {
{
.num_different_channels = NUM_DIFF_CHANNELS,
+ /*
+ * max_interfaces = 4
+ * The maximum number of interfaces is used in the dual P2P case:
+ * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though the STA
+ * functionality is not used in this case, the interface remains
+ * registered because it is the primary interface.
+ */
.max_interfaces = 4,
.limits = common_if_limits,
.n_limits = ARRAY_SIZE(common_if_limits),
#define WPS_CONFIG_VIRT_DISPLAY 0x2008
#define WPS_CONFIG_PHY_DISPLAY 0x4008
-#ifdef BCMCCX
-#ifndef WLAN_AKM_SUITE_CCKM
-#define WLAN_AKM_SUITE_CCKM 0x00409600
-#endif
-#define DOT11_LEAP_AUTH 0x80 /* LEAP auth frame paylod constants */
-#endif /* BCMCCX */
+#define PM_BLOCK 1
+#define PM_ENABLE 0
-#ifdef MFP
-#define WL_AKM_SUITE_MFP_1X 0x000FAC05
-#define WL_AKM_SUITE_MFP_PSK 0x000FAC06
-#define WL_MFP_CAPABLE 0x1
-#define WL_MFP_REQUIRED 0x2
-#endif /* MFP */
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
#ifndef IBSS_COALESCE_ALLOWED
#define IBSS_COALESCE_ALLOWED 0
#endif
#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
+#define LONG_LISTEN_TIME 2000
+
+#define MAX_SCAN_ABORT_WAIT_CNT 20
+#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10
+
+#define IDSUP_4WAY_HANDSHAKE_TIMEOUT 10000
+enum idsup_event_type {
+ IDSUP_EVENT_SUCCESS = 0,
+ IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT
+};
/*
* cfg80211_ops api/callback list
*/
static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid);
-static s32
#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
#else
+static s32
wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request);
#endif /* WL_CFG80211_P2P_DEV_IF */
struct cfg80211_ibss_params *params);
static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
struct net_device *dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *mac,
+ struct station_info *sinfo);
+#else
static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac,
struct station_info *sinfo);
+#endif
static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev, bool enabled,
s32 timeout);
struct cfg80211_connect_params *sme);
static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
-static s32
#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
wl_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, s32 mbm);
#else
+static s32
wl_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, s32 dbm);
#endif /* WL_CFG80211_P2P_DEV_IF */
-static s32
#if defined(WL_CFG80211_P2P_DEV_IF)
-wl_cfg80211_get_tx_power(struct wiphy *wiphy,
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev, s32 *dbm);
#else
-wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
#endif /* WL_CFG80211_P2P_DEV_IF */
static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
struct net_device *dev,
2, 0))
static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
bcm_struct_cfgdev *cfgdev, u64 cookie);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+static s32 wl_cfg80211_del_station(
+ struct wiphy *wiphy, struct net_device *ndev,
+ struct station_del_parameters *params);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
+ struct net_device *ndev, const u8* mac_addr);
+#else
static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *ndev, u8* mac_addr);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev, const u8 *mac, struct station_parameters *params);
+#else
static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac, struct station_parameters *params);
+#endif
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
-static s32
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
-wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
#else
-wl_cfg80211_suspend(struct wiphy *wiphy);
-#endif
+static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) */
static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-#ifdef P2PONEINT
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-#else
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-#endif
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
struct net_device *ndev, bool aborted, bool fw_abort);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
-#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
u32 peer_capability, const u8 *data, size_t len);
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *data, size_t len);
#else
static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
size_t len);
#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper);
+#else
static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
-#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) */
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0) */
#ifdef WL_SCHED_SCAN
static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
#endif
-#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
bcm_struct_cfgdev*
wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
iface_type, u8 *mac_addr, const char *name);
s32
wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
-#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
+
+s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr);
+s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr);
+chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
+chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+#ifdef WL11ULB
+static s32 wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev);
+static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx);
+static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw);
+#else
+static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(
+ struct wireless_dev *wdev, s32 bssidx)
+{
+ return WL_CHANSPEC_BW_20;
+}
+#endif /* WL11ULB */
/*
* event & event Q handlers for cfg80211 interfaces
static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* GSCAN_SUPPORT */
static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
enum wl_status state, bool set);
-#ifdef WL_SDO
-static s32 wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-static s32 wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+#ifdef CUSTOM_EVENT_PM_WAKE
+static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-#endif
+#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef WLTDLS
static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
* register/deregister parent device
*/
static void wl_cfg80211_clear_parent_dev(void);
-
/*
* ioctl utilites
*/
* cfg profile utilities
*/
static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data, s32 item);
+ const wl_event_msg_t *e, const void *data, s32 item);
static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
struct cfg80211_connect_params *sme);
static s32 wl_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme);
-#ifdef BCMWAPI_WPI
-static s32 wl_set_set_wapi_ie(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-#endif
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
-static void wl_ch_to_chanspec(int ch,
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
struct wl_join_params *join_params, size_t *join_params_size);
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg);
/*
* information element utilities
*/
static void wl_rst_ie(struct bcm_cfg80211 *cfg);
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
-static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+ bool roam);
static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
-#ifdef MFP
-static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa);
-#endif
#ifdef WL11U
bcm_tlv_t *
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
static void wl_free_wdev(struct bcm_cfg80211 *cfg);
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
static int
+#else
+static void
+#endif /* kernel version < 3.9.0 */
wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
-#ifdef P2PONEINT
-chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
-#else
-chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
-#endif
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
s32 wl_cfg80211_channel_to_freq(u32 channel);
-#if defined(DHCP_SCAN_SUPPRESS)
-static void wl_cfg80211_work_handler(struct work_struct *work);
-static void wl_cfg80211_scan_supp_timerfunc(ulong data);
-#endif /* DHCP_SCAN_SUPPRESS */
static void wl_cfg80211_work_handler(struct work_struct *work);
static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+#ifdef DHD_IFDEBUG
+void wl_dump_ifinfo(struct bcm_cfg80211 *cfg);
+#endif
+
+#ifdef P2P_LISTEN_OFFLOADING
+s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const struct ether_addr *bssid);
-#ifdef WL_SDO
-s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
-s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
-#define MAX_SDO_PROTO 5
-wl_sdo_proto_t wl_sdo_protos [] = {
- { "all", SVC_RPOTYPE_ALL },
- { "upnp", SVC_RPOTYPE_UPNP },
- { "bonjour", SVC_RPOTYPE_BONJOUR },
- { "wsd", SVC_RPOTYPE_WSD },
- { "vendor", SVC_RPOTYPE_VENDOR },
-};
-#endif
static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
-#define RETURN_EIO_IF_NOT_UP(wlpriv) \
-do { \
- struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
- if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \
- WL_INFORM(("device is not ready\n")); \
- return -EIO; \
- } \
-} while (0)
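+/* cfg80211_disconnected() gained a locally_generated argument in kernel 4.2;
+ * this wrapper keeps a single call site working with both prototypes.
+ */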
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, gfp);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#endif
#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
(akm) == RSN_AKM_UNSPECIFIED || \
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
+
extern int passive_channel_skip;
+static s32
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+static s32
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
+	(LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+struct chan_info {
+ int freq;
+ int chan_type;
+};
+#endif
+
+
#if (WL_DBG_LEVEL > 0)
#define WL_DBG_ESTR_MAX 50
static s8 wl_dbg_estr[][WL_DBG_ESTR_MAX] = {
"PFN_NET_LOST",
"RESET_COMPLETE", "JOIN_START", "ROAM_START", "ASSOC_START",
"IBSS_ASSOC",
- "RADIO", "PSM_WATCHDOG", "WLC_E_CCX_ASSOC_START", "WLC_E_CCX_ASSOC_ABORT",
+ "RADIO", "PSM_WATCHDOG",
+ "WLC_E_XXX_ASSOC_START", "WLC_E_XXX_ASSOC_ABORT",
"PROBREQ_MSG",
"SCAN_CONFIRM_IND", "PSK_SUP", "COUNTRY_CODE_CHANGED",
"EXCEEDED_MEDIUM_TIME", "ICV_ERROR",
CHAN5G(124, 0), CHAN5G(128, 0),
CHAN5G(132, 0), CHAN5G(136, 0),
CHAN5G(140, 0), CHAN5G(144, 0),
- CHAN5G(149, 0), CHAN5G(153, 0),
- CHAN5G(157, 0), CHAN5G(161, 0),
+ CHAN5G(149, 0), CHAN5G(153, 0),
+ CHAN5G(157, 0), CHAN5G(161, 0),
CHAN5G(165, 0)
};
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WLAN_CIPHER_SUITE_AES_CMAC,
-#ifdef BCMWAPI_WPI
- WLAN_CIPHER_SUITE_SMS4,
-#endif
-#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK)
- WLAN_CIPHER_SUITE_PMK,
-#endif
};
#ifdef WL_SUPPORT_ACS
};
#endif
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+bool wl_scan_timeout_dbg_enabled = 0;
+static void wl_scan_timeout_dbg_set(void);
+static void wl_scan_timeout_dbg_clear(void);
+
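+/* Temporarily raise console and driver debug verbosity while a scan timeout
+ * is being debugged; previous levels are saved so _clear() can restore them.
+ */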
+static void wl_scan_timeout_dbg_set(void)
+{
+ WL_ERR(("Enter \n"));
+ prev_dhd_console_ms = dhd_console_ms;
+ prev_wl_dbg_level = wl_dbg_level;
+
+ dhd_console_ms = 1;
+ wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
-static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove,
- enum wl_handler_del_type type)
+ wl_scan_timeout_dbg_enabled = 1;
+}
+static void wl_scan_timeout_dbg_clear(void)
+{
+ WL_ERR(("Enter \n"));
+ dhd_console_ms = prev_dhd_console_ms;
+ wl_dbg_level = prev_wl_dbg_level;
+
+ wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
+
+/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
+uint32 fw_assoc_watchdog_ms = 0;
+bool fw_assoc_watchdog_started = 0;
+#define FW_ASSOC_WATCHDOG_TIME (10 * 1000) /* msec */
+
+#ifdef DHD_IFDEBUG
+
+void wl_dump_ifinfo(struct bcm_cfg80211 *cfg)
+{
+ WL_ERR(("cfg=%p\n", cfg));
+ if (cfg) {
+ WL_ERR(("cfg->wdev=%p\n", bcmcfg_to_prmry_wdev(cfg)));
+ if (bcmcfg_to_prmry_wdev(cfg)) {
+ WL_ERR(("cfg->wdev->wiphy=%p\n", bcmcfg_to_wiphy(cfg)));
+ WL_ERR(("cfg->wdev->netdev=%p\n", bcmcfg_to_prmry_ndev(cfg)));
+ }
+ }
+}
+#endif
+
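+/* Re-arm or cancel the delayed PM-enable work under cfg->pm_sync:
+ * WL_PM_WORKQ_SHORT re-schedules it after WL_PM_ENABLE_TIMEOUT,
+ * WL_PM_WORKQ_LONG after twice that, and any other type (e.g.
+ * WL_PM_WORKQ_DEL) only cancels the pending work.
+ */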
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg,
+ enum wl_pm_workq_act_type type)
{
+ u16 wq_duration = 0;
+
if (cfg == NULL)
return;
- if (cfg->pm_enable_work_on) {
- if (add_remove) {
- schedule_delayed_work(&cfg->pm_enable_work,
- msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
- } else {
- cancel_delayed_work_sync(&cfg->pm_enable_work);
- switch (type) {
- case WL_HANDLER_MAINTAIN:
- schedule_delayed_work(&cfg->pm_enable_work,
- msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
- break;
- case WL_HANDLER_PEND:
- schedule_delayed_work(&cfg->pm_enable_work,
- msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2));
- break;
- case WL_HANDLER_DEL:
- default:
- cfg->pm_enable_work_on = false;
- break;
- }
- }
+ mutex_lock(&cfg->pm_sync);
+ /*
+ * Make cancel and schedule work part mutually exclusive
+ * so that while cancelling, we are sure that there is no
+ * work getting scheduled.
+ */
+ if (delayed_work_pending(&cfg->pm_enable_work)) {
+ cancel_delayed_work_sync(&cfg->pm_enable_work);
+ DHD_OS_WAKE_UNLOCK(cfg->pub);
}
+
+ if (type == WL_PM_WORKQ_SHORT) {
+ wq_duration = WL_PM_ENABLE_TIMEOUT;
+ } else if (type == WL_PM_WORKQ_LONG) {
+ wq_duration = (WL_PM_ENABLE_TIMEOUT*2);
+ }
+ if (wq_duration) {
+ DHD_OS_WAKE_LOCK(cfg->pub);
+ schedule_delayed_work(&cfg->pm_enable_work,
+ msecs_to_jiffies((const unsigned int)wq_duration));
+ }
+ mutex_unlock(&cfg->pm_sync);
}
/* Return a new chanspec given a legacy chanspec
* Returns INVCHANSPEC on error
*/
chanspec_t
-wl_ch_host_to_driver(u16 channel)
+wl_ch_host_to_driver(s32 bssidx, u16 channel)
{
-
chanspec_t chanspec;
chanspec = channel & WL_CHANSPEC_CHAN_MASK;
else
chanspec |= WL_CHANSPEC_BAND_5G;
- chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(NULL, bssidx);
+
chanspec |= WL_CHANSPEC_CTL_SB_NONE;
return wl_chspec_host_to_driver(chanspec);
* a chanspec_t value
* Returns INVCHANSPEC on error
*/
-static chanspec_t
+chanspec_t
wl_chspec_driver_to_host(chanspec_t chanspec)
{
chanspec = dtohchanspec(chanspec);
return (count == ETHER_ADDR_LEN);
}
-/* convert hex string buffer to binary */
-int
-wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str)
-{
- int count, slen;
- int hvalue;
- char tmp[3] = {0};
- char *ptr = str, *endp = NULL;
-
- if (!data || !str || !dlen) {
- WL_DBG((" passed buffer is empty \n"));
- return 0;
- }
-
- slen = strlen(str);
- if (dlen * 2 < slen) {
- WL_DBG((" destination buffer too short \n"));
- return 0;
- }
-
- if (slen % 2) {
- WL_DBG((" source buffer is of odd length \n"));
- return 0;
- }
-
- for (count = 0; count < slen; count += 2) {
- memcpy(tmp, ptr, 2);
- hvalue = simple_strtol(tmp, &endp, 16);
- if (*endp != '\0') {
- WL_DBG((" non hexadecimal character encountered \n"));
- return 0;
- }
- *data++ = (unsigned char)hvalue;
- ptr += 2;
- }
-
- return (slen / 2);
-}
-
/* There isn't a lot of sense in it, but you can transmit anything you like */
static const struct ieee80211_txrx_stypes
wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
if (dbm > 0xffff)
dbm = 0xffff;
txpwrqdbm = dbm * 4;
-#ifdef SUPPORT_WL_TXPOWER
- if (type == NL80211_TX_POWER_AUTOMATIC)
- txpwrqdbm = 127;
- else
- txpwrqdbm |= WL_TXPWR_OVERRIDE;
-#endif /* SUPPORT_WL_TXPOWER */
err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
&cfg->ioctl_buf_sync);
return err;
}
-chanspec_t
-#ifdef P2PONEINT
-wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
-#else
-wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
-#endif
+static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
{
chanspec_t chspec;
int err = 0;
struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
struct ether_addr bssid;
struct wl_bss_info *bss = NULL;
+ s32 bssidx = 0; /* Explicitly set to primary bssidx */
if ((err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), false))) {
/* STA interface is not associated. So start the new interface on a temp
* via set_channel (cfg80211 API).
*/
WL_DBG(("Not associated. Return a temp channel. \n"));
- return wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+ return wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN);
}
if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf,
WL_EXTRA_BUF_MAX, false))) {
WL_ERR(("Failed to get associated bss info, use temp channel \n"));
- chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
+ chspec = wl_ch_host_to_driver(bssidx, WL_P2P_TEMP_CHAN);
}
else {
bss = (struct wl_bss_info *) (cfg->extra_buf + 4);
#else
char *name,
#endif /* WL_CFG80211_P2P_DEV_IF */
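+/* Kernels >= 4.1 pass a name_assign_type argument to add_virtual_intf */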
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- s32 err;
+ s32 err = -ENODEV;
s32 timeout = -1;
s32 wlif_type = -1;
s32 mode = 0;
s32 val = 0;
+ s32 cfg_type;
s32 dhd_mode = 0;
chanspec_t chspec;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *primary_ndev;
struct net_device *new_ndev;
struct ether_addr primary_mac;
+#ifdef WL_VIRTUAL_APSTA
+ bcm_struct_cfgdev *new_cfgdev;
+#endif /* WL_VIRTUAL_APSTA */
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
s32 up = 1;
#endif /* PROP_TXSTATUS_VSDB */
#if defined(SUPPORT_AP_POWERSAVE)
dhd_pub_t *dhd;
-#endif
+#endif /* SUPPORT_AP_POWERSAVE */
+ bool hang_required = false;
if (!cfg)
return ERR_PTR(-EINVAL);
#endif /* PROP_TXSTATUS_VSDB */
#if defined(SUPPORT_AP_POWERSAVE)
dhd = (dhd_pub_t *)(cfg->pub);
-#endif
+#endif /* SUPPORT_AP_POWERSAVE */
/* Use primary I/F for sending cmds down to firmware */
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
return wl_cfg80211_add_monitor_if((char *)name);
#if defined(WL_CFG80211_P2P_DEV_IF)
case NL80211_IFTYPE_P2P_DEVICE:
+ cfg->down_disc_if = FALSE;
return wl_cfgp2p_add_p2p_disc_if(cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
case NL80211_IFTYPE_STATION:
-#ifdef DUAL_STA
+#ifdef WL_VIRTUAL_APSTA
#ifdef WLAIBSS_MCHAN
if (cfg->ibss_cfgdev) {
WL_ERR(("AIBSS is already operational. "
" AIBSS & DUALSTA can't be used together \n"));
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
#endif /* WLAIBSS_MCHAN */
if (!name) {
WL_ERR(("Interface name not provided \n"));
- return NULL;
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (wl_cfgp2p_vif_created(cfg)) {
+		WL_ERR(("Could not create new iface. "
+			"Already one p2p interface is running"));
+ return ERR_PTR(-ENODEV);
}
- return wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
NL80211_IFTYPE_STATION, NULL, name);
-#endif /* DUAL_STA */
+ if (!new_cfgdev)
+ return ERR_PTR(-ENOMEM);
+ else
+ return new_cfgdev;
+#endif /* WL_VIRTUAL_APSTA */
case NL80211_IFTYPE_P2P_CLIENT:
wlif_type = WL_P2P_IF_CLIENT;
mode = WL_MODE_BSS;
break;
default:
WL_ERR(("Unsupported interface type\n"));
- return NULL;
+ return ERR_PTR(-ENODEV);
break;
}
if (!name) {
WL_ERR(("name is NULL\n"));
- return NULL;
+ return ERR_PTR(-ENODEV);
}
if (cfg->p2p_supported && (wlif_type != -1)) {
ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
if (!cfg->p2p)
return ERR_PTR(-ENODEV);
+ if (cfg->cfgdev_bssidx != -1) {
+		WL_ERR(("Failed to start p2p, maximum number of interfaces reached"));
+ return ERR_PTR(-ENODEV);
+ }
+
if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
p2p_on(cfg) = true;
wl_cfgp2p_set_firm_p2p(cfg);
wl_cfgp2p_init_discovery(cfg);
get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac,
- &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
}
- memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+ cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0';
wl_cfg80211_scan_abort(cfg);
#ifdef PROP_TXSTATUS_VSDB
#endif
#endif /* PROP_TXSTATUS_VSDB */
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+		WL_ERR(("FW does not support multiple GOs"));
+ return ERR_PTR(-ENOMEM);
+ }
/* In concurrency case, STA may be already associated in a particular channel.
* so retrieve the current channel of primary interface and then start the virtual
* interface on that.
/* For P2P mode, use P2P-specific driver features to create the
* bss: "cfg p2p_ifadd"
*/
+ if (wl_check_dongle_idle(wiphy) != TRUE) {
+		WL_ERR(("FW is too busy to add an interface"));
+ return ERR_PTR(-ENOMEM);
+ }
wl_set_p2p_status(cfg, IF_ADDING);
memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
if (wlif_type == WL_P2P_IF_GO)
wldev_iovar_setint(primary_ndev, "mpc", 0);
- err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+ cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+ if (cfg_type == BCME_ERROR) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR(("Failed to get connection idx for p2p interface"));
+ goto fail;
+ }
+ err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type),
+ htod32(wlif_type), chspec);
if (unlikely(err)) {
wl_clr_p2p_status(cfg, IF_ADDING);
WL_ERR((" virtual iface add failed (%d) \n", err));
}
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_ADDING) == false),
+ ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
+ (cfg->if_event_info.valid)),
msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
struct wireless_dev *vwdev;
int pm_mode = PM_ENABLE;
wl_if_event_info *event = &cfg->if_event_info;
-
/* IF_ADD event has come back, we can proceed to to register
* the new interface now, use the interface name provided by caller (thus
* ignore the one from wlc)
*/
- strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
- event->mac, event->bssidx);
+ event->mac, event->bssidx, event->name);
if (new_ndev == NULL)
goto fail;
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
if (unlikely(!vwdev)) {
WL_ERR(("Could not allocate wireless device\n"));
+ err = -ENOMEM;
goto fail;
}
vwdev->wiphy = cfg->wdev->wiphy;
WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ cfg->p2p->p2p_go_count++;
+ }
vwdev->iftype = type;
+#ifdef DHD_IFDEBUG
+ WL_ERR(("new_ndev: %p\n", new_ndev));
+#endif
vwdev->netdev = new_ndev;
new_ndev->ieee80211_ptr = vwdev;
SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
wl_set_drv_status(cfg, READY, new_ndev);
- cfg->p2p->vif_created = true;
wl_set_mode_by_netdev(cfg, new_ndev, mode);
if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ err = -ENODEV;
+ goto fail;
+ }
+ err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx);
+ if (unlikely(err != 0)) {
+ WL_ERR(("Allocation of netinfo failed (%d) \n", err));
goto fail;
}
- wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
val = 1;
/* Disable firmware roaming for P2P interface */
wldev_iovar_setint(new_ndev, "roam_off", val);
+#ifdef WL11ULB
+ if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) {
+ u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg->p2p_wdev);
+ if (ulb_bw) {
+ /* Apply ULB BW settings on the newly spawned interface */
+			WL_DBG(("[ULB] Applying ULB BW for the newly "
+				"created P2P interface\n"));
+ if (wl_cfg80211_set_ulb_bw(new_ndev,
+ ulb_bw, new_ndev->name) < 0) {
+ /*
+ * If ulb_bw set failed, fail the iface creation.
+ * wl_dealloc_netinfo_by_wdev will be called by the
+ * unregister notifier.
+ */
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+ }
+#endif /* WL11ULB */
if (mode != WL_MODE_AP)
wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
if (mode == WL_MODE_AP) {
dhd_set_ap_powersave(dhd, 0, TRUE);
}
-#endif
+#endif /* SUPPORT_AP_POWERSAVE */
if (type == NL80211_IFTYPE_P2P_CLIENT)
dhd_mode = DHD_FLAG_P2P_GC_MODE;
else if (type == NL80211_IFTYPE_P2P_GO)
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
wl_set_p2p_status(cfg, IF_DELETING);
- err = wl_cfgp2p_ifdel(cfg, &cfg->p2p->int_addr);
+ err = wl_cfgp2p_ifdel(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type));
if (err == BCME_OK) {
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_DELETING) == false),
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
cfg->if_event_info.valid) {
+ /*
+			 * This p2p interface creation failure should be
+			 * indicated to the upper layer
+ */
WL_ERR(("IFDEL operation done\n"));
} else {
WL_ERR(("IFDEL didn't complete properly\n"));
- err = BCME_ERROR;
+ hang_required = true;
}
+ } else {
+ hang_required = true;
}
- if (err != BCME_OK) {
+
+ if (hang_required) {
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
err, ndev->name));
+ dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE;
net_os_send_hang_message(ndev);
}
memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
- cfg->p2p->vif_created = false;
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
dhd_wlfc_get_enable(dhd, &enabled);
- if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
- dhd->op_mode != DHD_FLAG_IBSS_MODE) {
- dhd_wlfc_deinit(dhd);
- cfg->wlfc_on = false;
- }
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
+ }
#endif
#endif /* PROP_TXSTATUS_VSDB */
+ /*
+		 * Return -ENODEV to the upper layer to indicate that DHD
+		 * failed to create the p2p interface
+ */
+ err = -ENODEV;
}
}
-
fail:
if (wlif_type == WL_P2P_IF_GO)
wldev_iovar_setint(primary_ndev, "mpc", 1);
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(err);
}
static s32
s32 timeout = -1;
s32 ret = 0;
s32 index = -1;
+ s32 type = -1;
#ifdef CUSTOM_SET_CPUCORE
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
#endif /* CUSTOM_SET_CPUCORE */
if (!(dhd->chan_isvht80))
dhd_set_cpucore(dhd, FALSE);
#endif /* CUSTOM_SET_CPUCORE */
-#if defined(WL_CFG80211_P2P_DEV_IF)
+#ifdef WL_CFG80211_P2P_DEV_IF
if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+ if (dhd_download_fw_on_driverload) {
+ return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+ } else {
+ cfg->down_disc_if = TRUE;
+ return 0;
+ }
}
#endif /* WL_CFG80211_P2P_DEV_IF */
dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
return bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
#endif /* WLAIBSS_MCHAN */
-#ifdef DUAL_STA
+#ifdef WL_VIRTUAL_APSTA
if (cfgdev == cfg->bss_cfgdev)
return wl_cfg80211_del_iface(wiphy, cfgdev);
-#endif /* DUAL_STA */
-
- if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) {
- WL_ERR(("Find p2p index from ndev(%p) failed\n", dev));
+#endif /* WL_VIRTUAL_APSTA */
+ if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+ WL_ERR(("Find p2p index from wdev failed\n"));
+ return BCME_ERROR;
+ }
+ if (wl_check_dongle_idle(wiphy) != TRUE) {
+		WL_ERR(("FW is too busy to delete the interface"));
return BCME_ERROR;
}
if (cfg->p2p_supported) {
- memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN);
+ if (wl_cfgp2p_find_type(cfg, index, &type) != BCME_OK)
+ return BCME_ERROR;
+ memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN);
/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
*/
WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- if (cfg->p2p->vif_created) {
+ if (wl_cfgp2p_vif_created(cfg)) {
if (wl_get_drv_status(cfg, SCANNING, dev)) {
wl_notify_escan_complete(cfg, dev, true, true);
}
wldev_iovar_setint(dev, "mpc", 1);
/* Delete pm_enable_work */
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
/* for GC */
if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
/* for GO */
if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+ cfg->p2p->p2p_go_count--;
/* disable interface before bsscfg free */
ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
/* if fw doesn't support "ifdis",
msleep(300);
}
}
- wl_cfgp2p_clear_management_ie(cfg, index);
+ wl_cfg80211_clear_per_bss_ies(cfg, index);
if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+ memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet,
+ ETHER_ADDR_LEN);
+ CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+ dev->ifindex, MAC2STRDBG(p2p_mac.octet)));
/* delete interface after link down */
ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
-
if (ret != BCME_OK) {
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
ret, ndev->name));
- #if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
+ dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE;
net_os_send_hang_message(ndev);
- #endif
} else {
/* Wait for IF_DEL operation to be finished */
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_DELETING) == false),
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
cfg->if_event_info.valid) {
s32 wlif_type;
s32 mode = 0;
s32 err = BCME_OK;
+ s32 index;
+ s32 conn_idx = -1;
chanspec_t chspec;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_DBG(("Enter type %d\n", type));
infra = 1;
break;
case NL80211_IFTYPE_AP:
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+ /* intentional fall through */
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
mode = WL_MODE_AP;
}
if (!dhd)
return -EINVAL;
- if (ap) {
- wl_set_mode_by_netdev(cfg, ndev, mode);
- if (cfg->p2p_supported && cfg->p2p->vif_created) {
- WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
- p2p_on(cfg)));
- wldev_iovar_setint(ndev, "mpc", 0);
- wl_notify_escan_complete(cfg, ndev, true, true);
+ /* If any scan is going on, abort it */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT;
+ WL_ERR(("Scan in progress. Aborting the scan!\n"));
+ wl_cfg80211_scan_abort(cfg);
+ while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) {
+ WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
+ }
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+ }
+
+ if (wl_check_dongle_idle(wiphy) != TRUE) {
+		WL_ERR(("FW is too busy to change the interface"));
+ return -EINVAL;
+ }
+ if (ap) {
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ if (is_p2p_group_iface(ndev->ieee80211_ptr) &&
+ cfg->p2p && wl_cfgp2p_vif_created(cfg)) {
+ WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg)));
+ wldev_iovar_setint(ndev, "mpc", 0);
+ wl_notify_escan_complete(cfg, ndev, true, true);
+
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+ wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS);
+				WL_ERR(("FW does not support multiple GOs"));
+ return BCME_ERROR;
+ }
/* In concurrency case, STA may be already associated in a particular
* channel. so retrieve the current channel of primary interface and
* then start the virtual interface on that.
*/
chspec = wl_cfg80211_get_shared_freq(wiphy);
+ index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (index < 0) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev));
+ return BCME_ERROR;
+ }
+ if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK)
+ return BCME_ERROR;
wlif_type = WL_P2P_IF_GO;
- printf("%s : ap (%d), infra (%d), iftype: (%d)\n",
- ndev->name, ap, infra, type);
+ printf("%s : ap (%d), infra (%d), iftype (%d) conn_idx (%d)\n",
+ ndev->name, ap, infra, type, conn_idx);
wl_set_p2p_status(cfg, IF_CHANGING);
wl_clr_p2p_status(cfg, IF_CHANGED);
- wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+ wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx),
+ htod32(wlif_type), chspec, conn_idx);
wait_event_interruptible_timeout(cfg->netif_change_event,
(wl_get_p2p_status(cfg, IF_CHANGED) == true),
msecs_to_jiffies(MAX_WAIT_TIME));
wl_set_drv_status(cfg, CONNECTED, ndev);
#ifdef SUPPORT_AP_POWERSAVE
dhd_set_ap_powersave(dhd, 0, TRUE);
-#endif
- } else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
+#endif /* SUPPORT_AP_POWERSAVE */
+ } else if (((ndev == primary_ndev) ||
+ (ndev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) &&
!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
wl_set_drv_status(cfg, AP_CREATING, ndev);
- if (!cfg->ap_info &&
- !(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
- WL_ERR(("struct ap_saved_ie allocation failed\n"));
- return -ENOMEM;
- }
} else {
WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
return -EINVAL;
}
} else {
+ /* P2P GO interface deletion is handled on the basis of role type (AP).
+ * So avoid changing role for p2p type.
+ */
+ if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ wl_set_mode_by_netdev(cfg, ndev, mode);
WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA"));
#ifdef SUPPORT_AP_POWERSAVE
dhd_set_ap_powersave(dhd, 0, FALSE);
-#endif
-#ifdef P2PONEINT
- wl_set_mode_by_netdev(cfg, ndev, mode);
- if (cfg->p2p_supported && cfg->p2p->vif_created) {
- WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
- p2p_on(cfg)));
- wldev_iovar_setint(ndev, "mpc", 0);
- wl_notify_escan_complete(cfg, ndev, true, true);
-
- /* In concurrency case, STA may be already associated in a particular
- * channel. so retrieve the current channel of primary interface and
- * then start the virtual interface on that.
- */
- chspec = wl_cfg80211_get_shared_freq(wiphy);
-
- wlif_type = WL_P2P_IF_CLIENT;
- WL_ERR(("%s : ap (%d), infra (%d), iftype: (%d) chspec 0x%x \n",
- ndev->name, ap, infra, type, chspec));
- wl_set_p2p_status(cfg, IF_CHANGING);
- wl_clr_p2p_status(cfg, IF_CHANGED);
- wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
- wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_CHANGED) == true),
- msecs_to_jiffies(MAX_WAIT_TIME));
- wl_set_mode_by_netdev(cfg, ndev, mode);
- dhd->op_mode |= DHD_FLAG_P2P_GC_MODE;
- dhd->op_mode &= ~DHD_FLAG_P2P_GO_MODE;
- wl_clr_p2p_status(cfg, IF_CHANGING);
- wl_clr_p2p_status(cfg, IF_CHANGED);
-
-#define INIT_IE(IE_TYPE, BSS_TYPE) \
- do { \
- memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
- sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
- wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
- } while (0);
-
- INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
- }
-#endif /* P2PONEINT */
+#endif /* SUPPORT_AP_POWERSAVE */
}
if (ibss) {
#endif /* PROP_TXSTATUS_VSDB */
bssidx = if_event_info->bssidx;
- if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) {
+ if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) &&
+ bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) {
WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
return BCME_ERROR;
}
- if (p2p_is_on(cfg) && cfg->p2p->vif_created) {
-
+ if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) {
if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
/* Abort any pending scan requests */
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
}
memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
- if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) {
- WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx));
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) {
+ /* Update P2P data */
+ wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+ wl_to_p2p_bss_ndev(cfg, type) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, type) = -1;
+ } else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) {
+ WL_ERR(("bssidx not known for the given ndev as per net_info data \n"));
return BCME_ERROR;
}
- wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
- wl_to_p2p_bss_ndev(cfg, type) = NULL;
- wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID;
- cfg->p2p->vif_created = false;
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
#endif /* PROP_TXSTATUS_VSDB */
}
+ dhd_net_if_lock(ndev);
wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev);
+ dhd_net_if_unlock(ndev);
+
return BCME_OK;
}
u8 *end, *pos;
s32 listen_channel;
+/* Unfortunately a const cast is required here: this function is a
+ * callback, so its signature must not be changed, and changing
+ * wl_cfgp2p_find_p2pie instead would cascade const casts into other
+ * places.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
pos = (u8 *)ie;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
if (p2p_ie == NULL)
char *ptr;
wlc_ssid_t ssid;
struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct wireless_dev *wdev;
memcpy(¶ms->bssid, ðer_bcast, ETHER_ADDR_LEN);
params->bss_type = DOT11_BSSTYPE_ANY;
(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
continue;
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ continue;
+
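+		/* Pick the minimum-bandwidth chanspec for this wdev (ULB aware) */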
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ wdev = request->wdev;
+#else
+ wdev = request->dev->ieee80211_ptr;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ chanspec = wl_cfg80211_ulb_get_min_bw_chspec(wdev, -1);
+ if (chanspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec! Skipping channel\n"));
+ continue;
+ }
if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_5G) {
- WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
chanspec |= WL_CHANSPEC_BAND_2G;
} else {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_2G) {
- WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
chanspec |= WL_CHANSPEC_BAND_5G;
}
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
params->channel_list[j] = channel;
params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
params->channel_list[j] |= chanspec;
u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
u32 num_chans = 0;
s32 channel;
- s32 n_valid_chan;
+ u32 n_valid_chan;
s32 search_state = WL_P2P_DISC_ST_SCAN;
u32 i, j, n_nodfs = 0;
u16 *default_chan_list = NULL;
wl_uint32_list_t *list;
+ s32 bssidx = -1;
struct net_device *dev = NULL;
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
bool is_first_init_2g_scan = false;
-#endif
+#endif
p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
scb_val_t scbval;
static int cnt = 0;
err = -ENOMEM;
goto exit;
}
+ if (cfg->active_scan == PASSIVE_SCAN) {
+ params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+ WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
+ }
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+
err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+	WL_SCAN(("%s: LEGACY_SCAN sync ID: %d, bssidx: %d\n",
+		__FUNCTION__, params->sync_id, bssidx));
if (unlikely(err)) {
if (err == BCME_EPERM)
/* Scan Not permitted at this point of time */
goto exit;
}
if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+ int is_printed = false;
+#endif /* P2P_SKIP_DFS */
list = (wl_uint32_list_t *) chan_buf;
n_valid_chan = dtoh32(list->count);
for (i = 0; i < num_chans; i++)
{
-#ifdef WL_HOST_BAND_MGMT
- int channel_band = 0;
-#endif /* WL_HOST_BAND_MGMT */
_freq = request->channels[i]->center_freq;
channel = ieee80211_frequency_to_channel(_freq);
-#ifdef WL_HOST_BAND_MGMT
- channel_band = (channel > CH_MAX_2G_CHANNEL) ?
- WLC_BAND_5G : WLC_BAND_2G;
- if ((cfg->curr_band != WLC_BAND_AUTO) &&
- (cfg->curr_band != channel_band) &&
- !IS_P2P_SOCIAL_CHANNEL(channel))
- continue;
-#endif /* WL_HOST_BAND_MGMT */
/* ignore DFS channels */
if (request->channels[i]->flags &
| IEEE80211_CHAN_PASSIVE_SCAN))
#endif
continue;
+#ifdef P2P_SKIP_DFS
+ if (channel >= 52 && channel <= 144) {
+ if (is_printed == false) {
+ WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+ is_printed = true;
+ }
+ continue;
+ }
+#endif /* P2P_SKIP_DFS */
for (j = 0; j < n_valid_chan; j++) {
/* allows only supported channel on
search_state = WL_P2P_DISC_ST_SEARCH;
p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
WL_INFORM(("P2P SEARCH PHASE START \n"));
- } else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+ } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+ ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
/* If you are already a GO, then do SEARCH only */
WL_INFORM(("Already a GO. Do SEARCH Only"));
search_state = WL_P2P_DISC_ST_SEARCH;
err = -EINVAL;
goto exit;
}
- err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list,
+ err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
search_state, action,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
p2p_scan_purpose);
s32 passive_scan_time_org;
wl_scan_results_t *results;
WL_SCAN(("Enter \n"));
- mutex_lock(&cfg->usr_sync);
results = wl_escan_get_buf(cfg, FALSE);
results->version = 0;
}
exit:
- mutex_unlock(&cfg->usr_sync);
return err;
}
struct net_device *remain_on_channel_ndev = NULL;
#endif
- dhd_pub_t *dhd;
-
- dhd = (dhd_pub_t *)(cfg->pub);
/*
* Hostapd triggers scan before starting automatic channel selection
- * also Dump stats IOVAR scans each channel hence returning from here.
+	 * to collect channel characteristics. However, the firmware scan
+	 * engine does not support collecting channel characteristics as part
+	 * of a scan, hence return scan success.
*/
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
-#ifdef WL_SUPPORT_ACS
- WL_INFORM(("Scan Command at SoftAP mode\n"));
+ if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+ WL_INFORM(("Scan Command on SoftAP Interface. Ignoring...\n"));
return 0;
-#else
- WL_ERR(("Invalid Scan Command at SoftAP mode\n"));
- return -EINVAL;
-#endif /* WL_SUPPORT_ACS */
}
ndev = ndev_to_wlc_ndev(ndev, cfg);
WL_ERR(("request null or n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
return -EOPNOTSUPP;
}
+
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+ return -EAGAIN;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
if (remain_on_channel_ndev) {
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-#ifdef WL_SDO
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- wl_cfg80211_pause_sdo(ndev, cfg);
- }
-#endif
/* Arm scan timeout timer */
mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
p2p_on(cfg) = true;
wl_cfgp2p_set_firm_p2p(cfg);
get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac,
- &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif
}
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
}
}
if (!cfg->p2p_supported || !p2p_scan(cfg)) {
-
- if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) {
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from ndev(%p) failed\n",
ndev));
err = BCME_ERROR;
interworking_ie->data, interworking_ie->len);
if (unlikely(err)) {
- goto scan_out;
+ WL_ERR(("Failed to add interworking IE"));
}
} else if (cfg->iw_ie_len != 0) {
/* we have to clear IW IE and disable gratuitous APR */
DOT11_MNG_INTERWORKING_ID,
0, 0);
- wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
+ (void)wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
bssidx);
cfg->wl11u = FALSE;
+ cfg->iw_ie_len = 0;
+ memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN);
/* we don't care about error */
}
#endif /* WL11U */
- err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx,
- VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev),
+ bssidx, VNDR_IE_PRBREQ_FLAG, request->ie,
request->ie_len);
if (unlikely(err)) {
ssids = this_ssid;
}
- if (request && !p2p_scan(cfg)) {
+ if (request && cfg->p2p_supported && !p2p_scan(cfg)) {
WL_TRACE_HW4(("START SCAN\n"));
+ DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+ SCAN_WAKE_LOCK_TIMEOUT);
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
}
- cfg->scan_request = request;
- wl_set_drv_status(cfg, SCANNING, ndev);
-
if (cfg->p2p_supported) {
if (p2p_on(cfg) && p2p_scan(cfg)) {
-#ifdef WL_SDO
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- /* We shouldn't be getting p2p_find while discovery
- * offload is in progress
- */
- WL_SD(("P2P_FIND: Discovery offload is in progress."
- " Do nothing\n"));
- err = -EINVAL;
- goto scan_out;
- }
-#endif
/* find my listen channel */
cfg->afx_hdl->my_listen_chan =
wl_find_listen_channel(cfg, request->ie,
scan_success:
busy_count = 0;
+ cfg->scan_request = request;
+ wl_set_drv_status(cfg, SCANNING, ndev);
return 0;
if (err == BCME_BUSY || err == BCME_NOTREADY) {
WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
err = -EBUSY;
+ } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+ WL_ERR(("Scan not permitted due to scan suppress\n"));
+ err = -EPERM;
+ } else {
+ /* For all other fw errors, use a generic error code as return
+ * value to cfg80211 stack
+ */
+ err = -EAGAIN;
}
-#define SCAN_EBUSY_RETRY_LIMIT 10
+#define SCAN_EBUSY_RETRY_LIMIT 20
if (err == -EBUSY) {
if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
struct ether_addr bssid;
s32 ret = 0;
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
busy_count = 0;
WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
wl_get_drv_status(cfg, SCANNING, ndev),
wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
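+			/* Persistent scan EBUSY: capture a firmware core dump to aid debugging */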
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+
bzero(&bssid, sizeof(bssid));
if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
&bssid, ETHER_ADDR_LEN, false)) == 0)
wl_cfg80211_scan_abort(cfg);
+ } else {
+			/* Hold the context for 500 msec so that repeated EBUSY
+			 * retries give the firmware enough time to finish any
+			 * on-going scan
+			 */
+ WL_DBG(("Enforcing delay for EBUSY case \n"));
+ msleep(500);
}
} else {
busy_count = 0;
wl_clr_drv_status(cfg, SCANNING, ndev);
if (timer_pending(&cfg->scan_timeout))
del_timer_sync(&cfg->scan_timeout);
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
cfg->scan_request = NULL;
spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-#ifdef WL_SDO
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- wl_cfg80211_resume_sdo(ndev, cfg);
- }
-#endif
return err;
}
struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
- WL_DBG(("Enter \n"));
+ WL_DBG(("Enter\n"));
RETURN_EIO_IF_NOT_UP(cfg);
-#ifdef P2PONEINT
- ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_DBG(("scan use [dev name %s ] \n", ndev->name));
-#endif
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_cfg_multip2p_operational(cfg)) {
+ WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+ return -ENODEV;
+ }
+ }
+ mutex_lock(&cfg->usr_sync);
err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
if (unlikely(err)) {
- if ((err == BCME_EPERM) && cfg->scan_suppressed)
- WL_DBG(("scan not permitted at this time (%d)\n", err));
- else
- WL_ERR(("scan error (%d)\n", err));
- return err;
+ WL_ERR(("scan error (%d)\n", err));
}
+ mutex_unlock(&cfg->usr_sync);
return err;
}
goto fail;
event = &cfg->if_event_info;
- strncpy(event->name, name, IFNAMSIZ - 1);
/* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
* over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux
* and will be freed by dhd_detach unless it gets unregistered before that. The
* be freed by wl_dealloc_netinfo
*/
new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
- event->mac, event->bssidx);
+ event->mac, event->bssidx, event->name);
if (new_ndev == NULL)
goto fail;
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK)
goto fail;
- wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE);
+ wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx);
cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
WL_ERR(("IBSS interface %s created\n", new_ndev->name));
return cfg->ibss_cfgdev;
return ret;
}
-#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+
s32
wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
struct net_device *ndev, s32 bsscfg_idx,
return ret;
}
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
/* Create a Generic Network Interface and initialize it depending up on
* the interface type
*/
struct net_device *new_ndev = NULL;
struct net_device *primary_ndev = NULL;
s32 ret = BCME_OK;
- s32 bsscfg_idx = 1;
+ s32 bsscfg_idx = 0;
u32 timeout;
wl_if_event_info *event = NULL;
struct wireless_dev *wdev = NULL;
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+#ifdef DHD_IFDEBUG
+ WL_ERR(("cfg=%p, primary_ndev=%p, ifname=%s\n", cfg, primary_ndev, name));
+#endif
+
+ /* If any scan is going on, abort it */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ int wait_cnt = MAX_SCAN_ABORT_WAIT_CNT;
+ WL_ERR(("Scan in progress. Aborting the scan!\n"));
+ wl_cfg80211_scan_abort(cfg);
+ while (wl_get_drv_status_all(cfg, SCANNING) && wait_cnt) {
+ WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
+ }
+ if (!wait_cnt && wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("Failed to abort scan\n"));
+ return NULL;
+ }
+ }
+
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
if (likely(!mac_addr)) {
- /* Use primary MAC with the locally administered bit for the Secondary STA I/F */
+ /* Use primary MAC with the locally administered bit for the
+ * Secondary STA I/F
+ */
memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
addr[0] |= 0x02;
} else {
CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
+#ifdef DHD_IFDEBUG
+ WL_ERR(("call wl_cfgp2p_disable_discovery()\n"));
+#endif
wl_cfgp2p_disable_discovery(cfg);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
p2p_on(cfg) = false;
ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
NL80211_IFTYPE_STATION, 0, addr);
if (ret == BCME_UNSUPPORTED) {
- /* Use bssidx 1 by default */
+ /* Use bssidx 1 by default */
+ bsscfg_idx = 1;
if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
bsscfg_idx, iface_type, 0, addr)) < 0) {
return NULL;
}
} else if (ret < 0) {
- WL_ERR(("Interface create failed!! ret:%d \n", ret));
- goto fail;
+ WL_ERR(("Interface create failed!! ret:%d \n", ret));
+ goto fail;
} else {
- /* Success */
- bsscfg_idx = ret;
+ /* Success */
+ bsscfg_idx = ret;
}
+ WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
+
/*
* Wait till the firmware send a confirmation event back.
*/
* the host interface creation.
*/
event = &cfg->if_event_info;
- strncpy(event->name, name, IFNAMSIZ - 1);
new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
- event->name, addr, event->bssidx);
+ (char*)name, addr, event->bssidx, event->name);
if (!new_ndev) {
WL_ERR(("I/F allocation failed! \n"));
goto fail;
new_ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+#ifdef DHD_IFDEBUG
+ WL_ERR(("wdev=%p, new_ndev=%p\n", wdev, new_ndev));
+#endif
+
/* RTNL lock must have been acquired. */
ASSERT_RTNL();
/* Set the locally administed mac addr, if not applied already */
if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
- ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", addr, ETH_ALEN,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, event->bssidx, &cfg->ioctl_buf_sync);
+ ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr",
+ addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ event->bssidx, &cfg->ioctl_buf_sync);
if (unlikely(ret)) {
WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
goto fail;
/* Initialize with the station mode params */
wl_alloc_netinfo(cfg, new_ndev, wdev,
(iface_type == NL80211_IFTYPE_STATION) ?
- WL_MODE_BSS : WL_MODE_AP, PM_ENABLE);
+ WL_MODE_BSS : WL_MODE_AP, PM_ENABLE, event->bssidx);
cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev);
cfg->cfgdev_bssidx = event->bssidx;
WL_DBG(("Host Network Interface for Secondary I/F created"));
+#ifdef DHD_IFDEBUG
+ WL_ERR(("cfg->bss_cfgdev=%p\n", cfg->bss_cfgdev));
+#endif
+
return cfg->bss_cfgdev;
fail:
cfg->bss_pending_op = FALSE;
- if (new_ndev)
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ cfg->cfgdev_bssidx = -1;
if (wdev)
kfree(wdev);
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+
+#ifdef DHD_IFDEBUG
+ WL_ERR(("failed!!!\n"));
+#endif
return NULL;
}
u32 ifidx;
enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
- WL_DBG(("Enter\n"));
+ WL_ERR(("Enter\n"));
if (!cfg->bss_cfgdev)
return 0;
/* If any scan is going on, abort it */
if (wl_get_drv_status_all(cfg, SCANNING)) {
- WL_DBG(("Scan in progress. Aborting the scan!\n"));
+ WL_ERR(("Scan in progress. Aborting the scan!\n"));
wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev);
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+#ifdef DHD_IFDEBUG
+ WL_ERR(("cfg->bss_cfgdev=%p, ndev=%p, primary_ndev=%p\n",
+ cfg->bss_cfgdev, ndev, primary_ndev));
+#endif
+
cfg->bss_pending_op = TRUE;
memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- /* Delete the firmware interface */
+ /* Delete the firmware interface. "interface_remove" command
+ * should go on the interface to be deleted
+ */
ret = wl_cfg80211_interface_ops(cfg, ndev, cfg->cfgdev_bssidx,
NL80211_IFTYPE_STATION, 1, NULL);
if (ret == BCME_UNSUPPORTED) {
if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
bsscfg_idx, iface_type, true, NULL)) < 0) {
WL_ERR(("DEL bss failed ret:%d \n", ret));
- return ret;
+ goto exit;
}
} else if (ret < 0) {
- WL_ERR(("Interface DEL failed ret:%d \n", ret));
- return ret;
+ WL_ERR(("Interface DEL failed ret:%d \n", ret));
+ goto exit;
}
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
if (timeout <= 0 || cfg->bss_pending_op) {
WL_ERR(("timeout in waiting IF_DEL event\n"));
}
+
+exit:
ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
wl_cfg80211_remove_if(cfg, ifidx, ndev);
cfg->bss_cfgdev = NULL;
cfg->cfgdev_bssidx = -1;
cfg->bss_pending_op = FALSE;
- WL_DBG(("IF_DEL Done.\n"));
+ WL_ERR(("IF_DEL Done.\n"));
return ret;
}
-#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
static s32
wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
chanspec_t chanspec = 0;
u32 param[2] = {0, 0};
u32 bw_cap = 0;
-#if defined(WLAIBSS) && defined(WLAIBSS_PS)
- s32 atim = 10;
-#endif /* WLAIBSS & WLAIBSS_PS */
WL_TRACE(("In\n"));
RETURN_EIO_IF_NOT_UP(cfg);
if (chan)
cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
if (wl_get_drv_status(cfg, CONNECTED, dev)) {
- struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+ struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
- (memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) &&
+ (memcmp(params->ssid, lssid->SSID, lssid->SSID_len) == 0) &&
(*channel == cfg->channel))) {
WL_ERR(("Connection already existed to " MACDBG "\n",
MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
return -EISCONN;
}
WL_ERR(("Ignore Previous connecton to %s (" MACDBG ")\n",
- ssid->SSID, MAC2STRDBG(bssid)));
+ lssid->SSID, MAC2STRDBG(bssid)));
}
/* remove the VSIE */
wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
wldev_iovar_setint(dev, "wsec", 0);
-#ifdef WLAIBSS
- /* Enable custom ibss features */
- err = wldev_iovar_setint(dev, "aibss", TRUE);
-
- if (unlikely(err)) {
- WL_ERR(("Enable custom IBSS mode failed (%d)\n", err));
- return err;
- }
-#ifdef WLAIBSS_PS
- err = wldev_ioctl(dev, WLC_SET_ATIM, &atim, sizeof(int), true);
- if (unlikely(err)) {
- WL_ERR(("Enable custom IBSS ATIM mode failed (%d)\n", err));
- return err;
- }
-#endif /* WLAIBSS_PS */
-#endif /* WLAIBSS */
err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
join_params_size, true);
}
wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
-#ifdef WLAIBSS
- cfg->aibss_txfail_seq = 0; /* initialize the sequence */
-#endif /* WLAIBSS */
cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */
return err;
}
return err;
}
-#ifdef MFP
-static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa)
-{
- u16 suite_count;
- wpa_suite_mcast_t *mcast;
- wpa_suite_ucast_t *ucast;
- u16 len;
- wpa_suite_auth_key_mgmt_t *mgmt;
-
- if (!wpa2ie)
- return -1;
-
- len = wpa2ie->len;
- mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
- if ((len -= WPA_SUITE_LEN) <= 0)
- return BCME_BADLEN;
- ucast = (wpa_suite_ucast_t *)&mcast[1];
- suite_count = ltoh16_ua(&ucast->count);
- if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
- (len -= (WPA_IE_SUITE_COUNT_LEN +
- (WPA_SUITE_LEN * suite_count))) <= 0)
- return BCME_BADLEN;
-
- mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
- suite_count = ltoh16_ua(&mgmt->count);
-
- if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
- (len -= (WPA_IE_SUITE_COUNT_LEN +
- (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
- capa[0] = *(u8 *)&mgmt->list[suite_count];
- capa[1] = *((u8 *)&mgmt->list[suite_count] + 1);
- } else
- return BCME_BADLEN;
-
- return 0;
-}
-#endif /* MFP */
static s32
wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
val = WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif
WPA_AUTH_UNSPECIFIED;
else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)
val = WPA2_AUTH_PSK|
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif
WPA2_AUTH_UNSPECIFIED;
else
val = WPA_AUTH_DISABLED;
if (is_wps_conn(sme))
val = WPA_AUTH_DISABLED;
-#ifdef BCMWAPI_WPI
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
- WL_DBG((" * wl_set_wpa_version, set wpa_auth"
- " to WPA_AUTH_WAPI 0x400"));
- val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
- }
-#endif
WL_DBG(("setting wpa_auth to 0x%0x\n", val));
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
if (unlikely(err)) {
return err;
}
-#ifdef BCMWAPI_WPI
-static s32
-wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 err = 0;
- s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
- return BCME_ERROR;
- }
-
- WL_DBG((" %s \n", __FUNCTION__));
-
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
- err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie, sme->ie_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
-
- if (unlikely(err)) {
- WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
- return err;
- }
- } else
- WL_DBG((" * skip \n"));
- return err;
-}
-#endif /* BCMWAPI_WPI */
static s32
wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
val = WL_AUTH_OPEN_SHARED;
WL_DBG(("automatic\n"));
break;
-#ifdef BCMCCX
- case NL80211_AUTHTYPE_NETWORK_EAP:
- WL_DBG(("network eap\n"));
- val = DOT11_LEAP_AUTH;
- break;
-#endif
default:
val = 2;
WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
s32 gval = 0;
s32 err = 0;
s32 wsec_val = 0;
-#ifdef MFP
- s32 mfp = 0;
- bcm_tlv_t *wpa2_ie;
- u8 rsn_cap[2];
-#endif /* MFP */
-#ifdef BCMWAPI_WPI
- s32 val = 0;
-#endif
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
case WLAN_CIPHER_SUITE_AES_CMAC:
pval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- pval = SMS4_ENABLED;
- break;
-#endif
default:
WL_ERR(("invalid cipher pairwise (%d)\n",
sme->crypto.ciphers_pairwise[0]));
return -EINVAL;
}
}
-#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
- * handshake.
- * Note that the FW feature flag only exists on kernels that support the
- * FT-EAP AKM suite.
- */
- if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
- if (pval == AES_ENABLED)
- err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
- else
- err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 0, bssidx);
-
- if (err) {
- WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err));
- return err;
- }
- }
-#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
if (sme->crypto.cipher_group) {
switch (sme->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_AES_CMAC:
gval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- val = SMS4_ENABLED;
- gval = SMS4_ENABLED;
- break;
-#endif
default:
WL_ERR(("invalid cipher group (%d)\n",
sme->crypto.cipher_group));
/* WPS-2.0 allows no security */
err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
} else {
-#ifdef BCMWAPI_WPI
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
- WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
- err = wldev_iovar_setint_bsscfg(dev, "wsec", val, bssidx);
- } else {
-#endif
WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
wsec_val = pval | gval;
-#ifdef MFP
- if (pval == AES_ENABLED) {
- if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
- DOT11_MNG_RSN_ID)) != NULL) &&
- (wl_cfg80211_get_rsn_capa(wpa2_ie, rsn_cap) == 0)) {
-
- if (rsn_cap[0] & RSN_CAP_MFPC) {
- /* MFP Capability advertised by supplicant. Check
- * whether MFP is supported in the firmware
- */
- if ((err = wldev_iovar_getint_bsscfg(dev,
- "mfp", &mfp, bssidx)) < 0) {
- WL_ERR(("Get MFP failed! "
- "Check MFP support in FW \n"));
- return -1;
- }
-
- if ((sme->crypto.n_akm_suites == 1) &&
- ((sme->crypto.akm_suites[0] ==
- WL_AKM_SUITE_MFP_PSK) ||
- (sme->crypto.akm_suites[0] ==
- WL_AKM_SUITE_MFP_1X))) {
- wsec_val |= MFP_SHA256;
- } else if (sme->crypto.n_akm_suites > 1) {
- WL_ERR(("Multiple AKM Specified \n"));
- return -EINVAL;
- }
-
- wsec_val |= MFP_CAPABLE;
- if (rsn_cap[0] & RSN_CAP_MFPR)
- wsec_val |= MFP_REQUIRED;
-
- if (rsn_cap[0] & RSN_CAP_MFPR)
- mfp = WL_MFP_REQUIRED;
- else
- mfp = WL_MFP_CAPABLE;
- err = wldev_iovar_setint_bsscfg(dev, "mfp",
- mfp, bssidx);
- }
- }
- }
-#endif /* MFP */
WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
err = wldev_iovar_setint_bsscfg(dev, "wsec",
wsec_val, bssidx);
-#ifdef BCMWAPI_WPI
- }
-#endif
}
if (unlikely(err)) {
WL_ERR(("error (%d)\n", err));
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
return err;
}
if (val & (WPA_AUTH_PSK |
-#ifdef BCMCCX
- WPA_AUTH_CCKM |
-#endif
WPA_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_8021X:
case WLAN_AKM_SUITE_PSK:
val = WPA_AUTH_PSK;
break;
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA_AUTH_CCKM;
- break;
-#endif
default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
+ WL_ERR(("invalid akm suite (0x%x)\n",
+ sme->crypto.akm_suites[0]));
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK |
-#ifdef BCMCCX
- WPA2_AUTH_CCKM |
-#endif
WPA2_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
case WLAN_AKM_SUITE_8021X:
val = WPA2_AUTH_UNSPECIFIED;
break;
-#ifdef MFP
- case WL_AKM_SUITE_MFP_1X:
- val = WPA2_AUTH_UNSPECIFIED;
- break;
- case WL_AKM_SUITE_MFP_PSK:
- val = WPA2_AUTH_PSK;
- break;
-#endif
case WLAN_AKM_SUITE_PSK:
val = WPA2_AUTH_PSK;
break;
-#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
- case WLAN_AKM_SUITE_FT_8021X:
- val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT;
- break;
-#endif
-#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
- case WLAN_AKM_SUITE_FT_PSK:
- val = WPA2_AUTH_PSK | WPA2_AUTH_FT;
- break;
-#endif
-#ifdef BCMCCX
- case WLAN_AKM_SUITE_CCKM:
- val = WPA2_AUTH_CCKM;
- break;
-#endif
- default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
- return -EINVAL;
- }
- }
-#ifdef BCMWAPI_WPI
- else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_WAPI_CERT:
- val = WAPI_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_WAPI_PSK:
- val = WAPI_AUTH_PSK;
- break;
default:
- WL_ERR(("invalid cipher group (%d)\n",
- sme->crypto.cipher_group));
+ WL_ERR(("invalid akm suite (0x%x)\n",
+ sme->crypto.akm_suites[0]));
return -EINVAL;
}
}
-#endif
- WL_DBG(("setting wpa_auth to %d\n", val));
+
+
+ WL_DBG(("setting wpa_auth to 0x%x\n", val));
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
if (unlikely(err)) {
s32 val;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise));
if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
-#ifdef BCMWAPI_WPI
- NL80211_WPA_VERSION_2 | NL80211_WAPI_VERSION_1)) &&
-#else
NL80211_WPA_VERSION_2)) &&
-#endif
(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
-#ifdef BCMWAPI_WPI
- WLAN_CIPHER_SUITE_WEP104 | WLAN_CIPHER_SUITE_SMS4)))
-#else
WLAN_CIPHER_SUITE_WEP104)))
-#endif
{
memset(&key, 0, sizeof(key));
key.len = (u32) sme->key_len;
case WLAN_CIPHER_SUITE_WEP104:
key.algo = CRYPTO_ALGO_WEP128;
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- break;
-#endif
default:
WL_ERR(("Invalid algorithm (%d)\n",
sme->crypto.ciphers_pairwise[0]));
u32 wpaie_len = 0;
u32 chan_cnt = 0;
struct ether_addr bssid;
- s32 bssidx;
+ s32 bssidx = -1;
int ret;
int wait_cnt;
WL_DBG(("In\n"));
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
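+ /* Restore the real MAC (randomized scan MAC off) before associating */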
+ wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+ if (sme->channel_hint) {
+ chan = sme->channel_hint;
+ WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n",
+ ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
+ sme->channel_hint->center_freq));
+ }
+ if (sme->bssid_hint) {
+ sme->bssid = sme->bssid_hint;
+ WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
+
if (unlikely(!sme->ssid)) {
WL_ERR(("Invalid ssid\n"));
return -EOPNOTSUPP;
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
+#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
if (cfg->scan_request) {
- wl_notify_escan_complete(cfg, dev, true, true);
+ WL_TRACE_HW4(("Aborting the scan! \n"));
+ wl_cfg80211_scan_abort(cfg);
+ wait_cnt = MAX_SCAN_ABORT_WAIT_CNT;
+ while (wl_get_drv_status(cfg, SCANNING, dev) && wait_cnt) {
+ WL_DBG(("Waiting for SCANNING terminated, wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
+ }
+ if (wl_get_drv_status(cfg, SCANNING, dev)) {
+ wl_notify_escan_complete(cfg, dev, true, true);
+ }
}
#endif
#ifdef WL_SCHED_SCAN
WL_DBG(("Currently not associated!\n"));
} else {
/* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */
- wait_cnt = 500/10;
+ wait_cnt = 200/10;
while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt));
wait_cnt--;
OSL_SLEEP(10);
}
+ if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
+ WL_ERR(("Force clear DISCONNECTING status!\n"));
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ }
}
/* Clean BSSID */
if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
/* we only allow to connect using virtual interface in case of P2P */
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n",
+ dev->ieee80211_ptr));
return BCME_ERROR;
}
- wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+ wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
} else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
/* find the RSN_IE */
}
}
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
- VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len);
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_ASSOCREQ_FLAG, (const u8 *)sme->ie, sme->ie_len);
if (unlikely(err)) {
return err;
}
}
if (chan) {
+ /* If RCC is not enabled, use the channel provided by userspace */
cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
chan_cnt = 1;
WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
chan->center_freq, chan_cnt));
- } else
+ } else {
+ /*
+ * No channel information from user space. If RCC is enabled, RCC
+ * prepares the channel list; otherwise no channel is provided and
+ * the firmware must perform a full channel scan.
+ */
+ WL_DBG(("No channel info from user space\n"));
cfg->channel = 0;
-#ifdef BCMWAPI_WPI
- WL_DBG(("1. enable wapi auth\n"));
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
- WL_DBG(("2. set wapi ie \n"));
- err = wl_set_set_wapi_ie(dev, sme);
- if (unlikely(err))
- return err;
- } else
- WL_DBG(("2. Not wapi ie \n"));
-#endif
+ }
WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
- WL_DBG(("3. set wapi version \n"));
+ WL_DBG(("3. set wpa version \n"));
err = wl_set_wpa_version(dev, sme);
if (unlikely(err)) {
WL_ERR(("Invalid wpa_version\n"));
return err;
}
-#ifdef BCMWAPI_WPI
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
- WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
- else {
- WL_DBG(("4. wl_set_auth_type\n"));
-#endif
err = wl_set_auth_type(dev, sme);
if (unlikely(err)) {
WL_ERR(("Invalid auth type\n"));
return err;
}
-#ifdef BCMWAPI_WPI
- }
-#endif
err = wl_set_set_cipher(dev, sme);
if (unlikely(err)) {
memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
ext_join_params->assoc.chanspec_num = chan_cnt;
if (chan_cnt) {
- u16 channel, band, bw, ctl_sb;
- chanspec_t chspec;
- channel = cfg->channel;
- band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
- : WL_CHANSPEC_BAND_5G;
- bw = WL_CHANSPEC_BW_20;
- ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
- chspec = (channel | band | bw | ctl_sb);
- ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- ext_join_params->assoc.chanspec_list[0] |= chspec;
- ext_join_params->assoc.chanspec_list[0] =
- wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+ if (cfg->channel) {
+ /*
+ * Use the channel provided by userspace
+ */
+ u16 channel, band, bw, ctl_sb;
+ chanspec_t chspec;
+ channel = cfg->channel;
+ band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+
+ /* Get min_bw set for the interface */
+ bw = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx);
+ if (bw == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec \n"));
+ kfree(ext_join_params);
+ return BCME_ERROR;
+ }
+
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
+ }
}
ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
}
wl_set_drv_status(cfg, CONNECTING, dev);
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
kfree(ext_join_params);
return BCME_ERROR;
}
err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- printf("Connectting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
- MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cfg->channel,
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
+ if (cfg->rcc_enabled) {
+ printf("Connecting with " MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n",
+ MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
+ } else {
+ printf("Connecting with " MACDBG " ssid \"%s\", len (%d) channel=%d\n\n",
+ MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel);
+ }
kfree(ext_join_params);
if (err) {
else
memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
- wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size);
+ if (wl_ch_to_chanspec(dev, cfg->channel, &join_params, &join_params_size) < 0) {
+ WL_ERR(("Invalid chanspec\n"));
+ return -EINVAL;
+ }
+
WL_DBG(("join_param_size %zu\n", join_params_size));
if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
act = true;
}
#endif /* ESCAN_RESULT_PATCH */
+
if (act) {
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
+#if !defined(ESCAN_RESULT_PATCH)
/* Let scan aborted by F/W */
if (cfg->scan_request) {
+ WL_TRACE_HW4(("Aborting the scan! \n"));
wl_notify_escan_complete(cfg, dev, true, true);
}
#endif /* ESCAN_RESULT_PATCH */
- wl_set_drv_status(cfg, DISCONNECTING, dev);
- scbval.val = reason_code;
- memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
- scbval.val = htod32(scbval.val);
- err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t), true);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, DISCONNECTING, dev);
- WL_ERR(("error (%d)\n", err));
- return err;
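+ /* Only issue WLC_DISASSOC when a connection is in progress or established */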
+ if (wl_get_drv_status(cfg, CONNECTING, dev) ||
+ wl_get_drv_status(cfg, CONNECTED, dev)) {
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
+ scbval.val = reason_code;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ scbval.val = htod32(scbval.val);
+ err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t), true);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+#if defined(BCM4358_CHIP)
+ WL_ERR(("Wait for complete of disconnecting \n"));
+ OSL_SLEEP(200);
+#endif /* BCM4358_CHIP */
}
}
#ifdef CUSTOM_SET_CPUCORE
s32 wsec;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
s32 err = 0;
s32 bssidx;
s32 mode = wl_get_mode_by_netdev(cfg, dev);
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
if (!ETHER_ISMULTI(mac_addr))
- memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+ memcpy((char *)&key.ea, (const void *)mac_addr, ETHER_ADDR_LEN);
key.len = (u32) params->key_len;
/* check for key index change */
key.algo = CRYPTO_ALGO_AES_CCM;
WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- break;
-#endif
default:
WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
return -EINVAL;
if (err)
return err;
+ if (enable) {
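+ /* When the in-dongle supplicant is enabled, also bound its 4-way handshake with a timeout */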
+ err = wldev_iovar_setint(dev, "sup_wpa_tmo", IDSUP_4WAY_HANDSHAKE_TIMEOUT);
+ if (err) {
+ WL_INFORM(("Setting 'sup_wpa_tmo' failed, err=%d\n", err));
+ }
+ }
+
bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
- wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
return err;
}
+#if defined(WL_VIRTUAL_APSTA)
+int
+wl_cfg80211_interface_create(struct net_device *dev, char *name)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ bcm_struct_cfgdev *new_cfgdev;
+
+ new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ NL80211_IFTYPE_STATION, NULL, name);
+ if (!new_cfgdev) {
+ return BCME_ERROR;
+ }
+ else {
+ WL_DBG(("Iface %s created successfuly\n", name));
+ return BCME_OK;
+ }
+}
+
+int
+wl_cfg80211_interface_delete(struct net_device *dev, char *name)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_info *iter, *next;
+ int err = BCME_ERROR;
+
+ if (name == NULL) {
+ return BCME_ERROR;
+ }
+
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strcmp(iter->ndev->name, name) == 0) {
+ err = wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev);
+ break;
+ }
+ }
+ }
+ if (!err) {
+ WL_DBG(("Iface %s deleted successfuly", name));
+ }
+ return err;
+}
+#endif /* defined (WL_VIRTUAL_APSTA) */
+
static s32
wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
WL_DBG(("key index (%d)\n", key_idx));
RETURN_EIO_IF_NOT_UP(cfg);
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
goto exit;
}
memset(&key, 0, sizeof(key));
+ /* Clear any buffered wep key */
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
key.len = (u32) params->key_len;
key.index = (u32) key_idx;
val = AES_ENABLED;
WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- val = SMS4_ENABLED;
- break;
-#endif /* BCMWAPI_WPI */
-#if defined(WLFBT) && defined(WLAN_CIPHER_SUITE_PMK)
- case WLAN_CIPHER_SUITE_PMK: {
- int j;
- wsec_pmk_t pmk;
- char keystring[WSEC_MAX_PSK_LEN + 1];
- char* charptr = keystring;
- uint len;
- struct wl_security *sec;
-
- sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
- if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) {
- err = wldev_iovar_setbuf(dev, "okc_info_pmk", params->key,
- WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
- if (err) {
- /* could fail in case that 'okc' is not supported */
- WL_INFORM(("Setting 'okc_info_pmk' failed, err=%d\n", err));
- }
- }
- /* copy the raw hex key to the appropriate format */
- for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
- sprintf(charptr, "%02x", params->key[j]);
- charptr += 2;
- }
- len = strlen(keystring);
- pmk.key_len = htod16(len);
- bcopy(keystring, pmk.key, len);
- pmk.flags = htod16(WSEC_PASSPHRASE);
-
- err = wldev_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk), true);
- if (err)
- return err;
- } break;
-#endif /* WLFBT && WLAN_CIPHER_SUITE_PMK */
default:
WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
return -EINVAL;
wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
}
swap_key_from_BE(&key);
+ if ((params->cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ /*
+ * For AP role, since we are doing a wl down before bringing up AP,
+ * the plumbed keys will be lost. So for AP once we bring up AP, we
+ * need to plumb keys again. So buffer the keys for future use. This
+ * is more like a WAR. If firmware later has the capability to do
+ * interface upgrade without doing a "wl down" and "wl apsta 0", then
+ * this will not be required.
+ */
+ WL_DBG(("Buffering WEP Keys \n"));
+ memcpy(&cfg->wep_key, &key, sizeof(struct wl_wsec_key));
+ }
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ WL_DBG(("Enter. key_idx: %d\n", key_idx));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- WL_DBG(("Enter\n"));
-#ifndef IEEE80211W
if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
return -EINVAL;
-#endif
RETURN_EIO_IF_NOT_UP(cfg);
memset(&key, 0, sizeof(key));
key.algo = CRYPTO_ALGO_OFF;
key.index = (u32) key_idx;
- WL_DBG(("key index (%d)\n", key_idx));
/* Set the new key/index */
swap_key_from_BE(&key);
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
s32 wsec;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
WL_DBG(("key index (%d)\n", key_idx));
swap_key_to_BE(&key);
memset(&params, 0, sizeof(params));
params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
- memcpy(params.key, key.data, params.key_len);
+ memcpy((void *)params.key, key.data, params.key_len);
err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
if (unlikely(err)) {
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
break;
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- key.algo = CRYPTO_ALGO_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- break;
-#endif
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
- /* to connect to mixed mode AP */
- case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
- params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
- WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
- break;
-#endif
default:
WL_ERR(("Invalid algo (0x%x)\n", wsec));
return -EINVAL;
return err;
}
-// terence 20130703: Fix for wrong group_capab (timing issue)
-int p2p_disconnected = 0;
-struct ether_addr p2p_disconnected_bssid;
+static s32
+wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+ struct net_device *dev, u8 key_idx)
+{
+ WL_INFORM(("Not supported\n"));
+ return -EOPNOTSUPP;
+}
#if defined(RSSIAVG)
static wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
-static wl_rssi_cache_ctrl_t g_rssi2_cache_ctrl;
+static wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
#endif
#if defined(BSSCACHE)
static wl_bss_cache_ctrl_t g_bss_cache_ctrl;
#endif
static s32
-wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *dev, u8 key_idx)
-{
-#ifdef MFP
- return 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
#else
- WL_INFORM(("Not supported\n"));
- return -EOPNOTSUPP;
-#endif /* MFP */
-}
-
-static s32
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
+#endif
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
scb_val_t scb_val;
s32 rate;
s32 err = 0;
sta_info_t *sta;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
s8 eabuf[ETHER_ADDR_STR_LEN];
#endif
- static int err_cnt = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool fw_assoc_state = FALSE;
+ u32 dhd_assoc_state = 0;
+ static int err_cnt = 0;
+
RETURN_EIO_IF_NOT_UP(cfg);
if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
WL_ERR(("GET STA INFO failed, %d\n", err));
return err;
}
- sinfo->filled = STATION_INFO_INACTIVE_TIME;
+ sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
sta = (sta_info_t *)cfg->ioctl_buf;
sta->len = dtoh16(sta->len);
sta->cap = dtoh16(sta->cap);
sta->idle = dtoh32(sta->idle);
sta->in = dtoh32(sta->in);
sinfo->inactive_time = sta->idle * 1000;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
if (sta->flags & WL_STA_ASSOC) {
- sinfo->filled |= STATION_INFO_CONNECTED_TIME;
+ sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
sinfo->connected_time = sta->in;
}
WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n",
if (err) {
WL_ERR(("Failed to get current BSSID\n"));
} else {
- if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
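+ /* A null BSSID means there is no valid association to compare against, so skip roam detection */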
+ if (!ETHER_ISNULLADDR(&bssid.octet) &&
+ memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
/* roaming is detected */
err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
if (err)
}
}
}
- if (!wl_get_drv_status(cfg, CONNECTED, dev) ||
- (dhd_is_associated(dhd, NULL, &err) == FALSE)) {
+ dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+ fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+ if (!dhd_assoc_state || !fw_assoc_state) {
WL_ERR(("NOT assoc\n"));
if (err == -ERESTARTSYS)
return err;
+ if (!dhd_assoc_state) {
+ WL_TRACE_HW4(("drv state is not connected \n"));
+ }
+ if (!fw_assoc_state) {
+ WL_TRACE_HW4(("fw state is not associated \n"));
+ }
+ /* Disconnect if the firmware has not been associated for FW_ASSOC_WATCHDOG_TIME ms.
+ * 'err == 0' from dhd_is_associated() together with '!fw_assoc_state'
+ * means that the BSSID is null.
+ */
+ if (dhd_assoc_state && !fw_assoc_state && !err) {
+ if (!fw_assoc_watchdog_started) {
+ fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+ fw_assoc_watchdog_started = TRUE;
+ WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+ } else {
+ if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+ FW_ASSOC_WATCHDOG_TIME) {
+ fw_assoc_watchdog_started = FALSE;
+ err = -ENODEV;
+ WL_TRACE_HW4(("fw is not associated for %d ms \n",
+ (OSL_SYSUPTIME() - fw_assoc_watchdog_ms)));
+ goto get_station_err;
+ }
+ }
+ }
err = -ENODEV;
return err;
}
+ fw_assoc_watchdog_started = FALSE;
curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
int rxpktglom;
#endif
rate = dtoh32(rate);
- sinfo->filled |= STATION_INFO_TX_BITRATE;
+ sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
sinfo->txrate.legacy = rate * 5;
WL_DBG(("Rate %d Mbps\n", (rate / 2)));
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
}
rssi = dtoh32(scb_val.val);
#if defined(RSSIAVG)
- err = wl_update_connected_rssi_cache(dev, &g_rssi2_cache_ctrl, &rssi);
+ err = wl_update_connected_rssi_cache(dev, &g_connected_rssi_cache_ctrl, &rssi);
if (err) {
WL_ERR(("Could not get rssi (%d)\n", err));
goto get_station_err;
}
- wl_delete_dirty_rssi_cache(&g_rssi2_cache_ctrl);
- wl_reset_rssi_cache(&g_rssi2_cache_ctrl);
+ wl_delete_dirty_rssi_cache(&g_connected_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&g_connected_rssi_cache_ctrl);
#endif
#if defined(RSSIOFFSET)
rssi = wl_update_rssi_offset(dev, rssi);
// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
rssi = MIN(rssi, RSSI_MAXVAL);
#endif
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
sinfo->signal = rssi;
WL_DBG(("RSSI %d dBm\n", rssi));
err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt,
sizeof(pktcnt), false);
if (!err) {
- sinfo->filled |= (STATION_INFO_RX_PACKETS |
- STATION_INFO_RX_DROP_MISC |
- STATION_INFO_TX_PACKETS |
- STATION_INFO_TX_FAILED);
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+ STA_INFO_BIT(INFO_RX_DROP_MISC) |
+ STA_INFO_BIT(INFO_TX_PACKETS) |
+ STA_INFO_BIT(INFO_TX_FAILED));
sinfo->rx_packets = pktcnt.rx_good_pkt;
sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
sinfo->tx_packets = pktcnt.tx_good_pkt;
/* Disconnect due to zero BSSID or error to get RSSI */
WL_ERR(("force cfg80211_disconnected: %d\n", err));
wl_clr_drv_status(cfg, CONNECTED, dev);
- cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
wl_link_down(cfg);
}
}
RETURN_EIO_IF_NOT_UP(cfg);
WL_DBG(("Enter\n"));
- if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode ||
- !wl_get_drv_status(cfg, CONNECTED, dev)) {
+ if (cfg->p2p_net == dev || _net_info == NULL ||
+ !wl_get_drv_status(cfg, CONNECTED, dev) ||
+ (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS &&
+ wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) {
return err;
}
-
- /* Delete pm_enable_work */
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND);
+ /* Enlarge pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_LONG);
pm = enabled ? PM_FAST : PM_OFF;
if (_net_info->pm_block) {
dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
}
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ if (strcmp(command, "SCAN-ACTIVE") == 0) {
+ cfg->active_scan = 1;
+ } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+ cfg->active_scan = 0;
+ } else
+ WL_ERR(("Unknown command \n"));
+}
+
static __used u32 wl_find_msb(u16 bit16)
{
u32 ret = 0;
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- s32 err = 0;
+ s32 err = BCME_OK;
if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
WL_INFORM(("device is not ready\n"));
- return 0;
+ return err;
}
return err;
}
static s32
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
#else
wl_cfg80211_suspend(struct wiphy *wiphy)
-#endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) */
{
+ s32 err = BCME_OK;
#ifdef DHD_CLEAR_ON_SUSPEND
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_info *iter, *next;
if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
WL_INFORM(("device is not ready : status (%d)\n",
(int)cfg->status));
- return 0;
+ return err;
}
- for_each_ndev(cfg, iter, next)
- wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface doesn't have a ndev associated with it (for kernel > 3.8) */
+ if (iter->ndev)
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
if (cfg->scan_request) {
cfg80211_scan_done(cfg->scan_request, true);
cfg->scan_request = NULL;
}
for_each_ndev(cfg, iter, next) {
- wl_clr_drv_status(cfg, SCANNING, iter->ndev);
- wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ if (iter->ndev) {
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
}
spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
for_each_ndev(cfg, iter, next) {
- if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
- wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+ if (iter->ndev) {
+ if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+ wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
+ }
}
}
#endif /* DHD_CLEAR_ON_SUSPEND */
- return 0;
+
+
+ return err;
}
static s32
struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
if (!pmk_list) {
- printk("pmk_list is NULL\n");
+ printf("pmk_list is NULL\n");
return -EINVAL;
}
/* pmk list is supported only for STA interface i.e. primary interface
struct cfg80211_pmksa *pmksa)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct _pmkid_list pmkid = {0};
+
+ struct _pmkid_list pmkid = {.npmkid = 0};
s32 err = 0;
int i;
wl_scan_params_t *params;
int params_size;
int num_chans;
+ int bssidx = 0;
*out_params_size = 0;
if (channel == -1)
params->channel_list[0] = htodchanspec(channel);
else
- params->channel_list[0] = wl_ch_host_to_driver(channel);
+ params->channel_list[0] = wl_ch_host_to_driver(bssidx, channel);
/* Our scan params have 1 channel and 0 ssids */
params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
#endif /* WL_CFG80211_P2P_DEV_IF */
{
s32 target_channel;
- u32 id;
s32 err = BCME_OK;
struct ether_addr primary_mac;
struct net_device *ndev = NULL;
goto exit;
}
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+ return -EAGAIN;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status_all(cfg, SCANNING)) {
wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
#if defined(WL_ENABLE_P2P_IF)
cfg->remain_on_chan_type = channel_type;
#endif /* WL_ENABLE_P2P_IF */
- id = ++cfg->last_roc_id;
- if (id == 0)
- id = ++cfg->last_roc_id;
- *cookie = id;
-
+ *cookie = wl_cfg80211_get_new_roc_id(cfg);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status(cfg, SCANNING, ndev)) {
struct timer_list *_timer;
WL_DBG(("scan is running. go to fake listen state\n"));
- wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
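+ /* A long listen cannot be faked behind the running scan; abort the scan instead */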
+ if (duration > LONG_LISTEN_TIME) {
+ wl_cfg80211_scan_abort(cfg);
+ } else {
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
- if (timer_pending(&cfg->p2p->listen_timer)) {
- WL_DBG(("cancel current listen timer \n"));
- del_timer_sync(&cfg->p2p->listen_timer);
- }
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ WL_DBG(("cancel current listen timer \n"));
+ del_timer_sync(&cfg->p2p->listen_timer);
+ }
- _timer = &cfg->p2p->listen_timer;
- wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
+ _timer = &cfg->p2p->listen_timer;
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
- INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
+ INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
- err = BCME_OK;
- goto exit;
+ err = BCME_OK;
+ goto exit;
+ }
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
* without turning on P2P
*/
get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
p2p_on(cfg) = true;
}
bcm_struct_cfgdev *cfgdev, u64 cookie)
{
s32 err = 0;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
#ifdef P2PLISTEN_AP_SAMECHN
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct net_device *dev;
#endif /* P2PLISTEN_AP_SAMECHN */
+ RETURN_EIO_IF_NOT_UP(cfg);
#if defined(WL_CFG80211_P2P_DEV_IF)
if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
WL_DBG((" enter ) on P2P dedicated discover interface\n"));
WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
}
#endif /* P2PLISTEN_AP_SAMECHN */
+
+ if (cfg->last_roc_id == cookie) {
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ } else {
+ WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n",
+ __FUNCTION__, cookie, cfg->last_roc_id));
+ }
+
return err;
}
struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 ret = BCME_OK;
- afx_instance = container_of(work, struct afx_hdl, work);
+ BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work);
if (afx_instance != NULL && cfg->afx_hdl->is_active) {
if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
u32 max_retry = WL_CHANNEL_SYNC_RETRY;
+ bool is_p2p_gas = false;
if (dev == NULL)
return -1;
wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
cfg->afx_hdl->is_active = TRUE;
+ if (cfg->afx_hdl->pending_tx_act_frm) {
+ wl_action_frame_t *action_frame;
+ action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len))
+ is_p2p_gas = true;
+ }
+
/* Loop to wait until we find a peer's channel or the
* pending action frame tx is cancelled.
*/
!(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
break;
+ if (is_p2p_gas)
+ break;
+
if (cfg->afx_hdl->my_listen_chan) {
WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
cfg->afx_hdl->my_listen_chan));
return result;
}
#endif /* WL11U */
-
+static bool
+wl_cfg80211_check_dwell_overflow(int32 requested_dwell, ulong dwell_jiffies)
+{
+ if ((requested_dwell & CUSTOM_RETRY_MASK) &&
+ (jiffies_to_msecs(jiffies - dwell_jiffies) >
+ (requested_dwell & ~CUSTOM_RETRY_MASK))) {
+ WL_ERR(("Action frame TX retry time over dwell time!\n"));
+ return true;
+ }
+ return false;
+}
static bool
wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
u8 category, action;
s32 tx_retry;
struct p2p_config_af_params config_af_params;
+ struct net_info *netinfo;
#ifdef VSDB
ulong off_chan_started_jiffies = 0;
#endif
+ ulong dwell_jiffies = 0;
+ bool dwell_overflow = false;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ int32 requested_dwell = af_params->dwell_time;
/* Add the default dwell time
* Dwell time to stay off-channel to wait for a response action frame
cfg->next_af_subtype = action + 1;
af_params->dwell_time = WL_MED_DWELL_TIME;
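+ /* The top byte of the requested dwell time may carry a custom retry count (CUSTOM_RETRY_MASK) */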
+ if (requested_dwell & CUSTOM_RETRY_MASK) {
+ config_af_params.max_tx_retry =
+ (requested_dwell & CUSTOM_RETRY_MASK) >> 24;
+ af_params->dwell_time =
+ (requested_dwell & ~CUSTOM_RETRY_MASK);
+ WL_DBG(("Custom retry(%d) and dwell time(%d) is set.\n",
+ config_af_params.max_tx_retry,
+ af_params->dwell_time));
+ }
} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
action == P2PSD_ACTION_ID_GAS_CRESP) {
/* configure service discovery response frame */
wldev_iovar_setint(dev, "mpc", 0);
}
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
/* validate channel and p2p ies */
if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
- wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+ netinfo && netinfo->bss.ies.probe_req_ie_len) {
config_af_params.search_channel = true;
} else {
config_af_params.search_channel = false;
wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
+ /* Abort P2P listen */
+ if (discover_cfgdev(cfgdev, cfg)) {
+ if (cfg->p2p_supported && cfg->p2p) {
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ }
+ }
+
#ifdef WL11U
/* handling DFS channel exceptions */
if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
/* save af_params for rx process */
cfg->afx_hdl->pending_tx_act_frm = af_params;
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) {
+ WL_DBG(("Set GAS action frame config.\n"));
+ config_af_params.search_channel = false;
+ config_af_params.max_tx_retry = 1;
+ }
+
/* search peer's channel */
if (config_af_params.search_channel) {
/* initialize afx_hdl */
- if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((cfg->afx_hdl->bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
goto exit;
}
cfg->afx_hdl->dev = dev;
/* Suspend P2P discovery's search-listen to prevent it from
* starting a scan or changing the channel.
*/
- wl_cfgp2p_discover_enable_search(cfg, false);
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
+ WL_ERR(("Can not disable discovery mode\n"));
+ goto exit;
+ }
/* update channel */
af_params->channel = cfg->afx_hdl->peer_chan;
wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
+ wl_cfgp2p_need_wait_actfrmae(cfg, action_frame->data, action_frame->len, true);
+
+ dwell_jiffies = jiffies;
/* Now send a tx action frame */
ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
+ dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
/* if failed, retry it. tx_retry_max value is configure by .... */
- while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) {
+ while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) &&
+ !dwell_overflow) {
#ifdef VSDB
if (af_params->channel) {
if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
#endif /* VSDB */
ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
false : true;
+ dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
}
if (ack == false) {
bool channel_type_valid,
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
unsigned int wait, const u8* buf, size_t len,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
bool no_cck,
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
bool dont_wait_for_ack,
#endif
u64 *cookie)
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
}
else {
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+ WL_ERR(("Find p2p index failed\n"));
return BCME_ERROR;
}
}
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
s32 ie_len = len - ie_offset;
-#ifdef P2PONEINT
- if (dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
- dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
- if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
+ if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) {
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
- VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len);
+ }
+ wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_PRBRSP_FLAG, (const u8 *)(buf + ie_offset), ie_len);
cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
+#if defined(P2P_IE_MISSING_FIX)
+ if (!cfg->p2p_prb_noti) {
+ cfg->p2p_prb_noti = true;
+ WL_DBG(("%s: TX 802_1X Probe Response first time.\n",
+ __FUNCTION__));
+ }
+#endif
goto exit;
} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
ieee80211_is_deauth(mgmt->frame_control)) {
{
s32 err = 0;
s32 ap_isolate = 0;
-#if defined(SUPPORT_HOSTAPD_BGN_MODE)
- dhd_pub_t *dhd;
- s32 gmode = -1, nmode = -1;
- s32 gmode_prev = -1, nmode_prev = -1;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->p2p_net == dev)
- dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
- dhd = (dhd_pub_t *)(cfg->pub);
-#endif /* SUPPORT_HOSTAPD_BGN_MODE */
if (params->use_cts_prot >= 0) {
}
}
if (params->basic_rates) {
-#if defined(SUPPORT_HOSTAPD_BGN_MODE)
- switch ((int)(params->basic_rates[params->basic_rates_len -1])) {
- case 22: /* B only , rate 11 */
- gmode = 0;
- nmode = 0;
- break;
- case 108: /* G only , rate 54 */
- gmode = 2;
- nmode = 0;
- break;
- default:
- gmode = -1;
- nmode = -1;
- break;
- }
-#endif /* SUPPORT_HOSTAPD_BGN_MODE */
}
if (params->ap_isolate >= 0) {
}
if (params->ht_opmode >= 0) {
-#if defined(SUPPORT_HOSTAPD_BGN_MODE)
- nmode = 1;
- gmode = 1;
- } else {
- nmode = 0;
-#endif /* SUPPORT_HOSTAPD_BGN_MODE */
- }
-
-#if defined(SUPPORT_HOSTAPD_BGN_MODE)
- err = wldev_iovar_getint(dev, "nmode", &nmode_prev);
- if (unlikely(err)) {
- WL_ERR(("error reading nmode (%d)\n", err));
- }
- if (nmode == nmode_prev) {
- nmode = -1;
}
- err = wldev_ioctl(dev, WLC_GET_GMODE, &gmode_prev, sizeof(gmode_prev), 0);
- if (unlikely(err)) {
- WL_ERR(("error reading gmode (%d)\n", err));
- }
- if (gmode == gmode_prev) {
- gmode = -1;
- }
-
- if (((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) == DHD_FLAG_HOSTAP_MODE) &&
- ((gmode > -1) || (nmode > -1))) {
- s32 val = 0;
- err = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true);
- if (unlikely(err))
- WL_ERR(("WLC_DOWN command failed:[%d]\n", err));
-
- if (nmode > -1) {
- err = wldev_iovar_setint(dev, "nmode", nmode);
- if (unlikely(err))
- WL_ERR(("nmode command failed:mode[%d]:err[%d]\n", nmode, err));
- }
-
- if (gmode > -1) {
- err = wldev_ioctl(dev, WLC_SET_GMODE, &gmode, sizeof(s32), true);
- if (unlikely(err))
- WL_ERR(("WLC_SET_GMODE command failed:mode[%d]:err[%d]\n",
- gmode, err));
- }
-
- val = 0;
- err = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true);
- if (unlikely(err))
- WL_ERR(("WLC_UP command failed:err[%d]\n", err));
-
- }
-#endif /* SUPPORT_HOSTAPD_BGN_MODE */
return 0;
}
static s32
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
-wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- struct cfg80211_chan_def chandef)
-#else
wl_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
-#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
{
s32 _chan;
chanspec_t chspec = 0;
chanspec_t fw_chspec = 0;
u32 bw = WL_CHANSPEC_BW_20;
+#ifdef WL11ULB
+ u32 ulb_bw = wl_cfg80211_get_ulb_bw(dev->ieee80211_ptr);
+#endif /* WL11ULB */
s32 err = BCME_OK;
s32 bw_cap = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
#endif /* CUSTOM_SET_CPUCORE */
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
- enum nl80211_channel_type channel_type = NL80211_CHAN_HT20;
-#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
-
-#ifndef P2PONEINT
dev = ndev_to_wlc_ndev(dev, cfg);
-#endif
_chan = ieee80211_frequency_to_channel(chan->center_freq);
printf("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
dev->ifindex, channel_type, _chan);
-#ifdef CUSTOM_PLATFORM_NV_TEGRA
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_COMPAT_WIRELESS))
- WL_ERR(("chan_width = %d\n", chandef.width));
- switch (chandef.width) {
- case NL80211_CHAN_WIDTH_40:
- bw = WL_CHANSPEC_BW_40;
- break;
- case NL80211_CHAN_WIDTH_80:
- bw = WL_CHANSPEC_BW_80;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- bw = WL_CHANSPEC_BW_8080;
- break;
- case NL80211_CHAN_WIDTH_160:
- bw = WL_CHANSPEC_BW_160;
- break;
- default:
- bw = WL_CHANSPEC_BW_20;
- break;
- }
- goto set_channel;
-#endif /* ((LINUX_VERSION >= VERSION(3, 8, 0) && !WL_COMPAT_WIRELESS) */
-#endif /* CUSTOM_PLATFORM_NV_TEGRA */
+#ifdef WL11ULB
+ if (ulb_bw) {
+ WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw));
+ bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw);
+ goto set_channel;
+ }
+#endif /* WL11ULB */
if (chan->band == IEEE80211_BAND_5GHZ) {
param.band = WLC_BAND_5G;
err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
#ifdef CUSTOM_SET_CPUCORE
if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
WL_DBG(("SoftAP mode do not need to set cpucore\n"));
- } else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
- (chspec & WL_CHANSPEC_BW_80)) {
- /* If GO is vht80 */
- dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
- dhd_set_cpucore(dhd, TRUE);
+ } else if (chspec & WL_CHANSPEC_BW_80) {
+ /* SoftAp only mode do not need to set cpucore */
+ if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) &&
+ dev != bcmcfg_to_prmry_ndev(cfg)) {
+ /* Soft AP on virtual Iface (AP+STA case) */
+ dhd->chan_isvht80 |= DHD_FLAG_HOSTAP_MODE;
+ dhd_set_cpucore(dhd, TRUE);
+ } else if (is_p2p_group_iface(dev->ieee80211_ptr)) {
+ /* If P2P IF is vht80 */
+ dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+ dhd_set_cpucore(dhd, TRUE);
+ }
}
#endif /* CUSTOM_SET_CPUCORE */
+ if (!err && (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
+ /* Update AP/GO operating channel */
+ cfg->ap_oper_channel = _chan;
+ }
return err;
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
static s32
-wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
+wl_validate_opensecurity(struct net_device *dev, s32 bssidx, bool privacy)
{
s32 err = BCME_OK;
+ u32 wpa_val;
+ s32 wsec = 0;
/* set auth */
err = wldev_iovar_setint_bsscfg(dev, "auth", 0, bssidx);
WL_ERR(("auth error %d\n", err));
return BCME_ERROR;
}
-#ifndef CUSTOMER_HW10 /* for WEP Support */
+
+ if (privacy) {
+ /* If privacy bit is set in open mode, then WEP would be enabled */
+ wsec = WEP_ENABLED;
+ WL_DBG(("Setting wsec to %d for WEP \n", wsec));
+ }
+
/* set wsec */
- err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
if (err < 0) {
WL_ERR(("wsec error %d\n", err));
return BCME_ERROR;
}
-#endif /* CUSTOMER_HW10 */
/* set upper-layer auth */
- err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC)
+ wpa_val = WPA_AUTH_NONE;
+ else
+ wpa_val = WPA_AUTH_DISABLED;
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_val, bssidx);
if (err < 0) {
WL_ERR(("wpa_auth error %d\n", err));
return BCME_ERROR;
wpa_suite_auth_key_mgmt_t *mgmt;
wpa_pmkid_list_t *pmkid;
int cnt = 0;
-#ifdef MFP
- int mfp = 0;
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
-#endif /* MFP */
u16 suite_count;
u8 rsn_cap[2];
case WPA_CIPHER_AES_CCM:
gval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WAPI_CIPHER_SMS4:
- gval = SMS4_ENABLED;
- break;
-#endif
default:
WL_ERR(("No Security Info\n"));
break;
case WPA_CIPHER_AES_CCM:
pval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WAPI_CIPHER_SMS4:
- pval = SMS4_ENABLED;
- break;
-#endif
default:
WL_ERR(("No Security Info\n"));
}
suite_count = cnt = ltoh16_ua(&mgmt->count);
while (cnt--) {
switch (mgmt->list[cnt].type) {
- case RSN_AKM_NONE:
- wpa_auth = WPA_AUTH_NONE;
- break;
- case RSN_AKM_UNSPECIFIED:
- wpa_auth = WPA2_AUTH_UNSPECIFIED;
- break;
- case RSN_AKM_PSK:
- wpa_auth = WPA2_AUTH_PSK;
- break;
-#ifdef MFP
- case RSN_AKM_MFP_PSK:
- wpa_auth |= WPA2_AUTH_PSK;
- wsec |= MFP_SHA256;
- break;
- case RSN_AKM_MFP_1X:
+ case RSN_AKM_NONE:
+ wpa_auth |= WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
wpa_auth |= WPA2_AUTH_UNSPECIFIED;
- wsec |= MFP_SHA256;
+ break;
+ case RSN_AKM_PSK:
+ wpa_auth |= WPA2_AUTH_PSK;
break;
-#endif /* MFP */
- default:
- WL_ERR(("No Key Mgmt Info\n"));
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
}
}
wme_bss_disable = 1;
}
-#ifdef MFP
- if (rsn_cap[0] & RSN_CAP_MFPR) {
- WL_DBG(("MFP Required \n"));
- mfp = WL_MFP_REQUIRED;
- } else if (rsn_cap[0] & RSN_CAP_MFPC) {
- WL_DBG(("MFP Capable \n"));
- mfp = WL_MFP_CAPABLE;
- }
-#endif /* MFP */
/* set wme_bss_disable to sync RSN Capabilities */
err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
}
- if ((len -= RSN_CAP_LEN) >= WPA2_PMKID_COUNT_LEN) {
+ len -= RSN_CAP_LEN;
+ if (len >= WPA2_PMKID_COUNT_LEN) {
pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
cnt = ltoh16_ua(&pmkid->count);
if (cnt != 0) {
/* so don't bother to send down this info to firmware */
}
-#ifdef MFP
- if ((len -= WPA2_PMKID_COUNT_LEN) >= RSN_GROUPMANAGE_CIPHER_LEN) {
- err = wldev_iovar_setbuf_bsscfg(dev, "bip",
- (void *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN),
- RSN_GROUPMANAGE_CIPHER_LEN,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("bip set error %d\n", err));
- return BCME_ERROR;
- }
- }
-#endif
/* set auth */
err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
WL_ERR(("auth error %d\n", err));
return BCME_ERROR;
}
+
/* set wsec */
err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
if (err < 0) {
return BCME_ERROR;
}
-#ifdef MFP
- if (mfp) {
- /* This needs to go after wsec otherwise the wsec command will
- * overwrite the values set by MFP
- */
- if ((err = wldev_iovar_setint_bsscfg(dev, "mfp", mfp, bssidx)) < 0) {
- WL_ERR(("MFP Setting failed. ret = %d \n", err));
- return err;
- }
- }
-#endif /* MFP */
/* set upper-layer auth */
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
return 0;
}
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
-static u32 wl_get_cipher_type(uint8 type)
-{
- u32 ret = 0;
- switch (type) {
- case WPA_CIPHER_NONE:
- ret = 0;
- break;
- case WPA_CIPHER_WEP_40:
- case WPA_CIPHER_WEP_104:
- ret = WEP_ENABLED;
- break;
- case WPA_CIPHER_TKIP:
- ret = TKIP_ENABLED;
- break;
- case WPA_CIPHER_AES_CCM:
- ret = AES_ENABLED;
- break;
-#ifdef BCMWAPI_WPI
- case WAPI_CIPHER_SMS4:
- ret = SMS4_ENABLED;
- break;
-#endif
- default:
- WL_ERR(("No Security Info\n"));
- }
- return ret;
-}
-static u32 wl_get_suite_auth_key_mgmt_type(uint8 type)
+static s32
+wl_cfg80211_bcn_validate_sec(
+ struct net_device *dev,
+ struct parsed_ies *ies,
+ u32 dev_role,
+ s32 bssidx,
+ bool privacy)
{
- u32 ret = 0;
- switch (type) {
- case RSN_AKM_NONE:
- ret = WPA_AUTH_NONE;
- break;
- case RSN_AKM_UNSPECIFIED:
- ret = WPA_AUTH_UNSPECIFIED;
- break;
- case RSN_AKM_PSK:
- ret = WPA_AUTH_PSK;
- break;
- default:
- WL_ERR(("No Key Mgmt Info\n"));
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+
+ if (!bss) {
+ WL_ERR(("cfgbss is NULL \n"));
+ return BCME_ERROR;
}
- return ret;
-}
-static s32
-wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie,
- bcm_tlv_t *wpa2ie, s32 bssidx)
-{
- wpa_suite_mcast_t *mcast;
- wpa_suite_ucast_t *ucast;
- wpa_suite_auth_key_mgmt_t *mgmt;
- u16 auth = 0; /* d11 open authentication */
- u16 count;
- s32 err = BCME_OK;
- u32 wme_bss_disable;
- u16 suite_count;
- u8 rsn_cap[2];
- s32 len = 0;
- u32 i;
- u32 wsec1, wsec2, wsec;
- u32 pval = 0;
- u32 gval = 0;
- u32 wpa_auth = 0;
- u32 wpa_auth1 = 0;
- u32 wpa_auth2 = 0;
- u8* ptmp;
+ if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
+ /* For P2P GO, the sec type is WPA2-PSK */
+ WL_DBG(("P2P GO: validating wpa2_ie"));
+ if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0)
+ return BCME_ERROR;
- if (wpaie == NULL || wpa2ie == NULL)
- goto exit;
+ } else if (dev_role == NL80211_IFTYPE_AP) {
- WL_DBG(("Enter \n"));
- len = wpaie->length; /* value length */
- len -= WPA_IE_TAG_FIXED_LEN;
- /* check for multicast cipher suite */
- if (len < WPA_SUITE_LEN) {
- WL_INFORM(("no multicast cipher suite\n"));
- goto exit;
- }
+ WL_DBG(("SoftAP: validating security"));
+ /* If wpa2_ie or wpa_ie is present validate it */
- /* pick up multicast cipher */
- mcast = (wpa_suite_mcast_t *)&wpaie[1];
- len -= WPA_SUITE_LEN;
- if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
- if (IS_WPA_CIPHER(mcast->type)) {
- gval |= wl_get_cipher_type(mcast->type);
+ if ((ies->wpa2_ie || ies->wpa_ie) &&
+ ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
+ bss->security_mode = false;
+ return BCME_ERROR;
}
- }
- WL_ERR(("\nwpa ie validate\n"));
- WL_ERR(("wpa ie mcast cipher = 0x%X\n", gval));
- /* Check for unicast suite(s) */
- if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM(("no unicast suite\n"));
- goto exit;
- }
+ bss->security_mode = true;
+ if (bss->rsn_ie) {
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ }
+ if (bss->wpa_ie) {
+ kfree(bss->wpa_ie);
+ bss->wpa_ie = NULL;
+ }
+ if (bss->wps_ie) {
+ kfree(bss->wps_ie);
+ bss->wps_ie = NULL;
+ }
+ if (ies->wpa_ie != NULL) {
+ /* WPAIE */
+ bss->rsn_ie = NULL;
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else if (ies->wpa2_ie != NULL) {
+ /* RSNIE */
+ bss->wpa_ie = NULL;
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ if (!ies->wpa2_ie && !ies->wpa_ie) {
+ wl_validate_opensecurity(dev, bssidx, privacy);
+ bss->security_mode = false;
+ }
- /* walk thru unicast cipher list and pick up what we recognize */
- ucast = (wpa_suite_ucast_t *)&mcast[1];
- count = ltoh16_ua(&ucast->count);
- len -= WPA_IE_SUITE_COUNT_LEN;
- for (i = 0; i < count && len >= WPA_SUITE_LEN;
- i++, len -= WPA_SUITE_LEN) {
- if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
- if (IS_WPA_CIPHER(ucast->list[i].type)) {
- pval |= wl_get_cipher_type(ucast->list[i].type);
- }
+ if (ies->wps_ie) {
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
}
}
- WL_ERR(("wpa ie ucast count =%d, cipher = 0x%X\n", count, pval));
- /* FOR WPS , set SEC_OW_ENABLED */
- wsec1 = (pval | gval | SES_OW_ENABLED);
- WL_ERR(("wpa ie wsec = 0x%X\n", wsec1));
+ return 0;
- len -= (count - i) * WPA_SUITE_LEN;
- /* Check for auth key management suite(s) */
- if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM((" no auth key mgmt suite\n"));
- goto exit;
- }
- /* walk thru auth management suite list and pick up what we recognize */
- mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
- count = ltoh16_ua(&mgmt->count);
- len -= WPA_IE_SUITE_COUNT_LEN;
- for (i = 0; i < count && len >= WPA_SUITE_LEN;
- i++, len -= WPA_SUITE_LEN) {
- if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
- if (IS_WPA_AKM(mgmt->list[i].type)) {
+}
- wpa_auth1 |= wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type);
- }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+static s32 wl_cfg80211_bcn_set_params(
+ struct cfg80211_ap_settings *info,
+ struct net_device *dev,
+ u32 dev_role, s32 bssidx)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 err = BCME_OK;
+
+ WL_DBG(("interval (%d) \ndtim_period (%d) \n",
+ info->beacon_interval, info->dtim_period));
+
+ if (info->beacon_interval) {
+ if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
+ &info->beacon_interval, sizeof(s32), true)) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
}
+ }
+ if (info->dtim_period) {
+ if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
+ &info->dtim_period, sizeof(s32), true)) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
}
- WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth1));
- WL_ERR(("\nwpa2 ie validate\n"));
- pval = 0;
- gval = 0;
- len = wpa2ie->len;
- /* check the mcast cipher */
- mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
- ptmp = mcast->oui;
- gval = wl_get_cipher_type(ptmp[DOT11_OUI_LEN]);
+ if ((info->ssid) && (info->ssid_len > 0) &&
+ (info->ssid_len <= 32)) {
+ WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
+ if (dev_role == NL80211_IFTYPE_AP) {
+ /* Store the hostapd SSID */
+ memset(cfg->hostapd_ssid.SSID, 0x00, 32);
+ memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+ cfg->hostapd_ssid.SSID_len = info->ssid_len;
+ } else {
+ /* P2P GO */
+ memset(cfg->p2p->ssid.SSID, 0x00, 32);
+ memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+ cfg->p2p->ssid.SSID_len = info->ssid_len;
+ }
+ }
- WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
- if ((len -= WPA_SUITE_LEN) <= 0)
- {
- WL_ERR(("P:wpa2 ie len[%d]", len));
- return BCME_BADLEN;
- }
-
- /* check the unicast cipher */
- ucast = (wpa_suite_ucast_t *)&mcast[1];
- suite_count = ltoh16_ua(&ucast->count);
- WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
- pval |= wl_get_cipher_type(ucast->list[0].type);
-
- if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
- return BCME_BADLEN;
-
- WL_ERR(("wpa2 ie ucast cipher = 0x%X\n", pval));
-
- /* FOR WPS , set SEC_OW_ENABLED */
- wsec2 = (pval | gval | SES_OW_ENABLED);
- WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
-
- /* check the AKM */
- mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
- suite_count = ltoh16_ua(&mgmt->count);
- ptmp = (u8 *)&mgmt->list[0];
- wpa_auth2 = wl_get_suite_auth_key_mgmt_type(ptmp[DOT11_OUI_LEN]);
- WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth2));
-
- if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
- rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
- rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
- if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
- wme_bss_disable = 0;
- } else {
- wme_bss_disable = 1;
- }
- WL_DBG(("P:rsn_cap[0]=[0x%X]:wme_bss_disabled[%d]\n", rsn_cap[0], wme_bss_disable));
-
- /* set wme_bss_disable to sync RSN Capabilities */
- err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
- if (err < 0) {
- WL_ERR(("wme_bss_disable error %d\n", err));
- return BCME_ERROR;
- }
- } else {
- WL_DBG(("There is no RSN Capabilities. remained len %d\n", len));
- }
-
- wsec = (wsec1 | wsec2);
- wpa_auth = (wpa_auth1 | wpa_auth2);
- WL_ERR(("wpa_wpa2 wsec=0x%X wpa_auth=0x%X\n", wsec, wpa_auth));
-
- /* set auth */
- err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
- if (err < 0) {
- WL_ERR(("auth error %d\n", err));
- return BCME_ERROR;
- }
- /* set wsec */
- err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
- if (err < 0) {
- WL_ERR(("wsec error %d\n", err));
- return BCME_ERROR;
- }
- /* set upper-layer auth */
- err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
- if (err < 0) {
- WL_ERR(("wpa_auth error %d\n", err));
- return BCME_ERROR;
- }
-exit:
- return 0;
-}
-#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
-
-static s32
-wl_cfg80211_bcn_validate_sec(
- struct net_device *dev,
- struct parsed_ies *ies,
- u32 dev_role,
- s32 bssidx)
-{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
-
- if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
- /* For P2P GO, the sec type is WPA2-PSK */
- WL_DBG(("P2P GO: validating wpa2_ie"));
- if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0)
- return BCME_ERROR;
-
- } else if (dev_role == NL80211_IFTYPE_AP) {
-
- WL_DBG(("SoftAP: validating security"));
- /* If wpa2_ie or wpa_ie is present validate it */
-
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
- if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) {
- if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx) < 0) {
- cfg->ap_info->security_mode = false;
- return BCME_ERROR;
- }
- }
- else {
-#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
- if ((ies->wpa2_ie || ies->wpa_ie) &&
- ((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
- wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
- cfg->ap_info->security_mode = false;
- return BCME_ERROR;
- }
-
- cfg->ap_info->security_mode = true;
- if (cfg->ap_info->rsn_ie) {
- kfree(cfg->ap_info->rsn_ie);
- cfg->ap_info->rsn_ie = NULL;
- }
- if (cfg->ap_info->wpa_ie) {
- kfree(cfg->ap_info->wpa_ie);
- cfg->ap_info->wpa_ie = NULL;
- }
- if (cfg->ap_info->wps_ie) {
- kfree(cfg->ap_info->wps_ie);
- cfg->ap_info->wps_ie = NULL;
- }
- if (ies->wpa_ie != NULL) {
- /* WPAIE */
- cfg->ap_info->rsn_ie = NULL;
- cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- } else if (ies->wpa2_ie != NULL) {
- /* RSNIE */
- cfg->ap_info->wpa_ie = NULL;
- cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- }
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
- }
-#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
- if (!ies->wpa2_ie && !ies->wpa_ie) {
- wl_validate_opensecurity(dev, bssidx);
- cfg->ap_info->security_mode = false;
- }
-
- if (ies->wps_ie) {
- cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
- }
- }
-
- return 0;
-
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
-static s32 wl_cfg80211_bcn_set_params(
- struct cfg80211_ap_settings *info,
- struct net_device *dev,
- u32 dev_role, s32 bssidx)
-{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 err = BCME_OK;
-
- WL_DBG(("interval (%d) \ndtim_period (%d) \n",
- info->beacon_interval, info->dtim_period));
-
- if (info->beacon_interval) {
- if ((err = wldev_ioctl(dev, WLC_SET_BCNPRD,
- &info->beacon_interval, sizeof(s32), true)) < 0) {
- WL_ERR(("Beacon Interval Set Error, %d\n", err));
- return err;
- }
- }
-
- if (info->dtim_period) {
- if ((err = wldev_ioctl(dev, WLC_SET_DTIMPRD,
- &info->dtim_period, sizeof(s32), true)) < 0) {
- WL_ERR(("DTIM Interval Set Error, %d\n", err));
- return err;
- }
- }
-
- if ((info->ssid) && (info->ssid_len > 0) &&
- (info->ssid_len <= 32)) {
- WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
- if (dev_role == NL80211_IFTYPE_AP) {
- /* Store the hostapd SSID */
- memset(cfg->hostapd_ssid.SSID, 0x00, 32);
- memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
- cfg->hostapd_ssid.SSID_len = info->ssid_len;
- } else {
- /* P2P GO */
- memset(cfg->p2p->ssid.SSID, 0x00, 32);
- memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
- cfg->p2p->ssid.SSID_len = info->ssid_len;
- }
- }
-
- if (info->hidden_ssid) {
- if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
- WL_ERR(("failed to set hidden : %d\n", err));
- WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+ if (info->hidden_ssid) {
+ if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+ WL_ERR(("failed to set hidden : %d\n", err));
+ WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
}
return err;
}
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif
static s32
wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
}
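+/* Max time (ms) to wait for the link-up event confirming AP/GO bring-up */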
+#define MAX_AP_LINK_WAIT_TIME 10000
static s32
wl_cfg80211_bcn_bringup_ap(
struct net_device *dev,
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_join_params join_params;
+ struct wiphy *wiphy;
bool is_bssup = false;
s32 infra = 1;
s32 join_params_size = 0;
s32 ap = 1;
-#ifdef DISABLE_11H_SOFTAP
- s32 spect = 0;
-#endif /* DISABLE_11H_SOFTAP */
+ s32 pm;
+ s32 wsec;
+#ifdef SOFTAP_UAPSD_OFF
+ uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
s32 err = BCME_OK;
+ s32 is_rsdb_supported = BCME_ERROR;
+ u32 timeout;
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+
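+ /* Check whether the chip/firmware supports RSDB (real simultaneous dual band) concurrency */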
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ if (is_rsdb_supported < 0)
+ return (-ENODEV);
- WL_DBG(("Enter dev_role: %d\n", dev_role));
+ WL_DBG(("Enter dev_role:%d bssidx:%d\n", dev_role, bssidx));
/* Common code for SoftAP and P2P GO */
+ wiphy = bcmcfg_to_wiphy(cfg);
+ if (wl_check_dongle_idle(wiphy) != TRUE) {
+ WL_ERR(("FW is busy, cannot add interface"));
+ return -EINVAL;
+ }
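+ /* Disable MPC (minimum power consumption) so the radio stays active while the AP/GO is up */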
wldev_iovar_setint(dev, "mpc", 0);
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
+
if (dev_role == NL80211_IFTYPE_P2P_GO) {
is_bssup = wl_cfgp2p_bss_isup(dev, bssidx);
if (!is_bssup && (ies->wpa2_ie != NULL)) {
WL_DBG(("Bss is already up\n"));
} else if ((dev_role == NL80211_IFTYPE_AP) &&
(wl_get_drv_status(cfg, AP_CREATING, dev))) {
+
/* Device role SoftAP */
- err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
- if (err < 0) {
- WL_ERR(("WLC_DOWN error %d\n", err));
- goto exit;
- }
- err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
- if (err < 0) {
- WL_ERR(("SET INFRA error %d\n", err));
- goto exit;
- }
- if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
- WL_ERR(("setting AP mode failed %d \n", err));
- goto exit;
+ WL_DBG(("Creating AP bssidx:%d dev_role:%d\n", bssidx, dev_role));
+
+ /* Clear the status bit after use */
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+
+ /* AP on primary Interface */
+ if (bssidx == 0) {
+ if (is_rsdb_supported) {
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
+ NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl add_del_bss returned error:%d\n", err));
+ goto exit;
+ }
+ } else if (is_rsdb_supported == 0) {
+ /* AP mode switch not supported. Try setting up AP explicitly */
+ err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ goto exit;
+ }
+ err = wldev_iovar_setint(dev, "apsta", 0);
+ if (err < 0) {
+ WL_ERR(("wl apsta 0 error %d\n", err));
+ goto exit;
+ }
+
+ if ((err = wldev_ioctl(dev,
+ WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ goto exit;
+ }
+
+ }
+
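+ /* Turn off power management (PM 0) on the interface hosting the AP */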
+ pm = 0;
+ if ((err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true)) != 0) {
+ WL_ERR(("wl PM 0 returned error:%d\n", err));
+ goto exit;
+ }
+
+ err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ goto exit;
+ }
+ } else if (cfg->cfgdev_bssidx && (bssidx == cfg->cfgdev_bssidx)) {
+
+ WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx));
+
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev,
+ bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl bss ap returned error:%d\n", err));
+ goto exit;
+ }
+
}
-#ifdef DISABLE_11H_SOFTAP
- err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT,
- &spect, sizeof(s32), true);
+
+#ifdef SOFTAP_UAPSD_OFF
+ err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
if (err < 0) {
- WL_ERR(("SET SPECT_MANAGMENT error %d\n", err));
- goto exit;
+ WL_ERR(("failed to disable uapsd, error=%d\n", err));
}
-#endif /* DISABLE_11H_SOFTAP */
+#endif /* SOFTAP_UAPSD_OFF */
err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
if (unlikely(err)) {
goto exit;
}
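+ /* Read back wsec; if WEP is enabled and a key was buffered, push it to the firmware and clear the buffer */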
+ err = wldev_iovar_getint(dev, "wsec", (s32 *)&wsec);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get wsec %d\n", err));
+ goto exit;
+ }
+ if ((wsec == WEP_ENABLED) && cfg->wep_key.len) {
+ WL_DBG(("Applying buffered WEP KEY \n"));
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key,
+ sizeof(struct wl_wsec_key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ /* clear the key after use */
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+ goto exit;
+ }
+ }
+
memset(&join_params, 0, sizeof(join_params));
/* join parameters starts with ssid */
join_params_size = sizeof(join_params.ssid);
/* create softap */
if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
- join_params_size, true)) == 0) {
- WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID));
- wl_clr_drv_status(cfg, AP_CREATING, dev);
- wl_set_drv_status(cfg, AP_CREATED, dev);
+ join_params_size, true)) != 0) {
+ WL_ERR(("SoftAP/GO set ssid failed! \n"));
+ goto exit;
+ } else {
+ WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
+ }
+
+ if (bssidx != 0) {
+ /* AP on Virtual Interface */
+ if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) {
+ WL_ERR(("GO Bring up error %d\n", err));
+ goto exit;
+ }
}
- }
+ }
+ /* Wait for Linkup event to mark successful AP/GO bring up */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
+ if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) {
+ WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! \n"));
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+ err = -ENODEV;
+ goto exit;
+ }
exit:
+ if (cfg->wep_key.len)
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
return err;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
s32
wl_cfg80211_parse_ap_ies(
struct net_device *dev,
s32 err = BCME_OK;
/* Set Beacon IEs to FW */
- if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
- VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
+ VNDR_IE_BEACON_FLAG, (const u8 *)info->tail,
info->tail_len)) < 0) {
WL_ERR(("Set Beacon IE Failed \n"));
} else {
}
/* Set Probe Response IEs to FW */
- if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
WL_ERR(("Set Probe Resp IE Failed \n"));
} else {
return err;
}
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif
static s32 wl_cfg80211_hostapd_sec(
struct net_device *dev,
{
bool update_bss = 0;
struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ wl_cfgbss_t *bss = wl_get_cfgbss_by_wdev(cfg, dev->ieee80211_ptr);
+ if (!bss) {
+ WL_ERR(("cfgbss is NULL \n"));
+ return -EINVAL;
+ }
if (ies->wps_ie) {
- if (cfg->ap_info->wps_ie &&
- memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+ if (bss->wps_ie &&
+ memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
WL_DBG((" WPS IE is changed\n"));
- kfree(cfg->ap_info->wps_ie);
- cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
- } else if (cfg->ap_info->wps_ie == NULL) {
+ kfree(bss->wps_ie);
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ } else if (bss->wps_ie == NULL) {
WL_DBG((" WPS IE is added\n"));
- cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
}
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
- if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) {
- WL_ERR(("update bss - wpa_ie and wpa2_ie is not null\n"));
- if (!cfg->ap_info->security_mode) {
+ if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
+ if (!bss->security_mode) {
/* change from open mode to security mode */
update_bss = true;
- cfg->ap_info->wpa_ie =
- kmemdup(ies->wpa_ie,
+ if (ies->wpa_ie != NULL) {
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
- cfg->ap_info->rsn_ie =
- kmemdup(ies->wpa2_ie,
+ } else {
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
- } else {
- /* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */
- if (cfg->ap_info->wpa_ie) {
- if (memcmp(cfg->ap_info->wpa_ie,
- ies->wpa_ie, ies->wpa_ie->length +
- WPA_RSN_IE_TAG_FIXED_LEN)) {
- kfree(cfg->ap_info->wpa_ie);
- update_bss = true;
- cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- }
}
- else {
+ } else if (bss->wpa_ie) {
+ /* change from WPA2 mode to WPA mode */
+ if (ies->wpa_ie != NULL) {
update_bss = true;
- cfg->ap_info->wpa_ie =
- kmemdup(ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- }
- if (cfg->ap_info->rsn_ie) {
- if (memcmp(cfg->ap_info->rsn_ie,
- ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
- update_bss = true;
- kfree(cfg->ap_info->rsn_ie);
- cfg->ap_info->rsn_ie =
- kmemdup(ies->wpa2_ie,
- ies->wpa2_ie->len +
- WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- }
- }
- else {
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else if (memcmp(bss->rsn_ie,
+ ies->wpa2_ie, ies->wpa2_ie->len
+ + WPA_RSN_IE_TAG_FIXED_LEN)) {
update_bss = true;
- cfg->ap_info->rsn_ie =
- kmemdup(ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ bss->wpa_ie = NULL;
}
}
- WL_ERR(("update_bss=%d\n", update_bss));
if (update_bss) {
- cfg->ap_info->security_mode = true;
+ bss->security_mode = true;
wl_cfgp2p_bss(cfg, dev, bssidx, 0);
- if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie,
- ies->wpa2_ie, bssidx) < 0) {
- return BCME_ERROR;
- }
- wl_cfgp2p_bss(cfg, dev, bssidx, 1);
- }
-
- }
- else
-#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
- if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
- if (!cfg->ap_info->security_mode) {
- /* change from open mode to security mode */
- update_bss = true;
- if (ies->wpa_ie != NULL) {
- cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- } else {
- cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- }
- } else if (cfg->ap_info->wpa_ie) {
- /* change from WPA2 mode to WPA mode */
- if (ies->wpa_ie != NULL) {
- update_bss = true;
- kfree(cfg->ap_info->rsn_ie);
- cfg->ap_info->rsn_ie = NULL;
- cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- } else if (memcmp(cfg->ap_info->rsn_ie,
- ies->wpa2_ie, ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN)) {
- update_bss = true;
- kfree(cfg->ap_info->rsn_ie);
- cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
- GFP_KERNEL);
- cfg->ap_info->wpa_ie = NULL;
- }
- }
- if (update_bss) {
- cfg->ap_info->security_mode = true;
- wl_cfgp2p_bss(cfg, dev, bssidx, 0);
- if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
- wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
+ if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
+ wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
return BCME_ERROR;
}
wl_cfgp2p_bss(cfg, dev, bssidx, 1);
#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
2, 0))
static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+wl_cfg80211_del_station(
+ struct wiphy *wiphy, struct net_device *ndev,
+ struct station_del_parameters *params)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_del_station(
+ struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8* mac_addr)
+#else
wl_cfg80211_del_station(
struct wiphy *wiphy,
struct net_device *ndev,
u8* mac_addr)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
{
struct net_device *dev;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct maclist *assoc_maclist = (struct maclist *)mac_buf;
int num_associated = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+ const u8 *mac_addr = params->mac;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
WL_DBG(("Entry\n"));
if (mac_addr == NULL) {
WL_DBG(("mac_addr is NULL ignore it\n"));
sizeof(scb_val_t), true);
if (err < 0)
WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
- printf("Disconnect STA : %s scb_val.val %d\n",
+ WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
- scb_val.val);
+ scb_val.val));
if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
wl_delay(400);
}
static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_change_station(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *mac,
+ struct station_parameters *params)
+#else
wl_cfg80211_change_station(
struct wiphy *wiphy,
struct net_device *dev,
u8 *mac,
struct station_parameters *params)
+#endif
{
int err;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
+ "sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
+ params->sta_flags_mask, params->sta_flags_set, dev->name));
+
/* Processing only authorize/de-authorize flag for now */
- if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+ WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
return -ENOTSUPP;
+ }
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN, true);
+#else
err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true);
+#endif
if (err)
WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
return err;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN, true);
+#else
err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true);
+#endif
if (err)
WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif
return err;
}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+static s32
+wl_cfg80211_set_scb_timings(
+ struct bcm_cfg80211 *cfg,
+ struct net_device *dev)
+{
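+ /* Configure per-station (SCB) probe timings and the ps-pretend threshold for associated clients */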
+ int err;
+ u32 ps_pretend;
+ wl_scb_probe_t scb_probe;
+
+ bzero(&scb_probe, sizeof(wl_scb_probe_t));
+ scb_probe.scb_timeout = WL_SCB_TIMEOUT;
+ scb_probe.scb_activity_time = WL_SCB_ACTIVITY_TIME;
+ scb_probe.scb_max_probe = WL_SCB_MAX_PROBE;
+ err = wldev_iovar_setbuf(dev, "scb_probe", (void *)&scb_probe,
+ sizeof(wl_scb_probe_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("set 'scb_probe' failed, error = %d\n", err));
+ return err;
+ }
+
+ ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD);
+ err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend);
+ if (unlikely(err)) {
+ if (err == BCME_UNSUPPORTED) {
+ /* Ignore error if fw doesn't support the iovar */
+ WL_DBG(("wl pspretend_threshold %d set error %d\n",
+ ps_pretend, err));
+ } else {
+ WL_ERR(("wl pspretend_threshold %d set error %d\n",
+ ps_pretend, err));
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
static s32
wl_cfg80211_start_ap(
struct wiphy *wiphy,
struct parsed_ies ies;
s32 bssidx = 0;
u32 dev_role = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_DBG(("Enter \n"));
- if (dev == bcmcfg_to_prmry_ndev(cfg)) {
- WL_DBG(("Start AP req on primary iface: Softap\n"));
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+ if ((dev == bcmcfg_to_prmry_ndev(cfg)) ||
+ (dev == ((struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev)))) {
+ WL_DBG(("Start AP req on iface: %s \n", dev->name));
dev_role = NL80211_IFTYPE_AP;
}
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
WL_DBG(("Start AP req on P2P iface: GO\n"));
-#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- if (p2p_is_on(cfg) &&
- (bssidx == wl_to_p2p_bss_bssidx(cfg,
- P2PAPI_BSSCFG_CONNECTION))) {
+
+ if (p2p_is_on(cfg) && (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
dev_role = NL80211_IFTYPE_P2P_GO;
- WL_DBG(("Start AP req on P2P connection iface\n"));
+ } else if (dev_role == NL80211_IFTYPE_AP) {
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+ /*
+ * Enabling SoftAP causes issues with STA NDO operations,
+ * as NDO is not interface-specific, so disable NDO while
+ * SoftAP is enabled.
+ */
+ err = dhd_ndo_enable(dhd, FALSE);
+ WL_DBG(("%s: Disabling NDO in Hostapd mode %d\n", __FUNCTION__, err));
+ if (err) {
+ /* Non fatal error. */
+ WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err));
+ } else {
+ cfg->revert_ndo_disable = true;
+ }
+
+#ifdef PKT_FILTER_SUPPORT
+ /* Disable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Disable pkt_filter\n"));
+ dhd_enable_packet_filter(0, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* If SoftAP is enabled, disable arpoe */
+ dhd_arp_offload_set(dhd, 0);
+ dhd_arp_offload_enable(dhd, FALSE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+ if ((dhd->op_mode & DHD_FLAG_STA_MODE) && wl_cfg80211_is_roam_offload()) {
+ WL_ERR(("Clear roam_offload_bssid_list in STA-SoftAP mode.\n"));
+ wl_android_set_roam_offload_bssid_list(dev, "0");
+ }
+ } else {
+ /* Only the AP or GO role needs to be handled here. */
+ err = -EINVAL;
+ goto fail;
}
- if (!check_dev_role_integrity(cfg, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -EINVAL;
goto fail;
+ }
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
if ((err = wl_cfg80211_set_channel(wiphy, dev,
dev->ieee80211_ptr->preset_chandef.chan,
- dev->ieee80211_ptr->preset_chandef) < 0)) {
+ NL80211_CHAN_HT20) < 0)) {
WL_ERR(("Set channel failed \n"));
goto fail;
}
-#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
+#endif
if ((err = wl_cfg80211_bcn_set_params(info, dev,
dev_role, bssidx)) < 0) {
goto fail;
}
- if ((wl_cfg80211_bcn_validate_sec(dev, &ies,
- dev_role, bssidx)) < 0)
+ if ((err = wl_cfg80211_bcn_validate_sec(dev, &ies,
+ dev_role, bssidx, info->privacy)) < 0)
{
WL_ERR(("Beacon set security failed \n"));
goto fail;
goto fail;
}
+ /* Set GC/STA SCB expiry timings. */
+ if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+ WL_ERR(("scb setting failed \n"));
+ goto fail;
+ }
+
WL_DBG(("** AP/GO Created **\n"));
#ifdef WL_CFG80211_ACL
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
wldev_iovar_setint(dev, "mpc", 1);
+ if (dev_role == NL80211_IFTYPE_AP) {
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Enable pkt_filter\n"));
+ dhd_enable_packet_filter(1, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+ }
}
return err;
int ap = 0;
s32 bssidx = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 is_rsdb_supported = BCME_ERROR;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_DBG(("Enter \n"));
- if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ if (is_rsdb_supported < 0)
+ return (-ENODEV);
+
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
+ cfg->ap_oper_channel = 0;
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
dev_role = NL80211_IFTYPE_AP;
- }
-#if defined(WL_ENABLE_P2P_IF)
- else if (dev == cfg->p2p_net) {
- /* Group Add request on p2p0 */
-#ifndef P2PONEINT
- dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
+ WL_DBG(("stopping AP operation\n"));
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
dev_role = NL80211_IFTYPE_P2P_GO;
+ WL_DBG(("stopping P2P GO operation\n"));
+ } else {
+ WL_ERR(("no AP/P2P GO interface is operational.\n"));
+ return -EINVAL;
}
-#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- if (p2p_is_on(cfg) &&
- (bssidx == wl_to_p2p_bss_bssidx(cfg,
- P2PAPI_BSSCFG_CONNECTION))) {
- dev_role = NL80211_IFTYPE_P2P_GO;
- }
- if (!check_dev_role_integrity(cfg, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ WL_ERR(("role integrity check failed \n"));
+ err = -EINVAL;
goto exit;
+ }
+
+ if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 0)) < 0) {
+ WL_ERR(("bss down error %d\n", err));
+ }
if (dev_role == NL80211_IFTYPE_AP) {
- /* SoftAp on primary Interface.
- * Shut down AP and turn on MPC
- */
- if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32), true)) < 0) {
- WL_ERR(("setting AP mode failed %d \n", err));
- err = -ENOTSUPP;
- goto exit;
- }
- err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
- if (err < 0) {
- WL_ERR(("SET INFRA error %d\n", err));
- err = -ENOTSUPP;
- goto exit;
+ if (cfg->revert_ndo_disable == true) {
+ err = dhd_ndo_enable(dhd, TRUE);
+ WL_DBG(("%s: Re-enabling NDO after SoftAP turn-off %d\n",
+ __FUNCTION__, err));
+ if (err) {
+ WL_ERR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, err));
+ }
+ cfg->revert_ndo_disable = false;
}
- err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
- if (unlikely(err)) {
- WL_ERR(("WLC_UP error (%d)\n", err));
- err = -EINVAL;
- goto exit;
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter */
+ if (dhd->early_suspended) {
+ WL_ERR(("Enable pkt_filter\n"));
+ dhd_enable_packet_filter(1, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* If SoftAP is disabled, re-enable arpoe for STA mode. */
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ dhd_arp_offload_enable(dhd, TRUE);
+#endif /* ARP_OFFLOAD_SUPPORT */
+ /*
+ * Bring down the AP interface by changing role to STA.
+ * Don't do a down or "WLC_SET_AP 0" since the shared
+ * interface may still be running
+ */
+ if (is_rsdb_supported) {
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev,
+ bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) {
+ if ((err = wldev_ioctl(dev, WLC_SET_AP, &ap, sizeof(s32),
+ true)) < 0) {
+ WL_ERR(("setting AP mode failed %d \n", err));
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ }
+ } else if (is_rsdb_supported == 0) {
+ err = wldev_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(s32), true);
+ if (err < 0) {
+ WL_ERR(("SET INFRA error %d\n", err));
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ err = wldev_ioctl(dev, WLC_UP, &ap, sizeof(s32), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
+ err = -EINVAL;
+ goto exit;
+ }
}
- wl_clr_drv_status(cfg, AP_CREATED, dev);
/* Turn on the MPC */
wldev_iovar_setint(dev, "mpc", 1);
- if (cfg->ap_info) {
- kfree(cfg->ap_info->wpa_ie);
- kfree(cfg->ap_info->rsn_ie);
- kfree(cfg->ap_info->wps_ie);
- kfree(cfg->ap_info);
- cfg->ap_info = NULL;
- }
+
+ wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
} else {
WL_DBG(("Stopping P2P GO \n"));
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
}
exit:
+
+ if (dev_role == NL80211_IFTYPE_AP) {
+ /* clear the AP mode */
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+ }
return err;
}
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
-#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- if (p2p_is_on(cfg) &&
- (bssidx == wl_to_p2p_bss_bssidx(cfg,
- P2PAPI_BSSCFG_CONNECTION))) {
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
dev_role = NL80211_IFTYPE_P2P_GO;
}
- if (!check_dev_role_integrity(cfg, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -EINVAL;
goto fail;
+ }
if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
WL_ERR(("P2P already down status!\n"));
struct parsed_ies ies;
bcm_tlv_t *ssid_ie;
bool pbc = 0;
+ bool privacy;
+ bool is_bss_up = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
info->interval, info->dtim_period, info->head_len, info->tail_len));
#if defined(WL_ENABLE_P2P_IF)
else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
-#ifndef P2PONEINT
dev = bcmcfg_to_prmry_ndev(cfg);
-#endif
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- if (p2p_is_on(cfg) &&
- (bssidx == wl_to_p2p_bss_bssidx(cfg,
- P2PAPI_BSSCFG_CONNECTION))) {
+
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
dev_role = NL80211_IFTYPE_P2P_GO;
+ } else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
}
- if (!check_dev_role_integrity(cfg, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role)) {
+ err = -ENODEV;
goto fail;
+ }
if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
WL_ERR(("P2P already down status!\n"));
goto fail;
}
- if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
- info->tail_len) < 0) {
+ info->tail_len)) < 0) {
WL_ERR(("Beacon set IEs failed \n"));
goto fail;
} else {
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
- if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+ if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
- info->proberesp_ies_len) < 0) {
+ info->proberesp_ies_len)) < 0) {
WL_ERR(("ProbeRsp set IEs failed \n"));
goto fail;
} else {
}
#endif
- if (!wl_cfgp2p_bss_isup(dev, bssidx) &&
- (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0))
+ is_bss_up = wl_cfgp2p_bss_isup(dev, bssidx);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ privacy = info->privacy;
+#else
+ privacy = 0;
+#endif
+ if (!is_bss_up &&
+ (wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0))
{
WL_ERR(("Beacon set security failed \n"));
+ err = -EINVAL;
goto fail;
}
}
}
- if (wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx) < 0) {
+ /* If bss is already up, skip bring up */
+ if (!is_bss_up &&
+ (err = wl_cfg80211_bcn_bringup_ap(dev, &ies, dev_role, bssidx)) < 0)
+ {
WL_ERR(("Beacon bring up AP/GO failed \n"));
goto fail;
}
+ /* Set GC/STA SCB expiry timings. */
+ if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
+ WL_ERR(("scb setting failed \n"));
+ goto fail;
+ }
+
if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
/* Soft AP already running. Update changed params */
if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
wldev_iovar_setint(dev, "mpc", 1);
+ if (dev_role == NL80211_IFTYPE_AP) {
+ /* clear the AP mode */
+ dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
+ }
}
return err;
}
-#endif /* LINUX_VERSION < VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif
#ifdef WL_SCHED_SCAN
#define PNO_TIME 30
#define PNO_REPEAT 4
#define PNO_FREQ_EXPO_MAX 2
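+/* Helper: returns TRUE when the given SSID appears in ssid_list; used below to mark hidden SSIDs in the PNO list */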
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+ int i;
+
+ if (!ssid || !ssid_list)
+ return FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (ssid->ssid_len == ssid_list[i].ssid_len) {
+ if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
static int
wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
ushort pno_time = PNO_TIME;
int pno_repeat = PNO_REPEAT;
int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
- wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct cfg80211_ssid *ssid = NULL;
- int ssid_count = 0;
+ struct cfg80211_ssid *hidden_ssid_list = NULL;
+ int ssid_cnt = 0;
int i;
int ret = 0;
+ if (!request) {
+ WL_ERR(("Sched scan request was NULL\n"));
+ return -EINVAL;
+ }
+
WL_DBG(("Enter \n"));
WL_PNO((">>> SCHED SCAN START\n"));
WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
- if (!request || !request->n_ssids || !request->n_match_sets) {
+ if (!request->n_ssids || !request->n_match_sets) {
WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
return -EINVAL;
}
memset(&ssids_local, 0, sizeof(ssids_local));
- if (request->n_match_sets > 0) {
- for (i = 0; i < request->n_match_sets; i++) {
- ssid = &request->match_sets[i].ssid;
- memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len);
- ssids_local[i].SSID_len = ssid->ssid_len;
- WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid));
- ssid_count++;
- }
- }
-
if (request->n_ssids > 0) {
- for (i = 0; i < request->n_ssids; i++) {
- /* Active scan req for ssids */
- WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid));
-
- /* match_set ssids is a supert set of n_ssid list, so we need
- * not add these set seperately
- */
+ hidden_ssid_list = request->ssids;
+ }
+
+ for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+ ssid = &request->match_sets[i].ssid;
+ /* No need to include null ssid */
+ if (ssid->ssid_len) {
+ memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid, ssid->ssid_len);
+ ssids_local[ssid_cnt].SSID_len = ssid->ssid_len;
+ if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+ ssids_local[ssid_cnt].hidden = TRUE;
+ WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+ } else {
+ ssids_local[ssid_cnt].hidden = FALSE;
+ WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+ }
+ ssid_cnt++;
}
}
- if (ssid_count) {
- if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, request->n_match_sets,
+ if (ssid_cnt) {
+ if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
WL_ERR(("PNO setup failed!! ret=%d \n", ret));
return -EINVAL;
if (retry <= 0) {
WL_ERR(("failure, dump_obss IOVAR failed\n"));
- err = -BCME_ERROR;
+ err = -EINVAL;
goto exit;
}
.mgmt_tx = wl_cfg80211_mgmt_tx,
.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
.change_bss = wl_cfg80211_change_bss,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
.set_channel = wl_cfg80211_set_channel,
-#endif /* ((LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
.set_beacon = wl_cfg80211_add_set_beacon,
.add_beacon = wl_cfg80211_add_set_beacon,
#else
.change_beacon = wl_cfg80211_change_beacon,
.start_ap = wl_cfg80211_start_ap,
.stop_ap = wl_cfg80211_stop_ap,
-#endif /* LINUX_VERSION < KERNEL_VERSION(3,4,0) && !WL_COMPAT_WIRELESS */
+#endif
#ifdef WL_SCHED_SCAN
.sched_scan_start = wl_cfg80211_sched_scan_start,
.sched_scan_stop = wl_cfg80211_sched_scan_stop,
.change_station = wl_cfg80211_change_station,
.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
.tdls_mgmt = wl_cfg80211_tdls_mgmt,
.tdls_oper = wl_cfg80211_tdls_oper,
-#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
+#endif
#ifdef WL_SUPPORT_ACS
.dump_survey = wl_cfg80211_dump_survey,
#endif /* WL_SUPPORT_ACS */
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
-static int
-wl_cfg80211_reg_notifier(
- struct wiphy *wiphy,
- struct regulatory_request *request)
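+/* reg_notifier() is declared to return int before kernel 3.9 and void afterwards; the macro picks the matching prototype */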
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
+#define WL_CFG80211_REG_NOTIFIER() static int wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#else
+#define WL_CFG80211_REG_NOTIFIER() static void wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+#endif /* kernel version < 3.9.0 */
+#endif
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+WL_CFG80211_REG_NOTIFIER()
{
struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
int ret = 0;
+ int revinfo = -1;
if (!request || !cfg) {
WL_ERR(("Invalid arg\n"));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
return -EINVAL;
+#else
+ return;
+#endif /* kernel version < 3.9.0 */
}
WL_DBG(("ccode: %c%c Initiator: %d\n",
((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User")));
if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
- false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) {
+ false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false),
+ revinfo)) < 0) {
WL_ERR(("set country Failed :%d\n", ret));
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
return ret;
+#else
+ return;
+#endif /* kernel version < 3.9.0 */
}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
#ifdef CONFIG_PM
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
static const struct wiphy_wowlan_support brcm_wowlan_support = {
.flags = WIPHY_WOWLAN_ANY,
+ .n_patterns = WL_WOWLAN_MAX_PATTERNS,
+ .pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN,
+ .pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ .max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
+#if 0
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static struct cfg80211_wowlan brcm_wowlan_config = {
+ .disconnect = true,
+ .gtk_rekey_failure = true,
+ .eap_identity_req = true,
+ .four_way_handshake = true,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif
#endif /* CONFIG_PM */
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
{
s32 err = 0;
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
dhd_pub_t *dhd = (dhd_pub_t *)context;
BCM_REFERENCE(dhd);
#endif /* !WL_POWERSAVE_DISABLED */
wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
#endif
WIPHY_FLAG_4ADDR_STATION;
-#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && ((LINUX_VERSION_CODE >= \
- KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)) && !0
- /* Please use supplicant ver >= 76 if FW_ROAM is enabled
- * If driver advertises FW_ROAM, older supplicant wouldn't
- * send the BSSID & Freq in the connect req command. This
- * will delay the ASSOC as the FW need to do a full scan
- * before attempting to connect. Supplicant >=76 has patch
- * to allow bssid & freq to be sent down to driver even if
- * FW ROAM is advertised.
+#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(3, 2, 0)))
+ /*
+ * If the FW ROAM flag is advertised, the upper layer won't provide
+ * the bssid & freq in the connect command. This results in a delay
+ * in initial connection time because the firmware must do a full
+ * channel scan to figure out the channel & bssid. Kernel versions
+ * >= 3.15 provide bssid_hint & freq_hint, so they don't have this
+ * issue. If this flag needs to be advertised for kernels < 3.15,
+ * use RCC along with it to avoid the initial connection delay.
*/
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+#ifdef UNSET_FW_ROAM_WIPHY_FLAG
+ wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif /* UNSET_FW_ROAM_WIPHY_FLAG */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_OFFCHAN_TX;
#endif
wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
#endif
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
/* Supplicant distinguish between the SoftAP mode and other
* modes (e.g. P2P, WPS, HS2.0) when it builds the probe
* response frame from Supplicant MR1 and Kernel 3.4.0 or
#endif
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
-#ifdef CONFIG_CFG80211_INTERNAL_REGDB
- wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
-#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
wdev->wiphy->wowlan = &brcm_wowlan_support;
+ /* If this is not provided, the cfg80211 stack will get disconnected
+ * during suspend.
+ */
+ //wdev->wiphy->wowlan_config = &brcm_wowlan_config;
#else
wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 10) */
+ wdev->wiphy->wowlan.n_patterns = WL_WOWLAN_MAX_PATTERNS;
+ wdev->wiphy->wowlan.pattern_min_len = WL_WOWLAN_MIN_PATTERN_LEN;
+ wdev->wiphy->wowlan.pattern_max_len = WL_WOWLAN_MAX_PATTERN_LEN;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ wdev->wiphy->wowlan.max_pkt_offset = WL_WOWLAN_MAX_PATTERN_LEN;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
WL_DBG(("Registering custom regulatory)\n"));
wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
#endif
wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
-
- WL_DBG(("Registering Vendor80211)\n"));
- err = cfgvendor_attach(wdev->wiphy);
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ WL_ERR(("Registering Vendor80211\n"));
+ err = wl_cfgvendor_attach(wdev->wiphy);
if (unlikely(err < 0)) {
WL_ERR(("Could not attach vendor commands (%d)\n", err));
}
-
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
/* Now we can register wiphy with cfg80211 module */
err = wiphy_register(wdev->wiphy);
if (unlikely(err < 0)) {
static void wl_free_wdev(struct bcm_cfg80211 *cfg)
{
struct wireless_dev *wdev = cfg->wdev;
- struct wiphy *wiphy;
+ struct wiphy *wiphy = NULL;
if (!wdev) {
WL_ERR(("wdev is invalid\n"));
return;
}
- wiphy = wdev->wiphy;
+ if (wdev->wiphy) {
+ wiphy = wdev->wiphy;
- cfgvendor_detach(wdev->wiphy);
-
- wiphy_unregister(wdev->wiphy);
- wdev->wiphy->dev.parent = NULL;
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ wl_cfgvendor_detach(wdev->wiphy);
+#endif /* if defined(WL_VENDOR_EXT_SUPPORT) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */
+ WL_DBG(("wl_free_wdev Clearing wowlan Config \n"));
+ wdev->wiphy->wowlan = NULL;
+ wdev->wiphy->wowlan_config = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+ wiphy_unregister(wdev->wiphy);
+ wdev->wiphy->dev.parent = NULL;
+ wdev->wiphy = NULL;
+ }
wl_delete_all_netinfo(cfg);
- wiphy_free(wiphy);
+ if (wiphy)
+ wiphy_free(wiphy);
+
/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg",
* which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
*/
bss_list = cfg->bss_list;
-#if defined(BSSCACHE)
+ /* Free cache in p2p scanning */
if (p2p_is_on(cfg) && p2p_scan(cfg)) {
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
#endif
+#if defined(BSSCACHE)
wl_free_bss_cache(&g_bss_cache_ctrl);
+#endif
}
- wl_update_bss_cache(&g_bss_cache_ctrl, bss_list);
- wl_delete_dirty_bss_cache(&g_bss_cache_ctrl);
- wl_reset_bss_cache(&g_bss_cache_ctrl);
+
+ /* Delete disconnected cache */
+#if defined(BSSCACHE)
+ wl_delete_disconnected_bss_cache(&g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#if defined(RSSIAVG)
+ wl_delete_disconnected_rssi_cache(&g_rssi_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#endif
+ if (cfg->p2p_disconnected == 0)
+ memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
#endif
+ /* Update cache */
#if defined(RSSIAVG)
-#if defined(BSSCACHE)
- node = g_bss_cache_ctrl.m_cache_head;
- for (;node;) {
- wl_update_rssi_cache(&g_rssi_cache_ctrl, &node->results);
- node = node->next;
- }
-#else
wl_update_rssi_cache(&g_rssi_cache_ctrl, bss_list);
-#endif
if (!in_atomic())
wl_update_connected_rssi_cache(ndev, &g_rssi_cache_ctrl, &rssi);
- wl_delete_dirty_rssi_cache(&g_rssi_cache_ctrl);
- wl_reset_rssi_cache(&g_rssi_cache_ctrl);
#endif
-
#if defined(BSSCACHE)
- if (p2p_disconnected > 0) {
- // terence 20130703: Fix for wrong group_capab (timing issue)
- wl_delete_disconnected_bss_cache(&g_bss_cache_ctrl, (u8*)&p2p_disconnected_bssid);
+ wl_update_bss_cache(&g_bss_cache_ctrl,
#if defined(RSSIAVG)
- wl_delete_disconnected_rssi_cache(&g_rssi_cache_ctrl, (u8*)&p2p_disconnected_bssid);
+ &g_rssi_cache_ctrl,
#endif
- }
- WL_SCAN(("Inform cached AP list\n"));
- node = g_bss_cache_ctrl.m_cache_head;
- for (i=0; node && i<WL_AP_MAX; i++) {
- if (node->dirty > 1) {
- // just inform dirty bss
- bi = node->results.bss_info;
- err = wl_inform_single_bss(cfg, bi, false);
- }
- node = node->next;
- }
- bi = NULL;
+ bss_list);
#endif
- WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+ /* delete dirty cache */
+#if defined(RSSIAVG)
+ wl_delete_dirty_rssi_cache(&g_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_delete_dirty_bss_cache(&g_bss_cache_ctrl);
+ wl_reset_bss_cache(&g_bss_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ if (cfg->p2p_disconnected > 0) {
+ // terence 20130703: Fix for wrong group_capab (timing issue)
+ wl_delete_disconnected_bss_cache(&g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#if defined(RSSIAVG)
+ wl_delete_disconnected_rssi_cache(&g_rssi_cache_ctrl, (u8*)&cfg->disconnected_bssid);
+#endif
+ }
+ WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+ node = g_bss_cache_ctrl.m_cache_head;
+ for (i=0; node && i<WL_AP_MAX; i++) {
+ bi = node->results.bss_info;
+ err = wl_inform_single_bss(cfg, bi, false);
+ node = node->next;
+ }
+#else
+ WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
- if (p2p_disconnected > 0 && !memcmp(&bi->BSSID, &p2p_disconnected_bssid, ETHER_ADDR_LEN))
+ if (cfg->p2p_disconnected > 0 && !memcmp(&bi->BSSID, &cfg->disconnected_bssid, ETHER_ADDR_LEN))
continue;
err = wl_inform_single_bss(cfg, bi, false);
}
+#endif
- if (p2p_disconnected > 0) {
+ if (cfg->p2p_disconnected > 0) {
// terence 20130703: Fix for wrong group_capab (timing issue)
- p2p_disconnected++;
- if (p2p_disconnected >= REPEATED_SCAN_RESULT_CNT+1)
- p2p_disconnected = 0;
+ cfg->p2p_disconnected++;
+ if (cfg->p2p_disconnected >= REPEATED_SCAN_RESULT_CNT+1) {
+ cfg->p2p_disconnected = 0;
+ memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
+ }
}
return err;
offsetof(struct wl_cfg80211_bss_info, frame_buf));
notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
u.beacon.variable) + wl_get_ielen(cfg);
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
(void)band->band;
#else
return -EINVAL;
}
channel = ieee80211_get_channel(wiphy, freq);
- WL_SCAN(("BSSID %pM, channel %d, rssi %d, capa 0x04%x, mgmt_type %d, "
+	WL_SCAN(("BSSID %pM, channel %2d, rssi %3d, capa 0x%04x, mgmt_type %d, "
"frame_len %d, SSID \"%s\"\n", &bi->BSSID, notif_bss_info->channel,
notif_bss_info->rssi, mgmt->u.beacon.capab_info, mgmt_type,
notif_bss_info->frame_len, bi->SSID));
return -EINVAL;
}
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
cfg80211_put_bss(wiphy, cbss);
#else
u32 event = ntoh32(e->event_type);
u32 reason = ntoh32(e->reason);
u32 len = ntoh32(e->datalen);
+ u32 status = ntoh32(e->status);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
- && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
bool isfree = false;
u8 *mgmt_frame;
u8 bsscfgidx = e->bsscfgidx;
channel_info_t ci;
#else
struct station_info sinfo;
-#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+#endif
WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
/* if link down, bsscfg is disabled. */
return 0;
}
+ if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
+ (reason == WLC_E_REASON_INITIAL_ASSOC) &&
+ (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
+ if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+			/* AP/GO brought up successfully in firmware */
+ WL_ERR(("** AP/GO Link up event **\n"));
+ wl_set_drv_status(cfg, AP_CREATED, ndev);
+ wake_up_interruptible(&cfg->netif_change_event);
+ return 0;
+ }
+ }
+
if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
WL_ERR(("event %s(%d) status %d reason %d\n",
bcmevent_get_name(event), event, ntoh32(e->status), reason));
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
- && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
WL_DBG(("Enter \n"));
if (!len && (event == WLC_E_DEAUTH)) {
len = 2; /* reason code field */
kfree(body);
return -EINVAL;
}
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
freq = ieee80211_channel_to_frequency(channel);
(void)band->band;
#else
isfree = true;
if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && (LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 18, 0)))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+#endif
} else if (event == WLC_E_DISASSOC_IND) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+#endif
} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+#endif
}
exit:
sinfo.filled = 0;
if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
reason == DOT11_SC_SUCCESS) {
- sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+		/* From Linux 4.0 onwards, assoc_req_ies_len is used instead of
+		 * the STATION_INFO_ASSOC_REQ_IES flag
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+ sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */
if (!data) {
WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
return -EINVAL;
printf("%s: deauthenticated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
}
-#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+#endif
return err;
}
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define MAX_ASSOC_REJECT_ERR_STATUS 5
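+/* Map the most recent auth/assoc failure status (saved in
+ * cfg->event_auth_assoc) to a small code 1..5; assoc-stage failures are
+ * offset by MAX_ASSOC_REJECT_ERR_STATUS so they can be told apart from
+ * auth-stage failures in the "assoc_reject.status" report.
+ */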
+int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
+{
+ u32 status = ntoh32(e->status);
+
+ cfg->assoc_reject_status = 0;
+
+ if (status == WLC_E_STATUS_FAIL) {
+ WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ (int)ntoh32(cfg->event_auth_assoc.status),
+ (int)ntoh32(cfg->event_auth_assoc.reason)));
+
+ switch ((int)ntoh32(cfg->event_auth_assoc.status)) {
+ case WLC_E_STATUS_NO_ACK:
+ cfg->assoc_reject_status = 1;
+ break;
+ case WLC_E_STATUS_FAIL:
+ cfg->assoc_reject_status = 2;
+ break;
+ case WLC_E_STATUS_UNSOLICITED:
+ cfg->assoc_reject_status = 3;
+ break;
+ case WLC_E_STATUS_TIMEOUT:
+ cfg->assoc_reject_status = 4;
+ break;
+ case WLC_E_STATUS_ABORT:
+ cfg->assoc_reject_status = 5;
+ break;
+ default:
+ break;
+ }
+ if (cfg->assoc_reject_status) {
+ if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) {
+ cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS;
+ }
+ }
+ }
+
+ WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status));
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ int bytes_written = 0;
+
+ cfg = g_bcm_cfg;
+
+ if (cfg == NULL) {
+ return -1;
+ }
+
+ memset(cmd, 0, total_len);
+ bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return bytes_written;
+}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
static s32
wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e)
u32 event = ntoh32(e->event_type);
struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
WL_DBG(("event type : %d, reason : %d\n", event, reason));
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t));
+ WL_ERR(("event=%d status %d reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ ntoh32(cfg->event_auth_assoc.status),
+ ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
if (sec) {
switch (event) {
case WLC_E_ASSOC:
return err;
}
WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
- MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr)));
+ MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr)));
wl_get_assoc_ies(cfg, ndev);
- wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
- cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
- cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
#endif
}
else {
/* New connection */
- WL_INFORM(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
+ WL_INFORM(("IBSS connected to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)&e->addr)));
wl_link_up(cfg);
wl_get_assoc_ies(cfg, ndev);
- wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
- cfg80211_ibss_joined(ndev, (s8 *)&e->addr, channel, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
- cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
#endif
wl_set_drv_status(cfg, CONNECTED, ndev);
active = true;
- wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT);
}
} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
return err;
}
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-FiAll OUI */
+#define WiFiALL_OUI_LEN 3
+#define WiFiALL_OUI_TYPE 16
+
+int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac)
+{
+ s32 err = 0;
+ struct wl_bss_info *bi;
+ uint8 eabuf[ETHER_ADDR_LEN];
+ u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
+ char rate_str[4];
+ u8 *ie = NULL;
+ u32 ie_len;
+ struct wiphy *wiphy;
+ struct cfg80211_bss *bss;
+ bcm_tlv_t *interworking_ie = NULL;
+ bcm_tlv_t *tlv_ie = NULL;
+ bcm_tlv_t *vht_ie = NULL;
+ vndr_ie_t *vndrie;
+ int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1;
+ u32 i, remained_len, count = 0;
+ char roam_count_str[4], akm_str[4];
+ s32 val = 0;
+
+ /* get BSS information */
+
+ strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN);
+
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+
+ err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ if (!mac) {
+ WL_ERR(("mac is null \n"));
+ cfg->roam_count = 0;
+ return -1;
+ }
+
+ memcpy(eabuf, mac, ETHER_ADDR_LEN);
+
+ bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(bi->chanspec);
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ if (channel > 14) {
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ } else {
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ }
+#endif
+
+ err = wldev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate), false);
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ snprintf(rate_str, sizeof(rate_str), "x"); // Unknown
+
+ } else {
+ rate = dtoh32(rate);
+ snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
+ }
+
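+	/* Derive the 802.11 mode and stream count from the rate set and the
+	 * HT/VHT capability: mode_80211 0 = 11b, 1 = 11g, 2 = 11n, 3 = 11a,
+	 * 4 = 11ac; nss is reported as (stream count - 1) and stays 0 for
+	 * legacy (non-HT/VHT) APs.
+	 */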
+ //supported maximum rate
+ supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
+
+ if (supported_rate < 12) {
+ mode_80211 = 0; //11b maximum rate is 11Mbps. 11b mode
+ } else {
+		// Not an HT-capable AP; choose 11a or 11g by band.
+ if (channel > 14) {
+ mode_80211 = 3; // 11a mode
+ } else {
+ mode_80211 = 1; // 11g mode
+ }
+ }
+
+ if (bi->n_cap) {
+ /* check Rx MCS Map for HT */
+ nss = 0;
+ mode_80211 = 2;
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = 0xFF;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = 0x7F;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
+ }
+
+ if (bi->vht_cap) {
+ nss = 0;
+ mode_80211 = 4;
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ }
+
+ if (nss) {
+ nss = nss - 1;
+ }
+
+ wiphy = bcmcfg_to_wiphy(cfg);
+ bss = cfg80211_get_bss(wiphy, NULL, eabuf,
+ bi->SSID, strlen(bi->SSID), WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ if (!bss) {
+ WL_ERR(("Could not find the AP\n"));
+ } else {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
+#else
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
+
+ if (ie) {
+ ie_mu_mimo_cap = 0;
+ ie_11u_rel_num = 0;
+
+ if (bi->vht_cap) {
+ if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_VHT_CAP_ID)) != NULL) {
+ ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
+ }
+ }
+
+ if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_INTERWORKING_ID)) != NULL) {
+ if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) {
+ remained_len = ie_len;
+
+ while (tlv_ie) {
+ if (count > MAX_VNDR_IE_NUMBER)
+ break;
+
+ if (tlv_ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) tlv_ie;
+
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+							WL_ERR(("%s: invalid vndr ie. "
+ "length is too small %d\n",
+ __FUNCTION__, vndrie->len));
+ break;
+ }
+
+ if (!bcmp(vndrie->oui,
+ (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) &&
+ (vndrie->data[0] == WiFiALL_OUI_TYPE))
+ {
+								WL_ERR(("Found Wi-FiAll OUI.\n"));
+ ie_11u_rel_num = vndrie->data[1];
+ ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4;
+ ie_11u_rel_num += 1;
+
+ break;
+ }
+ }
+ count++;
+ tlv_ie = bcm_next_tlv(tlv_ie, &remained_len);
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < bi->SSID_len; i++) {
+ if (bi->SSID[i] == ' ') {
+ bi->SSID[i] = '_';
+ }
+ }
+
+ //0 : None, 1 : OKC, 2 : FT, 3 : CCKM
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ snprintf(akm_str, sizeof(akm_str), "x"); // Unknown
+ } else {
+ WL_ERR(("wpa_auth val %d \n", val));
+#if defined(BCMEXTCCX)
+ if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) {
+ snprintf(akm_str, sizeof(akm_str), "3");
+ } else
+#endif
+ if (val & WPA2_AUTH_FT) {
+ snprintf(akm_str, sizeof(akm_str), "2");
+ } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
+ snprintf(akm_str, sizeof(akm_str), "1");
+ } else {
+ snprintf(akm_str, sizeof(akm_str), "0");
+ }
+ }
+
+ if (cfg->roam_offload) {
+ snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown
+ } else {
+ snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
+ }
+ cfg->roam_count = 0;
+
+ WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID));
+ WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+ "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
+ "akm:%s roam:%s \n",
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), (rate / 2), mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise,
+ akm_str, roam_count_str));
+
+ if (ie) {
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+ } else {
+		//ie_mu_mimo_cap and ie_11u_rel_num are unknown.
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+ }
+
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+
+ cfg = g_bcm_cfg;
+
+ if (cfg == NULL) {
+ return -1;
+ }
+
+ memset(cmd, 0, total_len);
+ memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return GET_BSS_INFO_LEN;
+}
+
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
static s32
wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
struct net_device *ndev = NULL;
s32 err = 0;
u32 event = ntoh32(e->event_type);
+ struct wiphy *wiphy = NULL;
+ struct cfg80211_bss *bss = NULL;
+ struct wlc_ssid *ssid = NULL;
+	u8 *bssid = NULL;
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
wl_get_auth_assoc_status(cfg, ndev, e);
return 0;
}
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
if (wl_is_linkup(cfg, e, ndev)) {
wl_link_up(cfg);
act = true;
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ bool is_connected = wl_get_drv_status(cfg, CONNECTED, ndev);
+#endif
+
printf("wl_bss_connect_done succeeded with " MACDBG "\n",
- MAC2STRDBG((u8*)(&e->addr)));
+ MAC2STRDBG((const u8*)(&e->addr)));
wl_bss_connect_done(cfg, ndev, e, data, true);
- dhd_conf_set_phyoclscdenable((dhd_pub_t *)cfg->pub);
+ dhd_conf_set_fw_string_cmd(cfg->pub, "phy_oclscdenable", cfg->pub->conf->phy_oclscdenable, 0, FALSE);
WL_DBG(("joined in BSS network \"%s\"\n",
((struct wlc_ssid *)
wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+#ifdef DHD_LOSSLESS_ROAMING
+ if (event == WLC_E_LINK && is_connected &&
+ !cfg->roam_offload) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
}
wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
- wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ dhd_conf_set_wme(cfg->pub);
} else if (wl_is_linkdown(cfg, e)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif
#ifdef P2PLISTEN_AP_SAMECHN
if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0);
WL_DBG(("p2p_resp_apchn_status Turn OFF \n"));
}
#endif /* P2PLISTEN_AP_SAMECHN */
+ wl_cfg80211_cancel_scan(cfg);
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ wl_get_bss_info(cfg, ndev, (u8*)(&e->addr));
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ /* Explicitly calling unlink to remove BSS in CFG */
+ wiphy = bcmcfg_to_wiphy(cfg);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ bssid = (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (ssid && bssid) {
+ bss = cfg80211_get_bss(wiphy, NULL, bssid,
+ ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss) {
+ cfg80211_unlink_bss(wiphy, bss);
+ }
+ }
- if (cfg->scan_request)
- wl_notify_escan_complete(cfg, ndev, true, true);
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
scb_val_t scbval;
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
s32 reason = 0;
+ struct ether_addr bssid_dongle;
+ struct ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}};
+
if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
reason = ntoh32(e->reason);
/* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
printf("link down if %s may call cfg80211_disconnected. "
"event : %d, reason=%d from " MACDBG "\n",
ndev->name, event, ntoh32(e->reason),
- MAC2STRDBG((u8*)(&e->addr)));
- if (!cfg->roam_offload &&
- memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
- WL_ERR(("BSSID of event is not the connected BSSID"
- "(ignore it) cur: " MACDBG " event: " MACDBG"\n",
- MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr))));
- return 0;
+ MAC2STRDBG((const u8*)(&e->addr)));
+
+ /* roam offload does not sync BSSID always, get it from dongle */
+ if (cfg->roam_offload) {
+ if (wldev_ioctl(ndev, WLC_GET_BSSID, &bssid_dongle,
+ sizeof(bssid_dongle), false) == BCME_OK) {
+ /* if not roam case, it would return null bssid */
+ if (memcmp(&bssid_dongle, &bssid_null,
+ ETHER_ADDR_LEN) != 0) {
+ curbssid = (u8 *)&bssid_dongle;
+ }
+ }
+ }
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+ bool fw_assoc_state = TRUE;
+ dhd_pub_t *dhd = (dhd_pub_t *)cfg->pub;
+ fw_assoc_state = dhd_is_associated(dhd, e->ifidx, &err);
+ if (!fw_assoc_state) {
+				WL_ERR(("Event sends up even with a different BSSID"
+ " cur: " MACDBG " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ } else {
+ WL_ERR(("BSSID of event is not the connected BSSID"
+					" (ignore it) cur: " MACDBG
+ " event: " MACDBG"\n",
+ MAC2STRDBG(curbssid),
+ MAC2STRDBG((const u8*)(&e->addr))));
+ return 0;
+ }
}
if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
// terence 20130703: Fix for wrong group_capab (timing issue)
- p2p_disconnected = 1;
- memcpy(&p2p_disconnected_bssid, curbssid, ETHER_ADDR_LEN);
+ cfg->p2p_disconnected = 1;
}
+ memcpy(&cfg->disconnected_bssid, curbssid, ETHER_ADDR_LEN);
wl_clr_drv_status(cfg, CONNECTED, ndev);
if (! wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
/* To make sure disconnect, explicitly send disassoc
WL_ERR(("WLC_DISASSOC error %d\n", err));
err = 0;
}
- cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL);
+ CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
+ false, GFP_KERNEL);
wl_link_down(cfg);
wl_init_prof(cfg, ndev);
+ memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
}
}
else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
} else if (wl_is_nonetwork(cfg, e)) {
printf("connect failed event=%d e->status %d e->reason %d \n",
event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (event == WLC_E_SET_SSID) {
+ wl_get_connect_failed_status(cfg, e);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
/* Clean up any pending scan request */
- if (cfg->scan_request)
- wl_notify_escan_complete(cfg, ndev, true, true);
+ wl_cfg80211_cancel_scan(cfg);
if (wl_get_drv_status(cfg, CONNECTING, ndev))
wl_bss_connect_done(cfg, ndev, e, data, false);
} else {
WL_DBG(("%s nothing\n", __FUNCTION__));
}
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
}
else {
WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
WL_DBG(("set pid for rmc event : pid=%d\n", pid));
}
-#ifdef WLAIBSS
-void wl_cfg80211_set_txfail_pid(int pid)
-{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- if (pid > 0)
- cfg->aibss_txfail_pid = pid;
- WL_DBG(("set pid for aibss fail event : pid=%d\n", pid));
-}
-
-static s32
-wl_notify_aibss_txfail(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- u32 evt = ntoh32(e->event_type);
- int ret = -1;
-
- if (cfg->aibss_txfail_pid != 0) {
- ret = wl_netlink_send_msg(cfg->aibss_txfail_pid, AIBSS_EVENT_TXFAIL,
- cfg->aibss_txfail_seq++, (void *)&e->addr, ETHER_ADDR_LEN);
- }
-
- WL_DBG(("txfail : evt=%d, pid=%d, ret=%d, mac=" MACF "\n",
- evt, cfg->aibss_txfail_pid, ret, ETHERP_TO_MACF(&e->addr)));
- return ret;
-}
-#endif /* WLAIBSS */
-
+#ifdef WL_RELMCAST
static s32
wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
return ret;
}
-
+#endif /* WL_RELMCAST */
static s32
wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
s32 err = 0;
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
+#ifdef DHD_LOSSLESS_ROAMING
+ struct wl_security *sec;
+#endif
WL_DBG(("Enter \n"));
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
return err;
if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
- if (wl_get_drv_status(cfg, CONNECTED, ndev))
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+#ifdef DHD_LOSSLESS_ROAMING
+ if (cfg->roam_offload) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ wl_del_roam_timeout(cfg);
+ }
+ else {
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+			/* In order to reduce roaming delay, wl_bss_roaming_done is
+			 * called early on the WLC_E_LINK event. It is called from
+			 * here only if the WLC_E_LINK event is blocked for a specific
+			 * security type.
+ */
+ if (IS_AKM_SUITE_FT(sec)) {
+ wl_bss_roaming_done(cfg, ndev, e, data);
+ }
+ /* Roam timer is deleted mostly from wl_cfg80211_change_station
+ * after roaming is finished successfully. We need to delete
+ * the timer from here only for some security types that aren't
+ * using wl_cfg80211_change_station to authorize SCB
+ */
+ if (IS_AKM_SUITE_FT(sec) || IS_AKM_SUITE_CCKM(sec)) {
+ wl_del_roam_timeout(cfg);
+ }
+ }
+#else
wl_bss_roaming_done(cfg, ndev, e, data);
- else
+#endif /* DHD_LOSSLESS_ROAMING */
+ } else {
wl_bss_connect_done(cfg, ndev, e, data, true);
+ }
act = true;
wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
- wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
+ wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
+ }
+#ifdef DHD_LOSSLESS_ROAMING
+ else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) {
+ wl_del_roam_timeout(cfg);
}
+#endif
return err;
}
-static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+#ifdef QOS_MAP_SET
+/* up range from low to high with up value */
+static bool
+up_table_set(uint8 *up_table, uint8 up, uint8 low, uint8 high)
+{
+ int i;
+
+ if (up > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
+ return FALSE;
+ }
+
+ for (i = low; i <= high; i++) {
+ up_table[i] = up;
+ }
+
+ return TRUE;
+}
+
+/* set user priority table */
+static void
+wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
+{
+ uint8 len;
+
+ if (up_table == NULL || qos_map_ie == NULL) {
+ return;
+ }
+
+ /* clear table to check table was set or not */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+
+ /* length of QoS Map IE must be 16+n*2, n is number of exceptions */
+ if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
+ (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
+ (len % 2) == 0) {
+ uint8 *except_ptr = (uint8 *)qos_map_ie->data;
+ uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
+ uint8 *range_ptr = except_ptr + except_len;
+ int i;
+
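+		/* QoS Map Set IE layout: optional DSCP exception pairs
+		 * (dscp, up) come first, followed by QOS_MAP_FIXED_LENGTH
+		 * octets of (low, high) DSCP ranges, one pair per UP 0..7.
+		 */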
+ /* fill in ranges */
+ for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
+ uint8 low = range_ptr[i];
+ uint8 high = range_ptr[i + 1];
+ if (low == 255 && high == 255) {
+ continue;
+ }
+
+ if (!up_table_set(up_table, i / 2, low, high)) {
+ /* clear the table on failure */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+ return;
+ }
+ }
+
+ /* update exceptions */
+ for (i = 0; i < except_len; i += 2) {
+ uint8 dscp = except_ptr[i];
+ uint8 up = except_ptr[i+1];
+
+ /* exceptions with invalid dscp/up are ignored */
+ up_table_set(up_table, up, dscp, dscp);
+ }
+ }
+
+ if (wl_dbg_level & WL_DBG_DBG) {
+ prhex("UP table", up_table, UP_TABLE_MAX);
+ }
+}
+
+/* get user priority table */
+uint8 *
+wl_get_up_table(void)
+{
+ return (uint8 *)(g_bcm_cfg->up_table);
+}
+#endif /* QOS_MAP_SET */
+
+#ifdef DHD_LOSSLESS_ROAMING
+static s32
+wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ struct wl_security *sec;
+ struct net_device *ndev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+	/* Disable Lossless Roaming for specific AKM suites.
+	 * Any other AKM suite can be added below if its transition time
+	 * is delayed because of Lossless Roaming and that delay causes
+	 * a certification failure.
+ */
+ if (IS_AKM_SUITE_FT(sec)) {
+ return err;
+ }
+
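+	/* While roaming is in progress, only 802.1D NC (network control)
+	 * priority packets may be dequeued; the map is restored to ALLPRIO
+	 * by wl_del_roam_timeout() or when the roam timer expires.
+	 */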
+ dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC;
+ /* Restore flow control */
+ dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
+
+ mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS));
+
+ return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
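+/* WLC_E_PSK_SUP handler: with roam offload enabled, forward the firmware
+ * supplicant status (4-way handshake timeout or success) to userspace as a
+ * BRCM_VENDOR_EVENT_IDSUP_STATUS vendor event.
+ */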
+static s32
+wl_notify_idsup_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ u32 idsup_status;
+ u32 reason = ntoh32(e->reason);
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
+
+ if (cfg->roam_offload) {
+#if defined(WL_VENDOR_EXT_SUPPORT)
+ switch (reason) {
+ case WLC_E_SUP_WPA_PSK_TMO:
+ idsup_status = IDSUP_EVENT_4WAY_HANDSHAKE_TIMEOUT;
+ break;
+ case WLC_E_SUP_OTHER:
+ idsup_status = IDSUP_EVENT_SUCCESS;
+ break;
+ default:
+ WL_ERR(("Other type at IDSUP. "
+ "event=%d e->status %d e->reason %d \n",
+ (int)ntoh32(e->event_type), (int)ntoh32(e->status),
+ (int)ntoh32(e->reason)));
+ return err;
+ }
+
+ err = wl_cfgvendor_send_async_event(wiphy, ndev,
+ BRCM_VENDOR_EVENT_IDSUP_STATUS, &idsup_status, sizeof(u32));
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
+ }
+ return err;
+}
+
+#ifdef CUSTOM_EVENT_PM_WAKE
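+/* WLC_E_EXCESS_PM_WAKE_EVENT handler: query the firmware "pm" dump and log
+ * it so excessive power-management wakeups can be diagnosed.
+ */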
+static s32
+wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ struct net_device *ndev = NULL;
+ u8 *pbuf = NULL;
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (pbuf == NULL) {
+ WL_ERR(("failed to allocate local pbuf\n"));
+ return -ENOMEM;
+ }
+
+ err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err) {
+ WL_ERR(("dump ioctl err = %d", err));
+ } else {
+ WL_ERR(("PM status : %s\n", pbuf));
+ }
+
+ if (pbuf) {
+ kfree(pbuf);
+ }
+ return err;
+}
+#endif /* CUSTOM_EVENT_PM_WAKE */
+
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
wl_assoc_info_t assoc_info;
struct wl_connect_info *conn_info = wl_to_conn(cfg);
s32 err = 0;
+#ifdef QOS_MAP_SET
+ bcm_tlv_t * qos_map_ie = NULL;
+#endif /* QOS_MAP_SET */
WL_DBG(("Enter \n"));
err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
return err;
}
conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp);
- if (conn_info->resp_ie_len <= MAX_REQ_LINE)
+ if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
- else {
+ } else {
WL_ERR(("IE size %d above max %d size \n",
conn_info->resp_ie_len, MAX_REQ_LINE));
return err;
}
+
+#ifdef QOS_MAP_SET
+ /* find qos map set ie */
+ if ((qos_map_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_QOS_MAP_ID)) != NULL) {
+ WL_DBG((" QoS map set IE found in assoc response\n"));
+ if (!cfg->up_table) {
+ cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+ }
+ wl_set_up_table(cfg->up_table, qos_map_ie);
+ } else {
+ kfree(cfg->up_table);
+ cfg->up_table = NULL;
+ }
+#endif /* QOS_MAP_SET */
} else {
conn_info->resp_ie_len = 0;
}
return err;
}
-static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params,
size_t *join_params_size)
{
- chanspec_t chanspec = 0;
+ struct bcm_cfg80211 *cfg;
+ s32 bssidx = -1;
+ chanspec_t chanspec = 0, chspec;
+
if (ch != 0) {
- join_params->params.chanspec_num = 1;
- join_params->params.chanspec_list[0] = ch;
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy);
+ if (cfg && cfg->rcc_enabled) {
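+			/* RCC (roam channel cache) is enabled: do not force a
+			 * single chanspec here; the channel list is left as-is.
+			 */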
+ } else {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
- if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ /* Get the min_bw set for the interface */
+ chspec = wl_cfg80211_ulb_get_min_bw_chspec(dev->ieee80211_ptr, bssidx);
+ if (chspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec \n"));
+ return -EINVAL;
+ }
+ chanspec |= chspec;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
- join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- join_params->params.chanspec_list[0] |= chanspec;
- join_params->params.chanspec_list[0] =
- wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+ }
- join_params->params.chanspec_num =
- htod32(join_params->params.chanspec_num);
WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
join_params->params.chanspec_list[0],
join_params->params.chanspec_num));
}
+ return 0;
}
static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
{
- struct cfg80211_bss *bss;
struct wl_bss_info *bi;
struct wlc_ssid *ssid;
struct bcm_tlv *tim;
s32 err = 0;
struct wiphy *wiphy;
u32 channel;
- struct ieee80211_channel *cur_channel;
- u32 freq, band;
wiphy = bcmcfg_to_wiphy(cfg);
bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
- freq = ieee80211_channel_to_frequency(channel);
-#else
- band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- freq = ieee80211_channel_to_frequency(channel, band);
-#endif
- cur_channel = ieee80211_get_channel(wiphy, freq);
-
- bss = cfg80211_get_bss(wiphy, cur_channel, curbssid,
- ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
-
- if (!bss) {
- WL_DBG(("Could not find the AP\n"));
- if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
- WL_ERR(("Bssid doesn't match\n"));
- err = -EIO;
- goto update_bss_info_out;
- }
- err = wl_inform_single_bss(cfg, bi, roam);
- if (unlikely(err))
- goto update_bss_info_out;
- ie = ((u8 *)bi) + bi->ie_offset;
- ie_len = bi->ie_length;
- beacon_interval = cpu_to_le16(bi->beacon_period);
- } else {
- WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
-#if defined(WL_CFG80211_P2P_DEV_IF)
- ie = (u8 *)bss->ies->data;
- ie_len = bss->ies->len;
-#else
- ie = bss->information_elements;
- ie_len = bss->len_information_elements;
-#endif /* WL_CFG80211_P2P_DEV_IF */
- beacon_interval = bss->beacon_interval;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
- cfg80211_put_bss(wiphy, bss);
-#else
- cfg80211_put_bss(bss);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+ if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
+ WL_ERR(("Bssid doesn't match\n"));
+ err = -EIO;
+ goto update_bss_info_out;
}
+ err = wl_inform_single_bss(cfg, bi, roam);
+ if (unlikely(err))
+ goto update_bss_info_out;
+ ie = ((u8 *)bi) + bi->ie_offset;
+ ie_len = bi->ie_length;
+ beacon_interval = cpu_to_le16(bi->beacon_period);
tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
if (tim) {
dtim_period = tim->data[1];
struct wl_connect_info *conn_info = wl_to_conn(cfg);
s32 err = 0;
u8 *curbssid;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_supported_band *band;
struct ieee80211_channel *notify_channel = NULL;
u32 *channel;
u32 freq;
-#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
+#endif
-#ifdef WLFBT
- uint32 data_len = 0;
- if (data)
- data_len = ntoh32(e->datalen);
-#endif /* WLFBT */
+
+ if (memcmp(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_INFORM(("BSSID already updated\n"));
+ return err;
+ }
+
+	/* Skip calling cfg80211_roamed if the current bssid and the
+	 * roamed bssid are the same. Also clear the roam_timeout timer.
+ */
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ WL_ERR(("BSS already present, Skipping roamed event to upper layer\n"));
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+ return err;
+ }
wl_get_assoc_ies(cfg, ndev);
- wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, true);
wl_update_pmklist(ndev, cfg->pmk_list, err);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
if (*channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(*channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
-#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
-#ifdef WLFBT
- /* back up the given FBT key for the further supplicant request,
- * currently not checking the FBT is enabled for current BSS in DHD,
- * because the supplicant decides to take it or not.
- */
- if (data && (data_len == FBT_KEYLEN)) {
- memcpy(cfg->fbt_key, data, FBT_KEYLEN);
- }
-#endif /* WLFBT */
+#endif
printf("wl_bss_roaming_done succeeded to " MACDBG "\n",
- MAC2STRDBG((u8*)(&e->addr)));
- dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
+ MAC2STRDBG((const u8*)(&e->addr)));
+ dhd_conf_set_wme(cfg->pub);
cfg80211_roamed(ndev,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
notify_channel,
#endif
curbssid,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
WL_DBG(("Report roaming result\n"));
+ memcpy(&cfg->last_roamed_addr, (void *)&e->addr, ETHER_ADDR_LEN);
wl_set_drv_status(cfg, CONNECTED, ndev);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ cfg->roam_count++;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
return err;
}
wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed) {
wl_get_assoc_ies(cfg, ndev);
- wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
+ WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
wl_update_pmklist(ndev, cfg->pmk_list, err);
if (wl_get_chan_isvht80(ndev, dhd)) {
if (ndev == bcmcfg_to_prmry_ndev(cfg))
dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
- else if (ndev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+ else if (is_p2p_group_iface(ndev->ieee80211_ptr))
dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
dhd_set_cpucore(dhd, TRUE);
}
GFP_KERNEL);
if (completed) {
WL_INFORM(("Report connect result - connection succeeded\n"));
- dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
+ dhd_conf_set_wme(cfg->pub);
} else
WL_ERR(("Report connect result - connection failed\n"));
}
else
key_type = NL80211_KEYTYPE_PAIRWISE;
- cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
+ cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1,
NULL, GFP_KERNEL);
mutex_unlock(&cfg->usr_sync);
#ifndef WL_SCHED_SCAN
mutex_lock(&cfg->usr_sync);
/* TODO: Use cfg80211_sched_scan_results(wiphy); */
- cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
mutex_unlock(&cfg->usr_sync);
#else
/* If cfg80211 scheduled scan is supported, report the pno results via sched
}
#endif /* PNO_SUPPORT */
+#ifdef GSCAN_SUPPORT
+static s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr;
+ int send_evt_bytes = 0;
+ int batch_event_result_dummy = 0;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+
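+	/* Translate PFN/GSCAN firmware events into the corresponding Android
+	 * GSCAN vendor events (batching, hotlist, full results, completion).
+	 */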
+ switch (event) {
+ case WLC_E_PFN_SWC:
+ ptr = dhd_dev_swc_scan_event(ndev, data, &send_evt_bytes);
+ if (send_evt_bytes) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_SIGNIFICANT_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ }
+ break;
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &batch_event_result_dummy, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ batch_event_result_dummy = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &batch_event_result_dummy, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+		/* WLC_E_PFN_BSSID_NET_LOST shares its event code with WLC_E_PFN_SCAN_ALLGONE.
+		 * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so ignore it if received.
+ */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ }
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ }
+ break;
+ default:
+ WL_ERR(("%s: Unexpected event! - %d\n", __FUNCTION__, event));
+
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
static s32
wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
#endif /* WL_CFG80211_SYNC_GON */
}
+#if defined(WLTDLS)
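+/* Identify tunneled TDLS frames by matching the first five payload bytes
+ * against the tunneled probe request/response patterns.
+ */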
+bool wl_cfg80211_is_tdls_tunneled_frame(void *frame, u32 frame_len)
+{
+ unsigned char *data;
+
+ if (frame == NULL) {
+ WL_ERR(("Invalid frame \n"));
+ return false;
+ }
+
+ if (frame_len < 5) {
+ WL_ERR(("Invalid frame length [%d] \n", frame_len));
+ return false;
+ }
+
+ data = frame;
+
+ if (!memcmp(data, TDLS_TUNNELED_PRB_REQ, 5) ||
+ !memcmp(data, TDLS_TUNNELED_PRB_RESP, 5)) {
+ WL_DBG(("TDLS Vendor Specific Received type\n"));
+ return true;
+ }
+
+ return false;
+}
+#endif /* WLTDLS */
+
int wl_cfg80211_get_ioctl_version(void)
{
wifi_p2p_pub_act_frame_t *act_frm = NULL;
wifi_p2p_action_frame_t *p2p_act_frm = NULL;
wifi_p2psd_gas_pub_act_frame_t *sd_act_frm = NULL;
+#if defined(WLTDLS) && defined(TDLS_MSG_ONLY_WFD)
+ dhd_pub_t *dhdp;
+#endif /* WLTDLS && TDLS_MSG_ONLY_WFD */
wl_event_rx_frame_data_t *rxframe =
(wl_event_rx_frame_data_t*)data;
u32 event = ntoh32(e->event_type);
u8 bsscfgidx = e->bsscfgidx;
u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
- bool retval;
memset(&bssid, 0, ETHER_ADDR_LEN);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef P2PONEINT
- WL_DBG((" device name is ndev %s \n", ndev->name));
-#endif
if (channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
WL_ERR(("No valid band\n"));
return -EINVAL;
}
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
freq = ieee80211_channel_to_frequency(channel);
(void)band->band;
#else
(void) p2p_act_frm;
} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
-#ifdef WL_SDO
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- WL_ERR(("SD offload is in progress. Don't report the"
- "frame via rx_mgmt path\n"));
- goto exit;
- }
-#endif
sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
}
(void) sd_act_frm;
#ifdef WLTDLS
- } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
- WL_DBG((" TDLS Action Frame Received type = %d \n",
- mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
-
+ } else if ((mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) ||
+ (wl_cfg80211_is_tdls_tunneled_frame(
+ &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN))) {
+ if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_AF_CATEGORY) {
+ WL_ERR((" TDLS Action Frame Received type = %d \n",
+ mgmt_frame[DOT11_MGMT_HDR_LEN + 1]));
+ }
+#ifdef TDLS_MSG_ONLY_WFD
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ if (!dhdp->tdls_mode) {
+ WL_DBG((" TDLS Frame filtered \n"));
+ return 0;
+ }
+#else
if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
cfg->tdls_mgmt_frame = mgmt_frame;
cfg->tdls_mgmt_frame_len = mgmt_frame_len;
cfg->tdls_mgmt_freq = freq;
return 0;
}
-
- } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == TDLS_VENDOR_SPECIFIC) {
- WL_DBG((" TDLS Vendor Specific Received type \n"));
-#endif
+#endif /* TDLS_MSG_ONLY_WFD */
+#endif /* WLTDLS */
+#ifdef QOS_MAP_SET
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_QOS) {
+ /* update QoS map set table */
+ bcm_tlv_t * qos_map_ie = NULL;
+ if ((qos_map_ie = bcm_parse_tlvs(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN,
+ DOT11_MNG_QOS_MAP_ID)) != NULL) {
+ WL_DBG((" QoS map set IE found in QoS action frame\n"));
+ if (!cfg->up_table) {
+ cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
+ }
+ wl_set_up_table(cfg->up_table, qos_map_ie);
+ } else {
+ kfree(cfg->up_table);
+ cfg->up_table = NULL;
+ }
+#endif /* QOS_MAP_SET */
} else {
+ /*
+	 * if we got a normal action frame and ndev is p2p0,
+	 * we have to change ndev from p2p0 to wlan0
+ */
+
if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
u8 action = 0;
* GO-NEG Phase
*/
if (cfg->p2p &&
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti &&
+#endif
wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
WL_DBG(("Filtering P2P probe_req while "
"being in GO-Neg state\n"));
}
}
-#ifdef P2PONEINT
- if (ndev == cfg->p2p_net && ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
- ndev = bcmcfg_to_prmry_ndev(cfg);
- cfgdev = ndev_to_cfgdev(ndev);
- }
- WL_DBG((" device name is ndev %s \n", ndev->name));
-#endif
+ if (discover_cfgdev(cfgdev, cfg))
+		WL_DBG(("Rx Management frame for P2P Discovery Interface \n"));
+	else
+		WL_DBG(("Rx Management frame for Iface (%s) \n", ndev->name));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
defined(WL_COMPAT_WIRELESS)
- retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+ cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
#else
- retval = cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+ cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 18, 0) */
- WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d) retval (%d)\n",
- mgmt_frame_len, ntoh32(e->datalen), channel, freq, retval));
+ WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
+ mgmt_frame_len, ntoh32(e->datalen), channel, freq));
exit:
if (isfree)
kfree(mgmt_frame);
#ifdef PNO_SUPPORT
cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
#endif /* PNO_SUPPORT */
-#ifdef WL_SDO
- cfg->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler;
- cfg->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery;
- cfg->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery;
-#endif
+#ifdef GSCAN_SUPPORT
+ cfg->evt_handler[WLC_E_PFN_BEST_BATCHING] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_SCAN_COMPLETE] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_GSCAN_FULL_RESULT] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_SWC] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_BSSID_NET_FOUND] = wl_notify_gscan_event;
+ cfg->evt_handler[WLC_E_PFN_BSSID_NET_LOST] = wl_notify_gscan_event;
+#endif /* GSCAN_SUPPORT */
#ifdef WLTDLS
cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
#endif /* WLTDLS */
cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
-#ifdef WLAIBSS
- cfg->evt_handler[WLC_E_AIBSS_TXFAIL] = wl_notify_aibss_txfail;
-#endif /* WLAIBSS */
+#ifdef WL_RELMCAST
+ cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
+#endif
#ifdef BT_WIFI_HANDOVER
cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
#endif
cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status;
cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status;
#endif /* WL_NAN */
- cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
+ cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind;
+#ifdef DHD_LOSSLESS_ROAMING
+ cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
+#endif
+ cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind;
+#ifdef CUSTOM_EVENT_PM_WAKE
+ cfg->evt_handler[WLC_E_EXCESS_PM_WAKE_EVENT] = wl_check_pmstatus;
+#endif /* CUSTOM_EVENT_PM_WAKE */
+ cfg->evt_handler[WLC_E_PSK_SUP] = wl_notify_idsup_status;
}
#if defined(STATIC_WL_PRIV_STRUCT)
static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
{
WL_DBG(("Enter \n"));
+
cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
if (unlikely(!cfg->scan_results)) {
WL_ERR(("Scan results alloc failed\n"));
WL_ERR(("pmk list alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL);
- if (unlikely(!cfg->sta_info)) {
- WL_ERR(("sta info alloc failed\n"));
- goto init_priv_mem_out;
- }
-
#if defined(STATIC_WL_PRIV_STRUCT)
cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
if (unlikely(!cfg->conn_info)) {
INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
}
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ }
+#endif /* WLTDLS */
return 0;
init_priv_mem_out:
cfg->extra_buf = NULL;
kfree(cfg->pmk_list);
cfg->pmk_list = NULL;
- kfree(cfg->sta_info);
- cfg->sta_info = NULL;
#if defined(STATIC_WL_PRIV_STRUCT)
kfree(cfg->conn_info);
cfg->conn_info = NULL;
cfg->afx_hdl = NULL;
}
- if (cfg->ap_info) {
- kfree(cfg->ap_info->wpa_ie);
- kfree(cfg->ap_info->rsn_ie);
- kfree(cfg->ap_info->wps_ie);
- kfree(cfg->ap_info);
- cfg->ap_info = NULL;
- }
-#ifdef WLTDLS
- if (cfg->tdls_mgmt_frame) {
- kfree(cfg->tdls_mgmt_frame);
- cfg->tdls_mgmt_frame = NULL;
- }
-#endif /* WLTDLS */
}
static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
PROC_STOP(&cfg->event_tsk);
}
+void wl_terminate_event_handler(void)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ if (cfg) {
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ }
+}
+
static void wl_scan_timeout(unsigned long data)
{
wl_event_msg_t msg;
struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
-// struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL;
+ s32 i;
+ u32 channel;
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ uint32 prev_memdump_mode = dhdp->memdump_enabled;
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
if (!(cfg->scan_request)) {
WL_ERR(("timer expired but no scan request\n"));
return;
}
- bzero(&msg, sizeof(wl_event_msg_t));
+
+ bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!bss_list) {
+ WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+ } else {
+ WL_ERR(("scanned AP count (%d)\n", bss_list->count));
+
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
+ }
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ if (cfg->scan_request->dev)
+ wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+ wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
+ }
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
WL_ERR(("timer expired\n"));
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+ dhd_bus_mem_dump(dhdp);
+ dhdp->memdump_enabled = prev_memdump_mode;
+ }
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
msg.event_type = hton32(WLC_E_ESCAN_RESULT);
msg.status = hton32(WLC_E_STATUS_TIMEOUT);
msg.reason = 0xFFFFFFFF;
- wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL);
+ wl_cfg80211_event(ndev, &msg, NULL);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_set();
+#endif /* CUSTOMER_HW4_DEBUG */
// terence 20130729: workaround to fix out of memory in firmware
// if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) {
// }
}
+#ifdef DHD_LOSSLESS_ROAMING
+static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
+ }
+
+}
+
+static void wl_roam_timeout(unsigned long data)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+
+ WL_ERR(("roam timer expired\n"));
+
+ /* restore prec_map to ALLPRIO */
+ dhdp->dequeue_prec_map = ALLPRIO;
+}
+
+#endif /* DHD_LOSSLESS_ROAMING */
+
static s32
wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
- unsigned long state,
- void *ptr)
+ unsigned long state, void *ptr)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
struct net_device *dev = ptr;
#else
- // terence 20150701: fix for p2p connection issue
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-#endif
- struct wireless_dev *wdev = dev->ieee80211_ptr;
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
struct bcm_cfg80211 *cfg = g_bcm_cfg;
- WL_DBG(("Enter \n"));
+#ifdef DHD_IFDEBUG
+ WL_ERR(("Enter \n"));
+#endif
if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
return NOTIFY_DONE;
int max_wait_count = 100;
int refcnt = 0;
unsigned long limit = jiffies + max_wait_timeout * HZ;
+#ifdef DHD_IFDEBUG
+ WL_ERR(("NETDEV_DOWN(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
while (work_pending(&wdev->cleanup_work)) {
if (refcnt%5 == 0) {
WL_ERR(("[NETDEV_DOWN] wait for "
set_current_state(TASK_RUNNING);
refcnt++;
}
-#endif /* LINUX_VERSION < VERSION(3, 14, 0) */
+#ifdef DHD_IFDEBUG
+ WL_ERR(("NETDEV_DOWN(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
+#endif /* LINUX_VERSION < VERSION(3, 14, 0) */
break;
}
-
case NETDEV_UNREGISTER:
+#ifdef DHD_IFDEBUG
+ WL_ERR(("NETDEV_UNREGISTER(+) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
/* after calling list_del_rcu(&wdev->list) */
- wl_dealloc_netinfo(cfg, dev);
+ wl_cfg80211_clear_per_bss_ies(cfg,
+ wl_get_bssidx_by_wdev(cfg, wdev));
+ wl_dealloc_netinfo_by_wdev(cfg, wdev);
+#ifdef DHD_IFDEBUG
+ WL_ERR(("NETDEV_UNREGISTER(-) wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
break;
case NETDEV_GOING_DOWN:
- /* At NETDEV_DOWN state, wdev_cleanup_work work will be called.
- * In front of door, the function checks
- * whether current scan is working or not.
- * If the scanning is still working, wdev_cleanup_work call WARN_ON and
- * make the scan done forcibly.
- */
+ /*
+	 * At NETDEV_DOWN, wdev_cleanup_work will be called. Before that
+	 * happens, check whether a scan is still in progress; if so,
+	 * wdev_cleanup_work would WARN_ON and force the scan to complete,
+	 * so finish it here first.
+ */
+#ifdef DHD_IFDEBUG
+ WL_ERR(("NETDEV_GOING_DOWN wdev=%p, cfg=%p, dev=%p\n", wdev, cfg, dev));
+#endif
if (wl_get_drv_status(cfg, SCANNING, dev))
wl_notify_escan_complete(cfg, dev, true, true);
break;
}
return NOTIFY_DONE;
}
+
static struct notifier_block wl_cfg80211_netdev_notifier = {
.notifier_call = wl_cfg80211_netdev_notifier_call,
};
-/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
+
+/*
+ * Make sure we do not register the same notifier twice; otherwise a loop is likely to be
* created in kernel notifier link list (with 'next' pointing to itself)
*/
static bool wl_cfg80211_netdev_notifier_registered = FALSE;
-void
-#ifdef P2PONEINT
-wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
+{
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+
+ if (!cfg->scan_request)
+ return;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ if (cfg->scan_request->dev)
+ wdev = cfg->scan_request->dev->ieee80211_ptr;
#else
-wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
-#endif
+ wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ WL_ERR(("Scan aborted! \n"));
+}
+
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
{
wl_scan_params_t *params = NULL;
s32 params_size = 0;
kfree(params);
}
}
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ }
+#endif /* WLTDLS */
}
static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
struct net_device *dev;
WL_DBG(("Enter \n"));
+
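+ /* Serialize escan-completion handling; every exit path below releases this lock via the 'out' label. */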
+ mutex_lock(&cfg->scan_complete);
+
if (!ndev) {
WL_ERR(("ndev is null\n"));
err = BCME_ERROR;
- return err;
+ goto out;
}
if (cfg->escan_info.ndev != ndev) {
WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
err = BCME_ERROR;
- return err;
+ goto out;
}
if (cfg->scan_request) {
#if defined(WL_ENABLE_P2P_IF)
if (cfg->scan_request->dev != cfg->p2p_net)
dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE) {
+#ifdef DHD_IFDEBUG
+ WL_ERR(("%s: dev: %p\n", __FUNCTION__, cfg->scan_request->wdev->netdev));
+#endif
+ dev = cfg->scan_request->wdev->netdev;
+ }
#endif /* WL_ENABLE_P2P_IF */
}
else {
if (likely(cfg->scan_request)) {
cfg80211_scan_done(cfg->scan_request, aborted);
cfg->scan_request = NULL;
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
}
if (p2p_is_on(cfg))
wl_clr_p2p_status(cfg, SCANNING);
wl_clr_drv_status(cfg, SCANNING, dev);
spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-#ifdef WL_SDO
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS) && !in_atomic()) {
- wl_cfg80211_resume_sdo(ndev, cfg);
- }
-#endif
+out:
+ mutex_unlock(&cfg->scan_complete);
return err;
}
WL_ERR(("No valid band\n"));
goto exit;
}
- if (!dhd_conf_match_channel((dhd_pub_t *)cfg->pub, channel))
+ if (!dhd_conf_match_channel(cfg->pub, channel))
goto exit;
/* ----- terence 20130524: skip invalid bss */
list = wl_escan_get_buf(cfg, FALSE);
if (scan_req_match(cfg)) {
-#ifdef WL_HOST_BAND_MGMT
- s32 channel = 0;
- s32 channel_band = 0;
- chanspec_t chspec;
-#endif /* WL_HOST_BAND_MGMT */
/* p2p scan && allow only probe response */
if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
" response/beacon\n"));
goto exit;
}
-#ifdef WL_HOST_BAND_MGMT
- chspec = wl_chspec_driver_to_host(bi->chanspec);
- channel = wf_chspec_ctlchan(chspec);
- channel_band = CHSPEC2WLC_BAND(chspec);
-
- if ((cfg->curr_band == WLC_BAND_5G) &&
- (channel_band == WLC_BAND_2G)) {
- /* Avoid sending the GO results in band conflict */
- if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
- P2P_SEID_GROUP_ID) != NULL)
- goto exit;
- }
-#endif /* WL_HOST_BAND_MGMT */
}
#ifdef ESCAN_BUF_OVERFLOW_MGMT
if (bi_length > ESCAN_BUF_SIZE - list->buflen)
goto exit;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
}
- if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID
- WL_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID));
- goto exit;
- }
memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
list->version = dtoh32(bi->version);
list->buflen += bi_length;
list->count++;
+ /*
+ * !Broadcast && number of SSIDs == 1 && number of channels == 1
+ * indicates a directed scan issued for association
+ */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
+ goto exit;
+ }
}
-
}
else if (status == WLC_E_STATUS_SUCCESS) {
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#if defined(P2P_DISCOVERY_WAR)
- if (cfg->p2p_net && cfg->scan_request &&
- cfg->scan_request->dev == cfg->p2p_net &&
- !cfg->p2p->vif_created) {
- if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) {
- WL_ERR(("mpc enabling back failed\n"));
- }
- }
-#endif /* defined(P2P_DISCOVERY_WAR) */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
escan_result->sync_id);
wl_notify_escan_complete(cfg, ndev, false, false);
}
wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
- }
- else if (status == WLC_E_STATUS_ABORT) {
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#if defined(P2P_DISCOVERY_WAR)
- if (cfg->p2p_net && cfg->scan_request &&
- cfg->scan_request->dev == cfg->p2p_net &&
- !cfg->p2p->vif_created) {
- if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 1) < 0) {
- WL_ERR(("mpc enabling back failed\n"));
- }
- }
-#endif /* defined(P2P_DISCOVERY_WAR) */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, escan_result->sync_id,
cfg->escan_info.cur_sync_id);
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
WL_INFORM(("ACTION FRAME SCAN DONE\n"));
wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
WL_INFORM(("ESCAN ABORTED\n"));
cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+ WL_TRACE_HW4(("scan_req_match=0: scanned AP count=%d\n",
cfg->bss_list->count));
}
wl_inform_bss(cfg);
wl_notify_escan_complete(cfg, ndev, true, false);
+ } else {
+ /* If there is no pending host initiated scan, do nothing */
+ WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
}
wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
- } else if (status == WLC_E_STATUS_NEWSCAN) {
- WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
- escan_result->bss_count));
} else if (status == WLC_E_STATUS_TIMEOUT) {
WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
WL_ERR(("reason[0x%x]\n", e->reason));
static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
{
u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ bool p2p_connected = wl_cfgp2p_vif_created(cfg);
struct net_info *iter, *next;
- int err;
if (!cfg->roamoff_on_concurrent)
return;
- if (enable && connected_cnt > 1) {
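+ /* Set roam_off on STA interfaces while a P2P vif exists or more than one connection is active. */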
+ if (enable && (p2p_connected||(connected_cnt > 1))) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
for_each_ndev(cfg, iter, next) {
- /* Save the current roam setting */
- if ((err = wldev_iovar_getint(iter->ndev, "roam_off",
- (s32 *)&iter->roam_off)) != BCME_OK) {
- WL_ERR(("%s:Failed to get current roam setting err %d\n",
- iter->ndev->name, err));
- continue;
- }
- if ((err = wldev_iovar_setint(iter->ndev, "roam_off", 1)) != BCME_OK) {
- WL_ERR((" %s:failed to set roam_off : %d\n",
- iter->ndev->name, err));
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
+ == BCME_OK) {
+ iter->roam_off = TRUE;
+ }
+ else {
+ WL_ERR(("error to enable roam_off\n"));
+ }
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
}
else if (!enable) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
for_each_ndev(cfg, iter, next) {
- if (iter->roam_off != WL_INVALID) {
- if ((err = wldev_iovar_setint(iter->ndev, "roam_off",
- iter->roam_off)) == BCME_OK)
- iter->roam_off = WL_INVALID;
- else {
- WL_ERR((" %s:failed to set roam_off : %d\n",
- iter->ndev->name, err));
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (iter->roam_off != WL_INVALID) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
+ == BCME_OK) {
+ iter->roam_off = FALSE;
+ }
+ else {
+ WL_ERR(("error to disable roam_off\n"));
+ }
}
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
}
return;
}
if (connected_cnt <= 1) {
return;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
for_each_ndev(cfg, iter, next) {
- chanspec = 0;
- ctl_chan = 0;
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_chspec_driver_to_host(chanspec);
- ctl_chan = wf_chspec_ctlchan(chanspec);
- wl_update_prof(cfg, iter->ndev, NULL,
- &ctl_chan, WL_PROF_CHAN);
- }
- if (!cfg->vsdb_mode) {
- if (!pre_ctl_chan && ctl_chan)
- pre_ctl_chan = ctl_chan;
- else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
- cfg->vsdb_mode = true;
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ ctl_chan = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ wl_update_prof(cfg, iter->ndev, NULL,
+ &ctl_chan, WL_PROF_CHAN);
+ }
+ if (!cfg->vsdb_mode) {
+ if (!pre_ctl_chan && ctl_chan)
+ pre_ctl_chan = ctl_chan;
+ else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+ cfg->vsdb_mode = true;
+ }
}
}
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
printf("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
return;
}
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+extern int g_frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
+
static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
enum wl_status state, bool set)
{
s32 err = BCME_OK;
u32 mode;
u32 chan = 0;
- struct net_info *iter, *next;
struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
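+ /* Skip state handling entirely when the bus is already down. */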
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__));
+ return 0;
+ }
WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
state, set, _net_info->pm_restore, _net_info->ndev->name));
mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
if (set) {
wl_cfg80211_concurrent_roam(cfg, 1);
-
+ wl_cfg80211_determine_vsdb_mode(cfg);
if (mode == WL_MODE_AP) {
-
if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
}
- wl_cfg80211_determine_vsdb_mode(cfg);
- if (cfg->vsdb_mode || _net_info->pm_block) {
- /* Delete pm_enable_work */
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN);
- /* save PM_FAST in _net_info to restore this
- * if _net_info->pm_block is false
- */
- if (!_net_info->pm_block && (mode == WL_MODE_BSS)) {
- _net_info->pm = PM_FAST;
- if (dhd_conf_get_pm(dhd) >= 0)
- _net_info->pm = dhd_conf_get_pm(dhd);
- _net_info->pm_restore = true;
- }
- pm = PM_OFF;
- for_each_ndev(cfg, iter, next) {
- if (iter->pm_restore)
- continue;
- /* Save the current power mode */
- err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
- sizeof(iter->pm), false);
- WL_DBG(("%s:power save %s\n", iter->ndev->name,
- iter->pm ? "enabled" : "disabled"));
- if (!err && iter->pm) {
- iter->pm_restore = true;
- }
-
- }
- for_each_ndev(cfg, iter, next) {
- if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
- continue;
- if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
- sizeof(pm), true)) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
- wl_cfg80211_update_power_mode(iter->ndev);
- }
- }
- } else {
- /* add PM Enable timer to go to power save mode
- * if supplicant control pm mode, it will be cleared or
- * updated by wl_cfg80211_set_power_mgmt() if not - for static IP & HW4 P2P,
- * PM will be configured when timer expired
- */
-
- /*
- * before calling pm_enable_timer, we need to set PM -1 for all ndev
- */
- pm = PM_OFF;
- if (!_net_info->pm_block) {
- for_each_ndev(cfg, iter, next) {
- if (iter->pm_restore)
- continue;
- /* Save the current power mode */
- err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
- sizeof(iter->pm), false);
- WL_DBG(("%s:power save %s\n", iter->ndev->name,
- iter->pm ? "enabled" : "disabled"));
- if (!err && iter->pm) {
- iter->pm_restore = true;
- }
- }
- }
- for_each_ndev(cfg, iter, next) {
- if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
- continue;
- if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
- sizeof(pm), true)) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
- }
- }
- if (cfg->pm_enable_work_on) {
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
- }
+ pm = PM_OFF;
+ if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm), true)) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
- cfg->pm_enable_work_on = true;
- wl_add_remove_pm_enable_work(cfg, TRUE, WL_HANDLER_NOTUSE);
+ wl_cfg80211_update_power_mode(_net_info->ndev);
}
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
#if defined(WLTDLS)
-#if defined(DISABLE_TDLS_IN_P2P)
- if (cfg->vsdb_mode || p2p_is_on(cfg))
-#else
- if (cfg->vsdb_mode)
-#endif /* defined(DISABLE_TDLS_IN_P2P) */
- {
-
+ if (wl_cfg80211_is_concurrent_mode()) {
err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
}
#endif /* defined(WLTDLS) */
- }
- else { /* clear */
+
+#ifdef DISABLE_FRAMEBURST_VSDB
+#ifdef USE_WFA_CERT_CONF
+ if (g_frameburst)
+#endif /* USE_WFA_CERT_CONF */
+ {
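+ /* Drop frameburst while concurrent (VSDB) interfaces are active. */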
+ if (wl_cfg80211_is_concurrent_mode()) {
+ int frameburst = 0;
+ if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst,
+ sizeof(frameburst), true) != 0) {
+ WL_DBG(("frameburst set error\n"));
+ }
+ WL_DBG(("Frameburst Disabled\n"));
+ }
+ }
+#endif /* DISABLE_FRAMEBURST_VSDB */
+ } else { /* clear */
chan = 0;
/* clear chan information when the net device is disconnected */
wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
wl_cfg80211_determine_vsdb_mode(cfg);
- for_each_ndev(cfg, iter, next) {
- if (iter->pm_restore && iter->pm) {
- WL_DBG(("%s:restoring power save %s\n",
- iter->ndev->name, (iter->pm ? "enabled" : "disabled")));
- if (iter->pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0)
- iter->pm = dhd_conf_get_pm(dhd);
- err = wldev_ioctl(iter->ndev,
- WLC_SET_PM, &iter->pm, sizeof(iter->pm), true);
- if (unlikely(err)) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
- else
- WL_ERR(("%s:error(%d)\n", iter->ndev->name, err));
- break;
- }
- iter->pm_restore = 0;
- wl_cfg80211_update_power_mode(iter->ndev);
+ if (primary_dev == _net_info->ndev) {
+ pm = PM_FAST;
+ if ((err = wldev_ioctl(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm), true)) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
+
+ wl_cfg80211_update_power_mode(_net_info->ndev);
}
}
+
wl_cfg80211_concurrent_roam(cfg, 0);
#if defined(WLTDLS)
- if (!cfg->vsdb_mode) {
+ if (!wl_cfg80211_is_concurrent_mode()) {
err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
}
#endif /* defined(WLTDLS) */
+
+#ifdef DISABLE_FRAMEBURST_VSDB
+#ifdef USE_WFA_CERT_CONF
+ if (g_frameburst)
+#endif /* USE_WFA_CERT_CONF */
+ {
+ int frameburst = 1;
+ if (wldev_ioctl(primary_dev, WLC_SET_FAKEFRAG, &frameburst,
+ sizeof(frameburst), true) != 0) {
+ WL_DBG(("frameburst set error\n"));
+ }
+ WL_DBG(("Frameburst Enabled\n"));
+ }
+#endif /* DISABLE_FRAMEBURST_VSDB */
}
return err;
}
return err;
}
+#ifdef DHD_LOSSLESS_ROAMING
+static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+ int err = 0;
+
+ /* Init roam timer */
+ init_timer(&cfg->roam_timeout);
+ cfg->roam_timeout.data = (unsigned long) cfg;
+ cfg->roam_timeout.function = wl_roam_timeout;
+
+ return err;
+}
+#endif /* DHD_LOSSLESS_ROAMING */
+
static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
{
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
#endif
cfg->roamoff_on_concurrent = true;
cfg->disable_roam_event = false;
+ cfg->cfgdev_bssidx = -1;
/* register interested state */
set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
spin_lock_init(&cfg->cfgdrv_lock);
wl_init_event_handler(cfg);
mutex_init(&cfg->usr_sync);
mutex_init(&cfg->event_sync);
+ mutex_init(&cfg->scan_complete);
err = wl_init_scan(cfg);
if (err)
return err;
+#ifdef DHD_LOSSLESS_ROAMING
+ err = wl_init_roam_timeout(cfg);
+ if (err) {
+ return err;
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
wl_init_conf(cfg->conf);
wl_init_prof(cfg, ndev);
wl_link_down(cfg);
wl_flush_eq(cfg);
wl_link_down(cfg);
del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ del_timer_sync(&cfg->roam_timeout);
+#endif
wl_deinit_priv_mem(cfg);
if (wl_cfg80211_netdev_notifier_registered) {
wl_cfg80211_netdev_notifier_registered = FALSE;
}
}
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
-struct net_device *wl0dot1_dev;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
-
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \
- defined(P2PONEINT)
+#if defined(WL_ENABLE_P2P_IF)
static s32 wl_cfg80211_attach_p2p(void)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
return -ENODEV;
}
-#if defined(CUSTOMER_HW20) && defined(WLANAUDIO)
- wl0dot1_dev = cfg->p2p_net;
-#endif /* CUSTOMER_HW20 && WLANAUDIO */
-
return 0;
}
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || P2PONEINT */
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
static s32 wl_cfg80211_detach_p2p(void)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
} else
wdev = cfg->p2p_wdev;
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
if (!wdev) {
WL_ERR(("Invalid Ptr\n"));
return -EINVAL;
}
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
wl_cfgp2p_unregister_ndev(cfg);
cfg->p2p_wdev = NULL;
cfg->p2p_net = NULL;
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
- WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev));
+ WL_DBG(("Freeing 0x%p \n", wdev));
kfree(wdev);
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
return 0;
}
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#endif
s32 wl_cfg80211_attach_post(struct net_device *ndev)
{
if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
goto fail;
-#ifdef P2PONEINT
- if (!cfg->p2p_net) {
- cfg->p2p_supported = true;
-
- err = wl_cfg80211_attach_p2p();
- if (err)
- goto fail;
-
- cfg->p2p_supported = true;
- }
-#endif
-#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT)
+#if defined(WL_ENABLE_P2P_IF)
if (cfg->p2p_net) {
/* Update MAC addr for p2p0 interface here. */
memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
return -ENODEV;
}
#endif /* WL_ENABLE_P2P_IF */
-#ifndef P2PONEINT
cfg->p2p_supported = true;
-#endif
} else if (ret == 0) {
if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
goto fail;
cfg->wdev = wdev;
cfg->pub = context;
INIT_LIST_HEAD(&cfg->net_list);
+ spin_lock_init(&cfg->net_list_sync);
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
cfg->state_notifier = wl_notifier_change_state;
- err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
+ err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE, 0);
if (err) {
WL_ERR(("Failed to alloc net_info (%d)\n", err));
goto cfg80211_attach_out;
cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
if (!cfg->btcoex_info)
goto cfg80211_attach_out;
-#endif
-
+#endif
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ cfg->random_mac_enabled = FALSE;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
g_bcm_cfg = cfg;
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
-#ifndef P2PONEINT
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+ wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if defined(WL_ENABLE_P2P_IF)
err = wl_cfg80211_attach_p2p();
if (err)
goto cfg80211_attach_out;
-#endif
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#endif
+
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ mutex_init(&cfg->pm_sync);
return err;
WL_TRACE(("In\n"));
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
#if defined(COEX_DHCP)
wl_cfg80211_btcoex_deinit();
if (timer_pending(&cfg->scan_timeout))
del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
#if defined(WL_CFG80211_P2P_DEV_IF)
- wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+ if (cfg->p2p_wdev)
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+#if defined(WL_ENABLE_P2P_IF)
wl_cfg80211_detach_p2p();
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#endif
wl_cfg80211_ibss_vsie_free(cfg);
+ wl_cfg80211_clear_mgmt_vndr_ies(cfg);
wl_deinit_priv(cfg);
g_bcm_cfg = NULL;
wl_cfg80211_clear_parent_dev();
wl_free_wdev(cfg);
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
- wl_free_rssi_cache(&g_rssi2_cache_ctrl);
+ wl_free_rssi_cache(&g_connected_rssi_cache_ctrl);
#endif
#if defined(BSSCACHE)
wl_release_bss_cache_ctrl(&g_bss_cache_ctrl);
static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
{
- if (cfg->event_tsk.thr_pid >= 0) {
- DHD_OS_WAKE_LOCK(cfg->pub);
- up(&cfg->event_tsk.sema);
- }
-}
-
-#if defined(P2PONEINT) || defined(WL_ENABLE_P2P_IF)
-static int wl_is_p2p_event(struct wl_event_q *e)
-{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
-
- switch (e->etype) {
- case WLC_E_IF:
- WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
-
- (void)schedule_timeout(20);
-
- if (wl_get_p2p_status(cfg, IF_ADDING) ||
- wl_get_p2p_status(cfg, IF_DELETING) ||
- wl_get_p2p_status(cfg, IF_CHANGING) ||
- wl_get_p2p_status(cfg, IF_CHANGED)) {
- WL_TRACE(("P2P Event on Primary I/F (ifidx:%d)."
- " Sent it to p2p0 \n", e->emsg.ifidx));
- return TRUE;
- } else {
- WL_TRACE(("Event is Not p2p event return False \n"));
- return FALSE;
- }
-
- case WLC_E_P2P_PROBREQ_MSG:
- case WLC_E_P2P_DISC_LISTEN_COMPLETE:
- case WLC_E_ACTION_FRAME_RX:
- case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
- case WLC_E_ACTION_FRAME_COMPLETE:
-
- if (e->emsg.ifidx != 0) {
- WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return FALSE;
- } else {
- WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return TRUE;
- }
- break;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- default:
- WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return FALSE;
+ if (dhd->up && (cfg->event_tsk.thr_pid >= 0)) {
+ up(&cfg->event_tsk.sema);
}
}
-#endif
static s32 wl_event_handler(void *data)
{
struct bcm_cfg80211 *cfg = NULL;
struct wl_event_q *e;
tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- bcm_struct_cfgdev *cfgdev = NULL;
+ struct wireless_dev *wdev = NULL;
cfg = (struct bcm_cfg80211 *)tsk->parent;
while (down_interruptible (&tsk->sema) == 0) {
SMP_RD_BARRIER_DEPENDS();
- if (tsk->terminated)
+ if (tsk->terminated) {
break;
+ }
while ((e = wl_deq_event(cfg))) {
- WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
- /* All P2P device address related events comes on primary interface since
- * there is no corresponding bsscfg for P2P interface. Map it to p2p0
- * interface.
- */
-#if defined(WL_CFG80211_P2P_DEV_IF)
-#ifdef P2PONEINT
- if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_wdev))
-#else
- if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev))
-#endif
- {
- cfgdev = bcmcfg_to_p2p_wdev(cfg);
- } else {
- struct net_device *ndev = NULL;
-
- ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
- if (ndev)
- cfgdev = ndev_to_wdev(ndev);
-#ifdef P2PONEINT
- else if (e->etype == WLC_E_IF) {
- wl_put_event(e);
- DHD_OS_WAKE_UNLOCK(cfg->pub);
- continue;
- }
+ WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n",
+ e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
- if (cfgdev == NULL) {
- if (e->etype == WLC_E_IF)
- cfgdev = bcmcfg_to_prmry_wdev(cfg);
- else {
- cfgdev = ndev_to_wdev(wl_to_p2p_bss_ndev(cfg,
- P2PAPI_BSSCFG_CONNECTION));
- }
- }
-#endif
- }
-#elif defined(WL_ENABLE_P2P_IF)
- // terence 20150116: fix for p2p connection in kernel 3.4
-// if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
- if ((wl_is_p2p_event(e) == TRUE) && (cfg->p2p_net)) {
- cfgdev = cfg->p2p_net;
- } else {
- cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
- e->emsg.ifidx);
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
}
-#endif /* WL_CFG80211_P2P_DEV_IF */
- if (!cfgdev) {
-#if defined(WL_CFG80211_P2P_DEV_IF)
- cfgdev = bcmcfg_to_prmry_wdev(cfg);
-#else
- cfgdev = bcmcfg_to_prmry_ndev(cfg);
-#endif /* WL_CFG80211_P2P_DEV_IF */
- }
- if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
- cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata);
+ if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) {
+ /* For WLC_E_IF would be handled by wl_host_event */
+ if (e->etype != WLC_E_IF)
+ WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+ " Ignoring event.\n", e->emsg.bsscfgidx));
+ } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR((": BUS is DOWN.\n"));
+ } else {
+#ifdef DHD_IFDEBUG
+ if (cfg->iface_cnt == 0) {
+ wl_dump_ifinfo(cfg);
+ }
+#endif
+ cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
+ &e->emsg, e->edata);
+ }
} else {
WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
}
+fail:
wl_put_event(e);
+ DHD_EVENT_WAKE_UNLOCK(cfg->pub);
}
- DHD_OS_WAKE_UNLOCK(cfg->pub);
}
printf("%s: was terminated\n", __FUNCTION__);
complete_and_exit(&tsk->completed, 0);
{
u32 event_type = ntoh32(e->event_type);
struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_info *netinfo;
#if (WL_DBG_LEVEL > 0)
s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
#endif /* (WL_DBG_LEVEL > 0) */
+ if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) {
+ WL_ERR(("Stale event ignored\n"));
+ return;
+ }
+
+ if (cfg->event_tsk.thr_pid == -1) {
+ WL_ERR(("Event handler is not created\n"));
+ return;
+ }
+
if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
WL_ERR(("during IF change, ignore event %d\n", event_type));
return;
}
- if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) {
- if ((cfg->bss_cfgdev) &&
- (ndev == cfgdev_to_wlc_ndev(cfg->bss_cfgdev, cfg))) {
- /* Event is corresponding to the secondary STA interface */
- WL_DBG(("DualSta event (%d), proceed to enqueue it \n", event_type));
- } else if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) &&
-#if defined(WL_ENABLE_P2P_IF)
- (ndev != (cfg->p2p_net ? cfg->p2p_net :
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) &&
-#else
- (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) &&
-#endif /* WL_ENABLE_P2P_IF */
- TRUE) {
- WL_ERR(("ignore event %d, not interested\n", event_type));
- return;
- }
+#ifdef DHD_IFDEBUG
+ if (event_type != WLC_E_ESCAN_RESULT) {
+ WL_ERR(("Event_type %d , status : %d, reason : %d, bssidx:%d \n",
+ event_type, ntoh32(e->status), ntoh32(e->reason), e->bsscfgidx));
+ }
+#endif
+ netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx);
+ if (!netinfo) {
+ /* Since there is no netinfo entry, the netdev was not created
+ * via the cfg80211 interface, so the event is not of interest
+ * to the cfg80211 layer.
+ */
+ WL_ERR(("ignore event %d, not interested\n", event_type));
+ return;
}
if (event_type == WLC_E_PFN_NET_FOUND) {
WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
}
- if (likely(!wl_enq_event(cfg, ndev, event_type, e, data)))
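+ /* Hold the event wake lock until the handler thread consumes the queued event; release it here only if enqueueing fails. */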
+ DHD_EVENT_WAKE_LOCK(cfg->pub);
+ if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
wl_wakeup_event(cfg);
+ } else {
+ DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+ }
}
static void wl_init_eq(struct bcm_cfg80211 *cfg)
unsigned long flags;
flags = wl_lock_eq(cfg);
- while (!list_empty(&cfg->eq_list)) {
- e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+ while (!list_empty_careful(&cfg->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
list_del(&e->eq_list);
kfree(e);
}
flags = wl_lock_eq(cfg);
if (likely(!list_empty(&cfg->eq_list))) {
- e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
+ BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
list_del(&e->eq_list);
}
wl_unlock_eq(cfg, flags);
index = j;
else
index = *n_cnt;
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ continue;
if (index < array_size) {
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
band_chan_arr[index].center_freq =
ieee80211_channel_to_frequency(channel);
#else
ieee80211_channel_to_frequency(channel, band);
#endif
band_chan_arr[index].hw_value = channel;
+ WL_DBG(("channel = %d\n", channel));
if (CHSPEC_IS40(c) && ht40_allowed) {
/* assuming the order is HT20, HT40 Upper,
s32 err = 0;
s32 index = 0;
s32 nmode = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ u32 j = 0;
+ s32 vhtmode = 0;
+ s32 txstreams = 0;
+ s32 rxstreams = 0;
+ s32 ldpc_cap = 0;
+ s32 stbc_rx = 0;
+ s32 stbc_tx = 0;
+ s32 txbf_bfe_cap = 0;
+ s32 txbf_bfr_cap = 0;
+#endif
bool rollback_lock = false;
s32 bw_cap = 0;
s32 cur_band = -1;
err = wldev_iovar_getint(dev, "nmode", &nmode);
if (unlikely(err)) {
WL_ERR(("error reading nmode (%d)\n", err));
- } else {
- /* For nmodeonly check bw cap */
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ err = wldev_iovar_getint(dev, "vhtmode", &vhtmode);
+ if (unlikely(err)) {
+ WL_ERR(("error reading vhtmode (%d)\n", err));
+ }
+
+ if (vhtmode) {
+ err = wldev_iovar_getint(dev, "txstreams", &txstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "rxstreams", &rxstreams);
+ if (unlikely(err)) {
+ WL_ERR(("error reading rxstreams (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "ldpc_cap", &ldpc_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading ldpc_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_rx", &stbc_rx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_rx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "stbc_tx", &stbc_tx);
+ if (unlikely(err)) {
+ WL_ERR(("error reading stbc_tx (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfe_cap", &txbf_bfe_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfe_cap (%d)\n", err));
+ }
+
+ err = wldev_iovar_getint(dev, "txbf_bfr_cap", &txbf_bfr_cap);
+ if (unlikely(err)) {
+ WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
+ }
+ }
+#endif
+
+ /* For nmode and vhtmode check bw cap */
+ if (nmode ||
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ vhtmode ||
+#endif
+ 0) {
err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
if (unlikely(err)) {
WL_ERR(("error get mimo_bw_cap (%d)\n", err));
bands[IEEE80211_BAND_5GHZ] =
&__wl_band_5ghz_a;
index = IEEE80211_BAND_5GHZ;
- if (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G)
+ if (nmode && (bw_cap == WLC_N_BW_40ALL || bw_cap == WLC_N_BW_20IN2G_40IN5G))
bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ /* VHT capabilities. */
+ if (vhtmode) {
+ /* Supported */
+ bands[index]->vht_cap.vht_supported = TRUE;
+
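+ /* Advertise MCS 0-9 for each supported TX/RX spatial stream; mark remaining streams as unsupported. */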
+ for (j = 1; j <= VHT_CAP_MCS_MAP_NSS_MAX; j++) {
+ /* TX stream rates. */
+ if (j <= txstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map);
+ }
+
+ /* RX stream rates. */
+ if (j <= rxstreams) {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_0_9,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+ } else {
+ VHT_MCS_MAP_SET_MCS_PER_SS(j, VHT_CAP_MCS_MAP_NONE,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map);
+ }
+ }
+
+
+ /* Capabilities */
+ /* 80 MHz is mandatory */
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_80;
+
+ if (WL_BW_CAP_160MHZ(bw_cap)) {
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160;
+ }
+
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+
+ if (ldpc_cap)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_RXLDPC;
+
+ if (stbc_tx)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_TXSTBC;
+
+ if (stbc_rx)
+ bands[index]->vht_cap.cap |=
+ (stbc_rx << VHT_CAP_INFO_RX_STBC_SHIFT);
+
+ if (txbf_bfe_cap)
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+
+ if (txbf_bfr_cap) {
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+ }
+
+ if (txbf_bfe_cap || txbf_bfr_cap) {
+ bands[index]->vht_cap.cap |=
+ (2 << VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT);
+ bands[index]->vht_cap.cap |=
+ ((txstreams - 1) <<
+ VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT);
+ bands[index]->vht_cap.cap |=
+ IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB;
+ }
+
+ /* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
+ bands[index]->vht_cap.cap |=
+ (7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
+ WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x "
+ "vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
+ __FUNCTION__, index,
+ bands[index]->vht_cap.vht_supported,
+ bands[index]->vht_cap.cap,
+ bands[index]->vht_cap.vht_mcs.rx_mcs_map,
+ bands[index]->vht_cap.vht_mcs.tx_mcs_map));
+ }
+#endif
}
else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
bands[IEEE80211_BAND_2GHZ] =
static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
-#endif /* WL_HOST_BAND_MGMT */
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
WL_DBG(("In\n"));
+ err = wl_create_event_handler(cfg);
+ if (err) {
+ WL_ERR(("wl_create_event_handler failed\n"));
+ return err;
+ }
+ wl_init_event_handler(cfg);
+
err = dhd_config_dongle(cfg);
if (unlikely(err))
return err;
return err;
}
}
+
+ err = wl_init_scan(cfg);
+ if (err) {
+ WL_ERR(("wl_init_scan failed\n"));
+ return err;
+ }
err = wl_update_wiphybands(cfg, true);
if (unlikely(err)) {
WL_ERR(("wl_update_wiphybands failed\n"));
return err;
}
}
+#ifdef DHD_LOSSLESS_ROAMING
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
err = dhd_monitor_init(cfg->pub);
-#ifdef WL_HOST_BAND_MGMT
- /* By default the curr_band is initialized to BAND_AUTO */
- if ((ret = wl_cfg80211_set_band(ndev, WLC_BAND_AUTO)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* Don't fail the initialization, lets just
- * fall back to the original method
- */
- WL_ERR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar not supported \n"));
- } else {
- WL_ERR(("roam_band failed. ret=%d", ret));
- err = -1;
- }
- }
-#endif /* WL_HOST_BAND_MGMT */
-#if defined(DHCP_SCAN_SUPPRESS)
- /* wlan scan_supp timer and work thread info */
- init_timer(&cfg->scan_supp_timer);
- cfg->scan_supp_timer.data = (ulong)cfg;
- cfg->scan_supp_timer.function = wl_cfg80211_scan_supp_timerfunc;
- INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler);
-#endif /* DHCP_SCAN_SUPPRESS */
- INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
wl_set_drv_status(cfg, READY, ndev);
return err;
}
unsigned long flags;
struct net_info *iter, *next;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
-#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
- defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
struct net_device *p2p_net = cfg->p2p_net;
-#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
- u32 bssidx = 0;
+#endif
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-#endif
+#endif
#endif /* PROP_TXSTATUS_VSDB */
WL_DBG(("In\n"));
/* Delete pm_enable_work */
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
#ifdef WL_NAN
- wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, NULL);
+ wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, 0, NULL);
#endif /* WL_NAN */
if (cfg->p2p_supported) {
wl_clr_p2p_status(cfg, GO_NEG_PHASE);
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
- if (cfg->p2p->vif_created) {
+ if (wl_cfgp2p_vif_created(cfg)) {
bool enabled = false;
dhd_wlfc_get_enable(dhd, &enabled);
if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
#endif /* PROP_TXSTATUS_VSDB */
}
-#if defined(DHCP_SCAN_SUPPRESS)
- /* Force clear of scan_suppress */
- if (cfg->scan_suppressed)
- wl_cfg80211_scan_suppress(ndev, 0);
- if (timer_pending(&cfg->scan_supp_timer))
- del_timer_sync(&cfg->scan_supp_timer);
- cancel_work_sync(&cfg->wlan_work);
-#endif /* DHCP_SCAN_SUPPRESS */
/* If primary BSS is operational (for e.g SoftAP), bring it down */
- if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) &&
- wl_cfgp2p_bss_isup(ndev, bssidx)) {
- if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0)
+ if (wl_cfgp2p_bss_isup(ndev, 0)) {
+ if (wl_cfgp2p_bss(cfg, ndev, 0, 0) < 0)
WL_ERR(("BSS down failed \n"));
}
/* Check if cfg80211 interface is already down */
if (!wl_get_drv_status(cfg, READY, ndev))
return err; /* it is even not ready */
- for_each_ndev(cfg, iter, next)
- wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
-#ifdef WL_SDO
- wl_cfg80211_sdo_deinit(cfg);
+ /* clear all the security setting on primary Interface */
+ wl_cfg80211_clear_security(cfg);
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) /* p2p discovery iface is null */
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
#endif
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
+
spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
if (cfg->scan_request) {
cfg80211_scan_done(cfg->scan_request, true);
cfg->scan_request = NULL;
}
spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface ndev ptr could be null */
+ if (iter->ndev == NULL)
+ continue;
wl_clr_drv_status(cfg, READY, iter->ndev);
wl_clr_drv_status(cfg, SCANNING, iter->ndev);
wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
NL80211_IFTYPE_STATION;
-#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
- defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
if (p2p_net)
dev_close(p2p_net);
-#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
+#endif
+
+ /* Avoid deadlock from wl_cfg80211_down */
+ mutex_unlock(&cfg->usr_sync);
+ wl_destroy_event_handler(cfg);
+ mutex_lock(&cfg->usr_sync);
wl_flush_eq(cfg);
wl_link_down(cfg);
- if (cfg->p2p_supported)
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
wl_cfgp2p_down(cfg);
- if (cfg->ap_info) {
- kfree(cfg->ap_info->wpa_ie);
- kfree(cfg->ap_info->rsn_ie);
- kfree(cfg->ap_info->wps_ie);
- kfree(cfg->ap_info);
- cfg->ap_info = NULL;
}
+
+ if (timer_pending(&cfg->scan_timeout)) {
+ del_timer_sync(&cfg->scan_timeout);
+ }
+
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+
dhd_monitor_uninit();
#ifdef WLAIBSS_MCHAN
bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
#endif /* WLAIBSS_MCHAN */
-#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
/* Clean up if not removed already */
if (cfg->bss_cfgdev)
wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev);
-#endif /* defined (DUAL_STA) || defined (DUAL_STA_STATIC_IF) */
+#endif /* defined (WL_VIRTUAL_APSTA) || defined (DUAL_STA_STATIC_IF) */
+
+#ifdef WL11U
+ /* Clear interworking element. */
+ if (cfg->wl11u) {
+ cfg->wl11u = FALSE;
+ cfg->iw_ie_len = 0;
+ memset(cfg->iw_ie, 0, IW_IES_MAX_BUF_LEN);
+ }
+#endif /* WL11U */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ cfg->disable_roam_event = false;
DNGL_FUNC(dhd_cfg80211_down, (cfg));
+#ifdef DHD_IFDEBUG
+ /* Printout all netinfo entries */
+ wl_probe_wdev_all(cfg);
+#endif /* DHD_IFDEBUG */
+
return err;
}
s32 err = 0;
int val = 1;
dhd_pub_t *dhd;
+#ifdef DISABLE_PM_BCNRX
+ s32 interr = 0;
+ uint param = 0;
+ s8 iovbuf[WLC_IOCTL_SMLEN];
+#endif /* DISABLE_PM_BCNRX */
(void)para;
WL_DBG(("In\n"));
dhd = (dhd_pub_t *)(cfg->pub);
if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
- if (unlikely(err))
+ if (unlikely(err)) {
+ mutex_unlock(&cfg->usr_sync);
return err;
+ }
}
-#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- if (dhd->fw_4way_handshake)
- cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
-#endif
err = __wl_cfg80211_up(cfg);
if (unlikely(err))
WL_ERR(("__wl_cfg80211_up failed\n"));
+
+ /* IOVAR configurations with 'up' condition */
+#ifdef DISABLE_PM_BCNRX
+ bcm_mkiovar("pm_bcnrx", (char *)¶m, 4, iovbuf, sizeof(iovbuf));
+ interr = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_SET_VAR, iovbuf, sizeof(iovbuf), true);
+ if (unlikely(interr))
+ WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
+#endif /* DISABLE_PM_BCNRX */
+
mutex_unlock(&cfg->usr_sync);
#ifdef WLAIBSS_MCHAN
#endif /* WLAIBSS_MCHAN */
#ifdef DUAL_STA_STATIC_IF
-#ifdef DUAL_STA
-#error "Both DUAL_STA and DUAL_STA_STATIC_IF can't be enabled together"
+#ifdef WL_VIRTUAL_APSTA
+#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together"
#endif
/* Static Interface support is currently supported only for STA only builds (without P2P) */
wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
int wl_cfg80211_hang(struct net_device *dev, u16 reason)
{
struct bcm_cfg80211 *cfg;
+ dhd_pub_t *dhd;
+#if defined(SOFTAP_SEND_HANGEVT)
+ /* specifc mac address used for hang event */
+ uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
+#endif /* SOFTAP_SEND_HANGEVT */
+ if (!g_bcm_cfg) {
+ return BCME_ERROR;
+ }
+
cfg = g_bcm_cfg;
+ dhd = (dhd_pub_t *)(cfg->pub);
+
+#ifdef DHD_USE_EXTENDED_HANG_REASON
+ if (dhd->hang_reason != 0) {
+ reason = dhd->hang_reason;
+ }
+#endif /* DHD_USE_EXTENDED_HANG_REASON */
- WL_ERR(("In : chip crash eventing\n"));
- wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
- cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
+ WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+#if defined(SOFTAP_SEND_HANGEVT)
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC);
+ } else
+#endif /* SOFTAP_SEND_HANGEVT */
+ {
+ CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
+ }
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
#endif
static s32
wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data, s32 item)
+ const wl_event_msg_t *e, const void *data, s32 item)
{
s32 err = 0;
- struct wlc_ssid *ssid;
+ const struct wlc_ssid *ssid;
unsigned long flags;
struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SSID:
- ssid = (wlc_ssid_t *) data;
+ ssid = (const wlc_ssid_t *) data;
memset(profile->ssid.SSID, 0,
sizeof(profile->ssid.SSID));
memcpy(profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
memcpy(&profile->sec, data, sizeof(profile->sec));
break;
case WL_PROF_ACT:
- profile->active = *(bool *)data;
+ profile->active = *(const bool *)data;
break;
case WL_PROF_BEACONINT:
- profile->beacon_interval = *(u16 *)data;
+ profile->beacon_interval = *(const u16 *)data;
break;
case WL_PROF_DTIMPERIOD:
- profile->dtim_period = *(u8 *)data;
+ profile->dtim_period = *(const u8 *)data;
break;
case WL_PROF_CHAN:
- profile->channel = *(u32*)data;
+ profile->channel = *(const u32*)data;
break;
default:
err = -EOPNOTSUPP;
return err;
}
-static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam)
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+ bool roam)
{
u8 *ssidie;
+ /* cfg80211_find_ie defined in kernel returning const u8 */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
if (!ssidie)
return;
if (ssidie[1] != bi->SSID_len) {
s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
- struct ether_addr p2pif_addr;
struct ether_addr primary_mac;
if (!cfg->p2p)
return -1;
if (!p2p_is_on(cfg)) {
get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
} else {
- memcpy(p2pdev_addr->octet,
- cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+ memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
+ ETHER_ADDR_LEN);
}
-
return 0;
}
s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
}
+
+s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg;
+ cfg = g_bcm_cfg;
+
+ return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len);
+}
+
#ifdef P2PLISTEN_AP_SAMECHN
s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
{
{
int freq = 0;
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
freq = ieee80211_channel_to_frequency(channel);
#else
{
return freq;
}
-#ifdef WL_SDO
-#define MAX_QR_LEN NLMSG_GOODSIZE
-
-typedef struct wl_cfg80211_dev_info {
- u16 band;
- u16 freq;
- s16 rssi;
- u16 ie_len;
- u8 bssid[ETH_ALEN];
-} wl_cfg80211_dev_info_t;
+#ifdef WLTDLS
static s32
-wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- int err = 0;
- u32 event = ntoh32(e->event_type);
- wl_cfg80211_dev_info_t info;
- struct wl_bss_info *bi = NULL;
- struct net_device *ndev = NULL;
- u8 *buf = NULL;
- u32 buflen = 0;
- u16 channel = 0;
- wl_escan_result_t *escan_result;
-
- WL_SD(("Enter. type:%d \n", event));
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data) {
- if ((event != WLC_E_P2PO_ADD_DEVICE) && (event != WLC_E_P2PO_DEL_DEVICE)) {
- WL_ERR(("Unknown Event\n"));
- return -EINVAL;
- }
+ struct net_device *ndev = NULL;
+ u32 reason = ntoh32(e->reason);
+ s8 *msg = NULL;
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&cfg->usr_sync);
- if (event == WLC_E_P2PO_DEL_DEVICE) {
- WL_SD(("DEV_LOST MAC:"MACDBG" \n", MAC2STRDBG(e->addr.octet)));
- err = wl_genl_send_msg(ndev, event, (u8 *)e->addr.octet, ETH_ALEN, 0, 0);
- } else {
-
- escan_result = (wl_escan_result_t *) data;
-
- if (dtoh16(escan_result->bss_count) != 1) {
- WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
- err = -EINVAL;
- goto exit;
+ switch (reason) {
+ case WLC_E_TDLS_PEER_DISCOVERED :
+ msg = " TDLS PEER DISCOVERD ";
+ break;
+ case WLC_E_TDLS_PEER_CONNECTED :
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
}
-
- bi = escan_result->bss_info;
- buflen = dtoh32(bi->length);
- if (unlikely(buflen > WL_BSS_INFO_MAX)) {
- WL_DBG(("Beacon is larger than buffer. Discarding\n"));
- err = -EINVAL;
- goto exit;
+ msg = " TDLS PEER CONNECTED ";
+ break;
+ case WLC_E_TDLS_PEER_DISCONNECTED :
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_freq = 0;
}
-
- /* Update sub-header */
- bzero(&info, sizeof(wl_cfg80211_dev_info_t));
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
- info.freq = wl_cfg80211_channel_to_freq(channel);
- info.rssi = dtoh16(bi->RSSI);
-#if defined(RSSIOFFSET)
- info.rssi = wl_update_rssi_offset(ndev, info.rssi);
-#endif
-#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
- // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
- info->rssi = MIN(info->rssi, RSSI_MAXVAL);
-#endif
- memcpy(info.bssid, &bi->BSSID, ETH_ALEN);
- info.ie_len = buflen;
-
- WL_SD(("DEV_FOUND band:%x Freq:%d rssi:%x "MACDBG" \n",
- info.band, info.freq, info.rssi, MAC2STRDBG(info.bssid)));
-
- buf = ((u8 *) bi) + bi->ie_offset;
- err = wl_genl_send_msg(ndev, event, buf,
- buflen, (u8 *)&info, sizeof(wl_cfg80211_dev_info_t));
- }
-exit:
- mutex_unlock(&cfg->usr_sync);
- return err;
-}
-
-s32
-wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg)
-{
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-
- if (cfg->sdo) {
- WL_SD(("SDO already initialized\n"));
- return 0;
+ msg = "TDLS PEER DISCONNECTED ";
+ break;
}
-
- cfg->sdo = kzalloc(sizeof(sd_offload_t), kflags);
- if (!cfg->sdo) {
- WL_ERR(("malloc failed for SDO \n"));
- return -ENOMEM;
+ if (msg) {
+ WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
+ (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
}
+ return 0;
- return 0;
}
+#endif /* WLTDLS */
-s32
-wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
+static s32
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *data, size_t len)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *data, size_t len)
+#else
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
+ size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
{
- s32 bssidx;
- int ret = 0;
- int sdo_pause = 0;
- if (!cfg || !cfg->p2p) {
- WL_ERR(("Wl %p or cfg->p2p %p is null\n",
- cfg, cfg ? cfg->p2p : 0));
- return 0;
- }
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_wfd_ie_iovar_t info;
+ memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ cfg = g_bcm_cfg;
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- if (!cfg->sdo) {
- WL_DBG(("SDO Not Initialized. Do nothing. \n"));
- return 0;
- }
- if (cfg->sdo->dd_state &&
- (ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
- "p2po_stop", (void*)&sdo_pause, sizeof(sdo_pause),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
- WL_ERR(("p2po_stop Failed :%d\n", ret));
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+ /* Some customer platforms back-ported this feature from kernel 3.15 to kernel 3.10,
+ * which would otherwise cause a build error
+ */
+ BCM_REFERENCE(peer_capability);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+
+ switch (action_code) {
+ /* We need to set TDLS Wifi Display IE to firmware
+ * using tdls_wfd_ie iovar
+ */
+ case WLAN_TDLS_SET_PROBE_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_PROBE_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_SETUP_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_IE_TX;
+ memcpy(&info.data, data, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_WFD_ENABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
+ goto out;
+ case WLAN_TDLS_SET_WFD_DISABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
+ goto out;
+ default:
+ WL_ERR(("Unsupported action code : %d\n", action_code));
+ goto out;
}
- kfree(cfg->sdo);
- cfg->sdo = NULL;
- WL_SD(("SDO Deinit Done \n"));
+ ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- return 0;
+ if (ret) {
+ WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ }
+out:
+#endif /* WLTDLS */
+ return ret;
}
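+/* Map nl80211 TDLS operations (discovery request, setup, teardown) to the
+ * firmware "tdls_endpoint" iovar, enabling TDLS in the dongle first.
+ */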
-s32
-wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+#endif
{
- wl_sd_listen_t sd_listen;
- int ret = 0;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
-
- WL_DBG(("Enter\n"));
-
- if (!cfg->sdo) {
- return -EINVAL;
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_iovar_t info;
+ dhd_pub_t *dhdp;
+ bool tdls_auto_mode = false;
+ cfg = g_bcm_cfg;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ memset(&info, 0, sizeof(tdls_iovar_t));
+ if (peer) {
+ memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+ } else {
+ return -1;
}
-
- if (dev == NULL)
- dev = bcmcfg_to_prmry_ndev(cfg);
-
- /* Disable back the ESCAN events for the offload */
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
-
- /* Resume according to the saved state */
- if (cfg->sdo->dd_state == WL_DD_STATE_SEARCH) {
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
- WL_ERR(("p2po_find Failed :%d\n", ret));
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* If the discovery request is broadcast then we need to set
+ * info.mode to Tunneled Probe Request
+ */
+ if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+ info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+ WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__));
+ } else {
+ info.mode = TDLS_MANUAL_EP_DISCOVERY;
}
- } else if (cfg->sdo->dd_state == WL_DD_STATE_LISTEN) {
- sd_listen.interval = cfg->sdo->sd_listen.interval;
- sd_listen.period = cfg->sdo->sd_listen.period;
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
- sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, NULL)) < 0) {
- WL_ERR(("p2po_listen Failed :%d\n", ret));
+ break;
+ case NL80211_TDLS_SETUP:
+ if (dhdp->tdls_mode == true) {
+ info.mode = TDLS_MANUAL_EP_CREATE;
+ tdls_auto_mode = false;
+ ret = dhd_tdls_enable(dev, false, tdls_auto_mode, NULL);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ tdls_auto_mode = true;
}
-
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ info.mode = TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ WL_ERR(("Unsupported operation : %d\n", oper));
+ goto out;
}
-
- /* p2po_stop clears of the eventmask for GAS. Set it back */
- wl_add_remove_eventmsg(dev, WLC_E_SERVICE_FOUND, true);
- wl_add_remove_eventmsg(dev, WLC_E_GAS_FRAGMENT_RX, true);
- wl_add_remove_eventmsg(dev, WLC_E_GAS_COMPLETE, true);
-
- WL_SD(("SDO Resumed \n"));
-
- return ret;
-}
-
-s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
-{
-
- int ret = 0;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- int sdo_pause = 1;
-
- WL_DBG(("Enter \n"));
-
- if (!cfg->sdo) {
- WL_ERR(("SDO not initialized \n"));
- return -EINVAL;
+ /* turn on TDLS */
+ ret = dhd_tdls_enable(dev, true, tdls_auto_mode, NULL);
+ if (ret < 0) {
+ return ret;
}
-
- if (dev == NULL)
- dev = bcmcfg_to_prmry_ndev(cfg);
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop",
- (void*)&sdo_pause, sizeof(sdo_pause),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_stop Failed :%d\n", ret));
+ if (info.mode) {
+ ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("tdls_endpoint error %d\n", ret));
+ }
}
-
- /* Enable back the ESCAN events for the SCAN */
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
-
- WL_SD(("SDO Paused \n"));
-
+out:
+#endif /* WLTDLS */
return ret;
}
+#endif
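+/* Apply WPS/P2P vendor IEs for the given frame type (beacon, probe response,
+ * association response), selecting the bsscfg index from the interface role:
+ * primary AP, P2P discovery, or virtual AP / P2P group interface.
+ */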
-static s32
-wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
+ enum wl_management_type type)
{
- u32 event = ntoh32(e->event_type);
- struct net_device *ndev = NULL;
- u8 *dst_mac = (u8 *)e->addr.octet;
- int ret = 0;
- wl_event_sd_t *gas = NULL;
- int status = ntoh32(e->status);
- sdo_event_t sdo_hdr;
- u32 data_len = ntoh32(e->datalen);
- u8 *data_ptr = NULL;
- u32 tot_len = 0;
-
-
- WL_SD(("Enter event_type:%d status:%d\n", event, status));
-
- if (!cfg->sdo) {
- WL_ERR(("SDO Not initialized \n"));
- return -EINVAL;
- }
+ struct bcm_cfg80211 *cfg;
+ s32 ret = 0;
+ struct ether_addr primary_mac;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ cfg = g_bcm_cfg;
- if (!(cfg->sdo->sd_state & WL_SD_SEARCH_SVC)) {
- /* We are not searching for any service. Drop
- * any bogus Event
+ if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
+ /* Vendor IEs should be set to FW
+ * after SoftAP interface is brought up
*/
- WL_ERR(("Bogus SDO Event. Do nothing.. \n"));
- return -1;
- }
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->usr_sync);
- if (event == WLC_E_SERVICE_FOUND) {
-
- if ((status != WLC_E_STATUS_SUCCESS) && (status != WLC_E_STATUS_PARTIAL)) {
- WL_ERR(("WLC_E_SERVICE_FOUND: unknown status \n"));
- goto exit;
+ WL_DBG(("Skipping set IE since AP is not up \n"));
+ goto exit;
+ } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Either stand alone AP case or P2P discovery */
+ if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ /* Stand alone AP case on primary interface */
+ WL_DBG(("Apply IEs for Primary AP Interface \n"));
+ bssidx = 0;
+ } else {
+ /* P2P Discovery case (p2p listen) */
+ if (!cfg->p2p->on) {
+ /* Turn on Discovery interface */
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+ p2p_on(cfg) = true;
+ ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("Enable discovery failed \n"));
+ goto exit;
+ }
+ }
+ WL_DBG(("Apply IEs for P2P Discovery Iface \n"));
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
}
+ } else {
+ /* Virtual AP/ P2P Group Interface */
+ WL_DBG(("Apply IEs for iface:%s\n", ndev->name));
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ }
- gas = (wl_event_sd_t *)data;
- if (!gas) {
- ret = -EINVAL;
- goto exit;
+ if (ndev != NULL) {
+ switch (type) {
+ case WL_BEACON:
+ pktflag = VNDR_IE_BEACON_FLAG;
+ break;
+ case WL_PROBE_RESP:
+ pktflag = VNDR_IE_PRBRSP_FLAG;
+ break;
+ case WL_ASSOC_RESP:
+ pktflag = VNDR_IE_ASSOCRSP_FLAG;
+ break;
}
-
- bzero(&sdo_hdr, sizeof(sdo_event_t));
- sdo_hdr.freq = wl_cfg80211_channel_to_freq(gas->channel);
- sdo_hdr.count = gas->count;
- memcpy(sdo_hdr.addr, dst_mac, ETH_ALEN);
- data_ptr = (char *)gas->tlv;
- tot_len = data_len - (sizeof(wl_event_sd_t) - sizeof(wl_sd_tlv_t));
-
- WL_SD(("WLC_E_SERVICE_FOUND "MACDBG" data_len:%d tlv_count:%d \n",
- MAC2STRDBG(dst_mac), data_len, sdo_hdr.count));
-
- if (tot_len > NLMSG_DEFAULT_SIZE) {
- WL_ERR(("size(%u) > %lu not supported \n", tot_len, NLMSG_DEFAULT_SIZE));
- ret = -ENOMEM;
- goto exit;
+ if (pktflag) {
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len);
}
-
- if (wl_genl_send_msg(ndev, event, data_ptr,
- tot_len, (u8 *)&sdo_hdr, sizeof(sdo_event_t)) < 0)
- WL_ERR(("Couldn't send up the NETLINK Event \n"));
- else
- WL_SD(("GAS event sent up \n"));
- } else {
- WL_ERR(("Unsupported Event: %d \n", event));
}
-
exit:
- mutex_unlock(&cfg->usr_sync);
return ret;
}
-s32 wl_cfg80211_DsdOffloadParseProto(char* proto_str, u8* proto)
-{
- s32 len = -1;
- int i = 0;
-
- for (i = 0; i < MAX_SDO_PROTO; i++) {
- if (strncmp(proto_str, wl_sdo_protos[i].str, strlen(wl_sdo_protos[i].str)) == 0) {
- WL_SD(("Matching proto (%d) found \n", wl_sdo_protos[i].val));
- *proto = wl_sdo_protos[i].val;
- len = strlen(wl_sdo_protos[i].str);
- break;
- }
- }
- return len;
-}
-
-/*
- * register to search for a UPnP service
- * ./DRIVER P2P_SD_REQ upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1
- *
- * Enable discovery
- * ./cfg p2po_find
-*/
-#define UPNP_QUERY_VER_OFFSET 3
-s32 wl_sd_handle_sd_req(
- struct net_device *dev,
- u8 * buf,
- int len)
+#ifdef WL_SUPPORT_AUTO_CHANNEL
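+/* Auto channel selection helpers: disable mpc and bring the interface up,
+ * abort any scan in progress, query the 2.4GHz and 5GHz chanspec lists,
+ * let the firmware pick the best channel per band, then restore state.
+ */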
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
{
+ u32 val = 0;
+ s32 ret = BCME_ERROR;
struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = 0;
- wl_sd_qr_t *sdreq;
- u8 proto = 0;
- s32 ret = 0;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- u32 tot_len = len + sizeof(wl_sd_qr_t);
- u16 version = 0;
-
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("find_idx failed\n"));
- return -EINVAL;
- }
- /* Check for the least arg length expected */
- if (!buf || (len < strlen("all"))) {
- WL_ERR(("Wrong Arg\n"));
- return -EINVAL;
- }
-
- if (tot_len > WLC_IOCTL_MAXLEN) {
- WL_ERR(("Length > %lu not supported \n", MAX_QR_LEN));
- return -EINVAL;
- }
+ struct wiphy *wiphy;
+ /* Disable mpc, to avoid automatic interface down. */
+ val = 0;
- sdreq = kzalloc(tot_len, kflags);
- if (!sdreq) {
- WL_ERR(("malloc failed\n"));
- return -ENOMEM;
+ wiphy = bcmcfg_to_wiphy(cfg);
+ if (wl_check_dongle_idle(wiphy) != TRUE) {
+ WL_ERR(("FW is busy to add interface"));
+ return ret;
}
-
- WL_SD(("%s Len: %d\n", buf, len));
- if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
- WL_ERR(("Unknown proto \n"));
- goto exit;
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+ sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+ goto done;
}
- sdreq->protocol = proto;
- buf += ret;
- buf++; /* skip the space */
- sdreq->transaction_id = simple_strtoul(buf, NULL, 16);
- WL_SD(("transaction_id:%d\n", sdreq->transaction_id));
- buf += sizeof(sdreq->transaction_id);
-
- if (*buf == '\0') {
- WL_SD(("No Query present. Proto:%d \n", proto));
- sdreq->query_len = 0;
- } else {
- buf++; /* skip the space */
- /* UPNP version needs to put as binary val */
- if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
- /* Extract UPNP version */
- version = simple_strtoul(buf, NULL, 16);
- buf = buf + UPNP_QUERY_VER_OFFSET;
- buf[0] = version;
- WL_SD(("Upnp version: 0x%x \n", version));
- }
+ /* Set interface up, explicitly. */
+ val = 1;
- len = strlen(buf);
- WL_SD(("Len after stripping proto: %d Query: %s\n", len, buf));
- /* copy the query part */
- memcpy(sdreq->qrbuf, buf, len);
- sdreq->query_len = len;
+ ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+ if (ret < 0) {
+ WL_ERR(("set interface up failed, error = %d\n", ret));
+ goto done;
}
- /* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
- WL_ERR(("cfgp2p_enable discovery failed"));
- goto exit;
+ /* Stop all scan explicitly, till auto channel selection complete. */
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ if (cfg->escan_info.ndev == NULL) {
+ ret = BCME_OK;
+ goto done;
}
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_req_resp", (void*)sdreq,
- tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("Find SVC Failed \n"));
- goto exit;
+ ret = wl_notify_escan_complete(cfg, ndev, true, true);
+ if (ret < 0) {
+ WL_ERR(("set scan abort failed, error = %d\n", ret));
+ goto done;
}
- cfg->sdo->sd_state |= WL_SD_SEARCH_SVC;
-
-exit:
- kfree(sdreq);
+done:
return ret;
}
-s32 wl_sd_handle_sd_cancel_req(
- struct net_device *dev,
- u8 *buf)
+static bool
+wl_cfg80211_valid_channel_p2p(int channel)
{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ bool valid = false;
- if (wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_cancel", NULL,
- 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync) < 0) {
- WL_ERR(("Cancel SD Failed \n"));
- return -EINVAL;
+ /* channel 1 to 14 */
+ if ((channel >= 1) && (channel <= 14)) {
+ valid = true;
+ }
+ /* channel 36 to 48 */
+ else if ((channel >= 36) && (channel <= 48)) {
+ valid = true;
+ }
+ /* channel 149 to 161 */
+ else if ((channel >= 149) && (channel <= 161)) {
+ valid = true;
+ }
+ else {
+ valid = false;
+ WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel));
}
- cfg->sdo->sd_state &= ~WL_SD_SEARCH_SVC;
-
- return 0;
+ return valid;
}
-/*
- * register a UPnP service to be discovered
- * ./cfg P2P_SD_SVC_ADD upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1 0x10uu
- * id:6859dede-8574-59ab-9332-123456789012::urn:schemas-upnporg:device:InternetGate
- * wayDevice:1
-*/
-s32 wl_sd_handle_sd_add_svc(
- struct net_device *dev,
- u8 * buf,
- int len)
+s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = 0;
- wl_sd_qr_t *sdreq;
- u8 proto = 0;
- u16 version = 0;
- s32 ret = 0;
- u8 *resp = NULL;
- u8 *query = NULL;
- u32 tot_len = len + sizeof(wl_sd_qr_t);
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ s32 ret = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = NULL;
+ wl_uint32_list_t *list = NULL;
+ chanspec_t chanspec = 0;
- if (!buf || !len)
- return -EINVAL;
+ memset(buf, 0, buflen);
- WL_SD(("%s Len: %d\n", buf, len));
- if (tot_len > WLC_IOCTL_MAXLEN) {
- WL_ERR(("Query-Resp length > %d not supported \n", WLC_IOCTL_MAXLEN));
- return -ENOMEM;
- }
+ cfg = g_bcm_cfg;
+ list = (wl_uint32_list_t *)buf;
+ list->count = htod32(WL_NUMCHANSPECS);
- sdreq = kzalloc(tot_len, kflags);
- if (!sdreq) {
- WL_ERR(("malloc failed\n"));
- return -ENOMEM;
- }
+ /* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+ chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
- if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
- WL_ERR(("Unknown Proto \n"));
- goto exit;
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
}
- sdreq->protocol = proto;
- buf += ret;
+ return ret;
+}
- if (*buf == '\0') {
- WL_ERR(("No Query Resp pair present \n"));
- ret = -EINVAL;
- goto exit;
- }
+s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+{
+ u32 channel = 0;
+ s32 ret = BCME_ERROR;
+ s32 i = 0;
+ s32 j = 0;
+ struct bcm_cfg80211 *cfg = NULL;
+ wl_uint32_list_t *list = NULL;
+ chanspec_t chanspec = 0;
- buf++; /* Skip the space */
- len = strlen(buf);
- query = strsep((char **)&buf, " ");
- if (!query || !buf) {
- WL_ERR(("No Query RESP Present\n"));
- ret = -EINVAL;
- goto exit;
- }
- resp = buf;
+ memset(buf, 0, buflen);
+
+ cfg = g_bcm_cfg;
+ list = (wl_uint32_list_t *)buf;
+ list->count = htod32(WL_NUMCHANSPECS);
+
+ /* Restrict channels to 5GHz, 20MHz BW, no SB. */
+ chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
- if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
- /* Extract UPNP version */
- version = simple_strtoul(query, NULL, 16);
- query = query + UPNP_QUERY_VER_OFFSET;
- resp = resp + UPNP_QUERY_VER_OFFSET;
- query[0] = version;
- resp[0] = version;
- WL_SD(("Upnp version: 0x%x \n", version));
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ goto done;
}
- sdreq->query_len = strlen(query);
- sdreq->response_len = strlen(buf);
- WL_SD(("query:%s len:%u \n", query, sdreq->query_len));
- WL_SD(("resp:%s len:%u \n", buf, sdreq->response_len));
+ /* Skip DFS and invalid P2P channels. */
+ for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+ chanspec = (chanspec_t) dtoh32(list->element[i]);
+ channel = CHSPEC_CHANNEL(chanspec);
- memcpy(sdreq->qrbuf, query, sdreq->query_len);
- memcpy((sdreq->qrbuf + sdreq->query_len), resp, sdreq->response_len);
+ ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+ if (ret < 0) {
+ WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+ goto done;
+ }
- /* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
- WL_ERR(("cfgp2p_enable discovery failed"));
- goto exit;
- }
+ if (CHANNEL_IS_RADAR(channel) ||
+ !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) {
+ continue;
+ } else {
+ list->element[j] = list->element[i];
+ }
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_addsvc", (void*)sdreq,
- tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("FW Failed in doing p2po_addsvc. RET:%d \n", ret));
- goto exit;
+ j++;
}
- cfg->sdo->sd_state |= WL_SD_ADV_SVC;
+ list->count = j;
-exit:
- kfree(sdreq);
+done:
return ret;
}
-s32 wl_sd_handle_sd_del_svc(
- struct net_device *dev,
- u8 * buf,
- int len)
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+ int *channel)
{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = 0;
- wl_sd_qr_t *sdreq;
- u8 proto = 0;
- s32 ret = 0;
- u32 tot_len = len + sizeof(wl_sd_qr_t);
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- u16 version = 0;
+ s32 ret = BCME_ERROR;
+ int chosen = 0;
+ int retry = 0;
+ uint chip;
- if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("find_idx failed\n"));
- return -EINVAL;
+ /* Start auto channel selection scan. */
+ ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
+ if (ret < 0) {
+ WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+ *channel = 0;
+ goto done;
}
- sdreq = (wl_sd_qr_t *)kzalloc(tot_len, kflags);
- if (!sdreq) {
- WL_ERR(("malloc failed\n"));
- ret = -ENOMEM;
- goto exit;
- }
+ /* Wait for auto channel selection, worst case possible delay is 5250ms. */
+ retry = CHAN_SEL_RETRY_COUNT;
- /* Check for the least arg length expected */
- if (buf && len >= strlen("all")) {
- WL_DBG(("%s Len: %d\n", buf, len));
- if ((ret = wl_cfg80211_DsdOffloadParseProto(buf, &proto)) < 0) {
- WL_ERR(("Unknown Proto \n"));
- goto exit;
- }
- sdreq->protocol = proto;
- buf += ret;
+ while (retry--) {
+ OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
- if (*buf == ' ') {
- /* Query present */
- buf++; /* Skip the space */
- /* UPNP version needs to put as binary val */
- if (sdreq->protocol == SVC_RPOTYPE_UPNP) {
- /* Extract UPNP version */
- version = simple_strtoul(buf, NULL, 16);
- buf = buf + UPNP_QUERY_VER_OFFSET;
- buf[0] = version;
- WL_SD(("Upnp version: 0x%x \n", version));
- }
- memcpy(sdreq->qrbuf, buf, strlen(buf));
- sdreq->query_len = strlen(buf);
- WL_SD(("Query to be deleted:%s len:%d\n", buf, sdreq->query_len));
+ ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
+ false);
+ if ((ret == 0) && (dtoh32(chosen) != 0)) {
+ chip = dhd_conf_get_chip(dhd_get_pub(ndev));
+ if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID) {
+ u32 chanspec = 0;
+ int ctl_chan;
+ chanspec = wl_chspec_driver_to_host(chosen);
+ printf("selected chanspec = 0x%x\n", chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ printf("selected ctl_chan = 0x%x\n", ctl_chan);
+ *channel = (u16)(ctl_chan & 0x00FF);
+ } else
+ *channel = (u16)(chosen & 0x00FF);
+ WL_INFORM(("selected channel = %d\n", *channel));
+ break;
}
- } else {
- /* ALL */
- proto = 0;
+ WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+ (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
}
- sdreq->protocol = proto;
- WL_SD(("Proto: %d \n", proto));
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_delsvc", (void*)sdreq,
- tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("FW Failed in doing sd_delsvc. ret=%d \n", ret));
- goto exit;
+ if (retry <= 0) {
+ WL_ERR(("failure, auto channel selection timed out\n"));
+ *channel = 0;
+ ret = BCME_ERROR;
}
- cfg->sdo->sd_state &= ~WL_SD_ADV_SVC;
-
-exit:
- if (sdreq)
- kfree(sdreq);
-
+done:
return ret;
}
-s32 wl_sd_handle_sd_stop_discovery(
- struct net_device *dev,
- u8 * buf,
- int len)
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
{
+ u32 val = 0;
+ s32 ret = BCME_ERROR;
struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- int ret = 0;
- int sdo_pause = 0;
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", (void*)&sdo_pause,
- sizeof(sdo_pause), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_stop Failed :%d\n", ret));
- return -1;
- }
- if (wldev_iovar_setint(dev, "mpc", 1) < 0) {
- /* Setting of MPC failed */
- WL_ERR(("mpc enabling back failed\n"));
- return -1;
- }
-
- /* clear the states */
- cfg->sdo->dd_state = WL_DD_STATE_IDLE;
- wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+ /* Clear scan stop driver status. */
+ wl_clr_drv_status(cfg, SCANNING, ndev);
- bzero(&cfg->sdo->sd_listen, sizeof(wl_sd_listen_t));
+ /* Enable mpc back to 1, irrespective of initial state. */
+ val = 1;
- /* Remove ESCAN from waking up the host if ofind/olisten is enabled */
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+ sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+ }
return ret;
}
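+/* Report best channels as three space-separated frequencies (MHz):
+ * best 2.4GHz, best 5GHz, and overall best (same as the 5GHz result).
+ */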
-s32 wl_sd_handle_sd_find(
- struct net_device *dev,
- u8 * buf,
- int len)
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- int ret = 0;
- s32 disc_bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- vndr_ie_setbuf_t *ie_setbuf;
- vndr_ie_t *vndrie;
- vndr_ie_buf_t *vndriebuf;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- int tot_len = 0;
- uint channel = 0;
+ int channel = 0, band, band_cur;
+ s32 ret = BCME_ERROR;
+ u8 *buf = NULL;
+ char *pos = cmd;
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
- u8 p2pie_buf[] = {
- 0x09, 0x02, 0x02, 0x00, 0x27, 0x0c, 0x06, 0x05, 0x00,
- 0x55, 0x53, 0x04, 0x51, 0x0b, 0x11, 0x05, 0x00, 0x55,
- 0x53, 0x04, 0x51, 0x0b
- };
+ memset(cmd, 0, total_len);
- /* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
- WL_ERR(("cfgp2p_enable discovery failed"));
- return -1;
+ buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ WL_ERR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
}
- if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
- buf += strlen("chan=");
- channel = simple_strtol(buf, NULL, 10);
- WL_SD(("listen_chan to be set:%d\n", channel));
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
- sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
- return -1;
- }
- }
+ /*
+ * Always use primary interface, irrespective of interface on which
+ * command came.
+ */
+ cfg = g_bcm_cfg;
+ ndev = bcmcfg_to_prmry_ndev(cfg);
- tot_len = sizeof(vndr_ie_setbuf_t) + sizeof(p2pie_buf);
- ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
- if (!ie_setbuf) {
- WL_ERR(("IE memory alloc failed\n"));
- return -ENOMEM;
+ /*
+ * Make sure that FW and driver are in right state to do auto channel
+ * selection scan.
+ */
+ ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+ goto done;
}
- /* Apply the p2p_ie for p2po_find */
- strcpy(ie_setbuf->cmd, "add");
-
- vndriebuf = &ie_setbuf->vndr_ie_buffer;
- vndriebuf->iecount = htod32(1);
- vndriebuf->vndr_ie_list[0].pktflag = htod32(16);
+ /* Best channel selection in 2.4GHz band. */
+ ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+ if (ret < 0) {
+ WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
- vndrie = &vndriebuf->vndr_ie_list[0].vndr_ie_data;
+ ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
- vndrie->id = (uchar) DOT11_MNG_PROPR_ID;
- vndrie->len = sizeof(p2pie_buf);
- memcpy(vndrie->oui, WFA_OUI, WFA_OUI_LEN);
- memcpy(vndrie->data, p2pie_buf, sizeof(p2pie_buf));
+ if (CHANNEL_IS_2G(channel)) {
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ } else {
+ WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+ channel = 0;
+ }
- /* Remove ESCAN from waking up the host if SDO is enabled */
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
+ pos += snprintf(pos, total_len, "%04d ", channel);
- if (wldev_iovar_setbuf_bsscfg(dev, "ie", (void*)ie_setbuf,
- tot_len, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- disc_bssidx, &cfg->ioctl_buf_sync) < 0) {
- WL_ERR(("p2p add_ie failed \n"));
- ret = -EINVAL;
- goto exit;
- } else
- WL_SD(("p2p add_ie applied successfully len:%d \n", tot_len));
+ // terence 20140120: fix for some chipsets that only return a 2.4GHz channel (4330b2/43341b0/4339a0)
+ ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false);
+ band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
+ ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+ if (ret < 0)
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
- if (wldev_iovar_setint(dev, "mpc", 0) < 0) {
- /* Setting of MPC failed */
- WL_ERR(("mpc disabling faild\n"));
- ret = -1;
- goto exit;
+ /* Best channel selection in 5GHz band. */
+ ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+ if (ret < 0) {
+ WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+ goto done;
}
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_find Failed :%d\n", ret));
- ret = -1;
+ ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+ goto done;
+ }
+
+ if (CHANNEL_IS_5G(channel)) {
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ } else {
+ WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+ channel = 0;
+ }
+
+ ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true);
+ if (ret < 0)
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
+
+ pos += snprintf(pos, total_len, "%04d ", channel);
+
+ /* Set overall best channel same as 5GHz best channel. */
+ pos += snprintf(pos, total_len, "%04d ", channel);
+
+done:
+ if (NULL != buf) {
+ kfree(buf);
+ }
+
+ /* Restore FW and driver back to normal state. */
+ ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+ }
+
+ printf("%s: channel %s\n", __FUNCTION__, cmd);
+
+ return (pos - cmd);
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
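+/* rfkill integration: the set_block handler only records the blocked state
+ * in cfg->rf_blocked; the radio itself is not touched here.
+ */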
+static const struct rfkill_ops wl_rfkill_ops = {
+ .set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+
+ WL_DBG(("Enter \n"));
+ WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+ if (!cfg)
+ return -EINVAL;
+
+ cfg->rf_blocked = blocked;
+
+ return 0;
+}
+
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+{
+ s32 err = 0;
+
+ WL_DBG(("Enter \n"));
+ if (!cfg)
+ return -EINVAL;
+ if (setup) {
+ cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+ wl_cfg80211_get_parent_dev(),
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rfkill_register(cfg->rfkill);
+
+ if (err)
+ rfkill_destroy(cfg->rfkill);
+ } else {
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rfkill_unregister(cfg->rfkill);
+ rfkill_destroy(cfg->rfkill);
+ }
+
+err_out:
+ return err;
+}
+
+#ifdef DEBUGFS_CFG80211
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+* to turn on SCAN and DBG log.
+* To turn off only SCAN, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+* To see the current debug level setting,
+* cat /sys/kernel/debug/dhd/debug_level
+*/
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+ char *params, *token, *colon;
+ uint i, tokens, log_on = 0;
+ memset(tbuf, 0, sizeof(tbuf));
+ memset(sublog, 0, sizeof(sublog));
+ if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count)))
+ return -EFAULT;
+
+ params = &tbuf[0];
+ colon = strchr(params, '\n');
+ if (colon != NULL)
+ *colon = '\0';
+ while ((token = strsep(&params, " ")) != NULL) {
+ memset(sublog, 0, sizeof(sublog));
+ if (token == NULL || !*token)
+ break;
+ if (*token == '\0')
+ continue;
+ colon = strchr(token, ':');
+ if (colon != NULL) {
+ *colon = ' ';
+ }
+ tokens = sscanf(token, "%s %u", sublog, &log_on);
+ if (colon != NULL)
+ *colon = ':';
+
+ if (tokens == 2) {
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ if (!strncmp(sublog, sublogname_map[i].sublogname,
+ strlen(sublogname_map[i].sublogname))) {
+ if (log_on)
+ wl_dbg_level |=
+ (sublogname_map[i].log_level);
+ else
+ wl_dbg_level &=
+ ~(sublogname_map[i].log_level);
+ }
+ }
+ } else
+ WL_ERR(("%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tbuf, token, tokens));
+
+
+ }
+ return count;
+}
+
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *param;
+ char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+ uint i;
+ memset(tbuf, 0, sizeof(tbuf));
+ param = &tbuf[0];
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+ sublogname_map[i].sublogname,
+ (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+ }
+ *param = '\n';
+ return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+
+}
+static const struct file_operations fops_debuglevel = {
+ .open = NULL,
+ .write = wl_debuglevel_write,
+ .read = wl_debuglevel_read,
+ .owner = THIS_MODULE,
+ .llseek = NULL,
+};
+
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+{
+ s32 err = 0;
+ struct dentry *_dentry;
+ if (!cfg)
+ return -EINVAL;
+ cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+ if (cfg->debugfs == ERR_PTR(-ENODEV))
+ WL_ERR(("Debugfs is not enabled on this kernel\n"));
+ else
+ WL_ERR(("Can not create debugfs directory\n"));
+ cfg->debugfs = NULL;
goto exit;
+
+ }
+ _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+ cfg->debugfs, cfg, &fops_debuglevel);
+ if (!_dentry || IS_ERR(_dentry)) {
+ WL_ERR(("failed to create debug_level debug file\n"));
+ wl_free_debugfs(cfg);
+ }
+exit:
+ return err;
+}
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+{
+ if (!cfg)
+ return -EINVAL;
+ if (cfg->debugfs)
+ debugfs_remove_recursive(cfg->debugfs);
+ cfg->debugfs = NULL;
+ return 0;
+}
+#endif /* DEBUGFS_CFG80211 */
+
+struct device *wl_cfg80211_get_parent_dev(void)
+{
+ return cfg80211_parent_dev;
+}
+
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+ cfg80211_parent_dev = dev;
+}
+
+static void wl_cfg80211_clear_parent_dev(void)
+{
+ cfg80211_parent_dev = NULL;
+}
+
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
+ wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+}
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (((dev_role == NL80211_IFTYPE_AP) &&
+ !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+ ((dev_role == NL80211_IFTYPE_P2P_GO) &&
+ !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+ {
+ WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
+ return false;
}
+ return true;
+}
+
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+
+ if (!cfg || !cfg->wdev)
+ return -EINVAL;
+
+ if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+ return -1;
+
+ return 0;
+}
+
+void wl_cfg80211_enable_trace(u32 level)
+{
+ wl_dbg_level = level;
+ printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level);
+}
+
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+ 2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+ /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+ * is passed with CMD_FRAME. This callback is supposed to cancel
+ * the OFFCHANNEL Wait. Since we are already taking care of that
+ * with the tx_mgmt logic, do nothing here.
+ */
+
+ return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL >= 3.2.0 */
+
+#ifdef WL11U
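+/* 802.11u interworking support: locate the Interworking IE in a buffer and
+ * push it to the firmware through the "ie" iovar.
+ */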
+bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
+
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) {
+ return (bcm_tlv_t *)ie;
+ }
+ return NULL;
+}
+
+
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len)
+{
+ s32 err = BCME_OK;
+ s32 buf_len;
+ s32 iecount;
+ ie_setbuf_t *ie_setbuf;
+
+ if (ie_id != DOT11_MNG_INTERWORKING_ID)
+ return BCME_UNSUPPORTED;
+
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+ VNDR_IE_CUSTOM_FLAG))) {
+ WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag));
+ return -1;
+ }
+
+ /* Use the VNDR_IE_CUSTOM_FLAG flag for non-vendor IEs. Currently a fixed value. */
+ pktflag = htod32(pktflag);
+
+ buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+ ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+
+ if (!ie_setbuf) {
+ WL_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
+ }
+
+ if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
+ WL_ERR(("Previous IW IE is equals to current IE\n"));
+ err = BCME_OK;
+ goto exit;
+ }
+
+ strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
+ ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+
+ /* Buffer contains only 1 IE */
+ iecount = htod32(1);
+ memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int));
+ memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32));
+
+ /* Now, add the IE to the buffer */
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
+
+ /* if already set with previous values, delete it first */
+ if (cfg->iw_ie_len != 0) {
+ WL_DBG(("Different IW_IE was already set. clear first\n"));
+
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+
+ err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK)
+ goto exit;
+ }
+
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+ memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+
+ err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+ if (err == BCME_OK) {
+ memcpy(cfg->iw_ie, data, data_len);
+ cfg->iw_ie_len = data_len;
+ cfg->wl11u = TRUE;
- /* set the states */
- cfg->sdo->dd_state = WL_DD_STATE_SEARCH;
- wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+ }
exit:
if (ie_setbuf)
kfree(ie_setbuf);
-
- /* Incase of failure enable back the ESCAN event */
- if (ret)
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
-
- return ret;
+ return err;
}
+#endif /* WL11U */
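+/* "dfs_ap_move" handler: with no argument, report the DFS move/scan status;
+ * with -1, abort a pending move; otherwise move the AP to the given chanspec.
+ */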
-s32 wl_sd_handle_sd_listen(
- struct net_device *dev,
- u8 *buf,
- int len)
+s32
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- wl_sd_listen_t sd_listen;
- int ret = 0;
- u8 * ptr = NULL;
- uint channel = 0;
+ char ioctl_buf[50];
+ int err = 0;
+ uint32 val = 0;
+ chanspec_t chanspec = 0;
+ int abort;
+ int bytes_written = 0;
+ wl_dfs_ap_move_status_t *status;
+ char chanbuf[CHANSPEC_STR_LEN];
+ const char *dfs_state_str[DFS_SCAN_S_MAX] = {
+ "Radar Free On Channel",
+ "Radar Found On Channel",
+ "Radar Scan In Progress",
+ "Radar Scan Aborted",
+ "RSDB Mode switch in Progress For Scan"
+ };
+ if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ bytes_written = snprintf(command, total_len, "AP is not UP\n");
+ return bytes_written;
+ }
+ if (!*data) {
+ if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("setting dfs_ap_move failed with err=%d \n", err));
+ return err;
+ }
+ status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf;
- /* Just in case if it is not enabled */
- if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
- WL_ERR(("cfgp2p_enable discovery failed"));
- return -1;
- }
+ if (status->version != WL_DFS_AP_MOVE_VERSION) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("err=%d version=%d\n", err, status->version));
+ return err;
+ }
- if (wldev_iovar_setint(dev, "mpc", 0) < 0) {
- /* Setting of MPC failed */
- WL_ERR(("mpc disabling faild\n"));
- return -1;
- }
+ if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
+ chanspec = wl_chspec_driver_to_host(status->chanspec);
+ if (chanspec != 0 && chanspec != INVCHANSPEC) {
+ wf_chspec_ntoa(chanspec, chanbuf);
+ bytes_written = snprintf(command, total_len,
+ "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
+ }
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "%s\n", dfs_state_str[status->move_status]);
+ return bytes_written;
+ } else {
+ bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
+ return bytes_written;
+ }
- bzero(&sd_listen, sizeof(wl_sd_listen_t));
+ }
- if (len) {
- ptr = strsep((char **)&buf, " ");
- if (ptr == NULL) {
- /* period and duration given wrongly */
- WL_ERR(("Arguments in wrong format \n"));
- return -EINVAL;
- }
- else if (strncmp(ptr, "chan=", strlen("chan=")) == 0) {
- sd_listen.interval = 65535;
- sd_listen.period = 65535;
- ptr += strlen("chan=");
- channel = simple_strtol(ptr, NULL, 10);
+ abort = bcm_atoi(data);
+ if (abort == -1) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
+ return err;
}
- else {
- sd_listen.period = simple_strtol(ptr, NULL, 10);
- ptr = strsep((char **)&buf, " ");
- if (ptr == NULL) {
- WL_ERR(("Arguments in wrong format \n"));
- return -EINVAL;
- }
- sd_listen.interval = simple_strtol(ptr, NULL, 10);
- if (buf && strncmp(buf, "chan=", strlen("chan=")) == 0) {
- buf += strlen("chan=");
- channel = simple_strtol(buf, NULL, 10);
+ } else {
+ chanspec = wf_chspec_aton(data);
+ if (chanspec != 0) {
+ val = wl_chspec_host_to_driver(chanspec);
+ if (val != INVCHANSPEC) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
+ return err;
+ }
+ WL_DBG((" set dfs_ap_move successfull"));
+ } else {
+ err = BCME_USAGE_ERROR;
}
}
- WL_SD(("listen_period:%d, listen_interval:%d and listen_channel:%d\n",
- sd_listen.period, sd_listen.interval, channel));
- }
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
- sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
- return -1;
- }
-
- WL_SD(("p2po_listen period:%d interval:%d \n",
- sd_listen.period, sd_listen.interval));
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
- sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("p2po_listen Failed :%d\n", ret));
- return -1;
}
-
- /* Remove ESCAN from waking up the host if ofind/olisten is enabled */
- wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
-
- /* Store the extended listen values for use in sdo_resume */
- cfg->sdo->sd_listen.interval = sd_listen.interval;
- cfg->sdo->sd_listen.period = sd_listen.period;
-
- /* set the states */
- cfg->sdo->dd_state = WL_DD_STATE_LISTEN;
- wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
-
- return 0;
+ return err;
}
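+/* WBTEXT roam profile configuration: get or set the per-band "roam_prof"
+ * brackets (roam trigger, RSSI lower bound, channel usage, CU average duration).
+ */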
-s32 wl_cfg80211_sd_offload(struct net_device *dev, char *cmd, char* buf, int len)
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
{
- int ret = 0;
+ uint i = 0;
struct bcm_cfg80211 *cfg = g_bcm_cfg;
-
- WL_SD(("Entry cmd:%s arg_len:%d \n", cmd, len));
-
- if (!cfg->sdo) {
- WL_SD(("Initializing SDO \n"));
- if ((ret = wl_cfg80211_sdo_init(cfg)) < 0)
- goto exit;
+ wl_roam_prof_band_t *rp;
+ int err = -EINVAL, bytes_written = 0;
+ size_t len = strlen(data);
+ int rp_len = 0;
+ data[len] = '\0';
+ rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+ * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
}
- if (strncmp(cmd, "P2P_SD_REQ", strlen("P2P_SD_REQ")) == 0) {
- ret = wl_sd_handle_sd_req(dev, buf, len);
- } else if (strncmp(cmd, "P2P_SD_CANCEL_REQ", strlen("P2P_SD_CANCEL_REQ")) == 0) {
- ret = wl_sd_handle_sd_cancel_req(dev, buf);
- } else if (strncmp(cmd, "P2P_SD_SVC_ADD", strlen("P2P_SD_SVC_ADD")) == 0) {
- ret = wl_sd_handle_sd_add_svc(dev, buf, len);
- } else if (strncmp(cmd, "P2P_SD_SVC_DEL", strlen("P2P_SD_SVC_DEL")) == 0) {
- ret = wl_sd_handle_sd_del_svc(dev, buf, len);
- } else if (strncmp(cmd, "P2P_SD_FIND", strlen("P2P_SD_FIND")) == 0) {
- ret = wl_sd_handle_sd_find(dev, buf, len);
- } else if (strncmp(cmd, "P2P_SD_LISTEN", strlen("P2P_SD_LISTEN")) == 0) {
- ret = wl_sd_handle_sd_listen(dev, buf, len);
- } else if (strncmp(cmd, "P2P_SD_STOP", strlen("P2P_STOP")) == 0) {
- ret = wl_sd_handle_sd_stop_discovery(dev, buf, len);
+ rp->ver = WL_MAX_ROAM_PROF_VER;
+ if (*data && (!strncmp(data, "b", 1))) {
+ rp->band = WLC_BAND_2G;
+ } else if (*data && (!strncmp(data, "a", 1))) {
+ rp->band = WLC_BAND_5G;
} else {
- WL_ERR(("Request for Unsupported CMD:%s \n", buf));
- ret = -EINVAL;
+ err = snprintf(command, total_len, "Missing band\n");
+ goto exit;
+ }
+ data++;
+ rp->len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
+ /* roam_prof version get */
+ if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->len));
+ err = -EINVAL;
+ goto exit;
}
+ if (!*data) {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /* Print the contents of the roam profile data from FW and exit
+ * if either of the conditions below is hit: the remaining buffer
+ * length is less than the roam profile size, or there is no
+ * valid entry.
+ */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ bytes_written += snprintf(command+bytes_written,
+ total_len, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+ rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower,
+ rp->roam_prof[i].channel_usage,
+ rp->roam_prof[i].cu_avg_calc_dur);
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /* Read the contents of the roam profile data from FW and exit
+ * if either of the conditions below is hit: the remaining buffer
+ * length is less than the roam profile size, or there is no
+ * valid entry.
+ */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ }
+ /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+ if (i != 2) {
+ WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ /* setting roam profile to fw */
+ data++;
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ rp->roam_prof[i].roam_trigger = simple_strtol(data, &data, 10);
+ data++;
+ rp->roam_prof[i].rssi_lower = simple_strtol(data, &data, 10);
+ data++;
+ rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10);
+
+ rp_len += sizeof(wl_roam_prof_t);
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ }
+ if (i != 1) {
+ WL_ERR(("Only two roam_prof rows supported.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->len = rp_len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) {
+ WL_ERR(("seting roam_profile failed with err %d\n", err));
+ }
+ }
exit:
- return ret;
+ if (rp) {
+ kfree(rp);
+ }
+ return err;
}
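+/* WBTEXT weight configuration: get or set the "wnm_bss_select_weight" iovar
+ * for the RSSI or channel-utilization factor on the given band.
+ */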
-#endif /* WL_SDO */
-#ifdef WLTDLS
-static s32
-wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data) {
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int bytes_written = 0, err = -EINVAL, argc = 0;
+ char rssi[5], band[5], weight[5];
+ char *endptr = NULL;
+ wnm_bss_select_weight_cfg_t *bwcfg;
+
+ bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL);
+ if (unlikely(!bwcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
+ bwcfg->type = 0;
+ bwcfg->weight = 0;
- struct net_device *ndev = NULL;
- u32 reason = ntoh32(e->reason);
- s8 *msg = NULL;
+ argc = sscanf(data, "%s %s %s", rssi, band, weight);
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ if (!strcasecmp(rssi, "rssi"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ else if (!strcasecmp(rssi, "cu"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ else {
+ /* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu> <band> <weight> */
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
- switch (reason) {
- case WLC_E_TDLS_PEER_DISCOVERED :
- msg = " TDLS PEER DISCOVERD ";
- break;
- case WLC_E_TDLS_PEER_CONNECTED :
-#ifdef PCIE_FULL_DONGLE
- dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
-#endif /* PCIE_FULL_DONGLE */
- if (cfg->tdls_mgmt_frame) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
- 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
- GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
- GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
+ if (!strcasecmp(band, "a"))
+ bwcfg->band = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ bwcfg->band = WLC_BAND_2G;
+ else if (!strcasecmp(band, "all"))
+ bwcfg->band = WLC_BAND_ALL;
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
+
+ if (argc == 2) {
+ /* If there is no data after band, getting wnm_bss_select_weight from fw */
+ if (bwcfg->band == WLC_BAND_ALL) {
+ WL_ERR(("band option \"all\" is for set only, not get\n"));
+ goto exit;
}
- msg = " TDLS PEER CONNECTED ";
- break;
- case WLC_E_TDLS_PEER_DISCONNECTED :
-#ifdef PCIE_FULL_DONGLE
- dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
-#endif /* PCIE_FULL_DONGLE */
- if (cfg->tdls_mgmt_frame) {
- kfree(cfg->tdls_mgmt_frame);
- cfg->tdls_mgmt_frame = NULL;
- cfg->tdls_mgmt_freq = 0;
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg));
+ bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU",
+ (bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
+ err = bytes_written;
+ goto exit;
+ } else {
+ /* if weight is not an integer, return a command usage error */
+ bwcfg->weight = simple_strtol(weight, &endptr, 0);
+ if (*endptr != '\0') {
+ WL_ERR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ /* setting weight for iovar wnm_bss_select_weight to fw */
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err));
}
- msg = "TDLS PEER DISCONNECTED ";
- break;
}
- if (msg) {
- WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
- (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+exit:
+ if (bwcfg) {
+ kfree(bwcfg);
}
- return 0;
-
+ return err;
}
-#endif /* WLTDLS */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
-static s32
-#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
- u32 peer_capability, const u8 *data, size_t len)
-#else
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *data,
- size_t len)
-#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */
+#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
+
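+/* WBTEXT factor table: get or set the "wnm_bss_select_table" iovar; entries
+ * are parsed from "<rssi|cu> <band> [low high factor ...]" tuples.
+ */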
+int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
{
- s32 ret = 0;
-#ifdef WLTDLS
- struct bcm_cfg80211 *cfg;
- tdls_wfd_ie_iovar_t info;
- memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
- cfg = g_bcm_cfg;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int bytes_written = 0, err = -EINVAL;
+ char rssi[5], band[5];
+ int btcfg_len = 0, i = 0, parsed_len = 0;
+ wnm_bss_select_factor_cfg_t *btcfg;
+ size_t slen = strlen(data);
+ char *start_addr = NULL;
+ data[slen] = '\0';
+
+ btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) *
+ WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL);
+ if (unlikely(!btcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
-#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
- /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10
- * and that cuases build error
- */
- BCM_REFERENCE(peer_capability);
-#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+ btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
+ btcfg->band = WLC_BAND_AUTO;
+ btcfg->type = 0;
+ btcfg->count = 0;
- switch (action_code) {
- /* We need to set TDLS Wifi Display IE to firmware
- * using tdls_wfd_ie iovar
- */
- case WLAN_TDLS_SET_PROBE_WFD_IE:
- info.mode = TDLS_WFD_PROBE_IE_TX;
- memcpy(&info.data, data, len);
- info.length = len;
- break;
- case WLAN_TDLS_SET_SETUP_WFD_IE:
- info.mode = TDLS_WFD_IE_TX;
- memcpy(&info.data, data, len);
- info.length = len;
- break;
- default:
- WL_ERR(("Unsupported action code : %d\n", action_code));
- goto out;
- }
+ sscanf(data, "%s %s", rssi, band);
- ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (!strcasecmp(rssi, "rssi")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ }
+ else if (!strcasecmp(rssi, "cu")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ }
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
+ }
- if (ret) {
- WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ if (!strcasecmp(band, "a")) {
+ btcfg->band = WLC_BAND_5G;
+ }
+ else if (!strcasecmp(band, "b")) {
+ btcfg->band = WLC_BAND_2G;
+ }
+ else if (!strcasecmp(band, "all")) {
+ btcfg->band = WLC_BAND_ALL;
+ }
+ else {
+ WL_ERR(("%s: Command usage, Wrong band\n", __func__));
+ goto exit;
}
-out:
-#endif /* WLTDLS */
- return ret;
-}
-static s32
-wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, enum nl80211_tdls_operation oper)
-{
- s32 ret = 0;
-#ifdef WLTDLS
- struct bcm_cfg80211 *cfg;
- tdls_iovar_t info;
- cfg = g_bcm_cfg;
- memset(&info, 0, sizeof(tdls_iovar_t));
- if (peer)
- memcpy(&info.ea, peer, ETHER_ADDR_LEN);
- switch (oper) {
- case NL80211_TDLS_DISCOVERY_REQ:
- /* turn on TDLS */
- ret = dhd_tdls_enable(dev, true, false, NULL);
- if (ret < 0)
- return ret;
- /* If the discovery request is broadcast then we need to set
- * info.mode to Tunneled Probe Request
- */
- if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
- info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+ if ((slen - 1) == (strlen(rssi) + strlen(band))) {
+ /* Getting factor table using iovar 'wnm_bss_select_table' from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
+ sizeof(*btcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg));
+ memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg));
+
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "No of entries in table: %d\n", btcfg->count);
+ bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n",
+ (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "low\thigh\tfactor\n");
+ for (i = 0; i <= btcfg->count-1; i++) {
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high,
+ btcfg->params[i].factor);
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ memset(btcfg->params, 0, sizeof(*btcfg) * WL_FACTOR_TABLE_MAX_LIMIT);
+ data += (strlen(rssi) + strlen(band) + 2);
+ start_addr = data;
+ slen = slen - (strlen(rssi) + strlen(band) + 2);
+ for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
+ if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
+ btcfg->params[i].low = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].high = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].factor = simple_strtol(data, &data, 10);
+ btcfg->count++;
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ parsed_len = data - start_addr;
+ } else {
+ WL_ERR(("%s:Command usage:less no of args\n", __func__));
+ goto exit;
+ }
}
- else {
- info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
+ cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) {
+ WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err));
+ goto exit;
}
- break;
- case NL80211_TDLS_SETUP:
- /* auto mode on */
- ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer);
- if (ret < 0)
- return ret;
- break;
- case NL80211_TDLS_TEARDOWN:
- info.mode = TDLS_MANUAL_EP_DELETE;
- /* auto mode off */
- ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer);
- if (ret < 0)
- return ret;
- break;
- default:
- WL_ERR(("Unsupported operation : %d\n", oper));
- goto out;
}
- if (info.mode) {
- ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (ret) {
- WL_ERR(("tdls_endpoint error %d\n", ret));
- }
+exit:
+ if (btcfg) {
+ kfree(btcfg);
}
-out:
-#endif /* WLTDLS */
- return ret;
+ return err;
}
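+/* WBTEXT delta configuration: read back or update roam_delta in the per-band
+ * roam profile ("roam_prof") for entries with a non-zero channel usage.
+ */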
-#endif /* LINUX_VERSION > VERSION(3,2,0) || WL_COMPAT_WIRELESS */
-s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
- enum wl_management_type type)
+s32
+wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
{
- struct bcm_cfg80211 *cfg;
- struct net_device *ndev = NULL;
- struct ether_addr primary_mac;
- s32 ret = 0;
- s32 bssidx = 0;
- s32 pktflag = 0;
- cfg = g_bcm_cfg;
-
- if (wl_get_drv_status(cfg, AP_CREATING, net)) {
- /* Vendor IEs should be set to FW
- * after SoftAP interface is brought up
- */
+ uint i = 0;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
+ char delta[5], band[5], *endptr = NULL;
+ wl_roam_prof_band_t *rp;
+
+ rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+ * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
goto exit;
- } else if (wl_get_drv_status(cfg, AP_CREATED, net)) {
- ndev = net;
- bssidx = 0;
- } else if (cfg->p2p) {
- net = ndev_to_wlc_ndev(net, cfg);
- if (!cfg->p2p->on) {
- get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr,
- &cfg->p2p->int_addr);
- /* In case of p2p_listen command, supplicant send remain_on_channel
- * without turning on P2P
- */
+ }
- p2p_on(cfg) = true;
- ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0);
+ argc = sscanf(data, "%s %s", band, delta);
+ if (!strcasecmp(band, "a"))
+ rp->band = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ rp->band = WLC_BAND_2G;
+ else {
+		WL_ERR(("%s: Missing or invalid band\n", __func__));
+ goto exit;
+ }
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t));
+ if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->len));
+ err = -EINVAL;
+ goto exit;
+ }
- if (unlikely(ret)) {
- goto exit;
- }
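+	/* With a delta argument the new value is written into every valid roam
+	 * profile entry; without one the current delta is reported back to the
+	 * caller through 'command'.
+	 */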
+ if (argc == 2) {
+ /* if delta is non integer returns command usage error */
+ val = simple_strtol(delta, &endptr, 0);
+ if (*endptr != '\0') {
+			WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
}
- if (net != bcmcfg_to_prmry_ndev(cfg)) {
- if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) {
- ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION);
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+			/*
+			 * Check the roam profile data returned by the firmware and stop
+			 * once the remaining buffer is shorter than one roam profile
+			 * entry or there are no more valid entries.
+			 */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
}
- } else {
- ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (rp->roam_prof[i].channel_usage != 0) {
+ rp->roam_prof[i].roam_delta = val;
+ }
+ len += sizeof(wl_roam_prof_t);
}
}
- if (ndev != NULL) {
- switch (type) {
- case WL_BEACON:
- pktflag = VNDR_IE_BEACON_FLAG;
- break;
- case WL_PROBE_RESP:
- pktflag = VNDR_IE_PRBRSP_FLAG;
- break;
- case WL_ASSOC_RESP:
- pktflag = VNDR_IE_ASSOCRSP_FLAG;
- break;
+ else {
+ if (rp->roam_prof[i].channel_usage != 0) {
+ bytes_written = snprintf(command, total_len,
+ "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? "2G" : "5G",
+ rp->roam_prof[0].roam_delta);
}
- if (pktflag)
- ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len);
+ err = bytes_written;
+ goto exit;
}
-exit:
- return ret;
+ rp->len = len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL)) < 0) {
+		WL_ERR(("setting roam_profile failed with err %d\n", err));
+ }
+exit:
+ if (rp) {
+ kfree(rp);
+ }
+ return err;
}
-#ifdef WL_SUPPORT_AUTO_CHANNEL
-static s32
-wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+
+int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
{
- u32 val = 0;
- s32 ret = BCME_ERROR;
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
+ unsigned long flags;
+ int clear_flag = 0;
+ int ret = 0;
- /* Disable mpc, to avoid automatic interface down. */
- val = 0;
+ WL_TRACE(("Enter\n"));
- ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
- sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
- &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("set 'mpc' failed, error = %d\n", ret));
- goto done;
- }
+ cfg = g_bcm_cfg;
+ if (!cfg)
+ return -EINVAL;
- /* Set interface up, explicitly. */
- val = 1;
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- ret = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
- if (ret < 0) {
- WL_ERR(("set interface up failed, error = %d\n", ret));
- goto done;
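+	/* If a scan request is pending on this cfgdev, report it to cfg80211
+	 * as aborted and remember to clear the SCANNING state afterwards.
+	 */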
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
+#else
+ if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
+#endif
+ {
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
+ clear_flag = 1;
}
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- /* Stop all scan explicitly, till auto channel selection complete. */
- wl_set_drv_status(cfg, SCANNING, ndev);
- if (cfg->escan_info.ndev == NULL) {
- ret = BCME_OK;
- goto done;
- }
- ret = wl_notify_escan_complete(cfg, ndev, true, true);
- if (ret < 0) {
- WL_ERR(("set scan abort failed, error = %d\n", ret));
- ret = BCME_OK; // terence 20140115: fix escan_complete error
- goto done;
- }
+ if (clear_flag)
+ wl_clr_drv_status(cfg, SCANNING, ndev);
-done:
return ret;
}
-static bool
-wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec)
+bool wl_cfg80211_is_concurrent_mode(void)
{
- bool valid = false;
- char chanbuf[CHANSPEC_STR_LEN];
-
- /* channel 1 to 14 */
- if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
- valid = true;
- }
- /* channel 36 to 48 */
- else if ((chanspec >= 0x1b24) && (chanspec <= 0x1b30)) {
- valid = true;
+ if ((g_bcm_cfg) && (wl_get_drv_status_all(g_bcm_cfg, CONNECTED) > 1)) {
+ return true;
+ } else {
+ return false;
}
- /* channel 149 to 161 */
- else if ((chanspec >= 0x1b95) && (chanspec <= 0x1ba1)) {
- valid = true;
+}
+
+void* wl_cfg80211_get_dhdp()
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(void)
+{
+ return (g_bcm_cfg && g_bcm_cfg->p2p);
+}
+
+bool wl_cfg80211_is_roam_offload(void)
+{
+ return (g_bcm_cfg && g_bcm_cfg->roam_offload);
+}
+
+bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx)
+{
+ dhd_pub_t *dhd = NULL;
+ struct net_device *ndev = NULL;
+ u8 *curbssid = NULL;
+
+ dhd = (dhd_pub_t *)(g_bcm_cfg->pub);
+
+ if (dhd) {
+ ndev = dhd_idx2net(dhd, ifidx);
}
- else {
- valid = false;
- WL_INFORM(("invalid P2P chanspec, chanspec = %s\n",
- wf_chspec_ntoa_ex(chanspec, chanbuf)));
+
+ if (!dhd || !ndev) {
+ return false;
}
- return valid;
+ curbssid = wl_read_prof(g_bcm_cfg, ndev, WL_PROF_BSSID);
+
+ return memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0;
}
-static s32
-wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
+static void wl_cfg80211_work_handler(struct work_struct * work)
{
- s32 ret = BCME_ERROR;
struct bcm_cfg80211 *cfg = NULL;
- wl_uint32_list_t *list = NULL;
- chanspec_t chanspec = 0;
+ struct net_info *iter, *next;
+ s32 err = BCME_OK;
+ s32 pm = PM_FAST;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
+ WL_DBG(("Enter \n"));
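+	/* Delayed PM-enable work: re-apply PM_FAST on every connected BSS/IBSS
+	 * netdev and refresh its power mode.
+	 */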
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+ (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS &&
+ wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS))
+ continue;
+ if (iter->ndev) {
+ if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
+ &pm, sizeof(pm), true)) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ iter->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ iter->ndev->name, err));
+ } else
+ wl_cfg80211_update_power_mode(iter->ndev);
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+ _Pragma("GCC diagnostic pop")
+#endif
+ DHD_OS_WAKE_UNLOCK(cfg->pub);
+}
- memset(buf, 0, buflen);
+u8
+wl_get_action_category(void *frame, u32 frame_len)
+{
+ u8 category;
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ category = ptr[DOT11_ACTION_CAT_OFF];
+ WL_INFORM(("Action Category: %d\n", category));
+ return category;
+}
- cfg = g_bcm_cfg;
- list = (wl_uint32_list_t *)buf;
- list->count = htod32(WL_NUMCHANSPECS);
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
+{
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL || ret_action == NULL)
+ return BCME_ERROR;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return BCME_ERROR;
+ if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+ return BCME_ERROR;
+ *ret_action = ptr[DOT11_ACTION_ACT_OFF];
+ WL_INFORM(("Public Action : %d\n", *ret_action));
+ return BCME_OK;
+}
- /* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
- chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
- WL_CHANSPEC_CTL_SB_NONE);
- chanspec = wl_chspec_host_to_driver(chanspec);
- ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
- }
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid)
+{
+ s32 err;
+ wl_event_msg_t e;
+
+ bzero(&e, sizeof(e));
+ e.event_type = cpu_to_be32(WLC_E_BSSID);
+ memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+ /* trigger the roam event handler */
+ WL_INFORM(("Delayed roam to " MACDBG "\n", MAC2STRDBG((u8*)(bssid))));
+ err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
- return ret;
+ return err;
}
static s32
-wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
+wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies)
{
- u32 channel = 0;
- s32 ret = BCME_ERROR;
- s32 i = 0;
- s32 j = 0;
- struct bcm_cfg80211 *cfg = NULL;
- wl_uint32_list_t *list = NULL;
- chanspec_t chanspec = 0;
-
- memset(buf, 0, buflen);
-
- cfg = g_bcm_cfg;
- list = (wl_uint32_list_t *)buf;
- list->count = htod32(WL_NUMCHANSPECS);
-
- /* Restrict channels to 5GHz, 20MHz BW, no SB. */
- chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
- WL_CHANSPEC_CTL_SB_NONE);
- chanspec = wl_chspec_host_to_driver(chanspec);
-
- ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
- goto done;
- }
+ s32 err = BCME_OK;
+ vndr_ie_t *vndrie;
+ bcm_tlv_t *ie;
+ struct parsed_vndr_ie_info *parsed_info;
+ u32 count = 0;
+ s32 remained_len;
+
+ remained_len = (s32)len;
+ memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+ WL_INFORM(("---> len %d\n", len));
+ ie = (bcm_tlv_t *) parse;
+ if (!bcm_valid_tlv(ie, remained_len))
+ ie = NULL;
+ while (ie) {
+ if (count >= MAX_VNDR_IE_NUMBER)
+ break;
+ if (ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) ie;
+ /* len should be bigger than OUI length + one data length at least */
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("%s: invalid vndr ie. length is too small %d\n",
+ __FUNCTION__, vndrie->len));
+ goto end;
+ }
+ /* if wpa or wme ie, do not add ie */
+ if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+ ((vndrie->data[0] == WPA_OUI_TYPE) ||
+ (vndrie->data[0] == WME_OUI_TYPE))) {
+ CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
+ goto end;
+ }
- /* Skip DFS and inavlid P2P channel. */
- for (i = 0, j = 0; i < dtoh32(list->count); i++) {
- chanspec = (chanspec_t) dtoh32(list->element[i]);
- channel = CHSPEC_CHANNEL(chanspec);
+ parsed_info = &vndr_ies->ie_info[count++];
- ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
- if (ret < 0) {
- WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
- goto done;
- }
+ /* save vndr ie information */
+ parsed_info->ie_ptr = (char *)vndrie;
+ parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+ memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+ vndr_ies->count = count;
- if (CHANNEL_IS_RADAR(channel) ||
- !(wl_cfg80211_valid_chanspec_p2p(chanspec))) {
- continue;
- } else {
- list->element[j] = list->element[i];
+ WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n",
+ parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+ parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0],
+ parsed_info->ie_len));
}
-
- j++;
+end:
+ ie = bcm_next_tlv(ie, &remained_len);
}
-
- list->count = j;
-
-done:
- return ret;
+ return err;
}
-static s32
-wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
- int *channel)
+s32
+wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx)
{
- s32 ret = BCME_ERROR;
- int chosen = 0;
- int retry = 0;
- uint chip;
-
- /* Start auto channel selection scan. */
- ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
- if (ret < 0) {
- WL_ERR(("can't start auto channel scan, error = %d\n", ret));
- *channel = 0;
- goto done;
- }
-
- /* Wait for auto channel selection, worst case possible delay is 5250ms. */
- retry = CHAN_SEL_RETRY_COUNT;
+ s32 index;
+ struct net_info *netinfo;
+ s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
+ VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
- while (retry--) {
- OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
-
- ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
- false);
- if ((ret == 0) && (dtoh32(chosen) != 0)) {
- chip = dhd_conf_get_chip(dhd_get_pub(ndev));
- if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID) {
- u32 chanspec = 0;
- int ctl_chan;
- chanspec = wl_chspec_driver_to_host(chosen);
- printf("selected chanspec = 0x%x\n", chanspec);
- ctl_chan = wf_chspec_ctlchan(chanspec);
- printf("selected ctl_chan = 0x%x\n", ctl_chan);
- *channel = (u16)(ctl_chan & 0x00FF);
- } else
- *channel = (u16)(chosen & 0x00FF);
- WL_INFORM(("selected channel = %d\n", *channel));
- break;
- }
- WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
- (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ if (!netinfo || !netinfo->wdev) {
+ WL_ERR(("netinfo or netinfo->wdev is NULL\n"));
+ return -1;
}
- if (retry <= 0) {
- WL_ERR(("failure, auto channel selection timed out\n"));
- *channel = 0;
- ret = BCME_ERROR;
+ WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx));
+ /* Clear the IEs set in the firmware so that host is in sync with firmware */
+ for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+ if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev),
+ bssidx, vndrie_flag[index], NULL, 0) < 0)
+ WL_ERR(("vndr_ies clear failed. Ignoring.. \n"));
}
-done:
- return ret;
+ return 0;
}
-static s32
-wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
+s32
+wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg)
{
- u32 val = 0;
- s32 ret = BCME_ERROR;
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
-
- /* Clear scan stop driver status. */
- wl_clr_drv_status(cfg, SCANNING, ndev);
-
- /* Enable mpc back to 1, irrespective of initial state. */
- val = 1;
+ struct net_info *iter, *next;
- ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
- sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
- &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("set 'mpc' failed, error = %d\n", ret));
+ WL_DBG(("clear management vendor IEs \n"));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx);
}
-
- return ret;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ return 0;
}
-s32
-wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
+#define WL_VNDR_IE_MAXLEN 2048
+static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN];
+int
+wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
{
- int channel = 0, band, band_cur;
- s32 ret = BCME_ERROR;
- u8 *buf = NULL;
- char *pos = cmd;
- struct bcm_cfg80211 *cfg = NULL;
struct net_device *ndev = NULL;
+ s32 ret = BCME_OK;
+ u8 *curr_ie_buf = NULL;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u32 del_add_ie_buf_len = 0;
+ u32 total_ie_buf_len = 0;
+ u32 parsed_ie_buf_len = 0;
+ struct parsed_vndr_ies old_vndr_ies;
+ struct parsed_vndr_ies new_vndr_ies;
+ s32 i;
+ u8 *ptr;
+ s32 remained_buf_len;
+ wl_bss_vndr_ies_t *ies = NULL;
+ struct net_info *netinfo;
- memset(cmd, 0, total_len);
-
- buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
- if (buf == NULL) {
- WL_ERR(("failed to allocate chanspec buffer\n"));
- return -ENOMEM;
- }
+ WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d \n",
+ pktflag, bssidx, vndr_ie_len));
- /*
- * Always use primary interface, irrespective of interface on which
- * command came.
- */
- cfg = g_bcm_cfg;
- ndev = bcmcfg_to_prmry_ndev(cfg);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- /*
- * Make sure that FW and driver are in right state to do auto channel
- * selection scan.
- */
- ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
- if (ret < 0) {
- WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
- goto done;
+ if (bssidx > WL_MAX_IFS) {
+ WL_ERR(("bssidx > supported concurrent Ifaces \n"));
+ return -EINVAL;
}
- /* Best channel selection in 2.4GHz band. */
- ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
- if (ret < 0) {
- WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
- goto done;
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ if (!netinfo) {
+ WL_ERR(("net_info ptr is NULL \n"));
+ return -EINVAL;
}
- ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
- &channel);
- if (ret < 0) {
- WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
- goto done;
+ /* Clear the global buffer */
+ memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+ curr_ie_buf = g_mgmt_ie_buf;
+ ies = &netinfo->bss.ies;
+
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = ies->probe_res_ie;
+ mgmt_ie_len = &ies->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_res_ie);
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ mgmt_ie_buf = ies->assoc_res_ie;
+ mgmt_ie_len = &ies->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = ies->beacon_ie;
+ mgmt_ie_len = &ies->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->beacon_ie);
+ break;
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = ies->probe_req_ie;
+ mgmt_ie_len = &ies->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_req_ie);
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = ies->assoc_req_ie;
+ mgmt_ie_len = &ies->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ WL_ERR(("not suitable packet type (%d)\n", pktflag));
+ return BCME_ERROR;
}
- if (CHANNEL_IS_2G(channel)) {
- channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
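+	/* The firmware "vndr_ie" command is built up in g_mgmt_ie_buf below:
+	 * first "del" entries for the IEs currently cached for this bsscfg,
+	 * then "add" entries for the newly supplied IEs, and the combined
+	 * buffer is pushed down in a single iovar call.
+	 */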
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ WL_ERR(("extra IE size too big\n"));
+ ret = -ENOMEM;
} else {
- WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
- channel = 0;
- }
+ /* parse and save new vndr_ie in curr_ie_buff before comparing it */
+ if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+ ptr = curr_ie_buf;
+/* The constness of vndr_ie must be discarded here; changing the argument to
+ * non-const would cascade errors into other places that would each need
+ * const casts.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie,
+ vndr_ie_len, &new_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
+ goto exit;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
+
+ if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) {
+ WL_ERR(("IE size is too big (%d > %d)\n",
+ parsed_ie_buf_len, WL_VNDR_IE_MAXLEN));
+ ret = -EINVAL;
+ goto exit;
+ }
- sprintf(pos, "%04d ", channel);
- pos += 5;
+ memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ parsed_ie_buf_len += vndrie_info->ie_len;
+ }
+ }
- // terence 20140120: fix for some chipsets only return 2.4GHz channel (4330b2/43341b0/4339a0)
- ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false);
- band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
- ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
- if (ret < 0)
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
+ if (mgmt_ie_buf != NULL) {
+ if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+				WL_INFORM(("Previous mgmt IE is equal to current IE"));
+ goto exit;
+ }
- /* Best channel selection in 5GHz band. */
- ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
- if (ret < 0) {
- WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
- goto done;
- }
+ /* parse old vndr_ie */
+ if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+ &old_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
+ goto exit;
+ }
+ /* make a command to delete old ie */
+ for (i = 0; i < old_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &old_vndr_ies.ie_info[i];
+
+ WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]));
+
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "del");
+
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
- ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
- &channel);
- if (ret < 0) {
- WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
- goto done;
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (mgmt_ie_buf && parsed_ie_buf_len) {
+ ptr = mgmt_ie_buf;
+
+ remained_buf_len = mgmt_ie_buf_len;
+
+ /* make a command to add new ie */
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
+
+ WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->ie_len - 2,
+ vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]));
+
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "add");
+
+ /* verify remained buf size before copy data */
+ if (remained_buf_len >= vndrie_info->ie_len) {
+ remained_buf_len -= vndrie_info->ie_len;
+ } else {
+ WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+ "found vndr ies # = %d(cur %d), remained len %d, "
+ "cur mgmt_ie_len %d, new ie len = %d\n",
+ pktflag, new_vndr_ies.count, i, remained_buf_len,
+ *mgmt_ie_len, vndrie_info->ie_len));
+ break;
+ }
+
+ /* save the parsed IE in cfg struct */
+ memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ *mgmt_ie_len += vndrie_info->ie_len;
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
+
+ if (total_ie_buf_len && cfg->ioctl_buf != NULL) {
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+ total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (ret)
+ WL_ERR(("vndr ie set error : %d\n", ret));
+ }
}
+exit:
- if (CHANNEL_IS_5G(channel)) {
- channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
- } else {
- WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
- channel = 0;
+	return ret;
+}
+
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+ const struct cfg80211_acl_data *acl)
+{
+ int i;
+ int ret = 0;
+ int macnum = 0;
+ int macmode = MACLIST_MODE_DISABLED;
+ struct maclist *list;
+
+ /* get the MAC filter mode */
+ if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+ macmode = MACLIST_MODE_ALLOW;
+ } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+ acl->n_acl_entries) {
+ macmode = MACLIST_MODE_DENY;
}
- ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true);
- if (ret < 0)
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
+ /* if acl == NULL, macmode is still disabled.. */
+ if (macmode == MACLIST_MODE_DISABLED) {
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
- sprintf(pos, "%04d ", channel);
- pos += 5;
+ return ret;
+ }
- /* Set overall best channel same as 5GHz best channel. */
- sprintf(pos, "%04d ", channel);
- pos += 5;
+ macnum = acl->n_acl_entries;
+ if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+ WL_ERR(("%s : invalid number of MAC address entries %d\n",
+ __FUNCTION__, macnum));
+ return -1;
+ }
-done:
- if (NULL != buf) {
- kfree(buf);
+ /* allocate memory for the MAC list */
+ list = (struct maclist*)kmalloc(sizeof(int) +
+ sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+ if (!list) {
+ WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+ return -1;
}
- /* Restore FW and driver back to normal state. */
- ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
- if (ret < 0) {
- WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+ /* prepare the MAC list */
+ list->count = htod32(macnum);
+ for (i = 0; i < macnum; i++) {
+ memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
}
+ /* set the list */
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
- printf("%s: channel %s\n", __FUNCTION__, cmd);
+ kfree(list);
- return (pos - cmd);
+ return ret;
}
-#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#endif /* WL_CFG80211_ACL */
-static const struct rfkill_ops wl_rfkill_ops = {
- .set_block = wl_rfkill_set
-};
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+int wl_chspec_chandef(chanspec_t chanspec,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ struct cfg80211_chan_def *chandef,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
+ struct chan_info *chaninfo,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */
+struct wiphy *wiphy)
-static int wl_rfkill_set(void *data, bool blocked)
{
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ uint16 freq = 0;
+ int chan_type = 0;
+ int channel = 0;
+ struct ieee80211_channel *chan;
- WL_DBG(("Enter \n"));
- WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+ if (!chandef) {
+ return -1;
+ }
+ channel = CHSPEC_CHANNEL(chanspec);
- if (!cfg)
- return -EINVAL;
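+	/* Derive the control channel and nl80211 channel type from the
+	 * chanspec bandwidth and sideband bits before mapping it to a
+	 * cfg80211 channel definition.
+	 */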
+ switch (CHSPEC_BW(chanspec)) {
+ case WL_CHANSPEC_BW_20:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ case WL_CHANSPEC_BW_40:
+ {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
- cfg->rf_blocked = blocked;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ case WL_CHANSPEC_BW_80:
+ case WL_CHANSPEC_BW_8080:
+ {
+ uint16 sb = CHSPEC_CTL_SB(chanspec);
+
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
- return 0;
-}
+ if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+ chan_type = NL80211_CHAN_HT40MINUS;
+ else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+ chan_type = NL80211_CHAN_HT40PLUS;
+ }
+ break;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
-static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
-{
- s32 err = 0;
+ }
- WL_DBG(("Enter \n"));
- if (!cfg)
- return -EINVAL;
- if (setup) {
- cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
- wl_cfg80211_get_parent_dev(),
- RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
+ if (CHSPEC_IS5G(chanspec))
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
- if (!cfg->rfkill) {
- err = -ENOMEM;
- goto err_out;
- }
+ chan = ieee80211_get_channel(wiphy, freq);
+ WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
+ channel, freq, chan_type, chan));
- err = rfkill_register(cfg->rfkill);
+ if (unlikely(!chan)) {
+ /* fw and cfg80211 channel lists are not in sync */
+ WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
+ ASSERT(0);
+ return -EINVAL;
+ }
- if (err)
- rfkill_destroy(cfg->rfkill);
- } else {
- if (!cfg->rfkill) {
- err = -ENOMEM;
- goto err_out;
- }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ cfg80211_chandef_create(chandef, chan, chan_type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
+ chaninfo->freq = freq;
+ chaninfo->chan_type = chan_type;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ return 0;
+}
- rfkill_unregister(cfg->rfkill);
- rfkill_destroy(cfg->rfkill);
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
+{
+ u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ struct cfg80211_chan_def chandef;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
+ struct chan_info chaninfo;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+
+ if (!wiphy) {
+ printf("wiphy is null\n");
+ return;
+ }
+#ifndef ALLOW_CHSW_EVT
+ /* Channel switch support is only for AP/GO/ADHOC/MESH */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ WL_ERR(("No channel switch notify support for STA/GC\n"));
+ return;
+ }
+#endif /* !ALLOW_CHSW_EVT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ if (wl_chspec_chandef(chanspec, &chandef, wiphy))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
+ if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ {
+ WL_ERR(("%s:chspec_chandef failed\n", __FUNCTION__));
+ return;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
+ cfg80211_ch_switch_notify(dev, &chandef);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
+	freq = chaninfo.freq;
+	cfg80211_ch_switch_notify(dev, chaninfo.freq, chaninfo.chan_type);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
-err_out:
- return err;
+ WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec));
+ return;
}
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
-#ifdef DEBUGFS_CFG80211
-/**
-* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
-* to turn on SCAN and DBG log.
-* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
-* To see current setting of debug level,
-* cat /sys/kernel/debug/dhd/debug_level
-*/
-static ssize_t
-wl_debuglevel_write(struct file *file, const char __user *userbuf,
- size_t count, loff_t *ppos)
+#ifdef WL11ULB
+s32
+wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode)
{
- char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
- char *params, *token, *colon;
- uint i, tokens, log_on = 0;
- memset(tbuf, 0, sizeof(tbuf));
- memset(sublog, 0, sizeof(sublog));
- if (copy_from_user(&tbuf, userbuf, min_t(size_t, (sizeof(tbuf) - 1), count)))
- return -EFAULT;
+ int ret;
+ int cur_mode;
- params = &tbuf[0];
- colon = strchr(params, '\n');
- if (colon != NULL)
- *colon = '\0';
- while ((token = strsep(¶ms, " ")) != NULL) {
- memset(sublog, 0, sizeof(sublog));
- if (token == NULL || !*token)
- break;
- if (*token == '\0')
- continue;
- colon = strchr(token, ':');
- if (colon != NULL) {
- *colon = ' ';
- }
- tokens = sscanf(token, "%s %u", sublog, &log_on);
- if (colon != NULL)
- *colon = ':';
+ ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret));
+ return ret;
+ }
- if (tokens == 2) {
- for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
- if (!strncmp(sublog, sublogname_map[i].sublogname,
- strlen(sublogname_map[i].sublogname))) {
- if (log_on)
- wl_dbg_level |=
- (sublogname_map[i].log_level);
- else
- wl_dbg_level &=
- ~(sublogname_map[i].log_level);
- }
- }
- } else
- WL_ERR(("%s: can't parse '%s' as a "
- "SUBMODULE:LEVEL (%d tokens)\n",
- tbuf, token, tokens));
+ if (cur_mode == mode) {
+ /* If request mode is same as that of the current mode, then
+ * do nothing (Avoid unnecessary wl down and up).
+ */
+ WL_INFORM(("[ULB] No change in ulb_mode. Do nothing.\n"));
+ return 0;
+ }
+ /* setting of ulb_mode requires wl to be down */
+ ret = wldev_ioctl(dev, WLC_DOWN, NULL, 0, true);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret));
+ return ret;
+ }
+ if (mode >= MAX_SUPP_ULB_MODES) {
+ WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode));
+ return -EINVAL;
}
- return count;
-}
-static ssize_t
-wl_debuglevel_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *param;
- char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
- uint i;
- memset(tbuf, 0, sizeof(tbuf));
- param = &tbuf[0];
- for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
- param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
- sublogname_map[i].sublogname,
- (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+ ret = wldev_iovar_setint(dev, "ulb_mode", mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode set failed. ret:%d \n", ret));
+ return ret;
+ }
+
+ ret = wldev_ioctl(dev, WLC_UP, NULL, 0, true);
+ if (unlikely(ret)) {
+		WL_ERR(("[ULB] WLC_UP command failed:[%d]\n", ret));
+ return ret;
}
- *param = '\n';
- return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+ WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode));
+
+ return ret;
}
-static const struct file_operations fops_debuglevel = {
- .open = NULL,
- .write = wl_debuglevel_write,
- .read = wl_debuglevel_read,
- .owner = THIS_MODULE,
- .llseek = NULL,
-};
-static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
-{
- s32 err = 0;
- struct dentry *_dentry;
- if (!cfg)
+static s32
+wl_cfg80211_ulbbw_to_ulbchspec(u32 bw)
+{
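+	/* Map the ULB bandwidth setting to the corresponding chanspec
+	 * bandwidth bits; 20MHz is returned when ULB is disabled.
+	 */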
+ if (bw == ULB_BW_DISABLED) {
+ return WL_CHANSPEC_BW_20;
+ } else if (bw == ULB_BW_10MHZ) {
+ return WL_CHANSPEC_BW_10;
+ } else if (bw == ULB_BW_5MHZ) {
+ return WL_CHANSPEC_BW_5;
+ } else if (bw == ULB_BW_2P5MHZ) {
+ return WL_CHANSPEC_BW_2P5;
+ } else {
+ WL_ERR(("[ULB] unsupported value for ulb_bw \n"));
return -EINVAL;
- cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
- if (cfg->debugfs == ERR_PTR(-ENODEV))
- WL_ERR(("Debugfs is not enabled on this kernel\n"));
- else
- WL_ERR(("Can not create debugfs directory\n"));
- cfg->debugfs = NULL;
- goto exit;
-
- }
- _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
- cfg->debugfs, cfg, &fops_debuglevel);
- if (!_dentry || IS_ERR(_dentry)) {
- WL_ERR(("failed to create debug_level debug file\n"));
- wl_free_debugfs(cfg);
}
-exit:
- return err;
}
-static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+
+static chanspec_t
+wl_cfg80211_ulb_get_min_bw_chspec(struct wireless_dev *wdev, s32 bssidx)
{
- if (!cfg)
- return -EINVAL;
- if (cfg->debugfs)
- debugfs_remove_recursive(cfg->debugfs);
- cfg->debugfs = NULL;
- return 0;
-}
-#endif /* DEBUGFS_CFG80211 */
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_info *_netinfo;
+
+ /*
+ * Return the chspec value corresponding to the
+ * BW setting for a particular interface
+ */
+ if (wdev) {
+ /* if wdev is provided, use it */
+ _netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ } else if (bssidx >= 0) {
+		/* if wdev is not provided, fall back to the bssidx */
+ _netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ } else {
+ WL_ERR(("[ULB] wdev/bssidx not provided\n"));
+ return INVCHANSPEC;
+ }
+
+ if (unlikely(!_netinfo)) {
+ WL_ERR(("[ULB] net_info is null \n"));
+ return INVCHANSPEC;
+ }
-struct device *wl_cfg80211_get_parent_dev(void)
-{
- return cfg80211_parent_dev;
+ if (_netinfo->ulb_bw) {
+ WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", _netinfo->wdev, _netinfo->ulb_bw));
+ return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw);
+ } else {
+ return WL_CHANSPEC_BW_20;
+ }
}
-void wl_cfg80211_set_parent_dev(void *dev)
+static s32
+wl_cfg80211_get_ulb_bw(struct wireless_dev *wdev)
{
- cfg80211_parent_dev = dev;
-}
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
-static void wl_cfg80211_clear_parent_dev(void)
-{
- cfg80211_parent_dev = NULL;
-}
+ /*
+ * Return the ulb_bw setting for a
+ * particular interface
+ */
+ if (unlikely(!_netinfo)) {
+ WL_ERR(("[ULB] net_info is null \n"));
+ return -1;
+ }
-void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
-{
- wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
- 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
- memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+ return _netinfo->ulb_bw;
}
-static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+
+s32
+wl_cfg80211_set_ulb_bw(struct net_device *dev,
+ u32 ulb_bw, char *ifname)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (((dev_role == NL80211_IFTYPE_AP) &&
- !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
- ((dev_role == NL80211_IFTYPE_P2P_GO) &&
- !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
- {
- WL_ERR(("device role select failed\n"));
- return false;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int ret;
+ int mode;
+ struct net_info *_netinfo = NULL, *iter, *next;
+ u32 bssidx;
+ enum nl80211_iftype iftype;
+
+ if (!ifname)
+ return -EINVAL;
+
+ WL_DBG(("[ULB] Enter. bw_type:%d \n", ulb_bw));
+
+ ret = wldev_iovar_getint(dev, "ulb_mode", &mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode not supported \n"));
+ return ret;
}
- return true;
-}
-int wl_cfg80211_do_driver_init(struct net_device *net)
-{
- struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+ if (mode != ULB_MODE_STD_ALONE_MODE) {
+ WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n"));
+ return -EINVAL;
+ }
- if (!cfg || !cfg->wdev)
+ if (ulb_bw >= MAX_SUPP_ULB_BW) {
+ WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw));
return -EINVAL;
+ }
-#if !defined(P2PONEINT)
- if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
- return -1;
-#endif /* BCMDONGLEHOST */
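+	/* Resolve the target interface by name: check the dedicated P2P
+	 * discovery wdev first, then fall back to matching regular net devices.
+	 */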
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (strcmp(ifname, "p2p-dev-wlan0") == 0) {
+ iftype = NL80211_IFTYPE_P2P_DEVICE;
+ /* Use wdev corresponding to the dedicated p2p discovery interface */
+ if (likely(cfg->p2p_wdev)) {
+ _netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+ } else {
+ return -ENODEV;
+ }
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ if (!_netinfo) {
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) {
+ _netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev);
+ iftype = NL80211_IFTYPE_STATION;
+ }
+ }
+ }
+ }
- return 0;
+ if (!_netinfo)
+ return -ENODEV;
+ bssidx = _netinfo->bssidx;
+ _netinfo->ulb_bw = ulb_bw;
+
+
+ WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx));
+ ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_bw set failed. ret:%d \n", ret));
+ return ret;
+ }
+
+ return ret;
}
+#endif /* WL11ULB */
-void wl_cfg80211_enable_trace(u32 level)
+static void
+wl_ap_channel_ind(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev,
+ chanspec_t chanspec)
{
- wl_dbg_level = level;
- printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level);
+ u32 channel = LCHSPEC_CHANNEL(chanspec);
+
+ WL_DBG(("(%s) AP channel:%d chspec:0x%x \n",
+ ndev->name, channel, chanspec));
+ if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) {
+ /*
+ * If cached channel is different from the channel indicated
+ * by the event, notify user space about the channel switch.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ cfg->ap_oper_channel = channel;
+ }
}
-#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
- 2, 0))
static s32
-wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
- bcm_struct_cfgdev *cfgdev, u64 cookie)
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
{
- /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
- * is passed with CMD_FRAME. This callback is supposed to cancel
- * the OFFCHANNEL Wait. Since we are already taking care of that
- * with the tx_mgmt logic, do nothing here.
- */
+ struct net_device *ndev = NULL;
+ chanspec_t chanspec;
+ u32 channel;
- return 0;
-}
-#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
+ return -1;
+ }
-#ifdef WL11U
-bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(u8 *parse, u32 len)
-{
- bcm_tlv_t *ie;
+ if (!data) {
+ return -EINVAL;
+ }
- while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_INTERWORKING_ID))) {
- return (bcm_tlv_t *)ie;
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ chanspec = *((chanspec_t *)data);
+ channel = LCHSPEC_CHANNEL(chanspec);
+
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ }
}
- return NULL;
-}
+ return 0;
+}
static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len)
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
{
- s32 err = BCME_OK;
- s32 buf_len;
- s32 iecount;
- ie_setbuf_t *ie_setbuf;
-
- if (ie_id != DOT11_MNG_INTERWORKING_ID)
- return BCME_UNSUPPORTED;
+ int error = 0;
+ u32 chanspec = 0;
+ struct net_device *ndev = NULL;
+ struct wiphy *wiphy = NULL;
- /* Validate the pktflag parameter */
- if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
- VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
- VNDR_IE_CUSTOM_FLAG))) {
- WL_ERR(("cfg80211 Add IE: Invalid packet flag 0x%x\n", pktflag));
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
return -1;
}
- /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
- pktflag = htod32(pktflag);
-
- buf_len = sizeof(ie_setbuf_t) + data_len - 1;
- ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ wiphy = bcmcfg_to_wiphy(cfg);
+ error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
+ if (unlikely(error)) {
+ WL_ERR(("Get chanspec error: %d \n", error));
+ return -1;
+ }
- if (!ie_setbuf) {
- WL_ERR(("Error allocating buffer for IE\n"));
- return -ENOMEM;
- }
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ } else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, wiphy);
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ }
- if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
- WL_ERR(("Previous IW IE is equals to current IE\n"));
- err = BCME_OK;
- goto exit;
}
- strncpy(ie_setbuf->cmd, "add", VNDR_IE_CMD_LEN - 1);
- ie_setbuf->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ return 0;
+}
- /* Buffer contains only 1 IE */
- iecount = htod32(1);
- memcpy((void *)&ie_setbuf->ie_buffer.iecount, &iecount, sizeof(int));
- memcpy((void *)&ie_setbuf->ie_buffer.ie_list[0].pktflag, &pktflag, sizeof(uint32));
+#ifdef WL_NAN
+int
+wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len)
+{
+ return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len);
+}
+#endif /* WL_NAN */
- /* Now, add the IE to the buffer */
- ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ int err;
- /* if already set with previous values, delete it first */
- if (cfg->iw_ie_len != 0) {
- WL_DBG(("Different IW_IE was already set. clear first\n"));
+ /* Clear the security settings on the primary Interface */
+ err = wldev_iovar_setint(dev, "wsec", 0);
+ if (unlikely(err)) {
+ WL_ERR(("wsec clear failed \n"));
+ }
+ err = wldev_iovar_setint(dev, "auth", 0);
+ if (unlikely(err)) {
+ WL_ERR(("auth clear failed \n"));
+ }
+ err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+ if (unlikely(err)) {
+ WL_ERR(("wpa_auth clear failed \n"));
+ }
+}
- ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
+#ifdef WL_CFG80211_P2P_DEV_IF
+void wl_cfg80211_del_p2p_wdev(void)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct wireless_dev *wdev = NULL;
- err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ WL_DBG(("Enter \n"));
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return;
+ } else {
+ wdev = cfg->p2p_wdev;
+ }
- if (err != BCME_OK)
- goto exit;
+ if (wdev && cfg->down_disc_if) {
+ wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ cfg->down_disc_if = FALSE;
}
+}
+#endif /* WL_CFG80211_P2P_DEV_IF */
- ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
- memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+int
+wl_cfg80211_set_spect(struct net_device *dev, int spect)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int down = 1;
+ int up = 1;
+ int err = BCME_OK;
- err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
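+	/* The spect setting is applied across a WLC_DOWN/WLC_UP cycle, and
+	 * only when no connection is active.
+	 */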
+ if (!wl_get_drv_status_all(cfg, CONNECTED)) {
+ err = wldev_ioctl(dev, WLC_DOWN, &down, sizeof(down), true);
+ if (err) {
+ WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
+ return err;
+ }
- if (err == BCME_OK) {
- memcpy(cfg->iw_ie, data, data_len);
- cfg->iw_ie_len = data_len;
- cfg->wl11u = TRUE;
+ err = wldev_ioctl(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect), true);
+ if (err) {
+ WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
+ return err;
+ }
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+ err = wldev_ioctl(dev, WLC_UP, &up, sizeof(up), true);
+ if (err) {
+ WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
+ return err;
+ }
}
-
-exit:
- if (ie_setbuf)
- kfree(ie_setbuf);
return err;
}
-#endif /* WL11U */
-#ifdef WL_HOST_BAND_MGMT
-s32
-wl_cfg80211_set_band(struct net_device *ndev, int band)
+int
+wl_cfg80211_get_sta_channel(void)
{
- struct bcm_cfg80211 *cfg = g_bcm_cfg;
- int ret = 0;
- s32 roam_off;
- char ioctl_buf[50];
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(g_bcm_cfg);
+ int channel = 0;
- if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
- WL_ERR(("Invalid band\n"));
- return -EINVAL;
+ if (wl_get_drv_status(g_bcm_cfg, CONNECTED, ndev)) {
+ channel = g_bcm_cfg->channel;
}
-
- if ((ret = wldev_iovar_getint(ndev, "roam_off", &roam_off)) < 0) {
- WL_ERR(("geting roam_off failed code=%d\n", ret));
- return ret;
- } else if (roam_off == 1) {
- WL_DBG(("Roaming off, no need to set roam_band\n"));
- cfg->curr_band = band;
+ return channel;
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg)
+{
+ s32 bssidx;
+ int ret = 0;
+ int p2plo_pause = 0;
+ if (!cfg || !cfg->p2p) {
+ WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+ cfg, cfg ? cfg->p2p : 0));
return 0;
}
- if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band,
- sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
- WL_ERR(("seting roam_band failed code=%d\n", ret));
- return ret;
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+ "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL);
+ if (ret < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
}
- WL_DBG(("Setting band to %d\n", band));
- cfg->curr_band = band;
-
- return 0;
+ return ret;
}
-#endif /* WL_HOST_BAND_MGMT */
-
-#if defined(DHCP_SCAN_SUPPRESS)
-static void wl_cfg80211_scan_supp_timerfunc(ulong data)
+s32
+wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len)
{
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ wl_p2plo_listen_t p2plo_listen;
+ int ret = -EAGAIN;
+ int channel = 0;
+ int period = 0;
+ int interval = 0;
+ int count = 0;
- WL_DBG(("Enter \n"));
- schedule_work(&cfg->wlan_work);
-}
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ WL_ERR(("Sending Action Frames. Try it again.\n"));
+ goto exit;
+ }
-int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress)
-{
- int ret = 0;
- struct wireless_dev *wdev;
- struct bcm_cfg80211 *cfg;
- if (!dev || ((suppress != 0) && (suppress != 1))) {
- ret = -EINVAL;
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("Scanning already\n"));
goto exit;
}
- wdev = ndev_to_wdev(dev);
- if (!wdev) {
- ret = -EINVAL;
+
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) {
+ WL_ERR(("Scanning being aborted\n"));
goto exit;
}
- cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
- if (!cfg) {
- ret = -EINVAL;
+
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("p2p listen offloading already running\n"));
goto exit;
}
- if (suppress == cfg->scan_suppressed) {
- WL_DBG(("No change in scan_suppress state. Ignoring cmd..\n"));
- return 0;
+ /* Just in case if it is not enabled */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ goto exit;
}
- if (timer_pending(&cfg->scan_supp_timer))
- del_timer_sync(&cfg->scan_supp_timer);
+ bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t));
+
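+	/* The command buffer carries "channel period interval count"; all four
+	 * values must be non-zero for listen offload to start.
+	 */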
+ if (len) {
+ sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count);
+ if ((channel == 0) || (period == 0) ||
+ (interval == 0) || (count == 0)) {
+ WL_ERR(("Wrong argument %d/%d/%d/%d \n",
+ channel, period, interval, count));
+ ret = -EAGAIN;
+ goto exit;
+ }
+ p2plo_listen.period = period;
+ p2plo_listen.interval = interval;
+ p2plo_listen.count = count;
- if ((ret = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
- &suppress, sizeof(int), true)) < 0) {
- WL_ERR(("Scan suppress setting failed ret:%d \n", ret));
+ WL_ERR(("channel:%d period:%d, interval:%d count:%d\n",
+ channel, period, interval, count));
} else {
- WL_DBG(("Scan suppress %s \n", suppress ? "Enabled" : "Disabled"));
- cfg->scan_suppressed = suppress;
+ WL_ERR(("Argument len is wrong.\n"));
+ ret = -EAGAIN;
+ goto exit;
}
- /* If scan_suppress is set, Start a timer to monitor it (just incase) */
- if (cfg->scan_suppressed) {
- if (ret) {
- WL_ERR(("Retry scan_suppress reset at a later time \n"));
- mod_timer(&cfg->scan_supp_timer,
- jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_RETRY));
- } else {
- WL_DBG(("Start wlan_timer to clear of scan_suppress \n"));
- mod_timer(&cfg->scan_supp_timer,
- jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_TIMEOUT));
- }
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+ goto exit;
}
-exit:
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen,
+ sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen Failed :%d\n", ret));
+ goto exit;
+ }
+
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+ cfg->last_roc_id = P2PO_COOKIE;
+exit:
return ret;
}
-#endif /* DHCP_SCAN_SUPPRESS */
-
-int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
+s32
+wl_cfg80211_p2plo_listen_stop(struct net_device *dev)
{
- struct bcm_cfg80211 *cfg = NULL;
- struct net_device *ndev = NULL;
- unsigned long flags;
- int clear_flag = 0;
- int ret = 0;
-
- WL_TRACE(("Enter\n"));
-
- cfg = g_bcm_cfg;
- if (!cfg)
- return -EINVAL;
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int ret = -EAGAIN;
- spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
-#else
- if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
-#endif
- {
- cfg80211_scan_done(cfg->scan_request, true);
- cfg->scan_request = NULL;
- clear_flag = 1;
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
+ goto exit;
}
- spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
-
- if (clear_flag)
- wl_clr_drv_status(cfg, SCANNING, ndev);
+exit:
return ret;
}
-
-bool wl_cfg80211_is_vsdb_mode(void)
+#endif /* P2P_LISTEN_OFFLOADING */
+u64
+wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg)
{
- return (g_bcm_cfg && g_bcm_cfg->vsdb_mode);
+ u64 id = 0;
+ id = ++cfg->last_roc_id;
+#ifdef P2P_LISTEN_OFFLOADING
+ if (id == P2PO_COOKIE) {
+ id = ++cfg->last_roc_id;
+ }
+#endif /* P2P_LISTEN_OFFLOADING */
+ if (id == 0)
+ id = ++cfg->last_roc_id;
+ return id;
}
-void* wl_cfg80211_get_dhdp()
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int
+wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
{
struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ int ret;
- return cfg->pub;
-}
+ if (cfg->random_mac_enabled == enable) {
+ WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
+ return BCME_OK;
+ }
-bool wl_cfg80211_is_p2p_active(void)
-{
- return (g_bcm_cfg && g_bcm_cfg->p2p);
+ if (enable) {
+ ret = wl_cfg80211_random_mac_enable(dev);
+ } else {
+ ret = wl_cfg80211_random_mac_disable(dev);
+ }
+
+ if (!ret) {
+ cfg->random_mac_enabled = enable;
+ }
+
+ return ret;
}
-static void wl_cfg80211_work_handler(struct work_struct * work)
-{
- struct bcm_cfg80211 *cfg = NULL;
- struct net_info *iter, *next;
- s32 err = BCME_OK;
- s32 pm = PM_FAST;
- dhd_pub_t *dhd;
+int
+wl_cfg80211_random_mac_enable(struct net_device *dev)
+{
+ u8 current_mac[ETH_ALEN] = {0, };
+ s32 err = BCME_ERROR;
+ uint8 buffer[20] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ wl_scanmac_config_t *sm_config = NULL;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
- cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work);
- WL_DBG(("Enter \n"));
- if (cfg->pm_enable_work_on) {
- cfg->pm_enable_work_on = false;
- for_each_ndev(cfg, iter, next) {
- if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
- (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS))
- continue;
- if (iter->ndev) {
- dhd = (dhd_pub_t *)(cfg->pub);
- if (pm != PM_OFF && dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
- &pm, sizeof(pm), true)) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
- } else
- wl_cfg80211_update_power_mode(iter->ndev);
- }
- }
+ if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+ wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+ WL_ERR(("Fail to Set random mac, current state is wrong\n"));
+ return err;
}
-#if defined(DHCP_SCAN_SUPPRESS)
- else if (cfg->scan_suppressed) {
- /* There is pending scan_suppress. Clean it */
- WL_ERR(("Clean up from timer after %d msec\n", WL_SCAN_SUPPRESS_TIMEOUT));
- wl_cfg80211_scan_suppress(bcmcfg_to_prmry_ndev(cfg), 0);
+
+ /* Read current mac address */
+ err = wldev_iovar_getbuf_bsscfg(dev, "cur_etheraddr",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to get current dongle mac address\n"));
+ return err;
}
-#endif /* DHCP_SCAN_SUPPRESS */
-}
-u8
-wl_get_action_category(void *frame, u32 frame_len)
-{
- u8 category;
- u8 *ptr = (u8 *)frame;
- if (frame == NULL)
- return DOT11_ACTION_CAT_ERR_MASK;
- if (frame_len < DOT11_ACTION_HDR_LEN)
- return DOT11_ACTION_CAT_ERR_MASK;
- category = ptr[DOT11_ACTION_CAT_OFF];
- WL_INFORM(("Action Category: %d\n", category));
- return category;
-}
+ memcpy(current_mac, cfg->ioctl_buf, ETH_ALEN);
-int
-wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
-{
- u8 *ptr = (u8 *)frame;
- if (frame == NULL || ret_action == NULL)
- return BCME_ERROR;
- if (frame_len < DOT11_ACTION_HDR_LEN)
- return BCME_ERROR;
- if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
- return BCME_ERROR;
- *ret_action = ptr[DOT11_ACTION_ACT_OFF];
- WL_INFORM(("Public Action : %d\n", *ret_action));
- return BCME_OK;
-}
+ /* Enable scan mac */
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 1;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
-#ifdef WLFBT
-void
-wl_cfg80211_get_fbt_key(uint8 *key)
-{
- memcpy(key, g_bcm_cfg->fbt_key, FBT_KEYLEN);
-}
-#endif /* WLFBT */
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
-static int
-wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const struct ether_addr *bssid)
-{
- s32 err;
- wl_event_msg_t e;
+ if (err != BCME_OK) {
+ WL_ERR(("failed to enable scanmac, err=%d\n", err));
+ return err;
+ }
- bzero(&e, sizeof(e));
- e.event_type = cpu_to_be32(WLC_E_BSSID);
- memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
- /* trigger the roam event handler */
- err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+ /* Configure scanmac */
+ memset(buffer, 0x0, sizeof(buffer));
+ sm_config = (wl_scanmac_config_t *)sm->data;
+ sm->len = sizeof(*sm_config);
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_CONFIG;
+ sm_config->scan_bitmap = WL_SCANMAC_SCAN_UNASSOC;
+
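+	/* Keep the OUI (first three bytes) of the current MAC and have the
+	 * firmware randomize only the last three bytes of unassociated scans.
+	 */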
+ /* Set current mac address */
+ memcpy(&sm_config->mac.octet, current_mac, ETH_ALEN);
+ sm_config->mac.octet[3] = 0x0;
+ sm_config->mac.octet[4] = 0x0;
+ sm_config->mac.octet[5] = 0x0;
+
+ /* Set randomize mac address(last 3bytes) */
+ memset(&sm_config->random_mask.octet, 0x0, ETH_ALEN);
+ sm_config->random_mask.octet[3] = 0xff;
+ sm_config->random_mask.octet[4] = 0xff;
+ sm_config->random_mask.octet[5] = 0xff;
+
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
+
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed scanmac configuration\n"));
+
+ /* Disable scan mac for clean-up */
+ wl_cfg80211_random_mac_disable(dev);
+ return err;
+ }
+	WL_ERR(("random MAC enable done\n"));
return err;
}
-#ifdef WL_CFG80211_ACL
-static int
-wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
- const struct cfg80211_acl_data *acl)
+int
+wl_cfg80211_random_mac_disable(struct net_device *dev)
{
- int i;
- int ret = 0;
- int macnum = 0;
- int macmode = MACLIST_MODE_DISABLED;
- struct maclist *list;
+ s32 err = BCME_ERROR;
+ uint8 buffer[20] = {0, };
+ wl_scanmac_t *sm = NULL;
+ int len = 0;
+ wl_scanmac_enable_t *sm_enable = NULL;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
- /* get the MAC filter mode */
- if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
- macmode = MACLIST_MODE_ALLOW;
- } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
- acl->n_acl_entries) {
- macmode = MACLIST_MODE_DENY;
- }
+ sm = (wl_scanmac_t *)buffer;
+ sm_enable = (wl_scanmac_enable_t *)sm->data;
+ sm->len = sizeof(*sm_enable);
+ sm_enable->enable = 0;
+ len = OFFSETOF(wl_scanmac_t, data) + sm->len;
- /* if acl == NULL, macmode is still disabled.. */
- if (macmode == MACLIST_MODE_DISABLED) {
- if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+ sm->subcmd_id = WL_SCANMAC_SUBCMD_ENABLE;
- return ret;
- }
+ err = wldev_iovar_setbuf_bsscfg(dev, "scanmac",
+ sm, len, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
- macnum = acl->n_acl_entries;
- if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- WL_ERR(("%s : invalid number of MAC address entries %d\n",
- __FUNCTION__, macnum));
- return -1;
+ if (err != BCME_OK) {
+ WL_ERR(("failed to disable scanmac, err=%d\n", err));
+ return err;
}
- /* allocate memory for the MAC list */
- list = (struct maclist*)kmalloc(sizeof(int) +
- sizeof(struct ether_addr) * macnum, GFP_KERNEL);
- if (!list) {
- WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
- return -1;
- }
+ WL_ERR(("random MAC disable done\n"));
+ return err;
+}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
- /* prepare the MAC list */
- list->count = htod32(macnum);
- for (i = 0; i < macnum; i++) {
- memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
- }
- /* set the list */
- if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
- WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+int
+wl_cfg80211_iface_count(void)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_info *iter, *next;
+ int iface_count = 0;
- kfree(list);
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ iface_count++;
+ }
+ }
+ return iface_count;
+}
- return ret;
+#ifdef DHD_LOG_DUMP
+struct bcm_cfg80211*
+wl_get_bcm_cfg80211_ptr(void)
+{
+ return g_bcm_cfg;
}
-#endif /* WL_CFG80211_ACL */
+#endif /* DHD_LOG_DUMP */
-#ifdef WL_NAN
+#define CHECK_DONGLE_IDLE_TIME 50
+#define CHECK_DONGLE_IDLE_CNT 100
int
-wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len)
+wl_check_dongle_idle(struct wiphy *wiphy)
{
- return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len);
+ int error = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev;
+ int retry = 0;
+ struct channel_info ci;
+ if (!cfg)
+ return FALSE;
+ /* Use primary I/F for sending cmds down to firmware */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
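+	/* Poll WLC_GET_CHANNEL until the firmware reports no scan channel in
+	 * use, waiting CHECK_DONGLE_IDLE_TIME ms between up to
+	 * CHECK_DONGLE_IDLE_CNT attempts.
+	 */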
+ while (retry++ < CHECK_DONGLE_IDLE_CNT) {
+ error = wldev_ioctl(primary_ndev, WLC_GET_CHANNEL, &ci, sizeof(ci), false);
+ if (error != BCME_OK || ci.scan_channel != 0) {
+ WL_ERR(("Firmware is busy(err:%d scan channel:%d). wait %dms\n",
+ error, ci.scan_channel, CHECK_DONGLE_IDLE_TIME));
+ } else {
+ break;
+ }
+ wl_delay(CHECK_DONGLE_IDLE_TIME);
+ }
+ if (retry >= CHECK_DONGLE_IDLE_CNT) {
+ WL_ERR(("DONGLE is BUSY too long\n"));
+ return FALSE;
+ }
+ WL_DBG(("DONGLE is idle\n"));
+ return TRUE;
}
-#endif /* WL_NAN */
/*
* Linux cfg80211 driver
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfg80211.h 505096 2014-09-26 12:49:04Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg80211.h 608788 2015-12-29 10:59:33Z $
*/
/**
#include <net/cfg80211.h>
#include <linux/rfkill.h>
+#include <dngl_stats.h>
+#include <dhd.h>
#include <wl_cfgp2p.h>
+#include <linux/time.h>
struct wl_conf;
struct wl_iface;
#define htod32(i) (i)
#define htod16(i) (i)
+#define dtoh64(i) (i)
#define dtoh32(i) (i)
#define dtoh16(i) (i)
#define htodchanspec(i) (i)
#define WL_DBG_INFO (1 << 1)
#define WL_DBG_ERR (1 << 0)
+#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_print(const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+struct bcm_cfg80211 *wl_get_bcm_cfg80211_ptr(void);
+#endif /* DHD_LOG_DUMP */
+
/* 0 invalidates all debug messages. default is 1 */
#define WL_DBG_LEVEL 0xFF
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFG80211_ERROR_TEXT "CFG80211-INFO2) "
+#else
#define CFG80211_ERROR_TEXT "CFG80211-ERROR) "
-
-#define MAX_WAIT_TIME 1500
-#define DNGL_FUNC(func, parameters) func parameters;
-
-#define PM_BLOCK 1
-#define PM_ENABLE 0
+#endif /* CUSTOMER_HW4_DEBUG */
#if defined(DHD_DEBUG)
+#ifdef DHD_LOG_DUMP
+#define WL_ERR(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
#define WL_ERR(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk args; \
} \
} while (0)
+#endif /* DHD_LOG_DUMP */
#else /* defined(DHD_DEBUG) */
#define WL_ERR(args) \
do { \
#ifdef WL_TRACE_HW4
#undef WL_TRACE_HW4
#endif
+#ifdef CUSTOMER_HW4_DEBUG
+#define WL_TRACE_HW4(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#else
#define WL_TRACE_HW4 WL_TRACE
+#endif /* CUSTOMER_HW4_DEBUG */
#if (WL_DBG_LEVEL > 0)
#define WL_DBG(args) \
do { \
#define WL_IOCTL_LEN_MAX 2048
#define WL_EXTRA_BUF_MAX 2048
#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
-#define WL_AP_MAX 256
+#define WL_AP_MAX 256
#define WL_FILE_NAME_MAX 256
-#define WL_DWELL_TIME 200
-#define WL_MED_DWELL_TIME 400
+#define WL_DWELL_TIME 200
+#define WL_MED_DWELL_TIME 400
#define WL_MIN_DWELL_TIME 100
-#define WL_LONG_DWELL_TIME 1000
-#define IFACE_MAX_CNT 2
-#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
-#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
-#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
-#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
-#define WL_AF_TX_MAX_RETRY 5
+#define WL_LONG_DWELL_TIME 1000
+#define IFACE_MAX_CNT 4
+#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
+#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
+#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
+#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
+#define WL_AF_TX_MAX_RETRY 5
-#define WL_AF_SEARCH_TIME_MAX 450
-#define WL_AF_TX_EXTRA_TIME_MAX 200
+#define WL_AF_SEARCH_TIME_MAX 450
+#define WL_AF_TX_EXTRA_TIME_MAX 200
#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
#define WL_CHANNEL_SYNC_RETRY 5
#define WL_INVALID -1
+#ifdef DHD_LOSSLESS_ROAMING
+#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */
+#endif
/* Bring down SCB Timeout to 20secs from 60secs default */
#ifndef WL_SCB_TIMEOUT
-#define WL_SCB_TIMEOUT 20
+#define WL_SCB_TIMEOUT 20
+#endif
+
+#ifndef WL_SCB_ACTIVITY_TIME
+#define WL_SCB_ACTIVITY_TIME 5
+#endif
+
+#ifndef WL_SCB_MAX_PROBE
+#define WL_SCB_MAX_PROBE 3
+#endif
+
+#ifndef WL_MIN_PSPRETEND_THRESHOLD
+#define WL_MIN_PSPRETEND_THRESHOLD 2
#endif
/* SCAN_SUPPRESS timer values in ms */
#define WL_PM_ENABLE_TIMEOUT 10000
-#ifdef WLAIBSS
-/* Custom AIBSS beacon parameters */
-#define AIBSS_INITIAL_MIN_BCN_DUR 500
-#define AIBSS_MIN_BCN_DUR 5000
-#define AIBSS_BCN_FLOOD_DUR 5000
-#endif /* WLAIBSS */
+/* cfg80211 wowlan definitions */
+#define WL_WOWLAN_MAX_PATTERNS 8
+#define WL_WOWLAN_MIN_PATTERN_LEN 1
+#define WL_WOWLAN_MAX_PATTERN_LEN 255
+#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201
+#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \
+ WL_WOWLAN_MAX_PATTERNS - 1)
+
+#ifdef WLTDLS
+#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04"
+#define TDLS_TUNNELED_PRB_RESP "\x7f\x50\x6f\x9a\05"
+#endif /* WLTDLS */
+
/* driver status */
enum wl_status {
/* dongle escan state */
enum wl_escan_state {
- WL_ESCAN_STATE_IDLE,
- WL_ESCAN_STATE_SCANING
+ WL_ESCAN_STATE_IDLE,
+ WL_ESCAN_STATE_SCANING
};
/* fw downloading status */
enum wl_fw_status {
WL_ASSOC_RESP = 0x4
};
-enum wl_handler_del_type {
- WL_HANDLER_NOTUSE,
- WL_HANDLER_DEL,
- WL_HANDLER_MAINTAIN,
- WL_HANDLER_PEND
+enum wl_pm_workq_act_type {
+ WL_PM_WORKQ_SHORT,
+ WL_PM_WORKQ_LONG,
+ WL_PM_WORKQ_DEL
};
/* beacon / probe_response */
u8 channel;
};
+typedef struct wl_bss_vndr_ies {
+ u8 probe_req_ie[VNDR_IES_BUF_LEN];
+ u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 assoc_req_ie[VNDR_IES_BUF_LEN];
+ u8 assoc_res_ie[VNDR_IES_BUF_LEN];
+ u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
+ u32 probe_req_ie_len;
+ u32 probe_res_ie_len;
+ u32 assoc_req_ie_len;
+ u32 assoc_res_ie_len;
+ u32 beacon_ie_len;
+} wl_bss_vndr_ies_t;
+
+typedef struct wl_cfgbss {
+ u8 *wpa_ie;
+ u8 *rsn_ie;
+ u8 *wps_ie;
+ bool security_mode;
+ struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */
+} wl_cfgbss_t;
+
/* cfg driver profile */
struct wl_profile {
u32 mode;
bool pm_restore;
bool pm_block;
s32 pm;
+ s32 bssidx;
+ wl_cfgbss_t bss;
+ u32 ulb_bw;
struct list_head list; /* list of all net_info structure */
};
pmkid_t foo[MAXPMKID - 1];
};
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
#define ESCAN_BUF_SIZE (64 * 1024)
} removal_element_t;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-struct ap_info {
-/* Structure to hold WPS, WPA IEs for a AP */
- u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN];
- u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
- u8 assoc_res_ie[VNDR_IES_MAX_BUF_LEN];
- u32 probe_res_ie_len;
- u32 beacon_ie_len;
- u32 assoc_res_ie_len;
- u8 *wpa_ie;
- u8 *rsn_ie;
- u8 *wps_ie;
- bool security_mode;
-};
-
-struct sta_info {
- /* Structure to hold WPS IE for a STA */
- u8 probe_req_ie[VNDR_IES_BUF_LEN];
- u8 assoc_req_ie[VNDR_IES_BUF_LEN];
- u32 probe_req_ie_len;
- u32 assoc_req_ie_len;
-};
-
struct afx_hdl {
wl_af_params_t *pending_tx_act_frm;
struct ether_addr tx_dst_addr;
struct net_device *dev;
struct work_struct work;
- u32 bssidx;
+ s32 bssidx;
u32 retry;
s32 peer_chan;
s32 peer_listen_chan; /* search channel: configured by upper layer */
u32 wpa2_ie_len;
};
-#ifdef WL_SDO
-/* Service discovery */
-typedef struct {
- uint8 transaction_id; /* Transaction ID */
- uint8 protocol; /* Service protocol type */
- uint16 query_len; /* Length of query */
- uint16 response_len; /* Length of response */
- uint8 qrbuf[1];
-} wl_sd_qr_t;
+#ifdef P2P_LISTEN_OFFLOADING
typedef struct {
- uint16 period; /* extended listen period */
- uint16 interval; /* extended listen interval */
-} wl_sd_listen_t;
-
-#define WL_SD_STATE_IDLE 0x0000
-#define WL_SD_SEARCH_SVC 0x0001
-#define WL_SD_ADV_SVC 0x0002
-
-enum wl_dd_state {
- WL_DD_STATE_IDLE,
- WL_DD_STATE_SEARCH,
- WL_DD_STATE_LISTEN
-};
-
-#define MAX_SDO_PROTO_STR_LEN 20
-typedef struct wl_sdo_proto {
- char str[MAX_SDO_PROTO_STR_LEN];
- u32 val;
-} wl_sdo_proto_t;
-
-typedef struct sd_offload {
- u32 sd_state;
- enum wl_dd_state dd_state;
- wl_sd_listen_t sd_listen;
-} sd_offload_t;
-
-typedef struct sdo_event {
- u8 addr[ETH_ALEN];
- uint16 freq; /* channel Freq */
- uint8 count; /* Tlv count */
- uint16 update_ind;
-} sdo_event_t;
-#endif /* WL_SDO */
+ uint16 period; /* listen offload period */
+ uint16 interval; /* listen offload interval */
+ uint16 count; /* listen offload count */
+ uint16 pad; /* pad for 32bit align */
+} wl_p2plo_listen_t;
+#endif /* P2P_LISTEN_OFFLOADING */
#ifdef WL11U
/* Max length of Interworking element */
#define IW_IES_MAX_BUF_LEN 9
#endif
-#ifdef WLFBT
-#define FBT_KEYLEN 32
-#endif
#define MAX_EVENT_BUF_NUM 16
typedef struct wl_eventmsg_buf {
- u16 num;
- struct {
+ u16 num;
+ struct {
u16 type;
bool set;
} event [MAX_EVENT_BUF_NUM];
char name[IFNAMSIZ+1];
} wl_if_event_info;
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define GET_BSS_INFO_LEN 90
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
/* private data of cfg80211 interface */
struct bcm_cfg80211 {
struct wireless_dev *wdev; /* representing cfg cfg80211 device */
EVENT_HANDLER evt_handler[WLC_E_LAST];
struct list_head eq_list; /* used for event queue */
struct list_head net_list; /* used for struct net_info */
+	spinlock_t net_list_sync;	/* to protect net_list (and others if needed) */
spinlock_t eq_lock; /* for event queue synchronization */
spinlock_t cfgdrv_lock; /* to protect scan status (and others if needed) */
struct completion act_frm_scan;
struct completion iface_disable;
struct completion wait_next_af;
	struct mutex usr_sync;	/* mainly for up/down synchronization */
+ struct mutex scan_complete; /* serialize scan_complete call */
struct wl_scan_results *bss_list;
struct wl_scan_results *scan_results;
#endif /* DEBUGFS_CFG80211 */
struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
tsk_ctl_t event_tsk; /* task of main event handler thread */
- void *pub;
+ dhd_pub_t *pub;
u32 iface_cnt;
u32 channel; /* current channel */
u32 af_sent_channel; /* channel action frame is sent */
wl_if_event_info if_event_info;
struct completion send_af_done;
struct afx_hdl *afx_hdl;
- struct ap_info *ap_info;
- struct sta_info *sta_info;
struct p2p_info *p2p;
bool p2p_supported;
void *btcoex_info;
struct timer_list scan_timeout; /* Timer for catch scan event timeout */
+#if defined(P2P_IE_MISSING_FIX)
+ bool p2p_prb_noti;
+#endif
s32(*state_notifier) (struct bcm_cfg80211 *cfg,
struct net_info *_net_info, enum wl_status state, bool set);
unsigned long interrested_state;
wlc_ssid_t hostapd_ssid;
-#ifdef WL_SDO
- sd_offload_t *sdo;
-#endif
#ifdef WL11U
bool wl11u;
u8 iw_ie[IW_IES_MAX_BUF_LEN];
#ifdef WL_SCHED_SCAN
struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */
#endif /* WL_SCHED_SCAN */
-#ifdef WL_HOST_BAND_MGMT
- u8 curr_band;
-#endif /* WL_HOST_BAND_MGMT */
bool scan_suppressed;
struct timer_list scan_supp_timer;
struct work_struct wlan_work;
	struct mutex event_sync;	/* mainly for up/down synchronization */
bool disable_roam_event;
- bool pm_enable_work_on;
struct delayed_work pm_enable_work;
+ struct mutex pm_sync; /* mainly for pm work synchronization */
+
vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */
int ibss_vsie_len;
-#ifdef WLAIBSS
- u32 aibss_txfail_pid;
- u32 aibss_txfail_seq;
-#endif /* WLAIBSS */
u32 rmc_event_pid;
u32 rmc_event_seq;
#ifdef WLAIBSS_MCHAN
bcm_struct_cfgdev *bss_cfgdev; /* For DUAL STA/STA+AP */
s32 cfgdev_bssidx;
	bool bss_pending_op;		/* indicates whether there is a pending IF operation */
-#ifdef WLFBT
- uint8 fbt_key[FBT_KEYLEN];
-#endif
int roam_offload;
+#ifdef WL_NAN
+ bool nan_enable;
bool nan_running;
+#endif /* WL_NAN */
+#ifdef WL_CFG80211_P2P_DEV_IF
+ bool down_disc_if;
+#endif /* WL_CFG80211_P2P_DEV_IF */
#ifdef P2PLISTEN_AP_SAMECHN
bool p2p_resp_apchn_status;
#endif /* P2PLISTEN_AP_SAMECHN */
+ struct wl_wsec_key wep_key;
#ifdef WLTDLS
u8 *tdls_mgmt_frame;
u32 tdls_mgmt_frame_len;
s32 tdls_mgmt_freq;
#endif /* WLTDLS */
+ bool need_wait_afrx;
+#ifdef QOS_MAP_SET
+ uint8 *up_table; /* user priority table, size is UP_TABLE_MAX */
+#endif /* QOS_MAP_SET */
+ struct ether_addr last_roamed_addr;
+#ifdef DHD_LOSSLESS_ROAMING
+ struct timer_list roam_timeout; /* Timer for catch roam timeout */
+#endif
+ bool rcc_enabled; /* flag for Roam channel cache feature */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ char bss_info[GET_BSS_INFO_LEN];
+ wl_event_msg_t event_auth_assoc;
+ u32 assoc_reject_status;
+ u32 roam_count;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ u16 ap_oper_channel;
+ bool revert_ndo_disable;
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ bool random_mac_enabled;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+ int p2p_disconnected; // terence 20130703: Fix for wrong group_capab (timing issue)
+ struct ether_addr disconnected_bssid;
};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+
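+/* Wrap list_for_each_entry_safe() so that, with STRICT_GCC_WARNINGS on
+ * GCC 4.6+, the -Wcast-qual warnings raised by the casts inside the kernel
+ * macro are suppressed.
+ */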
+#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+list_for_each_entry_safe((pos), (next), (head), member) \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
+list_for_each_entry_safe((pos), (next), (head), member) \
+
+#endif /* STRICT_GCC_WARNINGS */
static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
{
return bss = bss ?
(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
}
+
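+/* Debug helper: log every net_list entry (bssidx, ndev, wdev) while holding
+ * the net_list lock.
+ */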
+static inline void
+wl_probe_wdev_all(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+ int idx = 0;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
+ &cfg->net_list, list) {
+ WL_ERR(("%s: net_list[%d] bssidx: %d, "
+ "ndev: %p, wdev: %p \n", __FUNCTION__,
+ idx++, _net_info->bssidx,
+ _net_info->ndev, _net_info->wdev));
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) {
+ info = _net_info;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline void
+wl_dealloc_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+#ifdef DHD_IFDEBUG
+ WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev));
+#endif
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (wdev && (_net_info->wdev == wdev)) {
+ wl_cfgbss_t *bss = &_net_info->bss;
+
+ kfree(bss->wpa_ie);
+ bss->wpa_ie = NULL;
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ kfree(bss->wps_ie);
+ bss->wps_ie = NULL;
+ list_del(&_net_info->list);
+ cfg->iface_cnt--;
+ kfree(_net_info);
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+#ifdef DHD_IFDEBUG
+ WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
+}
+
static inline s32
wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct wireless_dev * wdev, s32 mode, bool pm_block)
+ struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx)
{
struct net_info *_net_info;
s32 err = 0;
+ unsigned long int flags;
+#ifdef DHD_IFDEBUG
+ WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev));
+#endif
+ /* Check whether there is any duplicate entry for the
+	 * same bssidx
+ */
+ if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) {
+ /* We have a duplicate entry for the same bssidx
+ * already present which shouldn't have been the case.
+ * Attempt recovery.
+ */
+ WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx));
+ wl_probe_wdev_all(cfg);
+#ifdef DHD_DEBUG
+ ASSERT(0);
+#endif /* DHD_DEBUG */
+ WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx));
+ wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev);
+ }
if (cfg->iface_cnt == IFACE_MAX_CNT)
return -ENOMEM;
_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
_net_info->pm = 0;
_net_info->pm_block = pm_block;
_net_info->roam_off = WL_INVALID;
+ _net_info->bssidx = bssidx;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
cfg->iface_cnt++;
list_add(&_net_info->list, &cfg->net_list);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
+#ifdef DHD_IFDEBUG
+ WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
return err;
}
-static inline void
-wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev)
-{
- struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev)) {
- list_del(&_net_info->list);
- cfg->iface_cnt--;
- kfree(_net_info);
- }
- }
-
-}
static inline void
wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
{
struct net_info *_net_info, *next;
-
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ wl_cfgbss_t *bss = &_net_info->bss;
+
+ kfree(bss->wpa_ie);
+ bss->wpa_ie = NULL;
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ kfree(bss->wps_ie);
+ bss->wps_ie = NULL;
list_del(&_net_info->list);
- if (_net_info->wdev)
- kfree(_net_info->wdev);
- kfree(_net_info);
+ if (_net_info->wdev)
+ kfree(_net_info->wdev);
+ kfree(_net_info);
}
cfg->iface_cnt = 0;
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
static inline u32
wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
{
struct net_info *_net_info, *next;
u32 cnt = 0;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
if (_net_info->ndev &&
test_bit(status, &_net_info->sme_state))
cnt++;
}
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return cnt;
}
static inline void
wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
switch (op) {
case 1:
- return; /* set all status is not allowed */
+ break; /* set all status is not allowed */
case 2:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
cfg->state_notifier(cfg, _net_info, status, false);
- break;
+ return;
case 4:
- return; /* change all status is not allowed */
+ break; /* change all status is not allowed */
default:
- return; /* unknown operation */
+ break; /* unknown operation */
}
}
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
static inline void
wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
{
struct net_info *_net_info, *next;
+ unsigned long int flags;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
switch (op) {
case 1:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
set_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
cfg->state_notifier(cfg, _net_info, status, true);
- break;
+ return;
case 2:
+ /*
+ * Release the spinlock before calling notifier. Else there
+ * will be nested calls
+ */
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
cfg->state_notifier(cfg, _net_info, status, false);
- break;
+ return;
case 4:
change_bit(status, &_net_info->sme_state);
break;
}
}
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+
+}
+
+static inline wl_cfgbss_t *
+wl_get_cfgbss_by_wdev(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ wl_cfgbss_t *bss = NULL;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (wdev && (_net_info->wdev == wdev)) {
+ bss = &_net_info->bss;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return bss;
}
static inline u32
struct net_device *ndev)
{
struct net_info *_net_info, *next;
+ u32 stat = 0;
+ unsigned long int flags;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev))
- return test_bit(status, &_net_info->sme_state);
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ stat = test_bit(status, &_net_info->sme_state);
+ break;
+ }
}
- return 0;
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return stat;
}
static inline s32
wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
+ s32 mode = -1;
+ unsigned long int flags;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev))
- return _net_info->mode;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ mode = _net_info->mode;
+ break;
+ }
}
- return -1;
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return mode;
}
-
static inline void
wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 mode)
{
struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ _net_info->mode = mode;
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
+static inline s32
+wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next;
+ s32 bssidx = -1;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (_net_info->wdev && (_net_info->wdev == wdev)) {
+ bssidx = _net_info->bssidx;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return bssidx;
+}
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev))
- _net_info->mode = mode;
+static inline struct wireless_dev *
+wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+ struct net_info *_net_info, *next;
+ struct wireless_dev *wdev = NULL;
+ unsigned long int flags;
+
+ if (bssidx < 0)
+ return NULL;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (_net_info->bssidx == bssidx) {
+ wdev = _net_info->wdev;
+ break;
+ }
}
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return wdev;
}
+
static inline struct wl_profile *
wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
+ struct wl_profile *prof = NULL;
+ unsigned long int flags;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev))
- return &_net_info->profile;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ prof = &_net_info->profile;
+ break;
+ }
}
- return NULL;
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return prof;
}
static inline struct net_info *
wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
- struct net_info *_net_info, *next;
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
- list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- if (ndev && (_net_info->ndev == ndev))
- return _net_info;
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev)) {
+ info = _net_info;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return info;
+}
+
+static inline struct net_info *
+wl_get_netinfo_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+{
+ struct net_info *_net_info, *next, *info = NULL;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (wdev && (_net_info->wdev == wdev)) {
+ info = _net_info;
+ break;
+ }
}
- return NULL;
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+ return info;
}
+
+#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
#define ndev_to_wlc_ndev(ndev, cfg) (ndev)
#endif /* WL_ENABLE_P2P_IF */
-#if defined(WL_CFG80211_P2P_DEV_IF)
#define wdev_to_wlc_ndev(wdev, cfg) \
- ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \
- bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev))
+ (wdev_to_ndev(wdev) ? \
+ wdev_to_ndev(wdev) : bcmcfg_to_prmry_ndev(cfg))
+#if defined(WL_CFG80211_P2P_DEV_IF)
#define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg)
#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
#elif defined(WL_ENABLE_P2P_IF)
#endif /* WL_CFG80211_P2P_DEV_IF */
#if defined(WL_CFG80211_P2P_DEV_IF)
+#define cfgdev_to_wdev(cfgdev) (cfgdev)
#define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev)
-#define cfgdev_to_ndev(cfgdev) cfgdev ? (cfgdev->netdev) : NULL
+#define cfgdev_to_ndev(cfgdev) (cfgdev ? (cfgdev->netdev) : NULL)
+#define wdev_to_cfgdev(cfgdev) (cfgdev)
#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
#else
+#define cfgdev_to_wdev(cfgdev) (cfgdev->ieee80211_ptr)
+#define wdev_to_cfgdev(cfgdev) cfgdev ? (cfgdev->netdev) : NULL
#define ndev_to_cfgdev(ndev) (ndev)
#define cfgdev_to_ndev(cfgdev) (cfgdev)
#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
true : false)
#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#define scan_req_iftype(req) (req->dev->ieee80211_ptr->iftype)
+#else
+#define scan_req_iftype(req) (req->wdev->iftype)
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) */
+
#define wl_to_sr(w) (w->scan_req_int)
#if defined(STATIC_WL_PRIV_STRUCT)
#define wl_to_ie(w) (w->ie)
#define for_each_ndev(cfg, iter, next) \
list_for_each_entry_safe(iter, next, &cfg->net_list, list)
-
/* In case of WPS from wpa_supplicant, pairwise siute and group suite is 0.
* In addtion to that, wpa_version is WPA_VERSION_1
*/
((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
(!_sme->crypto.n_ciphers_pairwise) && \
(!_sme->crypto.cipher_group))
+
+#define IS_AKM_SUITE_FT(sec) false
+
+#define IS_AKM_SUITE_CCKM(sec) false
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
+#define STA_INFO_BIT(info) (1ul << NL80211_STA_ ## info)
+#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
+#else
+#define STA_INFO_BIT(info) (STATION_ ## info)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */
+
extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
extern void wl_cfg80211_detach(void *para);
void wl_cfg80211_set_parent_dev(void *dev);
struct device *wl_cfg80211_get_parent_dev(void);
+/* clear IEs */
+extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx);
+
extern s32 wl_cfg80211_up(void *para);
extern s32 wl_cfg80211_down(void *para);
extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx);
extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx);
extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx);
extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
- uint8 *mac, uint8 bssidx);
+ uint8 *mac, uint8 bssidx, char *dngl_name);
extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev);
-extern bool wl_cfg80211_is_vsdb_mode(void);
+extern bool wl_cfg80211_is_concurrent_mode(void);
extern void* wl_cfg80211_get_dhdp(void);
extern bool wl_cfg80211_is_p2p_active(void);
+extern bool wl_cfg80211_is_roam_offload(void);
+extern bool wl_cfg80211_is_event_from_connected_bssid(const wl_event_msg_t *e, int ifidx);
extern void wl_cfg80211_dbg_level(u32 level);
extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
enum wl_management_type type);
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
+#ifdef WL11ULB
+extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode);
+extern s32 wl_cfg80211_set_ulb_bw(struct net_device *dev,
+ u32 ulb_bw, char *ifname);
+#endif /* WL11ULB */
#ifdef P2PLISTEN_AP_SAMECHN
extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
#endif /* P2PLISTEN_AP_SAMECHN */
void* wl_cfg80211_btcoex_init(struct net_device *ndev);
void wl_cfg80211_btcoex_deinit(void);
-#ifdef WL_SDO
-extern s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
-extern s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
-extern s32 wl_cfg80211_sd_offload(struct net_device *net, char *cmd, char* buf, int len);
-extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
-extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
-
-#endif
-
#ifdef WL_SUPPORT_AUTO_CHANNEL
#define CHANSPEC_BUF_SIZE 1024
#define CHAN_SEL_IOCTL_DELAY 300
extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
int total_len);
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-
extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n);
-extern int wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str);
extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
extern s32 wl_mode_to_nl80211_iftype(s32 mode);
int wl_cfg80211_do_driver_init(struct net_device *net);
void wl_cfg80211_enable_trace(u32 level);
extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
extern s32 wl_cfg80211_if_is_group_owner(void);
-extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
-extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+extern chanspec_t wl_ch_host_to_driver(s32 bssidx, u16 channel);
extern s32 wl_set_tx_power(struct net_device *dev,
enum nl80211_tx_power_setting type, s32 dbm);
extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev);
-#ifdef WL_HOST_BAND_MGMT
-extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band);
-#endif /* WL_HOST_BAND_MGMT */
-#if defined(DHCP_SCAN_SUPPRESS)
-extern int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress);
-#endif /* OEM_ANDROID */
extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
+extern void wl_terminate_event_handler(void);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len);
+extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+
#define SCAN_BUF_CNT 2
#define SCAN_BUF_NEXT 1
#define WL_SCANTYPE_LEGACY 0x1
#define wl_escan_init_sync_id(a)
extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len);
extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
-#ifdef WLAIBSS
-extern void wl_cfg80211_set_txfail_pid(int pid);
-#endif /* WLAIBSS */
extern void wl_cfg80211_set_rmc_pid(int pid);
+extern int wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag,
+ const u8 *vndr_ie, u32 vndr_ie_len);
-#ifdef WLFBT
-extern void wl_cfg80211_get_fbt_key(uint8 *key);
-#endif
/* Action frame specific functions */
extern u8 wl_get_action_category(void *frame, u32 frame_len);
extern int wl_cfg80211_get_ioctl_version(void);
extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable);
+extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev,
+ void *buf, s32 buflen);
+extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev,
+ void *buf, s32 buflen);
+#if defined(WL_VIRTUAL_APSTA)
+extern int wl_cfg80211_interface_create(struct net_device *dev, char *name);
+extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name);
+#endif /* defined (WL_VIRTUAL_APSTA) */
#ifdef WL_NAN
extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd,
extern void wl_cfg80211_del_p2p_wdev(void);
#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+extern int wl_cfg80211_set_spect(struct net_device *dev, int spect);
+extern int wl_cfg80211_get_sta_channel(void);
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+#ifdef P2P_LISTEN_OFFLOADING
+extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len);
+extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev);
+#endif /* P2P_LISTEN_OFFLOADING */
+
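+/* Bail out of a cfg80211 op with -EIO when the primary interface has not
+ * reached the READY state yet.
+ */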
+#define RETURN_EIO_IF_NOT_UP(wlpriv) \
+do { \
+ struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
+ if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \
+ WL_INFORM(("device is not ready\n")); \
+ return -EIO; \
+ } \
+} while (0)
+
+#ifdef QOS_MAP_SET
+extern uint8 *wl_get_up_table(void);
+#endif /* QOS_MAP_SET */
+
+#define P2PO_COOKIE 65535
+u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg);
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
+int wl_cfg80211_random_mac_enable(struct net_device *dev);
+int wl_cfg80211_random_mac_disable(struct net_device *dev);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+int wl_cfg80211_iface_count(void);
+int wl_check_dongle_idle(struct wiphy *wiphy);
#endif /* _wl_cfg80211_h_ */
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfg_btcoex.c 467328 2014-04-03 01:23:40Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfg_btcoex.c 514727 2014-11-12 03:02:48Z $
*/
#include <net/rtnetlink.h>
if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
WL_TRACE_HW4(("DHCP session starts\n"));
-#if defined(DHCP_SCAN_SUPPRESS)
- /* Suppress scan during the DHCP */
- wl_cfg80211_scan_suppress(dev, 1);
-#endif /* OEM_ANDROID */
#ifdef PKT_FILTER_SUPPORT
dhd->dhcp_in_progress = 1;
else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
-#if defined(DHCP_SCAN_SUPPRESS)
- /* Since DHCP is complete, enable the scan back */
- wl_cfg80211_scan_suppress(dev, 0);
-#endif /* OEM_ANDROID */
#ifdef PKT_FILTER_SUPPORT
dhd->dhcp_in_progress = 0;
/*
* Linux cfgp2p driver
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfgp2p.c 504573 2014-09-24 15:21:25Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgp2p.c 604795 2015-12-08 13:45:42Z $
*
*/
#include <typedefs.h>
#include <wl_cfgp2p.h>
#include <wldev_common.h>
#include <wl_android.h>
-
-#if defined(P2PONEINT)
#include <dngl_stats.h>
#include <dhd.h>
-#endif
+#include <dhd_linux.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <dhd_cfg80211.h>
static s8 scanparambuf[WLC_IOCTL_SMLEN];
-static s8 g_mgmt_ie_buf[2048];
static bool
wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
-static u32
-wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
- s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
-#ifdef P2PONEINT
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
-s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
-int wl_cfgp2p_if_open(struct net_device *net);
-int wl_cfgp2p_if_stop(struct net_device *net);
-#endif
-
#if defined(WL_ENABLE_P2P_IF)
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
-int wl_cfgp2p_if_open(struct net_device *net);
-int wl_cfgp2p_if_stop(struct net_device *net);
+static int wl_cfgp2p_if_open(struct net_device *net);
+static int wl_cfgp2p_if_stop(struct net_device *net);
static const struct net_device_ops wl_cfgp2p_if_ops = {
.ndo_open = wl_cfgp2p_if_open,
.ndo_stop = wl_cfgp2p_if_stop,
.ndo_do_ioctl = wl_cfgp2p_do_ioctl,
-#ifndef P2PONEINT
.ndo_start_xmit = wl_cfgp2p_start_xmit,
-#endif
};
#endif /* WL_ENABLE_P2P_IF */
-#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
-static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
-static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
-
-static int wl_cfgp2p_if_dummy(struct net_device *net)
-{
- return 0;
-}
-
-static const struct net_device_ops wl_cfgp2p_if_ops = {
- .ndo_open = wl_cfgp2p_if_dummy,
- .ndo_stop = wl_cfgp2p_if_dummy,
- .ndo_do_ioctl = wl_cfgp2p_do_ioctl,
- .ndo_start_xmit = wl_cfgp2p_start_xmit,
-};
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
{
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
return false;
+#ifdef WL11U
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP)
+ return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+ (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET,
+ frame_len);
+
+ else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
+ return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+ (u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
+ frame_len);
+ else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
+ sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ)
+ return true;
+ else
+ return false;
+#else
if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
return true;
else
return false;
+#endif /* WL11U */
}
+
+bool wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len)
+{
+
+ wifi_p2psd_gas_pub_act_frame_t *sd_act_frm;
+
+ if (frame == NULL)
+ return false;
+
+ sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
+ if (frame_len < (sizeof(wifi_p2psd_gas_pub_act_frame_t) - 1))
+ return false;
+ if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
+ return false;
+
+ if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ)
+ return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
+ (u8 *)sd_act_frm->query_data,
+ frame_len);
+ else
+ return false;
+}
+
void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
{
wifi_p2p_pub_act_frame_t *pact_frm;
" channel=%d\n", (tx)? "TX": "RX", channel));
break;
default:
- CFGP2P_ACTION(("%s Unknown P2P Public Action Frame,"
+ CFGP2P_ACTION(("%s Unknown Public Action Frame,"
" channel=%d\n", (tx)? "TX": "RX", channel));
}
sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)frame;
switch (sd_act_frm->action) {
case P2PSD_ACTION_ID_GAS_IREQ:
- CFGP2P_ACTION(("%s P2P GAS Initial Request,"
+ CFGP2P_ACTION(("%s GAS Initial Request,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_IRESP:
- CFGP2P_ACTION(("%s P2P GAS Initial Response,"
+ CFGP2P_ACTION(("%s GAS Initial Response,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_CREQ:
- CFGP2P_ACTION(("%s P2P GAS Comback Request,"
+ CFGP2P_ACTION(("%s GAS Comback Request,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
case P2PSD_ACTION_ID_GAS_CRESP:
- CFGP2P_ACTION(("%s P2P GAS Comback Response,"
+ CFGP2P_ACTION(("%s GAS Comback Response,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
break;
default:
- CFGP2P_ACTION(("%s Unknown P2P GAS Frame,"
+ CFGP2P_ACTION(("%s Unknown GAS Frame,"
" channel=%d\n", (tx)? "TX" : "RX", channel));
}
CFGP2P_ERR(("struct p2p_info allocation failed\n"));
return -ENOMEM;
}
-#define INIT_IE(IE_TYPE, BSS_TYPE) \
- do { \
- memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
- sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
- wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
- } while (0);
-
- INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
- INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY);
- INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY);
- INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY);
- INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY);
- INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE);
- INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE);
- INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE);
- INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE);
- INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE);
- INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
- INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
-#undef INIT_IE
+
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL;
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) = -1;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) = -1;
return BCME_OK;
}
void
wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
{
- CFGP2P_DBG(("In\n"));
+ CFGP2P_ERR(("In\n"));
if (cfg->p2p) {
kfree(cfg->p2p);
cfg->p2p = NULL;
CFGP2P_ERR(("WLC_DOWN error %d\n", ret));
return ret;
}
- wldev_iovar_setint(ndev, "apsta", val);
+
+ ret = wldev_iovar_setint(ndev, "apsta", val);
+ if (ret < 0) {
+ /* return error and fail the initialization */
+ CFGP2P_ERR(("wl apsta %d set error. ret: %d\n", val, ret));
+ return ret;
+ }
+
ret = wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true);
if (ret < 0) {
CFGP2P_ERR(("WLC_UP error %d\n", ret));
return ret;
}
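+/* True only when both P2P connection bsscfgs are in use, i.e. multi-P2P
+ * is operational.
+ */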
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg)
+{
+ if (!cfg->p2p) {
+ CFGP2P_DBG(("p2p not enabled! \n"));
+ return false;
+ }
+
+ if ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) &&
+ (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1))
+ return true;
+ else
+ return false;
+}
+
/* Create a new P2P BSS.
* Parameters:
* @mac : MAC address of the BSS to create
{
wl_p2p_if_t ifreq;
s32 err;
- u32 scb_timeout = WL_SCB_TIMEOUT;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
ifreq.type = if_type;
err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
-
- if (unlikely(err < 0))
+ if (unlikely(err < 0)) {
printk("'cfg p2p_ifadd' error %d\n", err);
- else if (if_type == WL_P2P_IF_GO) {
- err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
- if (unlikely(err < 0))
- printk("'cfg scb_timeout' error %d\n", err);
+ return err;
}
+
return err;
}
*/
s32
wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
- chanspec_t chspec)
+ chanspec_t chspec, s32 conn_idx)
{
wl_p2p_if_t ifreq;
s32 err;
- u32 scb_timeout = WL_SCB_TIMEOUT;
- struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+ struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, conn_idx);
ifreq.type = if_type;
ifreq.chspec = chspec;
err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
-
if (unlikely(err < 0)) {
printk("'cfg p2p_ifupd' error %d\n", err);
} else if (if_type == WL_P2P_IF_GO) {
- err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
- if (unlikely(err < 0))
- printk("'cfg scb_timeout' error %d\n", err);
+ cfg->p2p->p2p_go_count++;
}
return err;
}
}
#endif /* P2PLISTEN_AP_SAMECHN */
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#if defined(P2P_DISCOVERY_WAR)
- if (mode == WL_P2P_DISC_ST_LISTEN || mode == WL_P2P_DISC_ST_SEARCH) {
- if (!cfg->p2p->vif_created) {
- if (wldev_iovar_setint(wl_to_prmry_ndev(cfg), "mpc", 0) < 0) {
- WL_ERR(("mpc disabling failed\n"));
- }
- }
- }
-#endif /* defined(P2P_DISCOVERY_WAR) */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
discovery_mode.state = mode;
- discovery_mode.chspec = wl_ch_host_to_driver(channel);
+ discovery_mode.chspec = wl_ch_host_to_driver(bssidx, channel);
discovery_mode.dwell = listen_ms;
ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
return ret;
}
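+/* Pick the first free P2P connection bsscfg slot. The second slot is handed
+ * out only when the firmware runs in multi-P2P mode (DHD_FLAG_MP2P_MODE) and
+ * no more than one connection already exists.
+ */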
+int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg)
+{
+ int i;
+ s32 connected_cnt;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (!dhd)
+ return (-ENODEV);
+ for (i = P2PAPI_BSSCFG_CONNECTION1; i < P2PAPI_BSSCFG_MAX; i++) {
+ if (wl_to_p2p_bss_bssidx(cfg, i) == -1) {
+ if (i == P2PAPI_BSSCFG_CONNECTION2) {
+ if (!(dhd->op_mode & DHD_FLAG_MP2P_MODE)) {
+ CFGP2P_ERR(("Multi p2p not supported"));
+ return BCME_ERROR;
+ }
+ if ((connected_cnt = wl_get_drv_status_all(cfg, CONNECTED)) > 1) {
+ CFGP2P_ERR(("Failed to create second p2p interface"
+ "Already one connection exists"));
+ return BCME_ERROR;
+ }
+ }
+ return i;
+ }
+ }
+ return BCME_ERROR;
+}
+
s32
wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
{
- s32 index = 0;
+ s32 bssidx = 0;
s32 ret = BCME_OK;
CFGP2P_DBG(("enter\n"));
return ret;
}
/* Enable P2P Discovery in the WL Driver */
- ret = wl_cfgp2p_get_disc_idx(cfg, &index);
+ ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx);
if (ret < 0) {
return ret;
}
+ /* In the CFG80211 path, check whether the p2p_discovery interface has an allocated p2p_wdev */
+ if (!cfg->p2p_wdev) {
+ CFGP2P_ERR(("p2p_wdev is NULL.\n"));
+ return BCME_NODEVICE;
+ }
+ /* Make an entry in the netinfo */
+ wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx);
+
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx;
/* Set the initial discovery state to SCAN */
ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
- CFGP2P_DBG(("enter\n"));
+ s32 bssidx;
- if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) <= 0) {
+ CFGP2P_DBG(("enter\n"));
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (bssidx <= 0) {
CFGP2P_ERR(("do nothing, not initialized\n"));
return -1;
}
+
+ /* Clear our saved WPS and P2P IEs for the discovery BSS */
+ wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
+
/* Set the discovery state to SCAN */
- ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ bssidx);
/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
ret = wl_cfgp2p_set_discovery(cfg, 0);
- /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
- * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
- * BSS.
- */
+ /* Remove the p2p disc entry in the netinfo */
+#ifdef DHD_IFDEBUG
+ WL_ERR(("dealloc_net_info by wdev=%p\n", cfg->p2p_wdev));
+#endif
+ wl_dealloc_netinfo_by_wdev(cfg, cfg->p2p_wdev);
- /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
- * have no discovery BSS.
- */
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
s32 ret = BCME_OK;
s32 bssidx;
+ CFGP2P_DBG(("enter\n"));
if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
}
- wl_set_p2p_status(cfg, DISCOVERY_ON);
-
- CFGP2P_DBG(("enter\n"));
-
ret = wl_cfgp2p_init_discovery(cfg);
if (unlikely(ret < 0)) {
CFGP2P_ERR((" init discovery error %d\n", ret));
goto exit;
}
+
+ wl_set_p2p_status(cfg, DISCOVERY_ON);
/* Set wsec to any non-zero value in the discovery bsscfg to ensure our
* P2P probe responses have the privacy bit set in the 802.11 WPA IE.
* Some peer devices may not initiate WPS with us if this bit is not set.
}
set_ie:
if (ie_len) {
+
if (bcmcfg_to_prmry_ndev(cfg) == dev) {
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- } else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ } else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
+ WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
return BCME_ERROR;
}
- ret = wl_cfgp2p_set_management_ie(cfg, dev,
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev),
bssidx,
VNDR_IE_PRBREQ_FLAG, ie, ie_len);
wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
+ s32 bssidx;
+
CFGP2P_DBG((" enter\n"));
wl_clr_p2p_status(cfg, DISCOVERY_ON);
goto exit;
}
- if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
+#ifdef DHD_IFDEBUG
+ WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n",
+ __FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx));
+#endif
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (bssidx <= 0) {
CFGP2P_ERR((" do nothing, not initialized\n"));
- goto exit;
+ return 0;
}
ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
-
+ bssidx);
if (unlikely(ret < 0)) {
-
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
}
/* Do a scan abort to stop the driver's scan engine in case it is still
(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
for (i = 0; i < num_chans; i++) {
- eparams->params.channel_list[i] = wl_ch_host_to_driver(channels[i]);
+ eparams->params.channel_list[i] = wl_ch_host_to_driver(bssidx, channels[i]);
}
eparams->version = htod32(ESCAN_REQ_VERSION);
eparams->action = htod16(action);
ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+ WL_SCAN(("P2P_SEARCH sync ID: %d, bssidx: %d\n", eparams->sync_id, bssidx));
if (ret == BCME_OK)
wl_set_p2p_status(cfg, SCANNING);
return ret;
u16 *default_chan_list = NULL;
p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
- return -BCME_ERROR;
+ return -EINVAL;
WL_TRACE_HW4((" Enter\n"));
if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
chan_cnt = AF_PEER_SEARCH_CNT;
else
chan_cnt = SOCIAL_CHAN_CNT;
+
+ if (cfg->afx_hdl->pending_tx_act_frm && cfg->afx_hdl->is_active) {
+ wl_action_frame_t *action_frame;
+ action_frame = &(cfg->afx_hdl->pending_tx_act_frm->action_frame);
+ if (wl_cfgp2p_is_p2p_gas_action(action_frame->data, action_frame->len)) {
+ chan_cnt = 1;
+ p2p_scan_purpose = P2P_SCAN_AFX_PEER_REDUCED;
+ }
+ }
+
default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
if (default_chan_list == NULL) {
CFGP2P_ERR(("channel list allocation failed \n"));
#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
-static s32
-wl_cfgp2p_parse_vndr_ies(u8 *parse, u32 len,
- struct parsed_vndr_ies *vndr_ies)
-{
- s32 err = BCME_OK;
- vndr_ie_t *vndrie;
- bcm_tlv_t *ie;
- struct parsed_vndr_ie_info *parsed_info;
- u32 count = 0;
- s32 remained_len;
-
- remained_len = (s32)len;
- memset(vndr_ies, 0, sizeof(*vndr_ies));
-
- WL_INFORM(("---> len %d\n", len));
- ie = (bcm_tlv_t *) parse;
- if (!bcm_valid_tlv(ie, remained_len))
- ie = NULL;
- while (ie) {
- if (count >= MAX_VNDR_IE_NUMBER)
- break;
- if (ie->id == DOT11_MNG_VS_ID) {
- vndrie = (vndr_ie_t *) ie;
- /* len should be bigger than OUI length + one data length at least */
- if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
- CFGP2P_ERR(("%s: invalid vndr ie. length is too small %d\n",
- __FUNCTION__, vndrie->len));
- goto end;
- }
- /* if wpa or wme ie, do not add ie */
- if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
- ((vndrie->data[0] == WPA_OUI_TYPE) ||
- (vndrie->data[0] == WME_OUI_TYPE))) {
- CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
- goto end;
- }
-
- parsed_info = &vndr_ies->ie_info[count++];
-
- /* save vndr ie information */
- parsed_info->ie_ptr = (char *)vndrie;
- parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
- memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
-
- vndr_ies->count = count;
-
- CFGP2P_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x \n",
- parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
- parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0]));
- }
-end:
- ie = bcm_next_tlv(ie, &remained_len);
- }
- return err;
-}
-
-
-/* Delete and Set a management vndr ie to firmware
- * Parameters:
- * @cfg : wl_private data
- * @ndev : net device for bssidx
- * @bssidx : bssidx for BSS
- * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
- * VNDR_IE_ASSOCREQ_FLAG)
- * @ie : VNDR IE (such as P2P IE , WPS IE)
- * @ie_len : VNDR IE Length
- * Returns 0 if success.
- */
-
-s32
-wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
- s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
-{
- s32 ret = BCME_OK;
- u8 *curr_ie_buf = NULL;
- u8 *mgmt_ie_buf = NULL;
- u32 mgmt_ie_buf_len = 0;
- u32 *mgmt_ie_len = 0;
- u32 del_add_ie_buf_len = 0;
- u32 total_ie_buf_len = 0;
- u32 parsed_ie_buf_len = 0;
- struct parsed_vndr_ies old_vndr_ies;
- struct parsed_vndr_ies new_vndr_ies;
- s32 i;
- u8 *ptr;
- s32 type = -1;
- s32 remained_buf_len;
-#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie)
-#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len)
- memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
- curr_ie_buf = g_mgmt_ie_buf;
- CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
-
-#ifdef DUAL_STA
- if ((cfg->p2p != NULL) && ((bssidx == 0) || (bssidx != cfg->cfgdev_bssidx)))
-#else
- if (cfg->p2p != NULL)
-#endif
- {
- if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
- CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
- return BCME_ERROR;
- }
-
- switch (pktflag) {
- case VNDR_IE_PRBREQ_FLAG :
- mgmt_ie_buf = IE_TYPE(probe_req, type);
- mgmt_ie_len = &IE_TYPE_LEN(probe_req, type);
- mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, type));
- break;
- case VNDR_IE_PRBRSP_FLAG :
- mgmt_ie_buf = IE_TYPE(probe_res, type);
- mgmt_ie_len = &IE_TYPE_LEN(probe_res, type);
- mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, type));
- break;
- case VNDR_IE_ASSOCREQ_FLAG :
- mgmt_ie_buf = IE_TYPE(assoc_req, type);
- mgmt_ie_len = &IE_TYPE_LEN(assoc_req, type);
- mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, type));
- break;
- case VNDR_IE_ASSOCRSP_FLAG :
- mgmt_ie_buf = IE_TYPE(assoc_res, type);
- mgmt_ie_len = &IE_TYPE_LEN(assoc_res, type);
- mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, type));
- break;
- case VNDR_IE_BEACON_FLAG :
- mgmt_ie_buf = IE_TYPE(beacon, type);
- mgmt_ie_len = &IE_TYPE_LEN(beacon, type);
- mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, type));
- break;
- default:
- mgmt_ie_buf = NULL;
- mgmt_ie_len = NULL;
- CFGP2P_ERR(("not suitable type\n"));
- return BCME_ERROR;
- }
- } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- if (cfg->ap_info == NULL) {
- CFGP2P_ERR(("hostapd ap_info null ptr refrence while setting IE\n"));
- return BCME_ERROR;
-
- }
- switch (pktflag) {
- case VNDR_IE_PRBRSP_FLAG :
- mgmt_ie_buf = cfg->ap_info->probe_res_ie;
- mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie);
- break;
- case VNDR_IE_BEACON_FLAG :
- mgmt_ie_buf = cfg->ap_info->beacon_ie;
- mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
- break;
- case VNDR_IE_ASSOCRSP_FLAG :
- /* WPS-AP WSC2.0 assoc res includes wps_ie */
- mgmt_ie_buf = cfg->ap_info->assoc_res_ie;
- mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie);
- break;
- default:
- mgmt_ie_buf = NULL;
- mgmt_ie_len = NULL;
- CFGP2P_ERR(("not suitable type\n"));
- return BCME_ERROR;
- }
- bssidx = 0;
- } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
- switch (pktflag) {
- case VNDR_IE_PRBREQ_FLAG :
- mgmt_ie_buf = cfg->sta_info->probe_req_ie;
- mgmt_ie_len = &cfg->sta_info->probe_req_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie);
- break;
- case VNDR_IE_ASSOCREQ_FLAG :
- mgmt_ie_buf = cfg->sta_info->assoc_req_ie;
- mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len;
- mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie);
- break;
- default:
- mgmt_ie_buf = NULL;
- mgmt_ie_len = NULL;
- CFGP2P_ERR(("not suitable type\n"));
- return BCME_ERROR;
- }
- bssidx = 0;
- } else {
- CFGP2P_ERR(("not suitable type\n"));
- return BCME_ERROR;
- }
-
- if (vndr_ie_len > mgmt_ie_buf_len) {
- CFGP2P_ERR(("extra IE size too big\n"));
- ret = -ENOMEM;
- } else {
- /* parse and save new vndr_ie in curr_ie_buff before comparing it */
- if (vndr_ie && vndr_ie_len && curr_ie_buf) {
- ptr = curr_ie_buf;
-
- wl_cfgp2p_parse_vndr_ies((u8*)vndr_ie,
- vndr_ie_len, &new_vndr_ies);
-
- for (i = 0; i < new_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &new_vndr_ies.ie_info[i];
-
- memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
- vndrie_info->ie_len);
- parsed_ie_buf_len += vndrie_info->ie_len;
- }
- }
-
- if (mgmt_ie_buf != NULL) {
- if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
- (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
- CFGP2P_INFO(("Previous mgmt IE is equals to current IE\n"));
- goto exit;
- }
-
- /* parse old vndr_ie */
- wl_cfgp2p_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
- &old_vndr_ies);
-
- /* make a command to delete old ie */
- for (i = 0; i < old_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &old_vndr_ies.ie_info[i];
-
- CFGP2P_INFO(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
- vndrie_info->vndrie.oui[2]));
-
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
- pktflag, vndrie_info->vndrie.oui,
- vndrie_info->vndrie.id,
- vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
- vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
- "del");
-
- curr_ie_buf += del_add_ie_buf_len;
- total_ie_buf_len += del_add_ie_buf_len;
- }
- }
-
- *mgmt_ie_len = 0;
- /* Add if there is any extra IE */
- if (mgmt_ie_buf && parsed_ie_buf_len) {
- ptr = mgmt_ie_buf;
-
- remained_buf_len = mgmt_ie_buf_len;
-
- /* make a command to add new ie */
- for (i = 0; i < new_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &new_vndr_ies.ie_info[i];
-
- CFGP2P_INFO(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- vndrie_info->ie_len - 2,
- vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
- vndrie_info->vndrie.oui[2]));
-
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
- pktflag, vndrie_info->vndrie.oui,
- vndrie_info->vndrie.id,
- vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
- vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
- "add");
-
- /* verify remained buf size before copy data */
- if (remained_buf_len >= vndrie_info->ie_len) {
- remained_buf_len -= vndrie_info->ie_len;
- } else {
- CFGP2P_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
- "found vndr ies # = %d(cur %d), remained len %d, "
- "cur mgmt_ie_len %d, new ie len = %d\n",
- pktflag, new_vndr_ies.count, i, remained_buf_len,
- *mgmt_ie_len, vndrie_info->ie_len));
- break;
- }
-
- /* save the parsed IE in cfg struct */
- memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
- vndrie_info->ie_len);
- *mgmt_ie_len += vndrie_info->ie_len;
-
- curr_ie_buf += del_add_ie_buf_len;
- total_ie_buf_len += del_add_ie_buf_len;
- }
- }
- if (total_ie_buf_len) {
- ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
- total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &cfg->ioctl_buf_sync);
- if (ret)
- CFGP2P_ERR(("vndr ie set error : %d\n", ret));
- }
- }
-#undef IE_TYPE
-#undef IE_TYPE_LEN
-exit:
- return ret;
-}
-
-/* Clear the manament IE buffer of BSSCFG
- * Parameters:
- * @cfg : wl_private data
- * @bssidx : bssidx for BSS
- *
- * Returns 0 if success.
- */
-s32
-wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx)
-{
-
- s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
- VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
- s32 index = -1;
- s32 type = -1;
- struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx);
-#define INIT_IE(IE_TYPE, BSS_TYPE) \
- do { \
- memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
- sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
- wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
- } while (0);
-
- if (bssidx < 0 || ndev == NULL) {
- CFGP2P_ERR(("invalid %s\n", (bssidx < 0) ? "bssidx" : "ndev"));
- return BCME_BADARG;
- }
-
- if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
- CFGP2P_ERR(("invalid argument\n"));
- return BCME_BADARG;
- }
- for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
- /* clean up vndr ies in dongle */
- wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0);
- }
- INIT_IE(probe_req, type);
- INIT_IE(probe_res, type);
- INIT_IE(assoc_req, type);
- INIT_IE(assoc_res, type);
- INIT_IE(beacon, type);
- return BCME_OK;
-}
-
/* Is any of the tlvs the expected entry? If
* not update the tlvs buffer pointer/length.
}
return NULL;
}
-static u32
+u32
wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
{
}
-/*
- * Search the bssidx based on dev argument
- * Parameters:
- * @cfg : wl_private data
- * @ndev : net device to search bssidx
- * @bssidx : output arg to store bssidx of the bsscfg of firmware.
- * Returns error
- */
-s32
-wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx)
-{
- u32 i;
- if (ndev == NULL || bssidx == NULL) {
- CFGP2P_ERR((" argument is invalid\n"));
- return BCME_BADARG;
- }
- if (!cfg->p2p_supported) {
- *bssidx = P2PAPI_BSSCFG_PRIMARY;
- return BCME_OK;
- }
- /* we cannot find the bssidx of DISCOVERY BSS
- * because the ndev is same with ndev of PRIMARY BSS.
- */
- for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
- if (ndev == wl_to_p2p_bss_ndev(cfg, i)) {
- *bssidx = wl_to_p2p_bss_bssidx(cfg, i);
- return BCME_OK;
- }
- }
-
-#ifdef DUAL_STA
- if (cfg->bss_cfgdev && (cfg->bss_cfgdev == ndev_to_cfgdev(ndev))) {
- CFGP2P_INFO(("cfgdev is present, return the bssidx"));
- *bssidx = cfg->cfgdev_bssidx;
- return BCME_OK;
- }
-#endif
-
- return BCME_BADARG;
-
-}
struct net_device *
wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
{
}
/*
* Search the driver array idx based on bssidx argument
- * Parameters:
+ * Note that this idx is applicable only for the primary and P2P
+ * interfaces; virtual AP/STA interfaces are not covered here.
+ * Parameters:
* @cfg : wl_private data
* @bssidx : bssidx which indicate bsscfg->idx of firmware.
* @type : output arg to store array idx of p2p->bss.
}
}
-#ifdef DUAL_STA
- if (bssidx == cfg->cfgdev_bssidx) {
- CFGP2P_DBG(("bssidx matching with the virtual I/F \n"));
- *type = 1;
- return BCME_OK;
- }
-#endif
-
exit:
return BCME_BADARG;
}
s32 ret = BCME_OK;
struct net_device *ndev = NULL;
- if (!cfg || !cfg->p2p)
+ if (!cfg || !cfg->p2p || !cfgdev)
return BCME_ERROR;
CFGP2P_DBG((" Enter\n"));
+#ifdef DHD_IFDEBUG
+ WL_ERR(("%s: cfg: %p, cfgdev: %p, cfg->wdev: %p, cfg->p2p_wdev: %p\n",
+ __FUNCTION__, cfg, cfgdev, cfg->wdev, cfg->p2p_wdev));
+#endif
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-#if defined(P2P_DISCOVERY_WAR)
- if (!cfg->p2p->vif_created) {
- if (wldev_iovar_setint(ndev, "mpc", 1) < 0) {
- WL_ERR(("mpc enabling back failed\n"));
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
+ CFGP2P_ERR(("DISC_IN_PROGRESS cleared\n"));
+ if (ndev && (ndev->ieee80211_ptr != NULL)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) {
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+ } else {
+ CFGP2P_ERR(("Invalid cfgdev. Dropping the"
+ "remain_on_channel_expired event.\n"));
+ }
+#else
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
+#endif /* WL_CFG80211_P2P_DEV_IF */
}
}
-#endif /* defined(P2P_DISCOVERY_WAR) */
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
+#endif /* P2P_LISTEN_OFFLOADING */
if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
wl_set_p2p_status(cfg, LISTEN_EXPIRED);
wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL))
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
{
- WL_DBG(("Listen DONE for ramain on channel expired\n"));
+ WL_DBG(("Listen DONE for remain on channel expired\n"));
wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
if (ndev && (ndev->ieee80211_ptr != NULL)) {
#if defined(WL_CFG80211_P2P_DEV_IF)
- cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
- cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+ if (cfgdev && ((struct wireless_dev *)cfgdev)->wiphy) {
+ /*
+ * cfgdev->wiphy may be invalid at this point, so check it
+ * explicitly to prevent a kernel panic.
+ */
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
+ } else {
+ CFGP2P_ERR(("Invalid cfgdev. Dropping the"
+ "remain_on_channel_expired event.\n"));
+ }
#else
cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
&cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
CFGP2P_DBG((" Enter\n"));
bzero(&msg, sizeof(wl_event_msg_t));
msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
+ msg.bsscfgidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
#if defined(WL_ENABLE_P2P_IF)
wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
del_timer_sync(&cfg->p2p->listen_timer);
if (notify) {
#if defined(WL_CFG80211_P2P_DEV_IF)
-#ifdef P2PONEINT
- if (wdev == NULL)
- wdev = bcmcfg_to_p2p_wdev(cfg);
-#endif
if (wdev)
- cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
- cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
+ cfg80211_remain_on_channel_expired(wdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, GFP_KERNEL);
#else
if (ndev && ndev->ieee80211_ptr)
cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
if (status == WLC_E_STATUS_SUCCESS) {
wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
+ if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
+ CFGP2P_DBG(("no need to wait next AF.\n"));
+ wl_stop_wait_next_action_frame(cfg, ndev);
+ }
}
else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
wl_set_p2p_status(cfg, ACTION_TX_NOACK);
* MAC address.
*/
void
-wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr,
- struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr)
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr)
{
- memset(out_dev_addr, 0, sizeof(*out_dev_addr));
- memset(out_int_addr, 0, sizeof(*out_int_addr));
+ struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
+ struct ether_addr *int_addr;
- /* Generate the P2P Device Address. This consists of the device's
- * primary MAC address with the locally administered bit set.
- */
- memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr));
- out_dev_addr->octet[0] |= 0x02;
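+ /* P2P Device Address: primary MAC with the locally administered bit set */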
+ memcpy(mac_addr, primary_addr, sizeof(struct ether_addr));
+ mac_addr->octet[0] |= 0x02;
+ WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
- /* Generate the P2P Interface Address. If the discovery and connection
- * BSSCFGs need to simultaneously co-exist, then this address must be
- * different from the P2P Device Address.
- */
- memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr));
-#ifndef P2PONEINT
- out_int_addr->octet[4] ^= 0x80;
-#endif
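+ /* Derive distinct P2P Interface Addresses for the two connection bsscfgs
+ * by toggling bits in octet[4] of the Device Address.
+ */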
+ int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
+ memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+ int_addr->octet[4] ^= 0x80;
+ WL_DBG(("Primary P2P Interface address:"MACDBG "\n", MAC2STRDBG(int_addr->octet)));
+ int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION2);
+ memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
+ int_addr->octet[4] ^= 0x90;
}
/* P2P IF Address change to Virtual Interface MAC Address */
/* Bring up or down a BSS */
s32
-wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 is_up)
{
s32 ret = BCME_OK;
- s32 val = up ? 1 : 0;
+ s32 val = is_up ? 1 : 0;
struct {
s32 cfg;
bss_setbuf.cfg = htod32(bsscfg_idx);
bss_setbuf.val = htod32(val);
- CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+ CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, is_up ? "up" : "down"));
ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (ret != 0) {
- CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
+ CFGP2P_ERR(("'bss %d' failed with %d\n", is_up, ret));
}
return ret;
s32 i = 0, index = -1;
#if defined(WL_CFG80211_P2P_DEV_IF)
- wdev = bcmcfg_to_p2p_wdev(cfg);
-#ifdef P2PONEINT
- ndev = wdev_to_ndev(wdev);
-#else
ndev = bcmcfg_to_prmry_ndev(cfg);
-#endif
+ wdev = bcmcfg_to_p2p_wdev(cfg);
#elif defined(WL_ENABLE_P2P_IF)
ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
wdev = ndev_to_wdev(ndev);
#endif /* WL_CFG80211_P2P_DEV_IF */
wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
+ wl_cfgp2p_disable_discovery(cfg);
+
+#if defined(WL_CFG80211_P2P_DEV_IF) && !defined(KEEP_WIFION_OPTION)
+ if (cfg->p2p_wdev) {
+ /* If p2p wdev is left out, clean it up */
+ WL_ERR(("Clean up the p2p discovery IF\n"));
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+ }
+#endif /* WL_CFG80211_P2P_DEV_IF !defined(KEEP_WIFION_OPTION) */
+
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
index = wl_to_p2p_bss_bssidx(cfg, i);
if (index != WL_INVALID)
- wl_cfgp2p_clear_management_ie(cfg, index);
+ wl_cfg80211_clear_per_bss_ies(cfg, index);
}
wl_cfgp2p_deinit_priv(cfg);
return 0;
}
+
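+/* Returns true when at least one P2P connection (virtual) interface exists. */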
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg)
+{
+ if (cfg->p2p && ((wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) != -1) ||
+ (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2) != -1)))
+ return true;
+ else
+ return false;
+
+}
+
s32
wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
{
s32 ret = -1;
int count, start, duration;
wl_p2p_sched_t dongle_noa;
-
+ s32 bssidx, type;
+ int iovar_len = sizeof(dongle_noa);
CFGP2P_DBG((" Enter\n"));
memset(&dongle_noa, 0, sizeof(dongle_noa));
- if (cfg->p2p && cfg->p2p->vif_created) {
-
+ if (wl_cfgp2p_vif_created(cfg)) {
cfg->p2p->noa.desc[0].start = 0;
sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
}
else {
/* Continuous NoA interval. */
- dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_DOZE;
dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
if ((cfg->p2p->noa.desc[0].interval == 102) ||
(cfg->p2p->noa.desc[0].interval == 100)) {
dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
}
dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK)
+ return BCME_ERROR;
+
+ if (dongle_noa.action == WL_P2P_SCHED_ACTION_RESET) {
+ iovar_len -= sizeof(wl_p2p_sched_desc_t);
+ }
- ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION),
- "p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf,
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, type),
+ "p2p_noa", &dongle_noa, iovar_len, cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (ret < 0) {
CFGP2P_DBG((" Enter\n"));
buf[0] = '\0';
- if (cfg->p2p && cfg->p2p->vif_created) {
+ if (wl_cfgp2p_vif_created(cfg)) {
if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
_buf[0] = 1; /* noa index */
_buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
int ps, ctw;
int ret = -1;
s32 legacy_ps;
+ s32 conn_idx;
+ s32 bssidx;
struct net_device *dev;
CFGP2P_DBG((" Enter\n"));
- if (cfg->p2p && cfg->p2p->vif_created) {
+ if (wl_cfgp2p_vif_created(cfg)) {
sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
- dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK)
+ return BCME_ERROR;
+ dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
if (ctw != -1) {
cfg->p2p->ops.ctw = ctw;
ret = 0;
return ret;
}
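+/* Parse "<channel> <bandwidth>" from buf and issue a CSA request on the P2P
+ * connection interface to switch the group to the new chanspec.
+ */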
+s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
+{
+ int ch, bw;
+ s32 conn_idx;
+ s32 bssidx;
+ struct net_device *dev;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_chan_switch_t csa_arg;
+ u32 chnsp = 0;
+ int err = 0;
+
+ CFGP2P_DBG((" Enter\n"));
+ if (wl_cfgp2p_vif_created(cfg)) {
+ sscanf(buf, "%10d %10d", &ch, &bw);
+ CFGP2P_DBG(("Enter ch %d bw %d\n", ch, bw));
+
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &conn_idx) != BCME_OK) {
+ return BCME_ERROR;
+ }
+ dev = wl_to_p2p_bss_ndev(cfg, conn_idx);
+ if (ch <= 0 || bw <= 0) {
+ CFGP2P_ERR(("Negative value not permitted!\n"));
+ return BCME_ERROR;
+ }
+
+ csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
+ csa_arg.count = P2P_ECSA_CNT;
+ csa_arg.reg = 0;
+
+ sprintf(buf, "%d/%d", ch, bw);
+ chnsp = wf_chspec_aton(buf);
+ if (chnsp == 0) {
+ CFGP2P_ERR(("%s:chsp is not correct\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ chnsp = wl_chspec_host_to_driver(chnsp);
+ csa_arg.chspec = chnsp;
+
+ err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
+ smbuf, sizeof(smbuf), NULL);
+ if (err) {
+ CFGP2P_ERR(("%s:set p2p_ecsa failed:%d\n", __FUNCTION__, err));
+ return BCME_ERROR;
+ }
+ } else {
+ CFGP2P_ERR(("ERROR: set_p2p_ecsa in non-p2p mode\n"));
+ return BCME_ERROR;
+ }
+ return BCME_OK;
+}
+
u8 *
wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
{
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT) || \
- defined(P2PONEINT)
-#ifdef P2PONEINT
-s32
-wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
-{
-
- struct net_device *_ndev;
- struct ether_addr primary_mac;
- struct net_device *new_ndev;
- chanspec_t chspec;
- uint8 name[IFNAMSIZ];
- s32 mode = 0;
- s32 val = 0;
-
-
- s32 wlif_type = -1;
- s32 err, timeout = -1;
-
- memset(name, 0, IFNAMSIZ);
- strncpy(name, "p2p0", 4);
- name[IFNAMSIZ - 1] = '\0';
-
- if (cfg->p2p_net) {
- CFGP2P_ERR(("p2p_net defined already.\n"));
- return -EINVAL;
- }
-
- if (!cfg->p2p)
- return -EINVAL;
-
- if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- wl_cfgp2p_init_discovery(cfg);
- get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac,
- &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
- }
-
- _ndev = bcmcfg_to_prmry_ndev(cfg);
- memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
- strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
-
- wl_cfg80211_scan_abort(cfg);
-
-
- /* In concurrency case, STA may be already associated in a particular channel.
- * so retrieve the current channel of primary interface and then start the virtual
- * interface on that.
- */
- chspec = wl_cfg80211_get_shared_freq(cfg->wdev->wiphy);
-
- /* For P2P mode, use P2P-specific driver features to create the
- * bss: "cfg p2p_ifadd"
- */
- wl_set_p2p_status(cfg, IF_ADDING);
- memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- wlif_type = WL_P2P_IF_CLIENT;
-
-
- err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
- if (unlikely(err)) {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR((" virtual iface add failed (%d) \n", err));
- return -ENOMEM;
- }
-
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_ADDING) == false),
- msecs_to_jiffies(MAX_WAIT_TIME));
-
-
- if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
- struct wireless_dev *vwdev;
- int pm_mode = PM_ENABLE;
- wl_if_event_info *event = &cfg->if_event_info;
-
- /* IF_ADD event has come back, we can proceed to to register
- * the new interface now, use the interface name provided by caller (thus
- * ignore the one from wlc)
- */
- strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
- new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
- event->mac, event->bssidx);
- if (new_ndev == NULL)
- goto fail;
-
- wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
-
- vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
- if (unlikely(!vwdev)) {
- WL_ERR(("Could not allocate wireless device\n"));
- goto fail;
- }
- vwdev->wiphy = cfg->wdev->wiphy;
- WL_TRACE(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
- vwdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
- vwdev->netdev = new_ndev;
- new_ndev->ieee80211_ptr = vwdev;
- SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
- wl_set_drv_status(cfg, READY, new_ndev);
- cfg->p2p->vif_created = true;
- wl_set_mode_by_netdev(cfg, new_ndev, mode);
-
- if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
- goto fail;
- }
-
- wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
- val = 1;
- /* Disable firmware roaming for P2P interface */
- wldev_iovar_setint(new_ndev, "roam_off", val);
-
- if (mode != WL_MODE_AP)
- wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
-
- WL_ERR((" virtual interface(%s) is "
- "created net attach done\n", cfg->p2p->vir_ifname));
-
- /* reinitialize completion to clear previous count */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- INIT_COMPLETION(cfg->iface_disable);
-#else
- init_completion(&cfg->iface_disable);
-#endif
- cfg->p2p_net = new_ndev;
- cfg->p2p_wdev = vwdev;
-
- return 0;
- } else {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
- memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
- cfg->p2p->vif_created = false;
- }
-
-
-fail:
- if (wlif_type == WL_P2P_IF_GO)
- wldev_iovar_setint(_ndev, "mpc", 1);
- return -ENODEV;
-
-}
-#else
+#if defined(WL_ENABLE_P2P_IF)
s32
wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
{
int ret = 0;
struct net_device* net = NULL;
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
struct wireless_dev *wdev = NULL;
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
if (cfg->p2p_net) {
return -ENODEV;
}
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (unlikely(!wdev)) {
WL_ERR(("Could not allocate wireless device\n"));
free_netdev(net);
return -ENOMEM;
}
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
net->name[IFNAMSIZ - 1] = '\0';
/* Register with a dummy MAC addr */
memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
net->ieee80211_ptr = wdev;
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
net->ethtool_ops = &cfgp2p_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
/* Associate p2p0 network interface with new wdev */
wdev->netdev = net;
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
ret = register_netdev(net);
if (ret) {
CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
free_netdev(net);
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
kfree(wdev);
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
return -ENODEV;
}
/* store p2p net ptr for further reference. Note that iflist won't have this
 * entry as the corresponding firmware interface is a "Hidden" interface.
*/
-#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
cfg->p2p_wdev = wdev;
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
cfg->p2p_net = net;
- printf("%s: P2P Interface Registered\n", net->name);
+ printk("%s: P2P Interface Registered\n", net->name);
return ret;
}
-#endif /* P2PONEINT */
s32
wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
return 0;
}
-
-#ifndef P2PONEINT
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
return ret;
}
-#endif /* P2PONEINT */
-#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT || defined(P2PONEINT) */
+#endif
-#if defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT)
-int
-#ifdef P2PONEINT
-wl_cfgp2p_if_open(struct net_device *net)
-#else
-wl_cfgp2p_if_open(struct net_device *net)
-#endif
+#if defined(WL_ENABLE_P2P_IF)
+static int wl_cfgp2p_if_open(struct net_device *net)
{
struct wireless_dev *wdev = net->ieee80211_ptr;
return 0;
}
-int
-#ifdef P2PONEINT
-wl_cfgp2p_if_stop(struct net_device *net)
-#else
-wl_cfgp2p_if_stop(struct net_device *net)
-#endif
+static int wl_cfgp2p_if_stop(struct net_device *net)
{
struct wireless_dev *wdev = net->ieee80211_ptr;
-#ifdef P2PONEINT
- bcm_struct_cfgdev *cfgdev;
-#endif
+
if (!wdev)
return -EINVAL;
-#ifdef P2PONEINT
- cfgdev = ndev_to_cfgdev(net);
- wl_cfg80211_scan_stop(cfgdev);
-#else
wl_cfg80211_scan_stop(net);
-#endif
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
#endif /* !WL_IFACE_COMB_NUM_CHANNELS */
return 0;
}
-#endif /* defined(WL_ENABLE_P2P_IF) || defined(P2PONEINT) */
-#if defined(WL_ENABLE_P2P_IF)
bool wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops)
{
return (if_ops == &wl_cfgp2p_if_ops);
WL_TRACE(("Enter\n"));
if (cfg->p2p_wdev) {
+#ifndef EXPLICIT_DISCIF_CLEANUP
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
+ /*
+ * This is not expected. This can happen due to
+ * supplicant crash/unclean de-initialization which
+ * didn't free the p2p discovery interface. Indicate
+ * driver hang to user space so that the framework
+ * can re-init the Wi-Fi.
+ */
CFGP2P_ERR(("p2p_wdev defined already.\n"));
-//#if (defined(CUSTOMER_HW10) && defined(CONFIG_ARCH_ODIN))
-#if 1 // after android stop; start, wpa_supplicant start fail because of "Failed to create a P2P Device interface p2p-dev-wlan0", huweiguo
+ wl_probe_wdev_all(cfg);
+#ifdef EXPLICIT_DISCIF_CLEANUP
+ /*
+ * CUSTOMER_HW4 design doesn't delete the p2p discovery
+ * interface on ifconfig wlan0 down context which comes
+ * without a preceding NL80211_CMD_DEL_INTERFACE for p2p
+ * discovery. But during supplicant crash the DEL_IFACE
+ * command will not happen and will cause a left over iface
+ * even after ifconfig wlan0 down. So delete the iface
+ * first and then indicate the HANG event
+ */
wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
- CFGP2P_ERR(("p2p_wdev deleted.\n"));
#else
- return ERR_PTR(-ENFILE);
-#endif
+ dhd->hang_reason = HANG_REASON_P2P_IFACE_DEL_FAILURE;
+ net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ return ERR_PTR(-ENODEV);
+#endif /* EXPLICIT_DISCIF_CLEANUP */
}
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
memset(&primary_mac, 0, sizeof(primary_mac));
get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac,
- &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
- memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+ memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN);
-#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
- if (cfg->p2p_net)
- memcpy(cfg->p2p_net->dev_addr, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
-#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
/* store p2p wdev ptr for further reference. */
cfg->p2p_wdev = wdev;
- WL_TRACE(("P2P interface registered\n"));
+ printf("P2P interface registered\n");
+ printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
return wdev;
}
}
p2p_on(cfg) = true;
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif
- CFGP2P_DBG(("P2P interface started\n"));
+ printf("P2P interface started\n");
exit:
return ret;
if (!cfg)
return;
- WL_TRACE(("Enter\n"));
+ CFGP2P_DBG(("Enter\n"));
ret = wl_cfg80211_scan_stop(wdev);
if (unlikely(ret < 0)) {
p2p_on(cfg) = false;
- CFGP2P_DBG(("P2P interface stopped\n"));
+ printf("Exit. P2P interface stopped\n");
return;
}
if (!wdev)
return -EINVAL;
-#ifdef P2PONEINT
- return -EINVAL;
-#endif
-
WL_TRACE(("Enter\n"));
+ printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
if (!rtnl_is_locked()) {
rtnl_lock();
if (rollback_lock)
rtnl_unlock();
+ synchronize_rcu();
+
kfree(wdev);
if (cfg)
cfg->p2p_wdev = NULL;
- CFGP2P_ERR(("P2P interface unregistered\n"));
+ printf("P2P interface unregistered\n");
return 0;
}
#endif /* WL_CFG80211_P2P_DEV_IF */
+
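+/* Decide whether a response action frame should be waited for after this
+ * frame. A transmitted GO Negotiation Response with a non-zero status ends
+ * the exchange, so no further frame is expected in that case.
+ */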
+void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx)
+{
+ wifi_p2p_pub_act_frame_t *pact_frm;
+ int status = 0;
+
+ if (!frame || (frame_len < (sizeof(*pact_frm) + WL_P2P_AF_STATUS_OFFSET - 1))) {
+ return;
+ }
+
+ if (wl_cfgp2p_is_pub_action(frame, frame_len)) {
+ pact_frm = (wifi_p2p_pub_act_frame_t *)frame;
+ if (pact_frm->subtype == P2P_PAF_GON_RSP && tx) {
+ CFGP2P_ACTION(("Check TX P2P Group Owner Negotiation Rsp Frame status\n"));
+ status = pact_frm->elts[WL_P2P_AF_STATUS_OFFSET];
+ if (status) {
+ cfg->need_wait_afrx = false;
+ return;
+ }
+ }
+ }
+
+ cfg->need_wait_afrx = true;
+ return;
+}
+
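+/* A request with a single P2P wildcard SSID on a single channel is treated
+ * as a P2P-specific scan.
+ */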
+int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request)
+{
+ if (request && (request->n_ssids == 1) &&
+ (request->n_channels == 1) &&
+ IS_P2P_SSID(request->ssids[0].ssid, WL_P2P_WILDCARD_SSID_LEN) &&
+ (request->ssids[0].ssid_len > WL_P2P_WILDCARD_SSID_LEN)) {
+ return true;
+ }
+ return false;
+}
/*
* Linux cfgp2p driver
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfgp2p.h 497431 2014-08-19 11:03:27Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgp2p.h 608203 2015-12-24 05:30:44Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
typedef enum {
P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */
P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */
- P2PAPI_BSSCFG_CONNECTION, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION1, /* maps to driver's P2P connection bsscfg */
+ P2PAPI_BSSCFG_CONNECTION2,
P2PAPI_BSSCFG_MAX
} p2p_bsscfg_type_t;
/* normal vendor ies buffer length */
#define VNDR_IES_BUF_LEN 512
-/* Structure to hold all saved P2P and WPS IEs for a BSSCFG */
-struct p2p_saved_ie {
- u8 p2p_probe_req_ie[VNDR_IES_BUF_LEN];
- u8 p2p_probe_res_ie[VNDR_IES_MAX_BUF_LEN];
- u8 p2p_assoc_req_ie[VNDR_IES_BUF_LEN];
- u8 p2p_assoc_res_ie[VNDR_IES_BUF_LEN];
- u8 p2p_beacon_ie[VNDR_IES_MAX_BUF_LEN];
- u32 p2p_probe_req_ie_len;
- u32 p2p_probe_res_ie_len;
- u32 p2p_assoc_req_ie_len;
- u32 p2p_assoc_res_ie_len;
- u32 p2p_beacon_ie_len;
-};
-
struct p2p_bss {
s32 bssidx;
struct net_device *dev;
- struct p2p_saved_ie saved_ie;
void *private_data;
+ struct ether_addr mac_addr;
};
struct p2p_info {
bool on; /* p2p on/off switch */
bool scan;
int16 search_state;
- bool vif_created;
s8 vir_ifname[IFNAMSIZ];
unsigned long status;
- struct ether_addr dev_addr;
- struct ether_addr int_addr;
struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
struct timer_list listen_timer;
wl_p2p_sched_t noa;
wl_p2p_ops_t ops;
wlc_ssid_t ssid;
+ s8 p2p_go_count;
};
-#define MAX_VNDR_IE_NUMBER 5
+#define MAX_VNDR_IE_NUMBER 10
struct parsed_vndr_ie_info {
char *ie_ptr;
#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev)
#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr)
#define wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie)
#define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data)
#define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type])
/* dword align allocation */
#define WLC_IOCTL_MAXLEN 8192
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) "
+#else
#define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
-
+#ifdef DHD_LOG_DUMP
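+/* With DHD_LOG_DUMP enabled, CFGP2P error prints are also mirrored into the
+ * DHD log dump buffer with a timestamp.
+ */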
#define CFGP2P_ERR(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \
printk args; \
+ dhd_log_dump_print("[%s] %s: ", \
+ dhd_log_dump_get_timestamp(), __func__); \
+ dhd_log_dump_print args; \
} \
} while (0)
+#else
+#define CFGP2P_ERR(args) \
+ do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
+ } \
+ } while (0)
+#endif /* DHD_LOG_DUMP */
#define CFGP2P_INFO(args) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
#ifndef WL_CFG80211_P2P_DEV_IF
-#ifdef WL_NEWCFG_PRIVCMD_SUPPORT
-#undef WL_NEWCFG_PRIVCMD_SUPPORT
-#endif
#endif /* WL_CFG80211_P2P_DEV_IF */
#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
#define bcm_struct_cfgdev struct net_device
#endif /* WL_CFG80211_P2P_DEV_IF */
+#define P2P_ECSA_CNT 50
+
extern void
wl_cfgp2p_listen_expired(unsigned long data);
extern bool
wl_cfgp2p_is_gas_action(void *frame, u32 frame_len);
extern bool
wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len);
+extern bool
+wl_cfgp2p_is_p2p_gas_action(void *frame, u32 frame_len);
extern void
wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
extern s32
extern s32
wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern s32
-wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
+ chanspec_t chspec, s32 conn_idx);
extern s32
wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
extern s32
wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
-extern s32
-wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index);
extern struct net_device *
wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
extern s32
wl_af_params_t *af_params, s32 bssidx);
extern void
-wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr,
- struct ether_addr *out_int_addr);
+wl_cfgp2p_generate_bss_mac(struct bcm_cfg80211 *cfg, struct ether_addr *primary_addr);
extern void
wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id);
extern s32
wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+extern s32
+wl_cfgp2p_set_p2p_ecsa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
+
extern u8 *
wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
extern bool
wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
+extern u32
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
+ s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
+
+extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfg_multip2p_operational(struct bcm_cfg80211 *cfg);
+
+extern
+int wl_cfgp2p_vif_created(struct bcm_cfg80211 *cfg);
+
#if defined(WL_CFG80211_P2P_DEV_IF)
extern struct wireless_dev *
wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
extern int
wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
+
#endif /* WL_CFG80211_P2P_DEV_IF */
+extern void
+wl_cfgp2p_need_wait_actfrmae(struct bcm_cfg80211 *cfg, void *frame, u32 frame_len, bool tx);
+
+extern int
+wl_cfgp2p_is_p2p_specific_scan(struct cfg80211_scan_request *request);
+
/* WiFi Direct */
#define SOCIAL_CHAN_1 1
#define SOCIAL_CHAN_2 6
#define WL_P2P_WILDCARD_SSID_LEN 7
#define WL_P2P_INTERFACE_PREFIX "p2p"
#define WL_P2P_TEMP_CHAN 11
+#define WL_P2P_AF_STATUS_OFFSET 9
/* If the provision discovery is for JOIN operations,
* or the device discoverablity frame is destined to GO
--- /dev/null
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgvendor.c 605796 2015-12-11 13:45:36Z $
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+
+#include <bcmutils.h>
+#include <bcmwifi_channels.h>
+#include <bcmendian.h>
+#include <proto/ethernet.h>
+#include <proto/802.11.h>
+#include <linux/if_arp.h>
+#include <asm/uaccess.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+#include <wlioctl_utils.h>
+#include <dhd_cfg80211.h>
+#ifdef PNO_SUPPORT
+#include <dhd_pno.h>
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+#include <dhd_rtt.h>
+#endif /* RTT_SUPPORT */
+#include <proto/ethernet.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/wait.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+
+#include <wlioctl.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgp2p.h>
+#include <wl_android.h>
+#include <wl_cfgvendor.h>
+
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#include <brcm_nl80211.h>
+
+#if defined(WL_VENDOR_EXT_SUPPORT)
+/*
+ * This API is to be used for asynchronous vendor events. This
+ * shouldn't be used in response to a vendor command from its
+ * do_it handler context (instead wl_cfgvendor_send_cmd_reply should
+ * be used).
+ */
+int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len)
+{
+ u16 kflags;
+ struct sk_buff *skb;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ /* Alloc the SKB for vendor_event */
+#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
+#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ /* Push the data to the skb */
+ nla_put_nohdr(skb, len, data);
+
+ cfg80211_vendor_event(skb, kflags);
+
+ return 0;
+}
+
+static int
+wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy,
+ struct net_device *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ /* Alloc the SKB for the vendor command reply */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ /* Push the data to the skb */
+ nla_put_nohdr(skb, len, data);
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int
+wl_cfgvendor_get_feature_set(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int reply;
+
+ reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &reply, sizeof(int));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+ return err;
+}
+
+static int
+wl_cfgvendor_get_feature_set_matrix(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct sk_buff *skb;
+ int *reply;
+ int num, mem_needed, i;
+
+ reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), &num);
+
+ if (!reply) {
+ WL_ERR(("Could not get feature list matrix\n"));
+ err = -EINVAL;
+ return err;
+ }
+ mem_needed = VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * num) +
+ ATTRIBUTE_U32_LEN;
+
+ /* Alloc the SKB for the vendor command reply */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, num);
+ for (i = 0; i < num; i++) {
+ nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply[i]);
+ }
+
+ err = cfg80211_vendor_cmd_reply(skb);
+
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+exit:
+ kfree(reply);
+ return err;
+}
+
+static int
+wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ uint8 pno_random_mac_oui[DOT11_OUI_LEN];
+
+ type = nla_type(data);
+
+ if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) {
+ memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN);
+
+ err = dhd_dev_pno_set_mac_oui(bcmcfg_to_prmry_ndev(cfg), pno_random_mac_oui);
+
+ if (unlikely(err))
+ WL_ERR(("Bad OUI, could not set:%d \n", err));
+
+
+ } else {
+ err = -1;
+ }
+
+ return err;
+}
+
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+static int
+wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ u32 nodfs;
+
+ type = nla_type(data);
+ if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
+ nodfs = nla_get_u32(data);
+ err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
+ } else {
+ err = -1;
+ }
+ return err;
+}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
+#ifdef GSCAN_SUPPORT
+int
+wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event)
+{
+ u16 kflags;
+ const void *ptr;
+ struct sk_buff *skb;
+ int malloc_len, total, iter_cnt_to_send, cnt;
+ gscan_results_cache_t *cache = (gscan_results_cache_t *)data;
+ total = len/sizeof(wifi_gscan_result_t);
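+	/* Hotlist results may exceed one netlink message: split them across
+	 * multiple vendor events, each capped at NLMSG_DEFAULT_SIZE.
+	 */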
+ while (total > 0) {
+ malloc_len = (total * sizeof(wifi_gscan_result_t)) + VENDOR_DATA_OVERHEAD;
+ if (malloc_len > NLMSG_DEFAULT_SIZE) {
+ malloc_len = NLMSG_DEFAULT_SIZE;
+ }
+ iter_cnt_to_send =
+ (malloc_len - VENDOR_DATA_OVERHEAD)/sizeof(wifi_gscan_result_t);
+ total = total - iter_cnt_to_send;
+
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
+ /* Alloc the SKB for vendor_event */
+#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags);
+#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ return -ENOMEM;
+ }
+
+ while (cache && iter_cnt_to_send) {
+ ptr = (const void *) &cache->results[cache->tot_consumed];
+
+ if (iter_cnt_to_send < (cache->tot_count - cache->tot_consumed)) {
+ cnt = iter_cnt_to_send;
+ } else {
+ cnt = (cache->tot_count - cache->tot_consumed);
+ }
+
+ iter_cnt_to_send -= cnt;
+ cache->tot_consumed += cnt;
+ /* Push the data to the skb */
+ nla_append(skb, cnt * sizeof(wifi_gscan_result_t), ptr);
+ if (cache->tot_consumed == cache->tot_count) {
+ cache = cache->next;
+ }
+
+ }
+
+ cfg80211_vendor_event(skb, kflags);
+ }
+
+ return 0;
+}
+
+
+static int
+wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pno_gscan_capabilities_t *reply = NULL;
+ uint32 reply_len = 0;
+
+ reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_CAPABILITIES, NULL, &reply_len);
+ if (!reply) {
+ WL_ERR(("Could not get capabilities\n"));
+ err = -EINVAL;
+ return err;
+ }
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ reply, reply_len);
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+
+ kfree(reply);
+ return err;
+}
+
+static int
+wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, type, band;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ uint16 *reply = NULL;
+ uint32 reply_len = 0, num_channels, mem_needed;
+ struct sk_buff *skb;
+
+ type = nla_type(data);
+
+ if (type == GSCAN_ATTRIBUTE_BAND) {
+ band = nla_get_u32(data);
+ } else {
+ return -EINVAL;
+ }
+
+ reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
+
+ if (!reply) {
+ WL_ERR(("Could not get channel list\n"));
+ err = -EINVAL;
+ return err;
+ }
+	num_channels = reply_len / sizeof(uint32);
+ mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2);
+
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels);
+ nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply);
+
+ err = cfg80211_vendor_cmd_reply(skb);
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+exit:
+ kfree(reply);
+ return err;
+}
+
+static int
+wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_results_cache_t *results, *iter;
+ uint32 reply_len, complete = 0, num_results_iter;
+ int32 mem_needed;
+ wifi_gscan_result_t *ptr;
+ uint16 num_scan_ids, num_results;
+ struct sk_buff *skb;
+ struct nlattr *scan_hdr;
+
+ dhd_dev_wait_batch_results_complete(bcmcfg_to_prmry_ndev(cfg));
+ dhd_dev_pno_lock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ results = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);
+
+ if (!results) {
+ WL_ERR(("No results to send %d\n", err));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ results, 0);
+
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return err;
+ }
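+	/* reply_len packs the scan id count in the low 16 bits and the
+	 * total result count in the high 16 bits.
+	 */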
+ num_scan_ids = reply_len & 0xFFFF;
+ num_results = (reply_len & 0xFFFF0000) >> 16;
+ mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
+ (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
+ VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;
+
+ if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
+ mem_needed = (int32)NLMSG_DEFAULT_SIZE;
+ complete = 0;
+ } else {
+ complete = 1;
+ }
+
+ WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed,
+ (int)NLMSG_DEFAULT_SIZE));
+ /* Alloc the SKB for vendor_event */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
+ if (unlikely(!skb)) {
+ WL_ERR(("skb alloc failed"));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+ return -ENOMEM;
+ }
+ iter = results;
+
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, complete);
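+	/* mem_needed now tracks the remaining skb budget after the complete
+	 * flag and vendor reply overhead.
+	 */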
+ mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
+ while (iter && ((mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) > 0)) {
+
+ scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+ nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+
+ num_results_iter =
+ (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t);
+
+ if ((iter->tot_count - iter->tot_consumed) < num_results_iter)
+ num_results_iter = iter->tot_count - iter->tot_consumed;
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
+ if (num_results_iter) {
+ ptr = &iter->results[iter->tot_consumed];
+ iter->tot_consumed += num_results_iter;
+ nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+ num_results_iter * sizeof(wifi_gscan_result_t), ptr);
+ }
+ nla_nest_end(skb, scan_hdr);
+ mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
+ (num_results_iter * sizeof(wifi_gscan_result_t));
+ iter = iter->next;
+ }
+
+ dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
+ dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+static int
+wl_cfgvendor_initiate_gscan(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type, tmp = len;
+ int run = 0xFF;
+ int flush = 0;
+ const struct nlattr *iter;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE)
+ run = nla_get_u32(iter);
+ else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE)
+ flush = nla_get_u32(iter);
+ }
+
+ if (run != 0xFF) {
+ err = dhd_dev_pno_run_gscan(bcmcfg_to_prmry_ndev(cfg), run, flush);
+
+ if (unlikely(err)) {
+ WL_ERR(("Could not run gscan:%d \n", err));
+ }
+ return err;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int
+wl_cfgvendor_enable_full_scan_result(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int type;
+ bool real_time = FALSE;
+
+ type = nla_type(data);
+
+ if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
+ real_time = nla_get_u32(data);
+
+ err = dhd_dev_pno_enable_full_scan_result(bcmcfg_to_prmry_ndev(cfg), real_time);
+
+ if (unlikely(err)) {
+ WL_ERR(("Could not run gscan:%d \n", err));
+ }
+
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int
+wl_cfgvendor_set_scan_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_scan_params_t *scan_param;
+ int j = 0;
+ int type, tmp, tmp1, tmp2, k = 0;
+ const struct nlattr *iter, *iter1, *iter2;
+ struct dhd_pno_gscan_channel_bucket *ch_bucket;
+
+ scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL);
+ if (!scan_param) {
+ WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n"));
+ err = -EINVAL;
+ return err;
+
+ }
+
+ scan_param->scan_fr = PNO_SCAN_MIN_FW_SEC;
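+	/* Periods are supplied in ms and converted to seconds below (the /1000). */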
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+
+ if (j >= GSCAN_MAX_CH_BUCKETS) {
+ break;
+ }
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BASE_PERIOD:
+ scan_param->scan_fr = nla_get_u32(iter)/1000;
+ break;
+ case GSCAN_ATTRIBUTE_NUM_BUCKETS:
+ scan_param->nchannel_buckets = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_CH_BUCKET_1:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_2:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_3:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_4:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_5:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_6:
+ case GSCAN_ATTRIBUTE_CH_BUCKET_7:
+ nla_for_each_nested(iter1, iter, tmp1) {
+ type = nla_type(iter1);
+ ch_bucket =
+ scan_param->channel_bucket;
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BUCKET_ID:
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_PERIOD:
+ ch_bucket[j].bucket_freq_multiple =
+ nla_get_u32(iter1)/1000;
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS:
+ ch_bucket[j].num_channels =
+ nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_BUCKET_CHANNELS:
+ nla_for_each_nested(iter2, iter1, tmp2) {
+ if (k >= PFN_SWC_RSSI_WINDOW_MAX)
+ break;
+ ch_bucket[j].chan_list[k] =
+ nla_get_u32(iter2);
+ k++;
+ }
+ k = 0;
+ break;
+ case GSCAN_ATTRIBUTE_BUCKETS_BAND:
+ ch_bucket[j].band = (uint16)
+ nla_get_u32(iter1);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_EVENTS:
+ ch_bucket[j].report_flag = (uint8)
+ nla_get_u32(iter1);
+ break;
+ default:
+ WL_ERR(("bucket attribute type error %d\n",
+ type));
+ break;
+ }
+ }
+ j++;
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ break;
+ }
+ }
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_SCAN_CFG_ID, scan_param, 0) < 0) {
+ WL_ERR(("Could not set GSCAN scan cfg\n"));
+ err = -EINVAL;
+ }
+
+ kfree(scan_param);
+ return err;
+
+}
+
+static int
+wl_cfgvendor_hotlist_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_hotlist_scan_params_t *hotlist_params;
+ int tmp, tmp1, tmp2, type, j = 0, dummy;
+ const struct nlattr *outer, *inner, *iter;
+ uint8 flush = 0;
+ struct bssid_t *pbssid;
+
+ hotlist_params = (gscan_hotlist_scan_params_t *)kzalloc(len, GFP_KERNEL);
+ if (!hotlist_params) {
+ WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len));
+ return -ENOMEM;
+ }
+
+ hotlist_params->lost_ap_window = GSCAN_LOST_AP_WINDOW_DEFAULT;
+
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS:
+ pbssid = hotlist_params->bssid;
+ nla_for_each_nested(outer, iter, tmp) {
+ nla_for_each_nested(inner, outer, tmp1) {
+ type = nla_type(inner);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_BSSID:
+ memcpy(&(pbssid[j].macaddr),
+ nla_data(inner), ETHER_ADDR_LEN);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_LOW:
+ pbssid[j].rssi_reporting_threshold =
+ (int8) nla_get_u8(inner);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_HIGH:
+ dummy = (int8) nla_get_u8(inner);
+ break;
+ default:
+ WL_ERR(("ATTR unknown %d\n",
+ type));
+ break;
+ }
+ }
+ j++;
+ }
+ hotlist_params->nbssid = j;
+ break;
+ case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
+ flush = nla_get_u8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+ hotlist_params->lost_ap_window = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ break;
+ }
+
+ }
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_GEOFENCE_SCAN_CFG_ID,
+ hotlist_params, flush) < 0) {
+ WL_ERR(("Could not set GSCAN HOTLIST cfg\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+exit:
+ kfree(hotlist_params);
+ return err;
+}
+
+static int
+wl_cfgvendor_set_batch_scan_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, tmp, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_batch_params_t batch_param;
+ const struct nlattr *iter;
+
+ batch_param.mscan = batch_param.bestn = 0;
+ batch_param.buffer_threshold = GSCAN_BATCH_NO_THR_SET;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN:
+ batch_param.bestn = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE:
+ batch_param.mscan = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_REPORT_THRESHOLD:
+ batch_param.buffer_threshold = nla_get_u32(iter);
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ break;
+ }
+ }
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_BATCH_SCAN_CFG_ID,
+ &batch_param, 0) < 0) {
+ WL_ERR(("Could not set batch cfg\n"));
+ err = -EINVAL;
+ return err;
+ }
+
+ return err;
+}
+
+static int
+wl_cfgvendor_significant_change_cfg(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gscan_swc_params_t *significant_params;
+ int tmp, tmp1, tmp2, type, j = 0;
+ const struct nlattr *outer, *inner, *iter;
+ uint8 flush = 0;
+ wl_pfn_significant_bssid_t *bssid;
+
+ significant_params = (gscan_swc_params_t *) kzalloc(len, GFP_KERNEL);
+ if (!significant_params) {
+ WL_ERR(("Cannot Malloc mem to parse config commands size - %d bytes \n", len));
+ return -ENOMEM;
+ }
+
+ nla_for_each_attr(iter, data, len, tmp2) {
+ type = nla_type(iter);
+
+ switch (type) {
+ case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH:
+ flush = nla_get_u8(iter);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE:
+ significant_params->rssi_window = nla_get_u16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
+ significant_params->lost_ap_window = nla_get_u16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_MIN_BREACHING:
+ significant_params->swc_threshold = nla_get_u16(iter);
+ break;
+ case GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS:
+ bssid = significant_params->bssid_elem_list;
+ nla_for_each_nested(outer, iter, tmp) {
+ nla_for_each_nested(inner, outer, tmp1) {
+ switch (nla_type(inner)) {
+ case GSCAN_ATTRIBUTE_BSSID:
+ memcpy(&(bssid[j].macaddr),
+ nla_data(inner),
+ ETHER_ADDR_LEN);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_HIGH:
+ bssid[j].rssi_high_threshold
+ = (int8) nla_get_u8(inner);
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_LOW:
+ bssid[j].rssi_low_threshold
+ = (int8) nla_get_u8(inner);
+ break;
+ default:
+ WL_ERR(("ATTR unknown %d\n",
+ type));
+ break;
+ }
+ }
+ j++;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown type %d\n", type));
+ break;
+ }
+ }
+ significant_params->nbssid = j;
+
+ if (dhd_dev_pno_set_cfg_gscan(bcmcfg_to_prmry_ndev(cfg),
+ DHD_PNO_SIGNIFICANT_SCAN_CFG_ID,
+ significant_params, flush) < 0) {
+ WL_ERR(("Could not set GSCAN significant cfg\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+exit:
+ kfree(significant_params);
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+#ifdef RTT_SUPPORT
+void
+wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
+{
+ struct wireless_dev *wdev = (struct wireless_dev *)ctx;
+ struct wiphy *wiphy;
+ struct sk_buff *skb;
+ uint32 tot_len = NLMSG_DEFAULT_SIZE, entry_len = 0;
+ gfp_t kflags;
+ rtt_report_t *rtt_report = NULL;
+ rtt_result_t *rtt_result = NULL;
+ struct list_head *rtt_list;
+ wiphy = wdev->wiphy;
+
+ WL_DBG(("In\n"));
+ /* Push the data to the skb */
+ if (!rtt_data) {
+ WL_ERR(("rtt_data is NULL\n"));
+ goto exit;
+ }
+ rtt_list = (struct list_head *)rtt_data;
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ /* Alloc the SKB for vendor_event */
+#if defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#else
+ skb = cfg80211_vendor_event_alloc(wiphy, tot_len, GOOGLE_RTT_COMPLETE_EVENT, kflags);
+#endif /* CONFIG_ARCH_MSM && SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC */
+ if (!skb) {
+ WL_ERR(("skb alloc failed"));
+ goto exit;
+ }
+ /* fill in the rtt results on each entry */
+ list_for_each_entry(rtt_result, rtt_list, list) {
+ entry_len = 0;
+ entry_len = sizeof(rtt_report_t);
+ rtt_report = kzalloc(entry_len, kflags);
+ if (!rtt_report) {
+ WL_ERR(("rtt_report alloc failed"));
+ kfree_skb(skb);
+ goto exit;
+ }
+ rtt_report->addr = rtt_result->peer_mac;
+ rtt_report->num_measurement = 1; /* ONE SHOT */
+ rtt_report->status = rtt_result->err_code;
+ rtt_report->type =
+ (rtt_result->TOF_type == TOF_TYPE_ONE_WAY) ? RTT_ONE_WAY: RTT_TWO_WAY;
+ rtt_report->peer = rtt_result->target_info->peer;
+ rtt_report->channel = rtt_result->target_info->channel;
+ rtt_report->rssi = rtt_result->avg_rssi;
+ /* tx_rate */
+ rtt_report->tx_rate = rtt_result->tx_rate;
+ /* RTT */
+ rtt_report->rtt = rtt_result->meanrtt;
+ rtt_report->rtt_sd = rtt_result->sdrtt/10;
+ /* convert to centi meter */
+ if (rtt_result->distance != 0xffffffff)
+ rtt_report->distance = (rtt_result->distance >> 2) * 25;
+ else /* invalid distance */
+ rtt_report->distance = -1;
+ rtt_report->ts = rtt_result->ts;
+ nla_append(skb, entry_len, rtt_report);
+ kfree(rtt_report);
+ }
+ cfg80211_vendor_event(skb, kflags);
+exit:
+ return;
+}
+
+static int
+wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+	const void *data, int len)
+{
+ int err = 0, rem, rem1, rem2, type;
+ rtt_config_params_t rtt_param;
+ rtt_target_info_t* rtt_target = NULL;
+ const struct nlattr *iter, *iter1, *iter2;
+ int8 eabuf[ETHER_ADDR_STR_LEN];
+ int8 chanbuf[CHANSPEC_STR_LEN];
+ int32 feature_set = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ feature_set = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
+
+ WL_DBG(("In\n"));
+ err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
+ if (err < 0) {
+ WL_ERR(("failed to register rtt_noti_callback\n"));
+ goto exit;
+ }
+ memset(&rtt_param, 0, sizeof(rtt_param));
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_CNT:
+ rtt_param.rtt_target_cnt = nla_get_u8(iter);
+ if (rtt_param.rtt_target_cnt > RTT_MAX_TARGET_CNT) {
+ WL_ERR(("exceed max target count : %d\n",
+ rtt_param.rtt_target_cnt));
+ err = BCME_RANGE;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_INFO:
+ rtt_target = rtt_param.target_info;
+ nla_for_each_nested(iter1, iter, rem1) {
+ nla_for_each_nested(iter2, iter1, rem2) {
+ type = nla_type(iter2);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_MAC:
+ memcpy(&rtt_target->addr, nla_data(iter2),
+ ETHER_ADDR_LEN);
+ break;
+ case RTT_ATTRIBUTE_TARGET_TYPE:
+ rtt_target->type = nla_get_u8(iter2);
+ if (!(feature_set & WIFI_FEATURE_D2D_RTT)) {
+ if (rtt_target->type == RTT_TWO_WAY ||
+ rtt_target->type == RTT_INVALID) {
+ WL_ERR(("doesn't support RTT type"
+ " : %d\n",
+ rtt_target->type));
+ err = -EINVAL;
+ goto exit;
+ } else if (rtt_target->type == RTT_AUTO) {
+ rtt_target->type = RTT_ONE_WAY;
+ }
+ } else if (rtt_target->type == RTT_INVALID) {
+ WL_ERR(("doesn't support RTT type"
+ " : %d\n",
+ rtt_target->type));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_PEER:
+ rtt_target->peer = nla_get_u8(iter2);
+ if (rtt_target->peer != RTT_PEER_AP) {
+ WL_ERR(("doesn't support peer type : %d\n",
+ rtt_target->peer));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ case RTT_ATTRIBUTE_TARGET_CHAN:
+ memcpy(&rtt_target->channel, nla_data(iter2),
+ sizeof(rtt_target->channel));
+ break;
+ case RTT_ATTRIBUTE_TARGET_MODE:
+ rtt_target->continuous = nla_get_u8(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_INTERVAL:
+ rtt_target->interval = nla_get_u32(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT:
+ rtt_target->measure_cnt = nla_get_u32(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_PKT:
+ rtt_target->ftm_cnt = nla_get_u32(iter2);
+ break;
+ case RTT_ATTRIBUTE_TARGET_NUM_RETRY:
+ rtt_target->retry_cnt = nla_get_u32(iter2);
+ }
+ }
+ /* convert to chanspec value */
+ rtt_target->chanspec =
+ dhd_rtt_convert_to_chspec(rtt_target->channel);
+ if (rtt_target->chanspec == 0) {
+ WL_ERR(("Channel is not valid \n"));
+ goto exit;
+ }
+ WL_INFORM(("Target addr %s, Channel : %s for RTT \n",
+ bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr,
+ eabuf),
+ wf_chspec_ntoa(rtt_target->chanspec, chanbuf)));
+ rtt_target++;
+ }
+ break;
+ }
+ }
+	WL_DBG(("leave: target_cnt: %d\n", rtt_param.rtt_target_cnt));
+ if (dhd_dev_rtt_set_cfg(bcmcfg_to_prmry_ndev(cfg), &rtt_param) < 0) {
+ WL_ERR(("Could not set RTT configuration\n"));
+ err = -EINVAL;
+ }
+exit:
+ return err;
+}
+
+static int
+wl_cfgvendor_rtt_cancel_config(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0, rem, type, target_cnt = 0;
+ int target_cnt_chk = 0;
+ const struct nlattr *iter;
+ struct ether_addr *mac_list = NULL, *mac_addr = NULL;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ nla_for_each_attr(iter, data, len, rem) {
+ type = nla_type(iter);
+ switch (type) {
+ case RTT_ATTRIBUTE_TARGET_CNT:
+ if (mac_list != NULL) {
+ WL_ERR(("mac_list is not NULL\n"));
+ goto exit;
+ }
+ target_cnt = nla_get_u8(iter);
+ mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN,
+ GFP_KERNEL);
+ if (mac_list == NULL) {
+ WL_ERR(("failed to allocate mem for mac list\n"));
+ goto exit;
+ }
+ mac_addr = &mac_list[0];
+ break;
+ case RTT_ATTRIBUTE_TARGET_MAC:
+ if (mac_addr) {
+ memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN);
+ target_cnt_chk++;
+ if (target_cnt_chk > target_cnt) {
+ WL_ERR(("over target count\n"));
+ goto exit;
+ }
+ break;
+ } else {
+ WL_ERR(("mac_list is NULL\n"));
+ goto exit;
+ }
+ }
+ }
+ if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+ WL_ERR(("Could not cancel RTT configuration\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ if (mac_list) {
+ kfree(mac_list);
+ }
+ return err;
+}
+static int
+wl_cfgvendor_rtt_get_capability(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int len)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ rtt_capabilities_t capability;
+
+ err = dhd_dev_rtt_capability(bcmcfg_to_prmry_ndev(cfg), &capability);
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ goto exit;
+ }
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &capability, sizeof(capability));
+
+ if (unlikely(err)) {
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ }
+exit:
+ return err;
+}
+
+#endif /* RTT_SUPPORT */
+
+static int
+wl_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int ret = 0;
+ int ret_len = 0, payload = 0, msglen;
+ const struct bcm_nlmsg_hdr *nlioc = data;
+ void *buf = NULL, *cur;
+ int maxmsglen = PAGE_SIZE - 0x100;
+ struct sk_buff *reply;
+
+ WL_ERR(("entry: cmd = %d\n", nlioc->cmd));
+
+ len -= sizeof(struct bcm_nlmsg_hdr);
+ ret_len = nlioc->len;
+ if (ret_len > 0 || len > 0) {
+ if (len > DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize input buffer %d\n", len));
+ len = DHD_IOCTL_MAXLEN;
+ }
+ if (ret_len > DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize return buffer %d\n", ret_len));
+ ret_len = DHD_IOCTL_MAXLEN;
+ }
+ payload = max(ret_len, len) + 1;
+ buf = vzalloc(payload);
+ if (!buf) {
+ return -ENOMEM;
+ }
+ memcpy(buf, (void *)nlioc + nlioc->offset, len);
+ *(char *)(buf + len) = '\0';
+ }
+
+ ret = dhd_cfgvendor_priv_string_handler(cfg, wdev, nlioc, buf);
+ if (ret) {
+ WL_ERR(("dhd_cfgvendor returned error %d", ret));
+ vfree(buf);
+ return ret;
+ }
+ cur = buf;
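+	/* Return the ioctl output in chunks of at most maxmsglen bytes,
+	 * one vendor command reply per chunk.
+	 */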
+ while (ret_len > 0) {
+		/* cap each chunk by the remaining reply length, not the request length */
+		msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+ ret_len -= msglen;
+ payload = msglen + sizeof(msglen);
+ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+ if (!reply) {
+ WL_ERR(("Failed to allocate reply msg\n"));
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) ||
+ nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) {
+ kfree_skb(reply);
+ ret = -ENOBUFS;
+ break;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(reply);
+ if (ret) {
+ WL_ERR(("testmode reply failed:%d\n", ret));
+ break;
+ }
+ cur += msglen;
+ }
+
+ return ret;
+}
+
+static int
+wl_cfgvendor_priv_bcm_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int err = 0;
+ int data_len = 0;
+
+ WL_INFORM(("%s: Enter \n", __func__));
+
+ bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+
+ if (strncmp((char *)data, BRCM_VENDOR_SCMD_CAPA, strlen(BRCM_VENDOR_SCMD_CAPA)) == 0) {
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "cap", NULL, 0,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ data_len = strlen(cfg->ioctl_buf);
+ cfg->ioctl_buf[data_len] = '\0';
+ }
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ cfg->ioctl_buf, data_len+1);
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ else
+ WL_INFORM(("Vendor Command reply sent successfully!\n"));
+
+ return err;
+}
+
+#ifdef LINKSTAT_SUPPORT
+#define NUM_RATE 32
+#define NUM_PEER 1
+#define NUM_CHAN 11
+#define HEADER_SIZE sizeof(ver_len)
+static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ static char iovar_buf[WLC_IOCTL_MAXLEN];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int err = 0, i;
+ wifi_iface_stat *iface;
+ wifi_radio_stat *radio;
+ wl_wme_cnt_t *wl_wme_cnt;
+ wl_cnt_v_le10_mcst_t *macstat_cnt;
+ wl_cnt_wlc_t *wlc_cnt;
+ scb_val_t scbval;
+ char *output;
+
+ WL_INFORM(("%s: Enter \n", __func__));
+ RETURN_EIO_IF_NOT_UP(cfg);
+
+ bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+ bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+
+ output = cfg->ioctl_buf;
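+	/* Reply layout in ioctl_buf: radio stats, per-channel stats,
+	 * iface stats, peer info, then per-rate stats.
+	 */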
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
+ return err;
+ }
+ radio = (wifi_radio_stat *)iovar_buf;
+ radio->num_channels = NUM_CHAN;
+ memcpy(output, iovar_buf+HEADER_SIZE, sizeof(wifi_radio_stat)-HEADER_SIZE);
+
+ output += (sizeof(wifi_radio_stat) - HEADER_SIZE);
+ output += (NUM_CHAN*sizeof(wifi_channel_stat));
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+ wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf;
+ iface = (wifi_iface_stat *)output;
+
+ iface->ac[WIFI_AC_VO].ac = WIFI_AC_VO;
+ iface->ac[WIFI_AC_VO].tx_mpdu = wl_wme_cnt->tx[AC_VO].packets;
+ iface->ac[WIFI_AC_VO].rx_mpdu = wl_wme_cnt->rx[AC_VO].packets;
+ iface->ac[WIFI_AC_VO].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VO].packets;
+
+ iface->ac[WIFI_AC_VI].ac = WIFI_AC_VI;
+ iface->ac[WIFI_AC_VI].tx_mpdu = wl_wme_cnt->tx[AC_VI].packets;
+ iface->ac[WIFI_AC_VI].rx_mpdu = wl_wme_cnt->rx[AC_VI].packets;
+ iface->ac[WIFI_AC_VI].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_VI].packets;
+
+ iface->ac[WIFI_AC_BE].ac = WIFI_AC_BE;
+ iface->ac[WIFI_AC_BE].tx_mpdu = wl_wme_cnt->tx[AC_BE].packets;
+ iface->ac[WIFI_AC_BE].rx_mpdu = wl_wme_cnt->rx[AC_BE].packets;
+ iface->ac[WIFI_AC_BE].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BE].packets;
+
+ iface->ac[WIFI_AC_BK].ac = WIFI_AC_BK;
+ iface->ac[WIFI_AC_BK].tx_mpdu = wl_wme_cnt->tx[AC_BK].packets;
+ iface->ac[WIFI_AC_BK].rx_mpdu = wl_wme_cnt->rx[AC_BK].packets;
+ iface->ac[WIFI_AC_BK].mpdu_lost = wl_wme_cnt->tx_failed[WIFI_AC_BK].packets;
+ bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wl_cnt_wlc_t)));
+ return err;
+ }
+
+ /* Translate traditional (ver <= 10) counters struct to new xtlv type struct */
+ err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
+ if (err != BCME_OK) {
+ WL_ERR(("%s wl_cntbuf_to_xtlv_format ERR %d\n", __FUNCTION__, err));
+ return err;
+ }
+
+ if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+ WL_ERR(("%s wlc_cnt NULL!\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ iface->ac[WIFI_AC_BE].retries = wlc_cnt->txretry;
+
+ if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+ ((wl_cnt_info_t *)iovar_buf)->datalen,
+ WL_CNT_XTLV_CNTV_LE10_UCODE, NULL,
+ BCM_XTLV_OPTION_ALIGN32)) == NULL) {
+ macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+ ((wl_cnt_info_t *)iovar_buf)->datalen,
+ WL_CNT_XTLV_LT40_UCODE_V1, NULL,
+ BCM_XTLV_OPTION_ALIGN32);
+ }
+
+ if (macstat_cnt == NULL) {
+		WL_ERR(("%s: macstat_cnt NULL!\n", __FUNCTION__));
+		return BCME_ERROR;
+ }
+
+ iface->beacon_rx = macstat_cnt->rxbeaconmbss;
+
+ err = wldev_get_rssi(bcmcfg_to_prmry_ndev(cfg), &scbval);
+ if (unlikely(err)) {
+ WL_ERR(("get_rssi error (%d)\n", err));
+ return err;
+ }
+ iface->rssi_mgmt = scbval.val;
+
+ iface->num_peers = NUM_PEER;
+ iface->peer_info->num_rate = NUM_RATE;
+
+ bzero(iovar_buf, WLC_IOCTL_MAXLEN);
+ output = (char *)iface + sizeof(wifi_iface_stat) + NUM_PEER*sizeof(wifi_peer_info);
+
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "ratestat", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, NUM_RATE*sizeof(wifi_rate_stat)));
+ return err;
+ }
+	for (i = 0; i < NUM_RATE; i++) {
+		memcpy(output, iovar_buf+HEADER_SIZE+i*sizeof(wifi_rate_stat),
+			sizeof(wifi_rate_stat)-HEADER_SIZE);
+		output += (sizeof(wifi_rate_stat)-HEADER_SIZE);
+	}
+
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ cfg->ioctl_buf,
+ sizeof(wifi_radio_stat)-HEADER_SIZE +
+ NUM_CHAN*sizeof(wifi_channel_stat) +
+ sizeof(wifi_iface_stat)+NUM_PEER*sizeof(wifi_peer_info) +
+ NUM_RATE*(sizeof(wifi_rate_stat)-HEADER_SIZE));
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+
+ return err;
+}
+#endif /* LINKSTAT_SUPPORT */
+
+static const struct wiphy_vendor_command wl_vendor_cmds [] = {
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_string_handler
+ },
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_BCM_STR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_priv_bcm_handler
+ },
+#ifdef GSCAN_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_CAPABILITIES
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_capabilities
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_scan_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_batch_scan_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_initiate_gscan
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_enable_full_scan_result
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_HOTLIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_hotlist_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_significant_change_cfg
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_batch_results
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_gscan_get_channel_list
+ },
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_SET_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_set_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_CANCEL_CONFIG
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_cancel_config
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = RTT_SUBCMD_GETCAPABILITY
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_rtt_get_capability
+ },
+#endif /* RTT_SUPPORT */
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_feature_set
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_get_feature_set_matrix
+ },
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_PNO_RANDOM_MAC_OUI
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_pno_mac_oui
+ },
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = ANDR_WIFI_NODFS_CHANNELS
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_set_nodfs_flag
+ },
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef LINKSTAT_SUPPORT
+ {
+ {
+ .vendor_id = OUI_GOOGLE,
+ .subcmd = LSTATS_SUBCMD_GET_INFO
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = wl_cfgvendor_lstats_get_info
+ },
+#endif /* LINKSTAT_SUPPORT */
+};
+
+static const struct nl80211_vendor_cmd_info wl_vendor_events [] = {
+ { OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC },
+ { OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR },
+#ifdef GSCAN_SUPPORT
+ { OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT },
+ { OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT },
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
+ { OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT },
+#endif /* RTT_SUPPORT */
+#ifdef GSCAN_SUPPORT
+ { OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT },
+ { OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT },
+#endif /* GSCAN_SUPPORT */
+ { OUI_BRCM, BRCM_VENDOR_EVENT_IDSUP_STATUS }
+};
+
+int wl_cfgvendor_attach(struct wiphy *wiphy)
+{
+
+ WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
+ NL80211_CMD_VENDOR));
+
+ wiphy->vendor_commands = wl_vendor_cmds;
+ wiphy->n_vendor_commands = ARRAY_SIZE(wl_vendor_cmds);
+ wiphy->vendor_events = wl_vendor_events;
+ wiphy->n_vendor_events = ARRAY_SIZE(wl_vendor_events);
+
+ return 0;
+}
+
+int wl_cfgvendor_detach(struct wiphy *wiphy)
+{
+ WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
+
+ wiphy->vendor_commands = NULL;
+ wiphy->vendor_events = NULL;
+ wiphy->n_vendor_commands = 0;
+ wiphy->n_vendor_events = 0;
+
+ return 0;
+}
+#endif /* defined(WL_VENDOR_EXT_SUPPORT) */
/*
* Linux cfg80211 Vendor Extension Code
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_cfgvendor.h 455257 2014-02-20 08:10:24Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_cfgvendor.h 605796 2015-12-11 13:45:36Z $
*/
#ifndef _wl_cfgvendor_h_
#define _wl_cfgvendor_h_
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) && !defined(VENDOR_EXT_SUPPORT)
-#define VENDOR_EXT_SUPPORT
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && !VENDOR_EXT_SUPPORT */
+#define OUI_BRCM 0x001018
+#define OUI_GOOGLE 0x001A11
+#define BRCM_VENDOR_SUBCMD_PRIV_STR 1
+#define ATTRIBUTE_U32_LEN (NLA_HDRLEN + 4)
+#define VENDOR_ID_OVERHEAD ATTRIBUTE_U32_LEN
+#define VENDOR_SUBCMD_OVERHEAD ATTRIBUTE_U32_LEN
+#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
+
+#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
+#define SCAN_INDEX_HDR_LEN (NLA_HDRLEN)
+#define SCAN_ID_HDR_LEN ATTRIBUTE_U32_LEN
+#define SCAN_FLAGS_HDR_LEN ATTRIBUTE_U32_LEN
+#define GSCAN_NUM_RESULTS_HDR_LEN ATTRIBUTE_U32_LEN
+#define GSCAN_RESULTS_HDR_LEN (NLA_HDRLEN)
+#define GSCAN_BATCH_RESULT_HDR_LEN (SCAN_INDEX_HDR_LEN + SCAN_ID_HDR_LEN + \
+ SCAN_FLAGS_HDR_LEN + \
+ GSCAN_NUM_RESULTS_HDR_LEN + \
+ GSCAN_RESULTS_HDR_LEN)
+
+#define VENDOR_REPLY_OVERHEAD (VENDOR_ID_OVERHEAD + \
+ VENDOR_SUBCMD_OVERHEAD + \
+ VENDOR_DATA_OVERHEAD)
+
+#define GSCAN_ATTR_SET1 10
+#define GSCAN_ATTR_SET2 20
+#define GSCAN_ATTR_SET3 30
+#define GSCAN_ATTR_SET4 40
+#define GSCAN_ATTR_SET5 50
+#define GSCAN_ATTR_SET6 60
+
+typedef enum {
+ /* don't use 0 as a valid subcommand */
+ VENDOR_NL80211_SUBCMD_UNSPECIFIED,
+
+ /* define all vendor startup commands between 0x0 and 0x0FFF */
+ VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
+ VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF,
+
+ /* define all GScan related commands between 0x1000 and 0x10FF */
+ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
+ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF,
+
+ /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */
+ ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100,
+ ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF,
+
+ /* define all RTT related commands between 0x1100 and 0x11FF */
+ ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
+ ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF,
+
+ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
+ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF,
+
+ ANDROID_NL80211_SUBCMD_TDLS_RANGE_START = 0x1300,
+ ANDROID_NL80211_SUBCMD_TDLS_RANGE_END = 0x13FF,
+ /* This is reserved for future usage */
+
+} ANDROID_VENDOR_SUB_COMMAND;
+
+enum andr_vendor_subcmd {
+ GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
+ GSCAN_SUBCMD_SET_CONFIG,
+ GSCAN_SUBCMD_SET_SCAN_CONFIG,
+ GSCAN_SUBCMD_ENABLE_GSCAN,
+ GSCAN_SUBCMD_GET_SCAN_RESULTS,
+ GSCAN_SUBCMD_SCAN_RESULTS,
+ GSCAN_SUBCMD_SET_HOTLIST,
+ GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG,
+ GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS,
+ GSCAN_SUBCMD_GET_CHANNEL_LIST,
+ /* ANDR_WIFI_XXX although not related to gscan are defined here */
+ ANDR_WIFI_SUBCMD_GET_FEATURE_SET,
+ ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX,
+ ANDR_WIFI_PNO_RANDOM_MAC_OUI,
+ ANDR_WIFI_NODFS_CHANNELS,
+ RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
+ RTT_SUBCMD_CANCEL_CONFIG,
+ RTT_SUBCMD_GETCAPABILITY,
+
+ LSTATS_SUBCMD_GET_INFO = ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START,
+ /* Add more sub commands here */
+ VENDOR_SUBCMD_MAX
+};
+
+enum gscan_attributes {
+ GSCAN_ATTRIBUTE_NUM_BUCKETS = GSCAN_ATTR_SET1,
+ GSCAN_ATTRIBUTE_BASE_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKETS_BAND,
+ GSCAN_ATTRIBUTE_BUCKET_ID,
+ GSCAN_ATTRIBUTE_BUCKET_PERIOD,
+ GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
+ GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
+ GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
+ GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
+ GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
+
+ GSCAN_ATTRIBUTE_ENABLE_FEATURE = GSCAN_ATTR_SET2,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
+ GSCAN_ATTRIBUTE_FLUSH_FEATURE,
+ GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS,
+ GSCAN_ATTRIBUTE_REPORT_EVENTS,
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_NUM_OF_RESULTS = GSCAN_ATTR_SET3,
+ GSCAN_ATTRIBUTE_FLUSH_RESULTS,
+ GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */
+ GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */
+ GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */
+ GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */
+ GSCAN_ATTRIBUTE_NUM_CHANNELS,
+ GSCAN_ATTRIBUTE_CHANNEL_LIST,
-enum wl_vendor_event {
+ /* remaining reserved for additional attributes */
+
+ GSCAN_ATTRIBUTE_SSID = GSCAN_ATTR_SET4,
+ GSCAN_ATTRIBUTE_BSSID,
+ GSCAN_ATTRIBUTE_CHANNEL,
+ GSCAN_ATTRIBUTE_RSSI,
+ GSCAN_ATTRIBUTE_TIMESTAMP,
+ GSCAN_ATTRIBUTE_RTT,
+ GSCAN_ATTRIBUTE_RTTSD,
+
+ /* remaining reserved for additional attributes */
+
+ GSCAN_ATTRIBUTE_HOTLIST_BSSIDS = GSCAN_ATTR_SET5,
+ GSCAN_ATTRIBUTE_RSSI_LOW,
+ GSCAN_ATTRIBUTE_RSSI_HIGH,
+ GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
+ GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
+
+ /* remaining reserved for additional attributes */
+ GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6,
+ GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE,
+ GSCAN_ATTRIBUTE_MIN_BREACHING,
+ GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_BSSIDS,
+ GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH,
+ GSCAN_ATTRIBUTE_MAX
+};
+
+enum gscan_bucket_attributes {
+ GSCAN_ATTRIBUTE_CH_BUCKET_1,
+ GSCAN_ATTRIBUTE_CH_BUCKET_2,
+ GSCAN_ATTRIBUTE_CH_BUCKET_3,
+ GSCAN_ATTRIBUTE_CH_BUCKET_4,
+ GSCAN_ATTRIBUTE_CH_BUCKET_5,
+ GSCAN_ATTRIBUTE_CH_BUCKET_6,
+ GSCAN_ATTRIBUTE_CH_BUCKET_7
+};
+
+enum gscan_ch_attributes {
+ GSCAN_ATTRIBUTE_CH_ID_1,
+ GSCAN_ATTRIBUTE_CH_ID_2,
+ GSCAN_ATTRIBUTE_CH_ID_3,
+ GSCAN_ATTRIBUTE_CH_ID_4,
+ GSCAN_ATTRIBUTE_CH_ID_5,
+ GSCAN_ATTRIBUTE_CH_ID_6,
+ GSCAN_ATTRIBUTE_CH_ID_7
+};
+
+enum rtt_attributes {
+ RTT_ATTRIBUTE_TARGET_CNT,
+ RTT_ATTRIBUTE_TARGET_INFO,
+ RTT_ATTRIBUTE_TARGET_MAC,
+ RTT_ATTRIBUTE_TARGET_TYPE,
+ RTT_ATTRIBUTE_TARGET_PEER,
+ RTT_ATTRIBUTE_TARGET_CHAN,
+ RTT_ATTRIBUTE_TARGET_MODE,
+ RTT_ATTRIBUTE_TARGET_INTERVAL,
+ RTT_ATTRIBUTE_TARGET_NUM_MEASUREMENT,
+ RTT_ATTRIBUTE_TARGET_NUM_PKT,
+ RTT_ATTRIBUTE_TARGET_NUM_RETRY
+};
+
+typedef enum wl_vendor_event {
BRCM_VENDOR_EVENT_UNSPEC,
- BRCM_VENDOR_EVENT_PRIV_STR
+ BRCM_VENDOR_EVENT_PRIV_STR,
+ GOOGLE_GSCAN_SIGNIFICANT_EVENT,
+ GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT,
+ GOOGLE_RTT_COMPLETE_EVENT,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ GOOGLE_GSCAN_GEOFENCE_LOST_EVENT,
+ BRCM_VENDOR_EVENT_IDSUP_STATUS
+} wl_vendor_event_t;
+
+enum andr_wifi_attr {
+ ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
+ ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
+ ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI,
+ ANDR_WIFI_ATTRIBUTE_NODFS_SET
};
+typedef enum wl_vendor_gscan_attribute {
+ ATTR_START_GSCAN,
+ ATTR_STOP_GSCAN,
+ ATTR_SET_SCAN_BATCH_CFG_ID, /* set batch scan params */
+ ATTR_SET_SCAN_GEOFENCE_CFG_ID, /* set list of bssids to track */
+ ATTR_SET_SCAN_SIGNIFICANT_CFG_ID, /* set list of bssids, rssi threshold etc.. */
+ ATTR_SET_SCAN_CFG_ID, /* set common scan config params here */
+ ATTR_GET_GSCAN_CAPABILITIES_ID,
+ /* Add more sub commands here */
+ ATTR_GSCAN_MAX
+} wl_vendor_gscan_attribute_t;
+
+typedef enum gscan_batch_attribute {
+ ATTR_GSCAN_BATCH_BESTN,
+ ATTR_GSCAN_BATCH_MSCAN,
+ ATTR_GSCAN_BATCH_BUFFER_THRESHOLD
+} gscan_batch_attribute_t;
+
+typedef enum gscan_geofence_attribute {
+ ATTR_GSCAN_NUM_HOTLIST_BSSID,
+ ATTR_GSCAN_HOTLIST_BSSID
+} gscan_geofence_attribute_t;
+
+typedef enum gscan_complete_event {
+ WIFI_SCAN_BUFFER_FULL,
+ WIFI_SCAN_COMPLETE
+} gscan_complete_event_t;
+
/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
#define BRCM_VENDOR_SCMD_CAPA "cap"
-#ifdef VENDOR_EXT_SUPPORT
-extern int cfgvendor_attach(struct wiphy *wiphy);
-extern int cfgvendor_detach(struct wiphy *wiphy);
+#if defined(WL_VENDOR_EXT_SUPPORT) || defined(CONFIG_BCMDHD_VENDOR_EXT)
+extern int wl_cfgvendor_attach(struct wiphy *wiphy);
+extern int wl_cfgvendor_detach(struct wiphy *wiphy);
+extern int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
+ struct net_device *dev, int event_id, const void *data, int len);
+extern int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
+ struct net_device *dev, void *data, int len, wl_vendor_event_t event);
#else
-static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; }
-static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; }
+static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy) { return 0; }
+static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { return 0; }
-#endif /* VENDOR_EXT_SUPPORT */
+#endif /* WL_VENDOR_EXT_SUPPORT || CONFIG_BCMDHD_VENDOR_EXT */
#endif /* _wl_cfgvendor_h_ */
* Minimal debug/trace/assert driver definitions for
* Broadcom 802.11 Networking Adapter.
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_dbg.h 472390 2014-04-23 23:32:01Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_dbg.h 519338 2014-12-05 21:23:30Z $
*/
#define WL_TIMESTAMP()
-#if 0 && (VERSION_MAJOR > 9)
-extern int osl_printf(const char *fmt, ...);
-#include <IOKit/apple80211/IO8Log.h>
-#define WL_PRINT(args) do { osl_printf args; } while (0)
-#define RELEASE_PRINT(args) do { WL_PRINT(args); IO8Log args; } while (0)
-#else
#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0)
-#endif
#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
#define _WL_SRSCAN(fmt, ...) EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
#define WL_MCNX(args)
#define WL_PROT(args)
#define WL_PSTA(args)
+#define WL_WFDS(m, b, n)
#define WL_TRF_MGMT(args)
#define WL_L2FILTER(args)
#define WL_MQ(args)
#define WL_TXBF(args)
#define WL_P2PO(args)
-#define WL_NET_DETECT(args)
#define WL_ROAM(args)
#define WL_WNM(args)
#define WL_WSEC(args)
#define WL_WSEC_DUMP(args)
#define WL_PCIE(args)
-#define WL_CHANLOG(w, s, i, j)
+#define WL_TSLOG(w, s, i, j)
+#define WL_FBT(args)
#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
#define WL_TRACE_ON() 0
#define WL_L2FILTER_ON() 0
#define WL_TXBF_ON() 0
#define WL_P2PO_ON() 0
-#define WL_CHANLOG_ON() 0
-#define WL_NET_DETECT_ON() 0
+#define WL_TSLOG_ON() 0
#define WL_WNM_ON() 0
#define WL_PCIE_ON() 0
#define WL_ERROR(args)
#define WL_TRACE(args)
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#ifdef WLMSG_PRHDRS
-#define WL_PRHDRS_MSG(args) WL_PRINT(args)
-#define WL_PRHDRS(i, p, f, t, r, l) wlc_print_hdrs(i, p, f, t, r, l)
-#else
-#define WL_PRHDRS_MSG(args)
-#define WL_PRHDRS(i, p, f, t, r, l)
-#endif
-#ifdef WLMSG_PRPKT
-#define WL_PRPKT(m, b, n) prhex(m, b, n)
-#else
-#define WL_PRPKT(m, b, n)
-#endif
-#ifdef WLMSG_INFORM
-#define WL_INFORM(args) WL_PRINT(args)
-#else
-#define WL_INFORM(args)
-#endif
-#define WL_TMP(args)
-#ifdef WLMSG_OID
-#define WL_OID(args) WL_PRINT(args)
-#else
-#define WL_OID(args)
-#endif
-#define WL_RATE(args)
-#ifdef WLMSG_ASSOC
-#define WL_ASSOC(args) WL_PRINT(args)
-#else
-#define WL_ASSOC(args)
-#endif
-#define WL_PRUSR(m, b, n)
-#ifdef WLMSG_PS
-#define WL_PS(args) WL_PRINT(args)
-#else
-#define WL_PS(args)
-#endif
-#ifdef WLMSG_ROAM
-#define WL_ROAM(args) WL_PRINT(args)
-#else
-#define WL_ROAM(args)
-#endif
-#define WL_PORT(args)
-#define WL_DUAL(args)
-#define WL_REGULATORY(args)
-
-#ifdef WLMSG_MPC
-#define WL_MPC(args) WL_PRINT(args)
-#else
-#define WL_MPC(args)
-#endif
-#define WL_APSTA(args)
-#define WL_APSTA_BCN(args)
-#define WL_APSTA_TX(args)
-#define WL_APSTA_TSF(args)
-#define WL_APSTA_BSSID(args)
-#define WL_BA(args)
-#define WL_MBSS(args)
-#define WL_MODE_SWITCH(args)
-#define WL_PROTO(args)
-
-#define WL_CAC(args)
-#define WL_AMSDU(args)
-#define WL_AMPDU(args)
-#define WL_FFPLD(args)
-#define WL_MCHAN(args)
-
-/* Define WLMSG_DFS automatically for WLTEST builds */
-
-#ifdef WLMSG_DFS
-#define WL_DFS(args) do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0)
-#else /* WLMSG_DFS */
-#define WL_DFS(args)
-#endif /* WLMSG_DFS */
-#define WL_WOWL(args)
-#ifdef WLMSG_SCAN
-#define WL_SCAN(args) WL_PRINT(args)
-#else
-#define WL_SCAN(args)
-#endif
-#define WL_COEX(args)
-#define WL_RTDC(w, s, i, j)
-#define WL_RTDC2(w, s, i, j)
-#define WL_CHANINT(args)
-#ifdef WLMSG_BTA
-#define WL_BTA(args) WL_PRINT(args)
-#else
-#define WL_BTA(args)
-#endif
-#define WL_WMF(args)
-#define WL_P2P(args)
-#define WL_ITFR(args)
-#define WL_TDLS(args)
-#define WL_MCNX(args)
-#define WL_PROT(args)
-#define WL_PSTA(args)
-#define WL_TBTT(args)
-#define WL_TRF_MGMT(args)
-#define WL_L2FILTER(args)
-#define WL_MQ(args)
-#define WL_P2PO(args)
-#define WL_WNM(args)
-#define WL_TXBF(args)
-#define WL_CHANLOG(w, s, i, j)
-#define WL_NET_DETECT(args)
-
-#define WL_ERROR_ON() 0
-#define WL_TRACE_ON() 0
-#ifdef WLMSG_PRHDRS
-#define WL_PRHDRS_ON() 1
-#else
-#define WL_PRHDRS_ON() 0
-#endif
-#ifdef WLMSG_PRPKT
-#define WL_PRPKT_ON() 1
-#else
-#define WL_PRPKT_ON() 0
-#endif
-#ifdef WLMSG_INFORM
-#define WL_INFORM_ON() 1
-#else
-#define WL_INFORM_ON() 0
-#endif
-#ifdef WLMSG_OID
-#define WL_OID_ON() 1
-#else
-#define WL_OID_ON() 0
-#endif
-#define WL_TMP_ON() 0
-#define WL_RATE_ON() 0
-#ifdef WLMSG_ASSOC
-#define WL_ASSOC_ON() 1
-#else
-#define WL_ASSOC_ON() 0
-#endif
-#define WL_PORT_ON() 0
-#ifdef WLMSG_WSEC
-#define WL_WSEC_ON() 1
-#define WL_WSEC_DUMP_ON() 1
-#else
-#define WL_WSEC_ON() 0
-#define WL_WSEC_DUMP_ON() 0
-#endif
-#ifdef WLMSG_MPC
-#define WL_MPC_ON() 1
-#else
-#define WL_MPC_ON() 0
-#endif
-#define WL_REGULATORY_ON() 0
-
-#define WL_APSTA_ON() 0
-#define WL_BA_ON() 0
-#define WL_MBSS_ON() 0
-#define WL_MODE_SWITCH_ON() 0
-#ifdef WLMSG_DFS
-#define WL_DFS_ON() 1
-#else /* WLMSG_DFS */
-#define WL_DFS_ON() 0
-#endif /* WLMSG_DFS */
-#ifdef WLMSG_SCAN
-#define WL_SCAN_ON() 1
-#else
-#define WL_SCAN_ON() 0
-#endif
-#ifdef WLMSG_BTA
-#define WL_BTA_ON() 1
-#else
-#define WL_BTA_ON() 0
-#endif
-#define WL_WMF_ON() 0
-#define WL_P2P_ON() 0
-#define WL_MCHAN_ON() 0
-#define WL_TDLS_ON() 0
-#define WL_MCNX_ON() 0
-#define WL_PROT_ON() 0
-#define WL_TBTT_ON() 0
-#define WL_PWRSEL_ON() 0
-#define WL_L2FILTER_ON() 0
-#define WL_MQ_ON() 0
-#define WL_P2PO_ON() 0
-#define WL_TXBF_ON() 0
-#define WL_CHANLOG_ON() 0
-
-#define WL_AMPDU_UPDN(args)
-#define WL_AMPDU_RX(args)
-#define WL_AMPDU_ERR(args)
-#define WL_AMPDU_TX(args)
-#define WL_AMPDU_CTL(args)
-#define WL_AMPDU_HW(args)
-#define WL_AMPDU_HWTXS(args)
-#define WL_AMPDU_HWDBG(args)
-#define WL_AMPDU_STAT(args)
-#define WL_AMPDU_ERR_ON() 0
-#define WL_AMPDU_HW_ON() 0
-#define WL_AMPDU_HWTXS_ON() 0
-
-#define WL_WNM_ON() 0
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WL_APSTA_UPDN(args)
#define WL_APSTA_RX(args)
#ifdef WLMSG_WSEC
#endif
#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL)
+#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0)
+#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL)
#endif
extern uint32 wl_msg_level;
/*
* Linux Wireless Extensions support
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.c 591286 2015-10-07 11:59:26Z $
*/
#if defined(USE_IW)
#include <linux/if_arp.h>
#include <asm/uaccess.h>
-typedef const struct si_pub si_t;
#include <wlioctl.h>
+#ifdef WL_NAN
+#include <wlioctl_utils.h>
+#endif
#include <wl_android.h>
+typedef const struct si_pub si_t;
/* message levels */
#define WL_ERROR_LEVEL 0x0001
#include <wl_iw.h>
-#ifdef BCMWAPI_WPI
-/* these items should evetually go into wireless.h of the linux system headfile dir */
-#ifndef IW_ENCODE_ALG_SM4
-#define IW_ENCODE_ALG_SM4 0x20
-#endif
-
-#ifndef IW_AUTH_WAPI_ENABLED
-#define IW_AUTH_WAPI_ENABLED 0x20
-#endif
-
-#ifndef IW_AUTH_WAPI_VERSION_1
-#define IW_AUTH_WAPI_VERSION_1 0x00000008
-#endif
-
-#ifndef IW_AUTH_CIPHER_SMS4
-#define IW_AUTH_CIPHER_SMS4 0x00000020
-#endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
-#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
-#endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
-#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
-#endif
-#endif /* BCMWAPI_WPI */
/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
ioc.buf = arg;
ioc.len = len;
- strcpy(ifr.ifr_name, dev->name);
+ strncpy(ifr.ifr_name, dev->name, sizeof(ifr.ifr_name));
+ ifr.ifr_name[sizeof(ifr.ifr_name) - 1] = '\0';
ifr.ifr_data = (caddr_t) &ioc;
fs = get_fs();
error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
return error;
}
-
-#if WIRELESS_EXT > 17
-#endif /* WIRELESS_EXT > 17 */
#endif /* WIRELESS_EXT > 12 */
int
if (strlen(flag) > sizeof(extra))
return -1;
- strcpy(extra, flag);
+ strncpy(extra, flag, sizeof(extra));
+ extra[sizeof(extra) - 1] = '\0';
wrqu.data.length = strlen(extra);
wireless_send_event(dev, cmd, &wrqu, extra);
WL_TRACE(("Send IWEVCUSTOM Event as %s\n", extra));
band[0] = dtoh32(band[0]);
switch (phytype) {
case WLC_PHY_TYPE_A:
- strcpy(cap, "a");
+ strncpy(cap, "a", sizeof(cap));
break;
case WLC_PHY_TYPE_B:
- strcpy(cap, "b");
+ strncpy(cap, "b", sizeof(cap));
break;
- case WLC_PHY_TYPE_LP:
case WLC_PHY_TYPE_G:
if (band[0] >= 2)
- strcpy(cap, "abg");
+ strncpy(cap, "abg", sizeof(cap));
else
- strcpy(cap, "bg");
+ strncpy(cap, "bg", sizeof(cap));
break;
case WLC_PHY_TYPE_N:
if (band[0] >= 2)
- strcpy(cap, "abgn");
+ strncpy(cap, "abgn", sizeof(cap));
else
- strcpy(cap, "bgn");
+ strncpy(cap, "bgn", sizeof(cap));
break;
}
done:
- snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+ (void)snprintf(cwrq->name, IFNAMSIZ, "IEEE 802.11%s", cap);
+
return 0;
}
return error;
if ((error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype))))
return error;
- if (nmode == 1 && ((phytype == WLC_PHY_TYPE_SSN) || (phytype == WLC_PHY_TYPE_LCN) ||
- (phytype == WLC_PHY_TYPE_LCN40))) {
+	if (nmode == 1 && ((phytype == WLC_PHY_TYPE_LCN) ||
+		(phytype == WLC_PHY_TYPE_LCN40))) {
if ((error = dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap)))
return error;
if ((error = dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx)))
}
#endif /* WIRELESS_EXT > 17 */
-#ifdef BCMWAPI_WPI
-static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
- size_t len, int uppercase)
-{
- size_t i;
- char *pos = buf, *end = buf + buf_size;
- int ret;
- if (buf_size == 0)
- return 0;
- for (i = 0; i < len; i++) {
- ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
- data[i]);
- if (ret < 0 || ret >= end - pos) {
- end[-1] = '\0';
- return pos - buf;
- }
- pos += ret;
- }
- end[-1] = '\0';
- return pos - buf;
-}
-
-/**
- * wpa_snprintf_hex - Print data as a hex string into a buffer
- * @buf: Memory area to use as the output buffer
- * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
- * @data: Data to be printed
- * @len: Length of data in bytes
- * Returns: Number of bytes written
- */
-static int
-wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
-{
- return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
-}
-#endif /* BCMWAPI_WPI */
static int
wl_iw_handle_scanresults_ies(char **event_p, char *end,
#if WIRELESS_EXT > 17
struct iw_event iwe;
char *event;
-#ifdef BCMWAPI_WPI
- char *buf;
- int custom_event_len;
-#endif
event = *event_p;
if (bi->ie_length) {
/* look for wpa/rsn ies in the ie list... */
bcm_tlv_t *ie;
- uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ uint8 *ptr = ((uint8 *)bi) + bi->ie_offset;
int ptr_len = bi->ie_length;
/* OSEN IE */
iwe.u.data.length = ie->len + 2;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
}
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr = ((uint8 *)bi) + bi->ie_offset;
if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
iwe.cmd = IWEVGENIE;
iwe.u.data.length = ie->len + 2;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
}
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr = ((uint8 *)bi) + bi->ie_offset;
if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_MDIE_ID))) {
iwe.cmd = IWEVGENIE;
iwe.u.data.length = ie->len + 2;
event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
}
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr = ((uint8 *)bi) + bi->ie_offset;
while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
/* look for WPS IE */
}
}
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+ ptr = ((uint8 *)bi) + bi->ie_offset;
ptr_len = bi->ie_length;
while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
if (ie_is_wpa_ie(((uint8 **)&ie), &ptr, &ptr_len)) {
}
}
-#ifdef BCMWAPI_WPI
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
- ptr_len = bi->ie_length;
-
- while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
- WL_TRACE(("%s: found a WAPI IE...\n", __FUNCTION__));
-#ifdef WAPI_IE_USE_GENIE
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = ie->len + 2;
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
-#else /* using CUSTOM event */
- iwe.cmd = IWEVCUSTOM;
- custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
- iwe.u.data.length = custom_event_len;
-
- buf = kmalloc(custom_event_len+1, GFP_KERNEL);
- if (buf == NULL)
- {
- WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
- break;
- }
-
- memcpy(buf, "wapi_ie=", 8);
- wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
- wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
- wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
- kfree(buf);
-#endif /* WAPI_IE_USE_GENIE */
- break;
- }
-#endif /* BCMWAPI_WPI */
- *event_p = event;
+ *event_p = event;
}
#endif /* WIRELESS_EXT > 17 */
return 0;
}
+
static int
wl_iw_get_scan(
struct net_device *dev,
/* Channel */
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = wf_channel2mhz(channel,
+
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
(CHSPEC_IS2G(bi->chanspec)) ?
WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
iwe.u.freq.e = 6;
iwe.u.qual.noise = 0x100 + bi->phy_noise;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
- /* WPA, WPA2, WPS, WAPI IEs */
wl_iw_handle_scanresults_ies(&event, end, info, bi);
/* Encryption */
/* Channel */
iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = wf_channel2mhz(channel,
+ iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
(CHSPEC_IS2G(bi->chanspec)) ?
WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
iwe.u.freq.e = 6;
iwe.u.qual.noise = 0x100 + bi->phy_noise;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
- /* WPA, WPA2, WPS, WAPI IEs */
wl_iw_handle_scanresults_ies(&event, end, info, bi);
/* Encryption */
char *extra
)
{
-#if defined(BCMWAPI_WPI)
- uchar buf[WLC_IOCTL_SMLEN] = {0};
- uchar *p = buf;
- int wapi_ie_size;
-
- WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
-
- if (extra[0] == DOT11_MNG_WAPI_ID)
- {
- wapi_ie_size = iwp->length;
- memcpy(p, extra, iwp->length);
- dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
- }
- else
-#endif
dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
return 0;
/* copy the raw hex key to the appropriate format */
for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
- sprintf(charptr, "%02x", iwe->key[j]);
+ (void)snprintf(charptr, 3, "%02x", iwe->key[j]);
charptr += 2;
}
len = strlen(keystring);
bcopy(keystring, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
+ WL_WSEC(("%s: set key %s\n", __FUNCTION__, keystring));
error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (error)
+ if (error) {
+ WL_ERROR(("%s: WLC_SET_WSEC_PMK error %d\n", __FUNCTION__, error));
return error;
+ }
}
else {
case IW_ENCODE_ALG_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
break;
-#ifdef BCMWAPI_WPI
- case IW_ENCODE_ALG_SM4:
- key.algo = CRYPTO_ALGO_SMS4;
- if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- key.flags &= ~WL_PRIMARY_KEY;
- }
- break;
-#endif
default:
break;
}
}
-#if WIRELESS_EXT > 17
struct {
pmkid_list_t pmkids;
pmkid_t foo[MAXPMKID-1];
dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
return 0;
}
-#endif /* WIRELESS_EXT > 17 */
static int
wl_iw_get_encodeext(
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
-#ifdef BCMWAPI_WPI
- else if (paramval & IW_AUTH_WAPI_VERSION_1)
- val = WAPI_AUTH_UNSPECIFIED;
-#endif
WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
iw->gwsec = paramval;
}
- if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
+ if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
+ WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
return error;
+ }
+ WL_WSEC(("%s: get wsec=0x%x\n", __FUNCTION__, val));
cipher_combined = iw->gwsec | iw->pwsec;
val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
val |= TKIP_ENABLED;
if (cipher_combined & IW_AUTH_CIPHER_CCMP)
val |= AES_ENABLED;
-#ifdef BCMWAPI_WPI
- val &= ~SMS4_ENABLED;
- if (cipher_combined & IW_AUTH_CIPHER_SMS4)
- val |= SMS4_ENABLED;
-#endif
if (iw->privacy_invoked && !val) {
WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
}
}
- if ((error = dev_wlc_intvar_set(dev, "wsec", val)))
+ WL_WSEC(("%s: set wsec=0x%x\n", __FUNCTION__, val));
+ if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
+ WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
return error;
+ }
/* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
* handshake.
*/
if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
+ WL_WSEC(("%s: get fbt_cap=0x%x\n", __FUNCTION__, fbt_cap));
if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
- if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1)))
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) {
+ WL_ERROR(("%s: sup_wpa 1 error %d\n", __FUNCTION__, error));
return error;
+ }
}
else if (val == 0) {
- if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0)))
+ if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) {
+ WL_ERROR(("%s: sup_wpa 0 error %d\n", __FUNCTION__, error));
return error;
+ }
}
}
}
}
case IW_AUTH_KEY_MGMT:
- if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val)))
+ if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
+ WL_ERROR(("%s: wpa_auth error %d\n", __FUNCTION__, error));
return error;
+ }
+ WL_WSEC(("%s: get wpa_auth to %d\n", __FUNCTION__, val));
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
val |= WPA2_AUTH_FT;
}
-#ifdef BCMWAPI_WPI
- if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
- val = WAPI_AUTH_UNSPECIFIED;
-#endif
WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
#endif /* WIRELESS_EXT > 17 */
-#ifdef BCMWAPI_WPI
-
- case IW_AUTH_WAPI_ENABLED:
- if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
- return error;
- if (paramval) {
- val |= SMS4_ENABLED;
- if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
- WL_ERROR(("%s: setting wsec to 0x%0x returned error %d\n",
- __FUNCTION__, val, error));
- return error;
- }
- if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
- WL_ERROR(("%s: setting wpa_auth(%d) returned %d\n",
- __FUNCTION__, WAPI_AUTH_UNSPECIFIED,
- error));
- return error;
- }
- }
-
- break;
-
-#endif /* BCMWAPI_WPI */
default:
break;
WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
WL_IW_SET_VLANMODE,
WL_IW_SET_PM,
-#if WIRELESS_EXT > 17
-#endif /* WIRELESS_EXT > 17 */
WL_IW_SET_LAST
};
wl_iw_set_leddc,
wl_iw_set_vlanmode,
wl_iw_set_pm,
-#if WIRELESS_EXT > 17
-#endif /* WIRELESS_EXT > 17 */
NULL
};
0,
"set_pm"
},
-#if WIRELESS_EXT > 17
-#endif /* WIRELESS_EXT > 17 */
{ 0, 0, 0, { 0 } }
};
.num_standard = ARRAYSIZE(wl_iw_handler),
.num_private = ARRAY_SIZE(wl_iw_priv_handler),
.num_private_args = ARRAY_SIZE(wl_iw_priv_args),
- .standard = (iw_handler *) wl_iw_handler,
+ .standard = (const iw_handler *) wl_iw_handler,
.private = wl_iw_priv_handler,
.private_args = wl_iw_priv_args,
#if WIRELESS_EXT >= 19
/* If found, generate a connection failure string and return TRUE */
if (cause) {
memset(stringBuf, 0, buflen);
- snprintf(stringBuf, buflen, "%s %s %02d %02d",
- name, cause, status, reason);
+ (void)snprintf(stringBuf, buflen, "%s %s %02d %02d", name, cause, status, reason);
WL_TRACE(("Connection status: %s\n", stringBuf));
return TRUE;
} else {
break;
case WLC_E_LINK:
- case WLC_E_NDIS_LINK:
cmd = SIOCGIWAP;
wrqu.data.length = strlen(extra);
if (!(flags & WLC_EVENT_MSG_LINK)) {
#endif /* WIRELESS_EXT > 13 */
}
+#ifdef WL_NAN
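+/*
+ * XTLV unpack callback for the "counters" iovar: fills the struct iw_statistics
+ * passed in via ctx from the WLC counters block and traces a few MAC counters.
+ */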
+static int wl_iw_get_wireless_stats_cbfn(void *ctx, uint8 *data, uint16 type, uint16 len)
+{
+ struct iw_statistics *wstats = ctx;
+ int res = BCME_OK;
+
+ switch (type) {
+ case WL_CNT_XTLV_WLC: {
+ wl_cnt_wlc_t *cnt = (wl_cnt_wlc_t *)data;
+ if (len > sizeof(wl_cnt_wlc_t)) {
+ printf("counter structure length invalid! %d > %d\n",
+ len, (int)sizeof(wl_cnt_wlc_t));
+ }
+ wstats->discard.nwid = 0;
+ wstats->discard.code = dtoh32(cnt->rxundec);
+ wstats->discard.fragment = dtoh32(cnt->rxfragerr);
+ wstats->discard.retries = dtoh32(cnt->txfail);
+ wstats->discard.misc = dtoh32(cnt->rxrunt) + dtoh32(cnt->rxgiant);
+ wstats->miss.beacon = 0;
+ WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+ dtoh32(cnt->txframe), dtoh32(cnt->txbyte)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n",
+ dtoh32(cnt->rxundec)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n",
+ dtoh32(cnt->txfail)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
+ dtoh32(cnt->rxfragerr)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n",
+ dtoh32(cnt->rxrunt)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n",
+ dtoh32(cnt->rxgiant)));
+ break;
+ }
+ case WL_CNT_XTLV_CNTV_LE10_UCODE:
+ case WL_CNT_XTLV_LT40_UCODE_V1:
+ case WL_CNT_XTLV_GE40_UCODE_V1:
+ {
+ /* Offsets of rxfrmtoolong and rxbadplcp are the same in
+ * wl_cnt_v_le10_mcst_t, wl_cnt_lt40mcst_v1_t, and wl_cnt_ge40mcst_v1_t.
+ * So we can just cast to wl_cnt_v_le10_mcst_t here.
+ */
+ wl_cnt_v_le10_mcst_t *cnt = (wl_cnt_v_le10_mcst_t *)data;
+ if (len != WL_CNT_MCST_STRUCT_SZ) {
+ printf("counter structure length mismatch! %d != %d\n",
+ len, WL_CNT_MCST_STRUCT_SZ);
+ }
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
+ dtoh32(cnt->rxfrmtoolong)));
+ WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
+ dtoh32(cnt->rxbadplcp)));
+ BCM_REFERENCE(cnt);
+ break;
+ }
+ default:
+ WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type));
+ break;
+ }
+ return res;
+}
+#endif
+
int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats)
{
int res = 0;
- wl_cnt_t cnt;
int phy_noise;
int rssi;
scb_val_t scb_val;
+#if WIRELESS_EXT > 11
+ char *cntbuf = NULL;
+ wl_cnt_info_t *cntinfo;
+ uint16 ver;
+ uint32 corerev = 0;
+#endif /* WIRELESS_EXT > 11 */
phy_noise = 0;
if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise))))
#endif /* WIRELESS_EXT > 18 */
#if WIRELESS_EXT > 11
- WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n *****", (int)sizeof(wl_cnt_t)));
+	WL_TRACE(("wl_iw_get_wireless_stats counters=%d *****\n", WL_CNTBUF_MAX_SIZE));
+
+ if (WL_CNTBUF_MAX_SIZE > MAX_WLIW_IOCTL_LEN)
+ {
+		WL_ERROR(("wl_iw_get_wireless_stats buffer too short %d < %d\n",
+			MAX_WLIW_IOCTL_LEN, WL_CNTBUF_MAX_SIZE));
+ res = BCME_BUFTOOSHORT;
+ goto done;
+ }
+
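+	/* The counters result can be up to WL_CNTBUF_MAX_SIZE bytes, too large
+	 * for the stack, so use a heap buffer.
+	 */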
+ cntbuf = kmalloc(WL_CNTBUF_MAX_SIZE, GFP_KERNEL);
+ if (!cntbuf) {
+ res = BCME_NOMEM;
+ goto done;
+ }
- memset(&cnt, 0, sizeof(wl_cnt_t));
- res = dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
+ memset(cntbuf, 0, WL_CNTBUF_MAX_SIZE);
+ res = dev_wlc_bufvar_get(dev, "counters", cntbuf, WL_CNTBUF_MAX_SIZE);
if (res)
{
WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d ****** \n", res));
goto done;
}
- cnt.version = dtoh16(cnt.version);
- if (cnt.version != WL_CNT_T_VERSION) {
+ cntinfo = (wl_cnt_info_t *)cntbuf;
+ cntinfo->version = dtoh16(cntinfo->version);
+ cntinfo->datalen = dtoh16(cntinfo->datalen);
+ ver = cntinfo->version;
+ if (ver > WL_CNT_T_VERSION) {
WL_TRACE(("\tIncorrect version of counters struct: expected %d; got %d\n",
- WL_CNT_T_VERSION, cnt.version));
+ WL_CNT_T_VERSION, ver));
+ res = BCME_VERSION;
goto done;
}
- wstats->discard.nwid = 0;
- wstats->discard.code = dtoh32(cnt.rxundec);
- wstats->discard.fragment = dtoh32(cnt.rxfragerr);
- wstats->discard.retries = dtoh32(cnt.txfail);
- wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
- wstats->miss.beacon = 0;
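+	/*
+	 * Counters version 11 predates the self-describing XTLV layout, so the
+	 * core revision is fetched here for wl_cntbuf_to_xtlv_format() below.
+	 */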
+ if (ver == WL_CNT_VERSION_11) {
+ wlc_rev_info_t revinfo;
+ memset(&revinfo, 0, sizeof(revinfo));
+ res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (res) {
+ WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res));
+ goto done;
+ }
+ corerev = dtoh32(revinfo.corerev);
+ }
- WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
- dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n", dtoh32(cnt.rxfrmtoolong)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n", dtoh32(cnt.rxbadplcp)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n", dtoh32(cnt.rxundec)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n", dtoh32(cnt.rxfragerr)));
- WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n", dtoh32(cnt.txfail)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n", dtoh32(cnt.rxrunt)));
- WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n", dtoh32(cnt.rxgiant)));
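+	/* Normalize the counters buffer to XTLV format, then unpack it through
+	 * the per-type callback above to fill in wstats.
+	 */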
+#ifdef WL_NAN
+ res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, WL_CNTBUF_MAX_SIZE, corerev);
+ if (res) {
+ WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res));
+ goto done;
+ }
+ if ((res = bcm_unpack_xtlv_buf(wstats, cntinfo->data, cntinfo->datalen,
+ BCM_XTLV_OPTION_ALIGN32, wl_iw_get_wireless_stats_cbfn))) {
+ goto done;
+ }
+#endif
#endif /* WIRELESS_EXT > 11 */
done:
+#if WIRELESS_EXT > 11
+ if (cntbuf) {
+ kfree(cntbuf);
+ }
+#endif /* WIRELESS_EXT > 11 */
return res;
}
/*
* Linux Wireless Extensions support
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_iw.h 488316 2014-06-30 15:22:21Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_iw.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _wl_iw_h_
#define TXPOWER_SET_CMD "TXPOWER"
#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
-#define MACSTR "%02X:%02X:%02X:%02X:%02X:%02X"
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
/* Structure to keep global parameters */
typedef struct wl_iw_extra_params {
/*
* Broadcom Dongle Host Driver (DHD), Linux monitor network interface
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wl_linux_mon.c 467328 2014-04-03 01:23:40Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_linux_mon.c 514727 2014-11-12 03:02:48Z $
*/
#include <osl.h>
--- /dev/null
+/*
+ * Linux roam cache
+ *
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wl_roam.c 599089 2015-11-12 10:41:33Z $
+ */
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wldev_common.c 504503 2014-09-24 11:28:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wldev_common.c 585478 2015-09-10 13:33:58Z $
*/
#include <osl.h>
#define WLDEV_ERROR(args) \
do { \
- printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \
+ printk(KERN_ERR "WLDEV-ERROR) "); \
+ printk args; \
+ } while (0)
+
+#define WLDEV_INFO(args) \
+ do { \
+ printk(KERN_INFO "WLDEV-INFO) "); \
printk args; \
} while (0)
* wl_iw, wl_cfg80211 and wl_cfgp2p
*/
static s32 wldev_mkiovar(
- s8 *iovar_name, s8 *param, s32 paramlen,
+ const s8 *iovar_name, s8 *param, s32 paramlen,
s8 *iovar_buf, u32 buflen)
{
s32 iolen = 0;
u32 iolen;
if (bssidx == 0) {
- return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
- (s8 *) iovar_buf, buflen);
+ return wldev_mkiovar(iovar_name, param, paramlen,
+ iovar_buf, buflen);
}
	prefixlen = (u32) strlen(prefix); /* length of bsscfg prefix */
}
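+/*
+ * The RSSI is now returned raw in the caller-supplied scb_val_t; callers do the
+ * bzero()/dtoh32() themselves (and can fill scb_val->ea to query a specific STA).
+ */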
int wldev_get_rssi(
- struct net_device *dev, int *prssi)
+ struct net_device *dev, scb_val_t *scb_val)
{
- scb_val_t scb_val;
int error;
- if (!prssi)
+ if (!scb_val)
return -ENOMEM;
- bzero(&scb_val, sizeof(scb_val_t));
- error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
+ error = wldev_ioctl(dev, WLC_GET_RSSI, scb_val, sizeof(scb_val_t), 0);
if (unlikely(error))
return error;
- *prssi = dtoh32(scb_val.val);
return error;
}
}
return error;
}
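+/* Query the current data rate via the WLC_GET_RATE ioctl, byte-swapped to host order */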
+int wldev_get_datarate(struct net_device *dev, int *datarate)
+{
+ int error = 0;
+
+ error = wldev_ioctl(dev, WLC_GET_RATE, datarate, sizeof(int), false);
+ if (error) {
+ return -1;
+ } else {
+ *datarate = dtoh32(*datarate);
+ }
+
+ return error;
+}
+
+extern chanspec_t
+wl_chspec_driver_to_host(chanspec_t chanspec);
+#define WL_EXTRA_BUF_MAX 2048
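+/*
+ * Derive a PHY mode string ("a", "bg", "n", "ac", "n|ac") for the associated
+ * BSS from its chanspec band/bandwidth and its n/vht capability bits.
+ */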
+int wldev_get_mode(
+ struct net_device *dev, uint8 *cap)
+{
+ int error = 0;
+ int chanspec = 0;
+ uint16 band = 0;
+ uint16 bandwidth = 0;
+ wl_bss_info_t *bss = NULL;
+ char* buf = kmalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ *(u32*) buf = htod32(WL_EXTRA_BUF_MAX);
+ error = wldev_ioctl(dev, WLC_GET_BSS_INFO, (void*)buf, WL_EXTRA_BUF_MAX, false);
+	if (error) {
+		WLDEV_ERROR(("%s:failed:%d\n", __FUNCTION__, error));
+		kfree(buf);
+		return -1;
+	}
+ bss = (struct wl_bss_info *)(buf + 4);
+ chanspec = wl_chspec_driver_to_host(bss->chanspec);
+
+ band = chanspec & WL_CHANSPEC_BAND_MASK;
+ bandwidth = chanspec & WL_CHANSPEC_BW_MASK;
+
+ if (band == WL_CHANSPEC_BAND_2G) {
+ if (bss->n_cap)
+ strcpy(cap, "n");
+ else
+ strcpy(cap, "bg");
+ } else if (band == WL_CHANSPEC_BAND_5G) {
+ if (bandwidth == WL_CHANSPEC_BW_80)
+ strcpy(cap, "ac");
+ else if ((bandwidth == WL_CHANSPEC_BW_40) || (bandwidth == WL_CHANSPEC_BW_20)) {
+ if ((bss->nbss_cap & 0xf00) && (bss->n_cap))
+ strcpy(cap, "n|ac");
+ else if (bss->n_cap)
+ strcpy(cap, "n");
+ else if (bss->vht_cap)
+ strcpy(cap, "ac");
+ else
+ strcpy(cap, "a");
+		} else {
+			WLDEV_ERROR(("%s:Mode get failed\n", __FUNCTION__));
+			kfree(buf);
+			return -1;
+		}
+	}
+	kfree(buf);
+	return error;
+}
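+/*
+ * Usage sketch (hypothetical caller; cap must be large enough for the longest
+ * string written above, "n|ac", plus the terminating NUL):
+ *
+ *	uint8 cap[8] = {0};
+ *	if (wldev_get_mode(ndev, cap) == 0)
+ *		printf("associated mode: 802.11%s\n", cap);
+ */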
int wldev_set_country(
- struct net_device *dev, char *country_code, bool notify, bool user_enforced)
+ struct net_device *dev, char *country_code, bool notify, bool user_enforced, int revinfo)
{
int error = -1;
wl_country_t cspec = {{0}, 0, {0}};
}
if ((error < 0) ||
- (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) {
+ dhd_force_country_change(dev) ||
+ (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) {
if (user_enforced) {
bzero(&scbval, sizeof(scb_val_t));
}
}
- cspec.rev = -1;
+ cspec.rev = revinfo;
memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
- dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+	error = dhd_conf_get_country_from_config(dhd_get_pub(dev), &cspec);
+	if (error)
+		dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
smbuf, sizeof(smbuf), NULL);
if (error < 0) {
dhd_conf_fix_country(dhd_get_pub(dev));
dhd_conf_get_country(dhd_get_pub(dev), &cspec);
dhd_bus_country_set(dev, &cspec, notify);
- WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
- __FUNCTION__, country_code, cspec.ccode, cspec.rev));
+ printf("%s: set country for %s as %s rev %d\n",
+ __FUNCTION__, country_code, cspec.ccode, cspec.rev);
}
return 0;
}
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * $Copyright Open Broadcom Corporation$
+ * Copyright (C) 1999-2016, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: wldev_common.h 504503 2014-09-24 11:28:56Z $
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wldev_common.h 556083 2015-05-12 14:03:00Z $
*/
#ifndef __WLDEV_COMMON_H__
#define __WLDEV_COMMON_H__
extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
wl_country_t *cspec);
extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
+extern bool dhd_force_country_change(struct net_device *dev);
extern void dhd_bus_band_set(struct net_device *dev, uint band);
extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,
- bool user_enforced);
+ bool user_enforced, int revinfo);
extern int net_os_wake_lock(struct net_device *dev);
extern int net_os_wake_unlock(struct net_device *dev);
extern int net_os_wake_lock_timeout(struct net_device *dev);
extern int net_os_set_dtim_skip(struct net_device *dev, int val);
extern int net_os_set_suspend_disable(struct net_device *dev, int val);
extern int net_os_set_suspend(struct net_device *dev, int val, int force);
-extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid,
+extern int wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
int max, int *bytes_left);
/* Get the link speed from dongle, speed is in kpbs */
int wldev_get_link_speed(struct net_device *dev, int *plink_speed);
-int wldev_get_rssi(struct net_device *dev, int *prssi);
+int wldev_get_rssi(struct net_device *dev, scb_val_t *prssi);
int wldev_get_ssid(struct net_device *dev, wlc_ssid_t *pssid);
int wldev_get_band(struct net_device *dev, uint *pband);
-
+int wldev_get_mode(struct net_device *dev, uint8 *pband);
+int wldev_get_datarate(struct net_device *dev, int *datarate);
int wldev_set_band(struct net_device *dev, uint band);
-#if defined(CUSTOM_PLATFORM_NV_TEGRA)
-int wldev_miracast_tuning(struct net_device *dev, char *command, int total_len);
-int wldev_get_assoc_resp_ie(struct net_device *dev, char *command, int total_len);
-int wldev_get_rx_rate_stats(struct net_device *dev, char *command, int total_len);
-#endif /* defined(CUSTOM_PLATFORM_NV_TEGRA) */
-
#endif /* __WLDEV_COMMON_H__ */